repo_name
string
path
string
copies
string
size
string
content
string
license
string
rharshit/android_kernel_mediatek_sprout
drivers/leds/dell-led.c
7997
4349
/* * dell_led.c - Dell LED Driver * * Copyright (C) 2010 Dell Inc. * Louis Davis <louis_davis@dell.com> * Jim Dailey <jim_dailey@dell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as * published by the Free Software Foundation. * */ #include <linux/acpi.h> #include <linux/leds.h> #include <linux/slab.h> #include <linux/module.h> MODULE_AUTHOR("Louis Davis/Jim Dailey"); MODULE_DESCRIPTION("Dell LED Control Driver"); MODULE_LICENSE("GPL"); #define DELL_LED_BIOS_GUID "F6E4FE6E-909D-47cb-8BAB-C9F6F2F8D396" MODULE_ALIAS("wmi:" DELL_LED_BIOS_GUID); /* Error Result Codes: */ #define INVALID_DEVICE_ID 250 #define INVALID_PARAMETER 251 #define INVALID_BUFFER 252 #define INTERFACE_ERROR 253 #define UNSUPPORTED_COMMAND 254 #define UNSPECIFIED_ERROR 255 /* Device ID */ #define DEVICE_ID_PANEL_BACK 1 /* LED Commands */ #define CMD_LED_ON 16 #define CMD_LED_OFF 17 #define CMD_LED_BLINK 18 struct bios_args { unsigned char length; unsigned char result_code; unsigned char device_id; unsigned char command; unsigned char on_time; unsigned char off_time; }; static int dell_led_perform_fn(u8 length, u8 result_code, u8 device_id, u8 command, u8 on_time, u8 off_time) { struct bios_args *bios_return; u8 return_code; union acpi_object *obj; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer input; acpi_status status; struct bios_args args; args.length = length; args.result_code = result_code; args.device_id = device_id; args.command = command; args.on_time = on_time; args.off_time = off_time; input.length = sizeof(struct bios_args); input.pointer = &args; status = wmi_evaluate_method(DELL_LED_BIOS_GUID, 1, 1, &input, &output); if (ACPI_FAILURE(status)) return status; obj = output.pointer; if (!obj) return -EINVAL; else if (obj->type != ACPI_TYPE_BUFFER) { kfree(obj); return -EINVAL; } bios_return = ((struct bios_args *)obj->buffer.pointer); return_code = 
bios_return->result_code; kfree(obj); return return_code; } static int led_on(void) { return dell_led_perform_fn(3, /* Length of command */ INTERFACE_ERROR, /* Init to INTERFACE_ERROR */ DEVICE_ID_PANEL_BACK, /* Device ID */ CMD_LED_ON, /* Command */ 0, /* not used */ 0); /* not used */ } static int led_off(void) { return dell_led_perform_fn(3, /* Length of command */ INTERFACE_ERROR, /* Init to INTERFACE_ERROR */ DEVICE_ID_PANEL_BACK, /* Device ID */ CMD_LED_OFF, /* Command */ 0, /* not used */ 0); /* not used */ } static int led_blink(unsigned char on_eighths, unsigned char off_eighths) { return dell_led_perform_fn(5, /* Length of command */ INTERFACE_ERROR, /* Init to INTERFACE_ERROR */ DEVICE_ID_PANEL_BACK, /* Device ID */ CMD_LED_BLINK, /* Command */ on_eighths, /* blink on in eigths of a second */ off_eighths); /* blink off in eights of a second */ } static void dell_led_set(struct led_classdev *led_cdev, enum led_brightness value) { if (value == LED_OFF) led_off(); else led_on(); } static int dell_led_blink(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { unsigned long on_eighths; unsigned long off_eighths; /* The Dell LED delay is based on 125ms intervals. Need to round up to next interval. 
*/ on_eighths = (*delay_on + 124) / 125; if (0 == on_eighths) on_eighths = 1; if (on_eighths > 255) on_eighths = 255; *delay_on = on_eighths * 125; off_eighths = (*delay_off + 124) / 125; if (0 == off_eighths) off_eighths = 1; if (off_eighths > 255) off_eighths = 255; *delay_off = off_eighths * 125; led_blink(on_eighths, off_eighths); return 0; } static struct led_classdev dell_led = { .name = "dell::lid", .brightness = LED_OFF, .max_brightness = 1, .brightness_set = dell_led_set, .blink_set = dell_led_blink, .flags = LED_CORE_SUSPENDRESUME, }; static int __init dell_led_init(void) { int error = 0; if (!wmi_has_guid(DELL_LED_BIOS_GUID)) return -ENODEV; error = led_off(); if (error != 0) return -ENODEV; return led_classdev_register(NULL, &dell_led); } static void __exit dell_led_exit(void) { led_classdev_unregister(&dell_led); led_off(); } module_init(dell_led_init); module_exit(dell_led_exit);
gpl-2.0
moddingg33k/android_kernel_google
arch/mips/txx9/rbtx4927/prom.c
9533
1725
/* * rbtx4927 specific prom routines * * Author: MontaVista Software, Inc. * source@mvista.com * * Copyright 2001-2002 MontaVista Software Inc. * * Copyright (C) 2004 MontaVista Software Inc. * Author: Manish Lachwani, mlachwani@mvista.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <asm/bootinfo.h> #include <asm/txx9/generic.h> #include <asm/txx9/rbtx4927.h> void __init rbtx4927_prom_init(void) { add_memory_region(0, tx4927_get_mem_size(), BOOT_MEM_RAM); txx9_sio_putchar_init(TX4927_SIO_REG(0) & 0xfffffffffULL); }
gpl-2.0
CyanogenMod/android_kernel_sony_tianchi
arch/mips/cobalt/lcd.c
13885
1549
/* * Registration of Cobalt LCD platform device. * * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/errno.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> static struct resource cobalt_lcd_resource __initdata = { .start = 0x1f000000, .end = 0x1f00001f, .flags = IORESOURCE_MEM, }; static __init int cobalt_lcd_add(void) { struct platform_device *pdev; int retval; pdev = platform_device_alloc("cobalt-lcd", -1); if (!pdev) return -ENOMEM; retval = platform_device_add_resources(pdev, &cobalt_lcd_resource, 1); if (retval) goto err_free_device; retval = platform_device_add(pdev); if (retval) goto err_free_device; return 0; err_free_device: platform_device_put(pdev); return retval; } device_initcall(cobalt_lcd_add);
gpl-2.0
kaber/netlink-mmap
drivers/target/target_core_sbc.c
62
16132
/* * SCSI Block Commands (SBC) parsing and emulation. * * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. * Copyright (c) 2005, 2006, 2007 SBE, Inc. * Copyright (c) 2007-2010 Rising Tide Systems * Copyright (c) 2008-2010 Linux-iSCSI.org * * Nicholas A. Bellinger <nab@kernel.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/ratelimit.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> #include <target/target_core_fabric.h> #include "target_core_internal.h" #include "target_core_ua.h" static int sbc_emulate_readcapacity(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; unsigned char *buf; unsigned long long blocks_long = dev->transport->get_blocks(dev); u32 blocks; if (blocks_long >= 0x00000000ffffffff) blocks = 0xffffffff; else blocks = (u32)blocks_long; buf = transport_kmap_data_sg(cmd); buf[0] = (blocks >> 24) & 0xff; buf[1] = (blocks >> 16) & 0xff; buf[2] = (blocks >> 8) & 0xff; buf[3] = blocks & 0xff; buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 
0xff; transport_kunmap_data_sg(cmd); target_complete_cmd(cmd, GOOD); return 0; } static int sbc_emulate_readcapacity_16(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; unsigned char *buf; unsigned long long blocks = dev->transport->get_blocks(dev); buf = transport_kmap_data_sg(cmd); buf[0] = (blocks >> 56) & 0xff; buf[1] = (blocks >> 48) & 0xff; buf[2] = (blocks >> 40) & 0xff; buf[3] = (blocks >> 32) & 0xff; buf[4] = (blocks >> 24) & 0xff; buf[5] = (blocks >> 16) & 0xff; buf[6] = (blocks >> 8) & 0xff; buf[7] = blocks & 0xff; buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; /* * Set Thin Provisioning Enable bit following sbc3r22 in section * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. */ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) buf[14] = 0x80; transport_kunmap_data_sg(cmd); target_complete_cmd(cmd, GOOD); return 0; } int spc_get_write_same_sectors(struct se_cmd *cmd) { u32 num_blocks; if (cmd->t_task_cdb[0] == WRITE_SAME) num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]); else if (cmd->t_task_cdb[0] == WRITE_SAME_16) num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); /* * Use the explicit range when non zero is supplied, otherwise calculate * the remaining range based on ->get_blocks() - starting LBA. 
*/ if (num_blocks) return num_blocks; return cmd->se_dev->transport->get_blocks(cmd->se_dev) - cmd->t_task_lba + 1; } EXPORT_SYMBOL(spc_get_write_same_sectors); static int sbc_emulate_verify(struct se_cmd *cmd) { target_complete_cmd(cmd, GOOD); return 0; } static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) { return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors; } static int sbc_check_valid_sectors(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; unsigned long long end_lba; u32 sectors; sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size; end_lba = dev->transport->get_blocks(dev) + 1; if (cmd->t_task_lba + sectors > end_lba) { pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n", cmd->t_task_lba, sectors, end_lba); return -EINVAL; } return 0; } static inline u32 transport_get_sectors_6(unsigned char *cdb) { /* * Use 8-bit sector value. SBC-3 says: * * A TRANSFER LENGTH field set to zero specifies that 256 * logical blocks shall be written. Any other value * specifies the number of logical blocks that shall be * written. */ return cdb[4] ? 
: 256; } static inline u32 transport_get_sectors_10(unsigned char *cdb) { return (u32)(cdb[7] << 8) + cdb[8]; } static inline u32 transport_get_sectors_12(unsigned char *cdb) { return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; } static inline u32 transport_get_sectors_16(unsigned char *cdb) { return (u32)(cdb[10] << 24) + (cdb[11] << 16) + (cdb[12] << 8) + cdb[13]; } /* * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants */ static inline u32 transport_get_sectors_32(unsigned char *cdb) { return (u32)(cdb[28] << 24) + (cdb[29] << 16) + (cdb[30] << 8) + cdb[31]; } static inline u32 transport_lba_21(unsigned char *cdb) { return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; } static inline u32 transport_lba_32(unsigned char *cdb) { return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; } static inline unsigned long long transport_lba_64(unsigned char *cdb) { unsigned int __v1, __v2; __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; } /* * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs */ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) { unsigned int __v1, __v2; __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; } static int sbc_write_same_supported(struct se_device *dev, unsigned char *flags) { if ((flags[0] & 0x04) || (flags[0] & 0x02)) { pr_err("WRITE_SAME PBDATA and LBDATA" " bits not supported for Block Discard" " Emulation\n"); return -ENOSYS; } /* * Currently for the emulated case we only accept * tpws with the UNMAP=1 bit set. 
*/ if (!(flags[0] & 0x08)) { pr_err("WRITE_SAME w/o UNMAP bit not" " supported for Block Discard Emulation\n"); return -ENOSYS; } return 0; } static void xdreadwrite_callback(struct se_cmd *cmd) { unsigned char *buf, *addr; struct scatterlist *sg; unsigned int offset; int i; int count; /* * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command * * 1) read the specified logical block(s); * 2) transfer logical blocks from the data-out buffer; * 3) XOR the logical blocks transferred from the data-out buffer with * the logical blocks read, storing the resulting XOR data in a buffer; * 4) if the DISABLE WRITE bit is set to zero, then write the logical * blocks transferred from the data-out buffer; and * 5) transfer the resulting XOR data to the data-in buffer. */ buf = kmalloc(cmd->data_length, GFP_KERNEL); if (!buf) { pr_err("Unable to allocate xor_callback buf\n"); return; } /* * Copy the scatterlist WRITE buffer located at cmd->t_data_sg * into the locally allocated *buf */ sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf, cmd->data_length); /* * Now perform the XOR against the BIDI read memory located at * cmd->t_mem_bidi_list */ offset = 0; for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { addr = kmap_atomic(sg_page(sg)); if (!addr) goto out; for (i = 0; i < sg->length; i++) *(addr + sg->offset + i) ^= *(buf + offset + i); offset += sg->length; kunmap_atomic(addr); } out: kfree(buf); } int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops) { struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; struct se_device *dev = cmd->se_dev; unsigned char *cdb = cmd->t_task_cdb; unsigned int size; u32 sectors = 0; int ret; switch (cdb[0]) { case READ_6: sectors = transport_get_sectors_6(cdb); cmd->t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_cmd = ops->execute_rw; break; case READ_10: sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= 
SCF_SCSI_DATA_CDB; cmd->execute_cmd = ops->execute_rw; break; case READ_12: sectors = transport_get_sectors_12(cdb); cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_cmd = ops->execute_rw; break; case READ_16: sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_cmd = ops->execute_rw; break; case WRITE_6: sectors = transport_get_sectors_6(cdb); cmd->t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_cmd = ops->execute_rw; break; case WRITE_10: case WRITE_VERIFY: sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_cmd = ops->execute_rw; break; case WRITE_12: sectors = transport_get_sectors_12(cdb); cmd->t_task_lba = transport_lba_32(cdb); if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_cmd = ops->execute_rw; break; case WRITE_16: sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_cmd = ops->execute_rw; break; case XDWRITEREAD_10: if ((cmd->data_direction != DMA_TO_DEVICE) || !(cmd->se_cmd_flags & SCF_BIDI)) goto out_invalid_cdb_field; sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; /* * Setup BIDI XOR callback to be run after I/O completion. 
*/ cmd->execute_cmd = ops->execute_rw; cmd->transport_complete_callback = &xdreadwrite_callback; if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; break; case VARIABLE_LENGTH_CMD: { u16 service_action = get_unaligned_be16(&cdb[8]); switch (service_action) { case XDWRITEREAD_32: sectors = transport_get_sectors_32(cdb); /* * Use WRITE_32 and READ_32 opcodes for the emulated * XDWRITE_READ_32 logic. */ cmd->t_task_lba = transport_lba_64_ext(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; /* * Setup BIDI XOR callback to be run during after I/O * completion. */ cmd->execute_cmd = ops->execute_rw; cmd->transport_complete_callback = &xdreadwrite_callback; if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; break; case WRITE_SAME_32: if (!ops->execute_write_same) goto out_unsupported_cdb; sectors = transport_get_sectors_32(cdb); if (!sectors) { pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" " supported\n"); goto out_invalid_cdb_field; } size = sbc_get_size(cmd, 1); cmd->t_task_lba = get_unaligned_be64(&cdb[12]); if (sbc_write_same_supported(dev, &cdb[10]) < 0) goto out_unsupported_cdb; cmd->execute_cmd = ops->execute_write_same; break; default: pr_err("VARIABLE_LENGTH_CMD service action" " 0x%04x not supported\n", service_action); goto out_unsupported_cdb; } break; } case READ_CAPACITY: size = READ_CAP_LEN; cmd->execute_cmd = sbc_emulate_readcapacity; break; case SERVICE_ACTION_IN: switch (cmd->t_task_cdb[1] & 0x1f) { case SAI_READ_CAPACITY_16: cmd->execute_cmd = sbc_emulate_readcapacity_16; break; default: pr_err("Unsupported SA: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f); goto out_invalid_cdb_field; } size = (cdb[10] << 24) | (cdb[11] << 16) | (cdb[12] << 8) | cdb[13]; break; case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: if (!ops->execute_sync_cache) goto out_unsupported_cdb; /* * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE */ if (cdb[0] == SYNCHRONIZE_CACHE) { sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); } else { sectors 
= transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); } size = sbc_get_size(cmd, sectors); /* * Check to ensure that LBA + Range does not exceed past end of * device for IBLOCK and FILEIO ->do_sync_cache() backend calls */ if (cmd->t_task_lba || sectors) { if (sbc_check_valid_sectors(cmd) < 0) goto out_invalid_cdb_field; } cmd->execute_cmd = ops->execute_sync_cache; break; case UNMAP: if (!ops->execute_unmap) goto out_unsupported_cdb; size = get_unaligned_be16(&cdb[7]); cmd->execute_cmd = ops->execute_unmap; break; case WRITE_SAME_16: if (!ops->execute_write_same) goto out_unsupported_cdb; sectors = transport_get_sectors_16(cdb); if (!sectors) { pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); goto out_invalid_cdb_field; } size = sbc_get_size(cmd, 1); cmd->t_task_lba = get_unaligned_be64(&cdb[2]); if (sbc_write_same_supported(dev, &cdb[1]) < 0) goto out_unsupported_cdb; cmd->execute_cmd = ops->execute_write_same; break; case WRITE_SAME: if (!ops->execute_write_same) goto out_unsupported_cdb; sectors = transport_get_sectors_10(cdb); if (!sectors) { pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); goto out_invalid_cdb_field; } size = sbc_get_size(cmd, 1); cmd->t_task_lba = get_unaligned_be32(&cdb[2]); /* * Follow sbcr26 with WRITE_SAME (10) and check for the existence * of byte 1 bit 3 UNMAP instead of original reserved field */ if (sbc_write_same_supported(dev, &cdb[1]) < 0) goto out_unsupported_cdb; cmd->execute_cmd = ops->execute_write_same; break; case VERIFY: size = 0; cmd->execute_cmd = sbc_emulate_verify; break; default: ret = spc_parse_cdb(cmd, &size); if (ret) return ret; } /* reject any command that we don't have a handler for */ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd) goto out_unsupported_cdb; if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { unsigned long long end_lba; if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) { printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" " big sectors %u 
exceeds fabric_max_sectors:" " %u\n", cdb[0], sectors, su_dev->se_dev_attrib.fabric_max_sectors); goto out_invalid_cdb_field; } if (sectors > su_dev->se_dev_attrib.hw_max_sectors) { printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" " big sectors %u exceeds backend hw_max_sectors:" " %u\n", cdb[0], sectors, su_dev->se_dev_attrib.hw_max_sectors); goto out_invalid_cdb_field; } end_lba = dev->transport->get_blocks(dev) + 1; if (cmd->t_task_lba + sectors > end_lba) { pr_err("cmd exceeds last lba %llu " "(lba %llu, sectors %u)\n", end_lba, cmd->t_task_lba, sectors); goto out_invalid_cdb_field; } size = sbc_get_size(cmd, sectors); } ret = target_cmd_size_check(cmd, size); if (ret < 0) return ret; return 0; out_unsupported_cdb: cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; return -EINVAL; out_invalid_cdb_field: cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; return -EINVAL; } EXPORT_SYMBOL(sbc_parse_cdb);
gpl-2.0
intel-linux-graphics/drm-intel
drivers/misc/mei/client.c
62
20066
/* * * Intel Management Engine Interface (Intel MEI) Linux driver * Copyright (c) 2003-2012, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * */ #include <linux/pci.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/mei.h> #include "mei_dev.h" #include "hbm.h" #include "client.h" /** * mei_me_cl_by_uuid - locate index of me client * * @dev: mei device * returns me client index or -ENOENT if not found */ int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid) { int i, res = -ENOENT; for (i = 0; i < dev->me_clients_num; ++i) if (uuid_le_cmp(*uuid, dev->me_clients[i].props.protocol_name) == 0) { res = i; break; } return res; } /** * mei_me_cl_by_id return index to me_clients for client_id * * @dev: the device structure * @client_id: me client id * * Locking: called under "dev->device_lock" lock * * returns index on success, -ENOENT on failure. */ int mei_me_cl_by_id(struct mei_device *dev, u8 client_id) { int i; for (i = 0; i < dev->me_clients_num; i++) if (dev->me_clients[i].client_id == client_id) break; if (WARN_ON(dev->me_clients[i].client_id != client_id)) return -ENOENT; if (i == dev->me_clients_num) return -ENOENT; return i; } /** * mei_io_list_flush - removes list entry belonging to cl. 
* * @list: An instance of our list structure * @cl: host client */ void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) { struct mei_cl_cb *cb; struct mei_cl_cb *next; list_for_each_entry_safe(cb, next, &list->list, list) { if (cb->cl && mei_cl_cmp_id(cl, cb->cl)) list_del(&cb->list); } } /** * mei_io_cb_free - free mei_cb_private related memory * * @cb: mei callback struct */ void mei_io_cb_free(struct mei_cl_cb *cb) { if (cb == NULL) return; kfree(cb->request_buffer.data); kfree(cb->response_buffer.data); kfree(cb); } /** * mei_io_cb_init - allocate and initialize io callback * * @cl - mei client * @fp: pointer to file structure * * returns mei_cl_cb pointer or NULL; */ struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp) { struct mei_cl_cb *cb; cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL); if (!cb) return NULL; mei_io_list_init(cb); cb->file_object = fp; cb->cl = cl; cb->buf_idx = 0; return cb; } /** * mei_io_cb_alloc_req_buf - allocate request buffer * * @cb: io callback structure * @length: size of the buffer * * returns 0 on success * -EINVAL if cb is NULL * -ENOMEM if allocation failed */ int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length) { if (!cb) return -EINVAL; if (length == 0) return 0; cb->request_buffer.data = kmalloc(length, GFP_KERNEL); if (!cb->request_buffer.data) return -ENOMEM; cb->request_buffer.size = length; return 0; } /** * mei_io_cb_alloc_resp_buf - allocate respose buffer * * @cb: io callback structure * @length: size of the buffer * * returns 0 on success * -EINVAL if cb is NULL * -ENOMEM if allocation failed */ int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length) { if (!cb) return -EINVAL; if (length == 0) return 0; cb->response_buffer.data = kmalloc(length, GFP_KERNEL); if (!cb->response_buffer.data) return -ENOMEM; cb->response_buffer.size = length; return 0; } /** * mei_cl_flush_queues - flushes queue lists belonging to cl. 
* * @cl: host client */ int mei_cl_flush_queues(struct mei_cl *cl) { if (WARN_ON(!cl || !cl->dev)) return -EINVAL; dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n"); mei_io_list_flush(&cl->dev->read_list, cl); mei_io_list_flush(&cl->dev->write_list, cl); mei_io_list_flush(&cl->dev->write_waiting_list, cl); mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl); return 0; } /** * mei_cl_init - initializes intialize cl. * * @cl: host client to be initialized * @dev: mei device */ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev) { memset(cl, 0, sizeof(struct mei_cl)); init_waitqueue_head(&cl->wait); init_waitqueue_head(&cl->rx_wait); init_waitqueue_head(&cl->tx_wait); INIT_LIST_HEAD(&cl->link); INIT_LIST_HEAD(&cl->device_link); cl->reading_state = MEI_IDLE; cl->writing_state = MEI_IDLE; cl->dev = dev; } /** * mei_cl_allocate - allocates cl structure and sets it up. 
* * @dev: mei device * returns The allocated file or NULL on failure */ struct mei_cl *mei_cl_allocate(struct mei_device *dev) { struct mei_cl *cl; cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL); if (!cl) return NULL; mei_cl_init(cl, dev); return cl; } /** * mei_cl_find_read_cb - find this cl's callback in the read list * * @cl: host client * * returns cb on success, NULL on error */ struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl) { struct mei_device *dev = cl->dev; struct mei_cl_cb *cb = NULL; struct mei_cl_cb *next = NULL; list_for_each_entry_safe(cb, next, &dev->read_list.list, list) if (mei_cl_cmp_id(cl, cb->cl)) return cb; return NULL; } /** mei_cl_link: allocte host id in the host map * * @cl - host client * @id - fixed host id or -1 for genereting one * * returns 0 on success * -EINVAL on incorrect values * -ENONET if client not found */ int mei_cl_link(struct mei_cl *cl, int id) { struct mei_device *dev; if (WARN_ON(!cl || !cl->dev)) return -EINVAL; dev = cl->dev; /* If Id is not asigned get one*/ if (id == MEI_HOST_CLIENT_ID_ANY) id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX); if (id >= MEI_CLIENTS_MAX) { dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ; return -ENOENT; } dev->open_handle_count++; cl->host_client_id = id; list_add_tail(&cl->link, &dev->file_list); set_bit(id, dev->host_clients_map); cl->state = MEI_FILE_INITIALIZING; dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id); return 0; } /** * mei_cl_unlink - remove me_cl from the list * * @cl: host client */ int mei_cl_unlink(struct mei_cl *cl) { struct mei_device *dev; struct mei_cl *pos, *next; /* don't shout on error exit path */ if (!cl) return 0; /* wd and amthif might not be initialized */ if (!cl->dev) return 0; dev = cl->dev; list_for_each_entry_safe(pos, next, &dev->file_list, link) { if (cl->host_client_id == pos->host_client_id) { dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n", pos->host_client_id, 
pos->me_client_id); list_del_init(&pos->link); break; } } return 0; } void mei_host_client_init(struct work_struct *work) { struct mei_device *dev = container_of(work, struct mei_device, init_work); struct mei_client_properties *client_props; int i; mutex_lock(&dev->device_lock); bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); dev->open_handle_count = 0; /* * Reserving the first three client IDs * 0: Reserved for MEI Bus Message communications * 1: Reserved for Watchdog * 2: Reserved for AMTHI */ bitmap_set(dev->host_clients_map, 0, 3); for (i = 0; i < dev->me_clients_num; i++) { client_props = &dev->me_clients[i].props; if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid)) mei_amthif_host_init(dev); else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid)) mei_wd_host_init(dev); else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid)) mei_nfc_host_init(dev); } dev->dev_state = MEI_DEV_ENABLED; mutex_unlock(&dev->device_lock); } /** * mei_cl_disconnect - disconnect host clinet form the me one * * @cl: host client * * Locking: called under "dev->device_lock" lock * * returns 0 on success, <0 on failure. 
*/ int mei_cl_disconnect(struct mei_cl *cl) { struct mei_device *dev; struct mei_cl_cb *cb; int rets, err; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; dev = cl->dev; if (cl->state != MEI_FILE_DISCONNECTING) return 0; cb = mei_io_cb_init(cl, NULL); if (!cb) return -ENOMEM; cb->fop_type = MEI_FOP_CLOSE; if (dev->hbuf_is_ready) { dev->hbuf_is_ready = false; if (mei_hbm_cl_disconnect_req(dev, cl)) { rets = -ENODEV; dev_err(&dev->pdev->dev, "failed to disconnect.\n"); goto free; } mdelay(10); /* Wait for hardware disconnection ready */ list_add_tail(&cb->list, &dev->ctrl_rd_list.list); } else { dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n"); list_add_tail(&cb->list, &dev->ctrl_wr_list.list); } mutex_unlock(&dev->device_lock); err = wait_event_timeout(dev->wait_recvd_msg, MEI_FILE_DISCONNECTED == cl->state, mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); mutex_lock(&dev->device_lock); if (MEI_FILE_DISCONNECTED == cl->state) { rets = 0; dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n"); } else { rets = -ENODEV; if (MEI_FILE_DISCONNECTED != cl->state) dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n"); if (err) dev_dbg(&dev->pdev->dev, "wait failed disconnect err=%08x\n", err); dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n"); } mei_io_list_flush(&dev->ctrl_rd_list, cl); mei_io_list_flush(&dev->ctrl_wr_list, cl); free: mei_io_cb_free(cb); return rets; } /** * mei_cl_is_other_connecting - checks if other * client with the same me client id is connecting * * @cl: private data of the file object * * returns ture if other client is connected, 0 - otherwise. 
*/ bool mei_cl_is_other_connecting(struct mei_cl *cl) { struct mei_device *dev; struct mei_cl *pos; struct mei_cl *next; if (WARN_ON(!cl || !cl->dev)) return false; dev = cl->dev; list_for_each_entry_safe(pos, next, &dev->file_list, link) { if ((pos->state == MEI_FILE_CONNECTING) && (pos != cl) && cl->me_client_id == pos->me_client_id) return true; } return false; } /** * mei_cl_connect - connect host clinet to the me one * * @cl: host client * * Locking: called under "dev->device_lock" lock * * returns 0 on success, <0 on failure. */ int mei_cl_connect(struct mei_cl *cl, struct file *file) { struct mei_device *dev; struct mei_cl_cb *cb; int rets; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; dev = cl->dev; cb = mei_io_cb_init(cl, file); if (!cb) { rets = -ENOMEM; goto out; } cb->fop_type = MEI_FOP_IOCTL; if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) { dev->hbuf_is_ready = false; if (mei_hbm_cl_connect_req(dev, cl)) { rets = -ENODEV; goto out; } cl->timer_count = MEI_CONNECT_TIMEOUT; list_add_tail(&cb->list, &dev->ctrl_rd_list.list); } else { list_add_tail(&cb->list, &dev->ctrl_wr_list.list); } mutex_unlock(&dev->device_lock); rets = wait_event_timeout(dev->wait_recvd_msg, (cl->state == MEI_FILE_CONNECTED || cl->state == MEI_FILE_DISCONNECTED), mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); mutex_lock(&dev->device_lock); if (cl->state != MEI_FILE_CONNECTED) { rets = -EFAULT; mei_io_list_flush(&dev->ctrl_rd_list, cl); mei_io_list_flush(&dev->ctrl_wr_list, cl); goto out; } rets = cl->status; out: mei_io_cb_free(cb); return rets; } /** * mei_cl_flow_ctrl_creds - checks flow_control credits for cl. * * @cl: private data of the file object * * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise. 
* -ENOENT if mei_cl is not present * -EINVAL if single_recv_buf == 0 */ int mei_cl_flow_ctrl_creds(struct mei_cl *cl) { struct mei_device *dev; int i; if (WARN_ON(!cl || !cl->dev)) return -EINVAL; dev = cl->dev; if (!dev->me_clients_num) return 0; if (cl->mei_flow_ctrl_creds > 0) return 1; for (i = 0; i < dev->me_clients_num; i++) { struct mei_me_client *me_cl = &dev->me_clients[i]; if (me_cl->client_id == cl->me_client_id) { if (me_cl->mei_flow_ctrl_creds) { if (WARN_ON(me_cl->props.single_recv_buf == 0)) return -EINVAL; return 1; } else { return 0; } } } return -ENOENT; } /** * mei_cl_flow_ctrl_reduce - reduces flow_control. * * @cl: private data of the file object * * @returns * 0 on success * -ENOENT when me client is not found * -EINVAL when ctrl credits are <= 0 */ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) { struct mei_device *dev; int i; if (WARN_ON(!cl || !cl->dev)) return -EINVAL; dev = cl->dev; if (!dev->me_clients_num) return -ENOENT; for (i = 0; i < dev->me_clients_num; i++) { struct mei_me_client *me_cl = &dev->me_clients[i]; if (me_cl->client_id == cl->me_client_id) { if (me_cl->props.single_recv_buf != 0) { if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) return -EINVAL; dev->me_clients[i].mei_flow_ctrl_creds--; } else { if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) return -EINVAL; cl->mei_flow_ctrl_creds--; } return 0; } } return -ENOENT; } /** * mei_cl_read_start - the start read client message function. * * @cl: host client * * returns 0 on success, <0 on failure. 
*/ int mei_cl_read_start(struct mei_cl *cl, size_t length) { struct mei_device *dev; struct mei_cl_cb *cb; int rets; int i; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; dev = cl->dev; if (cl->state != MEI_FILE_CONNECTED) return -ENODEV; if (dev->dev_state != MEI_DEV_ENABLED) return -ENODEV; if (cl->read_cb) { dev_dbg(&dev->pdev->dev, "read is pending.\n"); return -EBUSY; } i = mei_me_cl_by_id(dev, cl->me_client_id); if (i < 0) { dev_err(&dev->pdev->dev, "no such me client %d\n", cl->me_client_id); return -ENODEV; } cb = mei_io_cb_init(cl, NULL); if (!cb) return -ENOMEM; /* always allocate at least client max message */ length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length); rets = mei_io_cb_alloc_resp_buf(cb, length); if (rets) goto err; cb->fop_type = MEI_FOP_READ; cl->read_cb = cb; if (dev->hbuf_is_ready) { dev->hbuf_is_ready = false; if (mei_hbm_cl_flow_control_req(dev, cl)) { rets = -ENODEV; goto err; } list_add_tail(&cb->list, &dev->read_list.list); } else { list_add_tail(&cb->list, &dev->ctrl_wr_list.list); } return rets; err: mei_io_cb_free(cb); return rets; } /** * mei_cl_irq_write_complete - write a message to device * from the interrupt thread context * * @cl: client * @cb: callback block. * @slots: free slots. * @cmpl_list: complete list. * * returns 0, OK; otherwise error. 
*/ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, s32 *slots, struct mei_cl_cb *cmpl_list) { struct mei_device *dev = cl->dev; struct mei_msg_hdr mei_hdr; size_t len = cb->request_buffer.size - cb->buf_idx; u32 msg_slots = mei_data2slots(len); mei_hdr.host_addr = cl->host_client_id; mei_hdr.me_addr = cl->me_client_id; mei_hdr.reserved = 0; if (*slots >= msg_slots) { mei_hdr.length = len; mei_hdr.msg_complete = 1; /* Split the message only if we can write the whole host buffer */ } else if (*slots == dev->hbuf_depth) { msg_slots = *slots; len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); mei_hdr.length = len; mei_hdr.msg_complete = 0; } else { /* wait for next time the host buffer is empty */ return 0; } dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n", cb->request_buffer.size, cb->buf_idx); dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr)); *slots -= msg_slots; if (mei_write_message(dev, &mei_hdr, cb->request_buffer.data + cb->buf_idx)) { cl->status = -ENODEV; list_move_tail(&cb->list, &cmpl_list->list); return -ENODEV; } cl->status = 0; cl->writing_state = MEI_WRITING; cb->buf_idx += mei_hdr.length; if (mei_hdr.msg_complete) { if (mei_cl_flow_ctrl_reduce(cl)) return -ENODEV; list_move_tail(&cb->list, &dev->write_waiting_list.list); } return 0; } /** * mei_cl_write - submit a write cb to mei device assumes device_lock is locked * * @cl: host client * @cl: write callback with filled data * * returns numbe of bytes sent on success, <0 on failure. 
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->request_buffer;

	dev_dbg(&dev->pdev->dev, "mei_cl_write %d\n", buf->size);

	cb->fop_type = MEI_FOP_WRITE;

	/* 0 credits means the request must be queued; <0 is an error. */
	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	/* Host buffer is not ready, we queue the request */
	if (rets == 0 || !dev->hbuf_is_ready) {
		cb->buf_idx = 0;
		/* unseting complete will enqueue the cb for write */
		mei_hdr.msg_complete = 0;
		rets = buf->size;
		goto out;
	}

	dev->hbuf_is_ready = false;

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;

	dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
		MEI_HDR_PRM(&mei_hdr));

	if (mei_write_message(dev, &mei_hdr, buf->data)) {
		rets = -EIO;
		goto err;
	}

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

	/* Report the full request size; any remaining chunk is sent later
	 * from the write list by the interrupt path. */
	rets = buf->size;

out:
	if (mei_hdr.msg_complete) {
		/* Whole message queued to HW: consume a flow control credit
		 * and park the cb until the firmware acknowledges it. */
		if (mei_cl_flow_ctrl_reduce(cl)) {
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		/* Partial or deferred write stays on the write list. */
		list_add_tail(&cb->list, &dev->write_list.list);
	}

	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
		/* Drop the device lock while sleeping for completion. */
		mutex_unlock(&dev->device_lock);
		if (wait_event_interruptible(cl->tx_wait,
			cl->writing_state == MEI_WRITE_COMPLETE)) {
			/* -EINTR when a signal interrupted the wait. */
			if (signal_pending(current))
				rets = -EINTR;
			else
				rets = -ERESTARTSYS;
		}
		mutex_lock(&dev->device_lock);
	}
err:
	return rets;
}

/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
*/ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) { if (cb->fop_type == MEI_FOP_WRITE) { mei_io_cb_free(cb); cb = NULL; cl->writing_state = MEI_WRITE_COMPLETE; if (waitqueue_active(&cl->tx_wait)) wake_up_interruptible(&cl->tx_wait); } else if (cb->fop_type == MEI_FOP_READ && MEI_READING == cl->reading_state) { cl->reading_state = MEI_READ_COMPLETE; if (waitqueue_active(&cl->rx_wait)) wake_up_interruptible(&cl->rx_wait); else mei_cl_bus_rx_event(cl); } } /** * mei_cl_all_disconnect - disconnect forcefully all connected clients * * @dev - mei device */ void mei_cl_all_disconnect(struct mei_device *dev) { struct mei_cl *cl, *next; list_for_each_entry_safe(cl, next, &dev->file_list, link) { cl->state = MEI_FILE_DISCONNECTED; cl->mei_flow_ctrl_creds = 0; cl->read_cb = NULL; cl->timer_count = 0; } } /** * mei_cl_all_read_wakeup - wake up all readings so they can be interrupted * * @dev - mei device */ void mei_cl_all_read_wakeup(struct mei_device *dev) { struct mei_cl *cl, *next; list_for_each_entry_safe(cl, next, &dev->file_list, link) { if (waitqueue_active(&cl->rx_wait)) { dev_dbg(&dev->pdev->dev, "Waking up client!\n"); wake_up_interruptible(&cl->rx_wait); } } } /** * mei_cl_all_write_clear - clear all pending writes * @dev - mei device */ void mei_cl_all_write_clear(struct mei_device *dev) { struct mei_cl_cb *cb, *next; list_for_each_entry_safe(cb, next, &dev->write_list.list, list) { list_del(&cb->list); mei_io_cb_free(cb); } }
gpl-2.0
thaskell1/linux
drivers/spi/spi-bfin-sport.c
830
23330
/* * SPI bus via the Blackfin SPORT peripheral * * Enter bugs at http://blackfin.uclinux.org/ * * Copyright 2009-2011 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/gpio.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/workqueue.h> #include <asm/portmux.h> #include <asm/bfin5xx_spi.h> #include <asm/blackfin.h> #include <asm/bfin_sport.h> #include <asm/cacheflush.h> #define DRV_NAME "bfin-sport-spi" #define DRV_DESC "SPI bus via the Blackfin SPORT" MODULE_AUTHOR("Cliff Cai"); MODULE_DESCRIPTION(DRV_DESC); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:bfin-sport-spi"); enum bfin_sport_spi_state { START_STATE, RUNNING_STATE, DONE_STATE, ERROR_STATE, }; struct bfin_sport_spi_master_data; struct bfin_sport_transfer_ops { void (*write) (struct bfin_sport_spi_master_data *); void (*read) (struct bfin_sport_spi_master_data *); void (*duplex) (struct bfin_sport_spi_master_data *); }; struct bfin_sport_spi_master_data { /* Driver model hookup */ struct device *dev; /* SPI framework hookup */ struct spi_master *master; /* Regs base of SPI controller */ struct sport_register __iomem *regs; int err_irq; /* Pin request list */ u16 *pin_req; /* Driver message queue */ struct workqueue_struct *workqueue; struct work_struct pump_messages; spinlock_t lock; struct list_head queue; int busy; bool run; /* Message Transfer pump */ struct tasklet_struct pump_transfers; /* Current message transfer state info */ enum bfin_sport_spi_state state; struct spi_message *cur_msg; struct spi_transfer *cur_transfer; struct bfin_sport_spi_slave_data *cur_chip; union { void *tx; u8 *tx8; u16 *tx16; }; void *tx_end; union { void *rx; u8 *rx8; u16 *rx16; }; void *rx_end; int cs_change; struct bfin_sport_transfer_ops *ops; }; struct 
bfin_sport_spi_slave_data { u16 ctl_reg; u16 baud; u16 cs_chg_udelay; /* Some devices require > 255usec delay */ u32 cs_gpio; u16 idle_tx_val; struct bfin_sport_transfer_ops *ops; }; static void bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data) { bfin_write_or(&drv_data->regs->tcr1, TSPEN); bfin_write_or(&drv_data->regs->rcr1, TSPEN); SSYNC(); } static void bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data) { bfin_write_and(&drv_data->regs->tcr1, ~TSPEN); bfin_write_and(&drv_data->regs->rcr1, ~TSPEN); SSYNC(); } /* Caculate the SPI_BAUD register value based on input HZ */ static u16 bfin_sport_hz_to_spi_baud(u32 speed_hz) { u_long clk, sclk = get_sclk(); int div = (sclk / (2 * speed_hz)) - 1; if (div < 0) div = 0; clk = sclk / (2 * (div + 1)); if (clk > speed_hz) div++; return div; } /* Chip select operation functions for cs_change flag */ static void bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip) { gpio_direction_output(chip->cs_gpio, 0); } static void bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip) { gpio_direction_output(chip->cs_gpio, 1); /* Move delay here for consistency */ if (chip->cs_chg_udelay) udelay(chip->cs_chg_udelay); } static void bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data) { unsigned long timeout = jiffies + HZ; while (!(bfin_read(&drv_data->regs->stat) & RXNE)) { if (!time_before(jiffies, timeout)) break; } } static void bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data) { u16 dummy; while (drv_data->tx < drv_data->tx_end) { bfin_write(&drv_data->regs->tx16, *drv_data->tx8++); bfin_sport_spi_stat_poll_complete(drv_data); dummy = bfin_read(&drv_data->regs->rx16); } } static void bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data) { u16 tx_val = drv_data->cur_chip->idle_tx_val; while (drv_data->rx < drv_data->rx_end) { bfin_write(&drv_data->regs->tx16, tx_val); bfin_sport_spi_stat_poll_complete(drv_data); 
*drv_data->rx8++ = bfin_read(&drv_data->regs->rx16); } } static void bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data) { while (drv_data->rx < drv_data->rx_end) { bfin_write(&drv_data->regs->tx16, *drv_data->tx8++); bfin_sport_spi_stat_poll_complete(drv_data); *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16); } } static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = { .write = bfin_sport_spi_u8_writer, .read = bfin_sport_spi_u8_reader, .duplex = bfin_sport_spi_u8_duplex, }; static void bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data) { u16 dummy; while (drv_data->tx < drv_data->tx_end) { bfin_write(&drv_data->regs->tx16, *drv_data->tx16++); bfin_sport_spi_stat_poll_complete(drv_data); dummy = bfin_read(&drv_data->regs->rx16); } } static void bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data) { u16 tx_val = drv_data->cur_chip->idle_tx_val; while (drv_data->rx < drv_data->rx_end) { bfin_write(&drv_data->regs->tx16, tx_val); bfin_sport_spi_stat_poll_complete(drv_data); *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16); } } static void bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data) { while (drv_data->rx < drv_data->rx_end) { bfin_write(&drv_data->regs->tx16, *drv_data->tx16++); bfin_sport_spi_stat_poll_complete(drv_data); *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16); } } static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = { .write = bfin_sport_spi_u16_writer, .read = bfin_sport_spi_u16_reader, .duplex = bfin_sport_spi_u16_duplex, }; /* stop controller and re-config current chip */ static void bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data) { struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip; bfin_sport_spi_disable(drv_data); dev_dbg(drv_data->dev, "restoring spi ctl state\n"); bfin_write(&drv_data->regs->tcr1, chip->ctl_reg); bfin_write(&drv_data->regs->tclkdiv, chip->baud); SSYNC(); 
bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS)); SSYNC(); bfin_sport_spi_cs_active(chip); } /* test if there is more transfer to be done */ static enum bfin_sport_spi_state bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data) { struct spi_message *msg = drv_data->cur_msg; struct spi_transfer *trans = drv_data->cur_transfer; /* Move to next transfer */ if (trans->transfer_list.next != &msg->transfers) { drv_data->cur_transfer = list_entry(trans->transfer_list.next, struct spi_transfer, transfer_list); return RUNNING_STATE; } return DONE_STATE; } /* * caller already set message->status; * dma and pio irqs are blocked give finished message back */ static void bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data) { struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip; unsigned long flags; struct spi_message *msg; spin_lock_irqsave(&drv_data->lock, flags); msg = drv_data->cur_msg; drv_data->state = START_STATE; drv_data->cur_msg = NULL; drv_data->cur_transfer = NULL; drv_data->cur_chip = NULL; queue_work(drv_data->workqueue, &drv_data->pump_messages); spin_unlock_irqrestore(&drv_data->lock, flags); if (!drv_data->cs_change) bfin_sport_spi_cs_deactive(chip); if (msg->complete) msg->complete(msg->context); } static irqreturn_t sport_err_handler(int irq, void *dev_id) { struct bfin_sport_spi_master_data *drv_data = dev_id; u16 status; dev_dbg(drv_data->dev, "%s enter\n", __func__); status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF); if (status) { bfin_write(&drv_data->regs->stat, status); SSYNC(); bfin_sport_spi_disable(drv_data); dev_err(drv_data->dev, "status error:%s%s%s%s\n", status & TOVF ? " TOVF" : "", status & TUVF ? " TUVF" : "", status & ROVF ? " ROVF" : "", status & RUVF ? 
" RUVF" : ""); } return IRQ_HANDLED; } static void bfin_sport_spi_pump_transfers(unsigned long data) { struct bfin_sport_spi_master_data *drv_data = (void *)data; struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; struct bfin_sport_spi_slave_data *chip = NULL; unsigned int bits_per_word; u32 tranf_success = 1; u32 transfer_speed; u8 full_duplex = 0; /* Get current state information */ message = drv_data->cur_msg; transfer = drv_data->cur_transfer; chip = drv_data->cur_chip; if (transfer->speed_hz) transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz); else transfer_speed = chip->baud; bfin_write(&drv_data->regs->tclkdiv, transfer_speed); SSYNC(); /* * if msg is error or done, report it back using complete() callback */ /* Handle for abort */ if (drv_data->state == ERROR_STATE) { dev_dbg(drv_data->dev, "transfer: we've hit an error\n"); message->status = -EIO; bfin_sport_spi_giveback(drv_data); return; } /* Handle end of message */ if (drv_data->state == DONE_STATE) { dev_dbg(drv_data->dev, "transfer: all done!\n"); message->status = 0; bfin_sport_spi_giveback(drv_data); return; } /* Delay if requested at end of transfer */ if (drv_data->state == RUNNING_STATE) { dev_dbg(drv_data->dev, "transfer: still running ...\n"); previous = list_entry(transfer->transfer_list.prev, struct spi_transfer, transfer_list); if (previous->delay_usecs) udelay(previous->delay_usecs); } if (transfer->len == 0) { /* Move to next transfer of this msg */ drv_data->state = bfin_sport_spi_next_transfer(drv_data); /* Schedule next transfer tasklet */ tasklet_schedule(&drv_data->pump_transfers); } if (transfer->tx_buf != NULL) { drv_data->tx = (void *)transfer->tx_buf; drv_data->tx_end = drv_data->tx + transfer->len; dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n", transfer->tx_buf, drv_data->tx_end); } else drv_data->tx = NULL; if (transfer->rx_buf != NULL) { full_duplex = transfer->tx_buf != NULL; drv_data->rx = 
transfer->rx_buf; drv_data->rx_end = drv_data->rx + transfer->len; dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n", transfer->rx_buf, drv_data->rx_end); } else drv_data->rx = NULL; drv_data->cs_change = transfer->cs_change; /* Bits per word setup */ bits_per_word = transfer->bits_per_word; if (bits_per_word == 16) drv_data->ops = &bfin_sport_transfer_ops_u16; else drv_data->ops = &bfin_sport_transfer_ops_u8; bfin_write(&drv_data->regs->tcr2, bits_per_word - 1); bfin_write(&drv_data->regs->tfsdiv, bits_per_word - 1); bfin_write(&drv_data->regs->rcr2, bits_per_word - 1); drv_data->state = RUNNING_STATE; if (drv_data->cs_change) bfin_sport_spi_cs_active(chip); dev_dbg(drv_data->dev, "now pumping a transfer: width is %d, len is %d\n", bits_per_word, transfer->len); /* PIO mode write then read */ dev_dbg(drv_data->dev, "doing IO transfer\n"); bfin_sport_spi_enable(drv_data); if (full_duplex) { /* full duplex mode */ BUG_ON((drv_data->tx_end - drv_data->tx) != (drv_data->rx_end - drv_data->rx)); drv_data->ops->duplex(drv_data); if (drv_data->tx != drv_data->tx_end) tranf_success = 0; } else if (drv_data->tx != NULL) { /* write only half duplex */ drv_data->ops->write(drv_data); if (drv_data->tx != drv_data->tx_end) tranf_success = 0; } else if (drv_data->rx != NULL) { /* read only half duplex */ drv_data->ops->read(drv_data); if (drv_data->rx != drv_data->rx_end) tranf_success = 0; } bfin_sport_spi_disable(drv_data); if (!tranf_success) { dev_dbg(drv_data->dev, "IO write error!\n"); drv_data->state = ERROR_STATE; } else { /* Update total byte transferred */ message->actual_length += transfer->len; /* Move to next transfer of this msg */ drv_data->state = bfin_sport_spi_next_transfer(drv_data); if (drv_data->cs_change) bfin_sport_spi_cs_deactive(chip); } /* Schedule next transfer tasklet */ tasklet_schedule(&drv_data->pump_transfers); } /* pop a msg from queue and kick off real transfer */ static void bfin_sport_spi_pump_messages(struct work_struct *work) { struct 
bfin_sport_spi_master_data *drv_data; unsigned long flags; struct spi_message *next_msg; drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages); /* Lock queue and check for queue work */ spin_lock_irqsave(&drv_data->lock, flags); if (list_empty(&drv_data->queue) || !drv_data->run) { /* pumper kicked off but no work to do */ drv_data->busy = 0; spin_unlock_irqrestore(&drv_data->lock, flags); return; } /* Make sure we are not already running a message */ if (drv_data->cur_msg) { spin_unlock_irqrestore(&drv_data->lock, flags); return; } /* Extract head of queue */ next_msg = list_entry(drv_data->queue.next, struct spi_message, queue); drv_data->cur_msg = next_msg; /* Setup the SSP using the per chip configuration */ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); list_del_init(&drv_data->cur_msg->queue); /* Initialize message state */ drv_data->cur_msg->state = START_STATE; drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, struct spi_transfer, transfer_list); bfin_sport_spi_restore_state(drv_data); dev_dbg(drv_data->dev, "got a message to pump, " "state is set to: baud %d, cs_gpio %i, ctl 0x%x\n", drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio, drv_data->cur_chip->ctl_reg); dev_dbg(drv_data->dev, "the first transfer len is %d\n", drv_data->cur_transfer->len); /* Mark as busy and launch transfers */ tasklet_schedule(&drv_data->pump_transfers); drv_data->busy = 1; spin_unlock_irqrestore(&drv_data->lock, flags); } /* * got a msg to transfer, queue it in drv_data->queue. 
* And kick off message pumper */ static int bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg) { struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master); unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); if (!drv_data->run) { spin_unlock_irqrestore(&drv_data->lock, flags); return -ESHUTDOWN; } msg->actual_length = 0; msg->status = -EINPROGRESS; msg->state = START_STATE; dev_dbg(&spi->dev, "adding an msg in transfer()\n"); list_add_tail(&msg->queue, &drv_data->queue); if (drv_data->run && !drv_data->busy) queue_work(drv_data->workqueue, &drv_data->pump_messages); spin_unlock_irqrestore(&drv_data->lock, flags); return 0; } /* Called every time common spi devices change state */ static int bfin_sport_spi_setup(struct spi_device *spi) { struct bfin_sport_spi_slave_data *chip, *first = NULL; int ret; /* Only alloc (or use chip_info) on first setup */ chip = spi_get_ctldata(spi); if (chip == NULL) { struct bfin5xx_spi_chip *chip_info; chip = first = kzalloc(sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; /* platform chip_info isn't required */ chip_info = spi->controller_data; if (chip_info) { /* * DITFS and TDTYPE are only thing we don't set, but * they probably shouldn't be changed by people. */ if (chip_info->ctl_reg || chip_info->enable_dma) { ret = -EINVAL; dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields\n"); goto error; } chip->cs_chg_udelay = chip_info->cs_chg_udelay; chip->idle_tx_val = chip_info->idle_tx_val; } } /* translate common spi framework into our register * following configure contents are same for tx and rx. 
*/ if (spi->mode & SPI_CPHA) chip->ctl_reg &= ~TCKFE; else chip->ctl_reg |= TCKFE; if (spi->mode & SPI_LSB_FIRST) chip->ctl_reg |= TLSBIT; else chip->ctl_reg &= ~TLSBIT; /* Sport in master mode */ chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS; chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz); chip->cs_gpio = spi->chip_select; ret = gpio_request(chip->cs_gpio, spi->modalias); if (ret) goto error; dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n", spi->modalias, spi->bits_per_word); dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n", chip->ctl_reg, spi->chip_select); spi_set_ctldata(spi, chip); bfin_sport_spi_cs_deactive(chip); return ret; error: kfree(first); return ret; } /* * callback for spi framework. * clean driver specific data */ static void bfin_sport_spi_cleanup(struct spi_device *spi) { struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi); if (!chip) return; gpio_free(chip->cs_gpio); kfree(chip); } static int bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data) { INIT_LIST_HEAD(&drv_data->queue); spin_lock_init(&drv_data->lock); drv_data->run = false; drv_data->busy = 0; /* init transfer tasklet */ tasklet_init(&drv_data->pump_transfers, bfin_sport_spi_pump_transfers, (unsigned long)drv_data); /* init messages workqueue */ INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages); drv_data->workqueue = create_singlethread_workqueue(dev_name(drv_data->master->dev.parent)); if (drv_data->workqueue == NULL) return -EBUSY; return 0; } static int bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data) { unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); if (drv_data->run || drv_data->busy) { spin_unlock_irqrestore(&drv_data->lock, flags); return -EBUSY; } drv_data->run = true; drv_data->cur_msg = NULL; drv_data->cur_transfer = NULL; drv_data->cur_chip = NULL; spin_unlock_irqrestore(&drv_data->lock, flags); queue_work(drv_data->workqueue, &drv_data->pump_messages); return 0; } 
static inline int bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data) { unsigned long flags; unsigned limit = 500; int status = 0; spin_lock_irqsave(&drv_data->lock, flags); /* * This is a bit lame, but is optimized for the common execution path. * A wait_queue on the drv_data->busy could be used, but then the common * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. Do this instead */ drv_data->run = false; while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { spin_unlock_irqrestore(&drv_data->lock, flags); msleep(10); spin_lock_irqsave(&drv_data->lock, flags); } if (!list_empty(&drv_data->queue) || drv_data->busy) status = -EBUSY; spin_unlock_irqrestore(&drv_data->lock, flags); return status; } static inline int bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data) { int status; status = bfin_sport_spi_stop_queue(drv_data); if (status) return status; destroy_workqueue(drv_data->workqueue); return 0; } static int bfin_sport_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct bfin5xx_spi_master *platform_info; struct spi_master *master; struct resource *res, *ires; struct bfin_sport_spi_master_data *drv_data; int status; platform_info = dev_get_platdata(dev); /* Allocate master with space for drv_data */ master = spi_alloc_master(dev, sizeof(*master) + 16); if (!master) { dev_err(dev, "cannot alloc spi_master\n"); return -ENOMEM; } drv_data = spi_master_get_devdata(master); drv_data->master = master; drv_data->dev = dev; drv_data->pin_req = platform_info->pin_req; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); master->bus_num = pdev->id; master->num_chipselect = platform_info->num_chipselect; master->cleanup = bfin_sport_spi_cleanup; master->setup = bfin_sport_spi_setup; master->transfer = bfin_sport_spi_transfer; /* Find and map our resources */ res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(dev, "cannot get IORESOURCE_MEM\n"); status = -ENOENT; goto out_error_get_res; } drv_data->regs = ioremap(res->start, resource_size(res)); if (drv_data->regs == NULL) { dev_err(dev, "cannot map registers\n"); status = -ENXIO; goto out_error_ioremap; } ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!ires) { dev_err(dev, "cannot get IORESOURCE_IRQ\n"); status = -ENODEV; goto out_error_get_ires; } drv_data->err_irq = ires->start; /* Initial and start queue */ status = bfin_sport_spi_init_queue(drv_data); if (status) { dev_err(dev, "problem initializing queue\n"); goto out_error_queue_alloc; } status = bfin_sport_spi_start_queue(drv_data); if (status) { dev_err(dev, "problem starting queue\n"); goto out_error_queue_alloc; } status = request_irq(drv_data->err_irq, sport_err_handler, 0, "sport_spi_err", drv_data); if (status) { dev_err(dev, "unable to request sport err irq\n"); goto out_error_irq; } status = peripheral_request_list(drv_data->pin_req, DRV_NAME); if (status) { dev_err(dev, "requesting peripherals failed\n"); goto out_error_peripheral; } /* Register with the SPI framework */ platform_set_drvdata(pdev, drv_data); status = spi_register_master(master); if (status) { dev_err(dev, "problem registering spi master\n"); goto out_error_master; } dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs); return 0; out_error_master: peripheral_free_list(drv_data->pin_req); out_error_peripheral: free_irq(drv_data->err_irq, drv_data); out_error_irq: out_error_queue_alloc: bfin_sport_spi_destroy_queue(drv_data); out_error_get_ires: iounmap(drv_data->regs); out_error_ioremap: out_error_get_res: spi_master_put(master); return status; } /* stop hardware and remove the driver */ static int bfin_sport_spi_remove(struct platform_device *pdev) { struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); int status = 0; if (!drv_data) return 0; /* Remove the queue */ status 
= bfin_sport_spi_destroy_queue(drv_data); if (status) return status; /* Disable the SSP at the peripheral and SOC level */ bfin_sport_spi_disable(drv_data); /* Disconnect from the SPI framework */ spi_unregister_master(drv_data->master); peripheral_free_list(drv_data->pin_req); return 0; } #ifdef CONFIG_PM_SLEEP static int bfin_sport_spi_suspend(struct device *dev) { struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev); int status; status = bfin_sport_spi_stop_queue(drv_data); if (status) return status; /* stop hardware */ bfin_sport_spi_disable(drv_data); return status; } static int bfin_sport_spi_resume(struct device *dev) { struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev); int status; /* Enable the SPI interface */ bfin_sport_spi_enable(drv_data); /* Start the queue running */ status = bfin_sport_spi_start_queue(drv_data); if (status) dev_err(drv_data->dev, "problem resuming queue\n"); return status; } static SIMPLE_DEV_PM_OPS(bfin_sport_spi_pm_ops, bfin_sport_spi_suspend, bfin_sport_spi_resume); #define BFIN_SPORT_SPI_PM_OPS (&bfin_sport_spi_pm_ops) #else #define BFIN_SPORT_SPI_PM_OPS NULL #endif static struct platform_driver bfin_sport_spi_driver = { .driver = { .name = DRV_NAME, .pm = BFIN_SPORT_SPI_PM_OPS, }, .probe = bfin_sport_spi_probe, .remove = bfin_sport_spi_remove, }; module_platform_driver(bfin_sport_spi_driver);
gpl-2.0
Split-Screen/android_kernel_lge_v500
drivers/mmc/host/msm_sdcc_dml.c
1342
8954
/*
 * linux/drivers/mmc/host/msm_sdcc_dml.c - Qualcomm MSM SDCC DML Driver
 *
 * Copyright (c) 2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/io.h>
#include <asm/sizes.h>
#include <mach/msm_iomap.h>
#include "msm_sdcc_dml.h"

/*
 * DML register definitions
 * (offsets are relative to the ioremap'd DML register window)
 */

/* DML config register definition */
#define DML_CONFIG 0x0000
#define PRODUCER_CRCI_DIS 0x00
#define PRODUCER_CRCI_X_SEL 0x01
#define PRODUCER_CRCI_Y_SEL 0x02
#define PRODUCER_CRCI_MSK 0x3
#define CONSUMER_CRCI_DIS (0x00 << 2)
#define CONSUMER_CRCI_X_SEL (0x01 << 2)
#define CONSUMER_CRCI_Y_SEL (0x02 << 2)
#define CONSUMER_CRCI_MSK (0x3 << 2)
#define PRODUCER_TRANS_END_EN (1 << 4)
#define BYPASS (1 << 16)
#define DIRECT_MODE (1 << 17)
#define INFINITE_CONS_TRANS (1 << 18)

/* DML status register definition */
#define DML_STATUS 0x0004
#define PRODUCER_IDLE (1 << 0)
#define CONSUMER_IDLE (1 << 16)

/*
 * DML SW RESET register definition
 * NOTE: write to this register resets the DML core.
 * All internal state information will be lost and all
 * register values will be reset as well
 */
#define DML_SW_RESET 0x0008

/*
 * DML PRODUCER START register definition
 * NOTE: A write to this register triggers the DML
 * Producer state machine. No SW register values will be
 * altered.
 */
#define DML_PRODUCER_START 0x000C

/*
 * DML CONSUMER START register definition
 * NOTE: A write to this register triggers the DML
 * Consumer state machine. No SW register values will be
 * altered.
 */
#define DML_CONSUMER_START 0x0010

/*
 * DML producer pipe logical size register definition
 * NOTE: This register holds the size of the producer pipe
 * (in units of bytes) _to_ which the peripheral can
 * keep writing data to when its the PRODUCER.
 */
#define DML_PRODUCER_PIPE_LOGICAL_SIZE 0x0014

/*
 * DML consumer pipe logical size register definition
 * NOTE: This register holds the size of the consumer pipe
 * (in units of bytes) _from_ which the peripheral
 * can keep _reading_ data from when its the CONSUMER.
 */
#define DML_CONSUMER_PIPE_LOGICAL_SIZE 0x00018

/*
 * DML PIPE ID register
 * This register holds pipe IDs that services
 * the producer and consumer side of the peripheral
 */
#define DML_PIPE_ID 0x0001C
#define PRODUCER_PIPE_ID_SHFT 0
#define PRODUCER_PIPE_ID_MSK 0x1f
#define CONSUMER_PIPE_ID_SHFT 16
#define CONSUMER_PIPE_ID_MSK (0x1f << 16)

/*
 * DML Producer trackers register definition.
 * This register is for debug purposes only. They reflect
 * the value of the producer block and transaction counters
 * when read. The values may be dynamically changing when
 * a transaction is in progress.
 */
#define DML_PRODUCER_TRACKERS 0x00020
#define PROD_BLOCK_CNT_SHFT 0
#define PROD_BLOCK_CNT_MSK 0xffff
#define PROD_TRANS_CNT_SHFT 16
#define PROD_TRANS_CNT_MSK (0xffff << 16)

/*
 * DML Producer BAM block size register definition.
 * This register holds the block size, in units of bytes,
 * associated with the Producer BAM. The DML asserts the
 * block_end side band signal to the BAM whenever the producer
 * side of the peripheral has generated the said amount of data.
 * This register value should be an integral multiple of the
 * Producer CRCI Block Size.
 */
#define DML_PRODUCER_BAM_BLOCK_SIZE 0x00024

/*
 * DML Producer BAM Transaction size definition.
 * This register holds the transaction size, in units of bytes,
 * associated with the Producer BAM. The DML asserts the transaction_end
 * side band signal to the BAM whenever the producer side of the peripheral
 * has generated the said amount of data.
 */
#define DML_PRODUCER_BAM_TRANS_SIZE 0x00028

/*
 * DML Direct mode base address definition
 * This register is used whenever the DIRECT_MODE bit
 * in config register is set.
 */
#define DML_DIRECT_MODE_BASE_ADDR 0x002C
#define PRODUCER_BASE_ADDR_BSHFT 0
#define PRODUCER_BASE_ADDR_BMSK 0xffff
#define CONSUMER_BASE_ADDR_BSHFT 16
#define CONSUMER_BASE_ADDR_BMSK (0xffff << 16)

/*
 * DML Debug and status register definitions.
 * These are the read-only registers useful for debugging.
 */
#define DML_DEBUG 0x0030
#define DML_BAM_SIDE_STATUS_1 0x0034
#define DML_BAM_SIDE_STATUS_2 0x0038

/* other definitions: logical BAM pipe sizes programmed at init time */
#define PRODUCER_PIPE_LOGICAL_SIZE 4096
#define CONSUMER_PIPE_LOGICAL_SIZE 4096

#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
/**
 * msmsdcc_dml_init - initialize the DML HW connected with the SDCC core
 * @host: the SDCC host whose DML block is to be set up
 *
 * Lazily ioremaps the DML register window (kept in host->dml_base for
 * later calls), soft-resets the DML core, programs a baseline config
 * (CRCIs disabled, bypass/direct/infinite-transfer modes off), sets the
 * producer/consumer logical pipe sizes and the BAM pipe IDs, then
 * issues mb() so all relaxed register writes are posted before return.
 *
 * Returns 0 on success, -ENOMEM if the ioremap fails.
 */
int msmsdcc_dml_init(struct msmsdcc_host *host)
{
	int rc = 0;
	u32 config = 0;
	void __iomem *dml_base;

	if (!host->dml_base) {
		host->dml_base = ioremap(host->dml_memres->start,
					resource_size(host->dml_memres));
		if (!host->dml_base) {
			/* NOTE(review): "end - start" here is resource span
			 * minus one byte; resource_size() is used for the
			 * actual mapping above. */
			pr_err("%s: DML ioremap() failed!!! phys_addr=0x%x,"
				" size=0x%x", mmc_hostname(host->mmc),
				host->dml_memres->start,
				(host->dml_memres->end -
				host->dml_memres->start));
			rc = -ENOMEM;
			goto out;
		}
		pr_info("%s: Qualcomm MSM SDCC-DML at 0x%016llx\n",
			mmc_hostname(host->mmc),
			(unsigned long long)host->dml_memres->start);
	}

	dml_base = host->dml_base;
	/* Reset the DML block */
	writel_relaxed(1, (dml_base + DML_SW_RESET));

	/* Disable the producer and consumer CRCI */
	config = (PRODUCER_CRCI_DIS | CONSUMER_CRCI_DIS);
	/*
	 * Disable the bypass mode. Bypass mode will only be used
	 * if data transfer is to happen in PIO mode and don't
	 * want the BAM interface to connect with SDCC-DML.
	 */
	config &= ~BYPASS;
	/*
	 * Disable direct mode as we don't want DML to MASTER the AHB bus.
	 * BAM connected with DML should MASTER the AHB bus.
	 */
	config &= ~DIRECT_MODE;
	/*
	 * Disable infinite mode transfer as we won't be doing any
	 * infinite size data transfers. All data transfer will be
	 * of finite data size.
	 */
	config &= ~INFINITE_CONS_TRANS;
	writel_relaxed(config, (dml_base + DML_CONFIG));

	/*
	 * Initialize the logical BAM pipe size for producer
	 * and consumer.
	 */
	writel_relaxed(PRODUCER_PIPE_LOGICAL_SIZE,
		(dml_base + DML_PRODUCER_PIPE_LOGICAL_SIZE));
	writel_relaxed(CONSUMER_PIPE_LOGICAL_SIZE,
		(dml_base + DML_CONSUMER_PIPE_LOGICAL_SIZE));

	/* Initialize Producer/consumer pipe id */
	writel_relaxed(host->sps.src_pipe_index |
		(host->sps.dest_pipe_index << CONSUMER_PIPE_ID_SHFT),
		(dml_base + DML_PIPE_ID));
	/* ensure all relaxed writes above have reached the device */
	mb();
out:
	return rc;
}

/**
 * msmsdcc_dml_reset - soft reset the DML HW
 * @host: the SDCC host whose DML block is to be reset
 *
 * Writing DML_SW_RESET clears all internal DML state (see register
 * definition above); mb() posts the write before return.
 */
void msmsdcc_dml_reset(struct msmsdcc_host *host)
{
	/* Reset the DML block */
	writel_relaxed(1, (host->dml_base + DML_SW_RESET));
	mb();
}

/**
 * msmsdcc_is_dml_busy - check if the DML HW is busy
 * @host: the SDCC host whose DML block is to be queried
 *
 * Returns true if either the producer or the consumer state machine
 * is not idle (two separate reads of DML_STATUS).
 */
bool msmsdcc_is_dml_busy(struct msmsdcc_host *host)
{
	return !(readl_relaxed(host->dml_base + DML_STATUS) & PRODUCER_IDLE) ||
		!(readl_relaxed(host->dml_base + DML_STATUS) & CONSUMER_IDLE);
}

/**
 * msmsdcc_dml_start_xfer - start a data transfer through the DML
 * @host: the SDCC host owning the DML block
 * @data: the MMC data descriptor; data->flags selects the direction
 *
 * For reads the DML is programmed as producer (CRCI-x selected, block
 * and transaction sizes set from data->blksz / host->curr.xfer_size,
 * transaction-end signalling enabled) and the producer state machine is
 * triggered.  For writes it is programmed as consumer and the consumer
 * state machine is triggered.  mb() posts all writes before return.
 */
void msmsdcc_dml_start_xfer(struct msmsdcc_host *host, struct mmc_data *data)
{
	u32 config;
	void __iomem *dml_base = host->dml_base;

	if (data->flags & MMC_DATA_READ) {
		/* Read operation: configure DML for producer operation */
		/* Set producer CRCI-x and disable consumer CRCI */
		config = readl_relaxed(dml_base + DML_CONFIG);
		config = (config & ~PRODUCER_CRCI_MSK) | PRODUCER_CRCI_X_SEL;
		config = (config & ~CONSUMER_CRCI_MSK) | CONSUMER_CRCI_DIS;
		writel_relaxed(config, (dml_base + DML_CONFIG));

		/* Set the Producer BAM block size */
		writel_relaxed(data->blksz,
			(dml_base + DML_PRODUCER_BAM_BLOCK_SIZE));

		/* Set Producer BAM Transaction size */
		writel_relaxed(host->curr.xfer_size,
			(dml_base + DML_PRODUCER_BAM_TRANS_SIZE));
		/* Set Producer Transaction End bit */
		writel_relaxed((readl_relaxed(dml_base + DML_CONFIG)
			| PRODUCER_TRANS_END_EN),
			(dml_base + DML_CONFIG));
		/* Trigger producer */
		writel_relaxed(1, (dml_base + DML_PRODUCER_START));
	} else {
		/* Write operation: configure DML for consumer operation */
		/* Set consumer CRCI-x and disable producer CRCI*/
		config = readl_relaxed(dml_base + DML_CONFIG);
		config = (config & ~CONSUMER_CRCI_MSK) | CONSUMER_CRCI_X_SEL;
		config = (config & ~PRODUCER_CRCI_MSK) | PRODUCER_CRCI_DIS;
		writel_relaxed(config, (dml_base + DML_CONFIG));
		/* Clear Producer Transaction End bit */
		writel_relaxed((readl_relaxed(dml_base + DML_CONFIG)
			& ~PRODUCER_TRANS_END_EN),
			(dml_base + DML_CONFIG));
		/* Trigger consumer */
		writel_relaxed(1, (dml_base + DML_CONSUMER_START));
	}
	mb();
}

/**
 * msmsdcc_dml_exit - deinitialize the DML HW connected with the SDCC core
 * @host: the SDCC host whose DML block is to be torn down
 *
 * Leaves the DML core in reset and unmaps the register window mapped
 * by msmsdcc_dml_init().
 */
void msmsdcc_dml_exit(struct msmsdcc_host *host)
{
	/* Put DML block in reset state before exiting */
	msmsdcc_dml_reset(host);
	iounmap(host->dml_base);
}
#endif /* CONFIG_MMC_MSM_SPS_SUPPORT */
gpl-2.0
wgoossens/linux-nios2
drivers/pinctrl/sh-pfc/pfc-sh7722.c
1598
52379
#include <linux/init.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <cpu/sh7722.h> #include "sh_pfc.h" enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA, PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA, PTC7_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC0_DATA, PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA, PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE1_DATA, PTE0_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA, PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA, PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA, PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, PTJ1_DATA, PTJ0_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA, PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA, PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA, PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA, PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA, PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA, PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA, PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA, PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA, PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA, PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA, PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA, PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA, PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA, PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA, PINMUX_DATA_END, PINMUX_INPUT_BEGIN, PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN, PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN, PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN, PTB3_IN, PTB2_IN, PTB1_IN, 
PTB0_IN, PTC7_IN, PTC5_IN, PTC4_IN, PTC3_IN, PTC2_IN, PTC0_IN, PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN, PTD3_IN, PTD2_IN, PTD1_IN, PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN, PTE1_IN, PTE0_IN, PTF6_IN, PTF5_IN, PTF4_IN, PTF3_IN, PTF2_IN, PTF1_IN, PTH6_IN, PTH5_IN, PTH1_IN, PTH0_IN, PTJ1_IN, PTJ0_IN, PTK6_IN, PTK5_IN, PTK4_IN, PTK3_IN, PTK2_IN, PTK0_IN, PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN, PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN, PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN, PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN, PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN, PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN, PTQ5_IN, PTQ4_IN, PTQ3_IN, PTQ2_IN, PTQ0_IN, PTR2_IN, PTS4_IN, PTS2_IN, PTS1_IN, PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN, PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN, PTW6_IN, PTW4_IN, PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN, PTX6_IN, PTX5_IN, PTX4_IN, PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN, PTY5_IN, PTY4_IN, PTY3_IN, PTY2_IN, PTY0_IN, PTZ5_IN, PTZ4_IN, PTZ3_IN, PTZ2_IN, PTZ1_IN, PINMUX_INPUT_END, PINMUX_OUTPUT_BEGIN, PTA7_OUT, PTA5_OUT, PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT, PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT, PTC4_OUT, PTC3_OUT, PTC2_OUT, PTC0_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT, PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT, PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT, PTE1_OUT, PTE0_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT, PTF3_OUT, PTF2_OUT, PTF0_OUT, PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT, PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT, PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT, PTJ7_OUT, PTJ6_OUT, PTJ5_OUT, PTJ1_OUT, PTJ0_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT, PTK3_OUT, PTK1_OUT, PTK0_OUT, PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT, PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT, PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT, PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT, PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT, PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT, PTQ6_OUT, PTQ5_OUT, PTQ4_OUT, PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT, PTR4_OUT, PTR3_OUT, PTR1_OUT, PTR0_OUT, PTS3_OUT, PTS2_OUT, PTS0_OUT, PTT4_OUT, PTT3_OUT, PTT2_OUT, 
PTT0_OUT, PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU0_OUT, PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT, PTW5_OUT, PTW4_OUT, PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT, PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT, PTY5_OUT, PTY4_OUT, PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT, PINMUX_OUTPUT_END, PINMUX_MARK_BEGIN, SCIF0_TXD_MARK, SCIF0_RXD_MARK, SCIF0_RTS_MARK, SCIF0_CTS_MARK, SCIF0_SCK_MARK, SCIF1_TXD_MARK, SCIF1_RXD_MARK, SCIF1_RTS_MARK, SCIF1_CTS_MARK, SCIF1_SCK_MARK, SCIF2_TXD_MARK, SCIF2_RXD_MARK, SCIF2_RTS_MARK, SCIF2_CTS_MARK, SCIF2_SCK_MARK, SIOTXD_MARK, SIORXD_MARK, SIOD_MARK, SIOSTRB0_MARK, SIOSTRB1_MARK, SIOSCK_MARK, SIOMCK_MARK, VIO_D15_MARK, VIO_D14_MARK, VIO_D13_MARK, VIO_D12_MARK, VIO_D11_MARK, VIO_D10_MARK, VIO_D9_MARK, VIO_D8_MARK, VIO_D7_MARK, VIO_D6_MARK, VIO_D5_MARK, VIO_D4_MARK, VIO_D3_MARK, VIO_D2_MARK, VIO_D1_MARK, VIO_D0_MARK, VIO_CLK_MARK, VIO_VD_MARK, VIO_HD_MARK, VIO_FLD_MARK, VIO_CKO_MARK, VIO_STEX_MARK, VIO_STEM_MARK, VIO_VD2_MARK, VIO_HD2_MARK, VIO_CLK2_MARK, LCDD23_MARK, LCDD22_MARK, LCDD21_MARK, LCDD20_MARK, LCDD19_MARK, LCDD18_MARK, LCDD17_MARK, LCDD16_MARK, LCDD15_MARK, LCDD14_MARK, LCDD13_MARK, LCDD12_MARK, LCDD11_MARK, LCDD10_MARK, LCDD9_MARK, LCDD8_MARK, LCDD7_MARK, LCDD6_MARK, LCDD5_MARK, LCDD4_MARK, LCDD3_MARK, LCDD2_MARK, LCDD1_MARK, LCDD0_MARK, LCDLCLK_MARK, LCDDON_MARK, LCDVCPWC_MARK, LCDVEPWC_MARK, LCDVSYN_MARK, LCDDCK_MARK, LCDHSYN_MARK, LCDDISP_MARK, LCDRS_MARK, LCDCS_MARK, LCDWR_MARK, LCDRD_MARK, LCDDON2_MARK, LCDVCPWC2_MARK, LCDVEPWC2_MARK, LCDVSYN2_MARK, LCDCS2_MARK, IOIS16_MARK, A25_MARK, A24_MARK, A23_MARK, A22_MARK, BS_MARK, CS6B_CE1B_MARK, WAIT_MARK, CS6A_CE2B_MARK, HPD63_MARK, HPD62_MARK, HPD61_MARK, HPD60_MARK, HPD59_MARK, HPD58_MARK, HPD57_MARK, HPD56_MARK, HPD55_MARK, HPD54_MARK, HPD53_MARK, HPD52_MARK, HPD51_MARK, HPD50_MARK, HPD49_MARK, HPD48_MARK, HPDQM7_MARK, HPDQM6_MARK, HPDQM5_MARK, HPDQM4_MARK, IRQ0_MARK, IRQ1_MARK, IRQ2_MARK, IRQ3_MARK, IRQ4_MARK, IRQ5_MARK, IRQ6_MARK, IRQ7_MARK, 
SDHICD_MARK, SDHIWP_MARK, SDHID3_MARK, SDHID2_MARK, SDHID1_MARK, SDHID0_MARK, SDHICMD_MARK, SDHICLK_MARK, SIUAOLR_MARK, SIUAOBT_MARK, SIUAISLD_MARK, SIUAILR_MARK, SIUAIBT_MARK, SIUAOSLD_MARK, SIUMCKA_MARK, SIUFCKA_MARK, SIUBOLR_MARK, SIUBOBT_MARK, SIUBISLD_MARK, SIUBILR_MARK, SIUBIBT_MARK, SIUBOSLD_MARK, SIUMCKB_MARK, SIUFCKB_MARK, AUDSYNC_MARK, AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK, DACK_MARK, DREQ0_MARK, DV_CLKI_MARK, DV_CLK_MARK, DV_HSYNC_MARK, DV_VSYNC_MARK, DV_D15_MARK, DV_D14_MARK, DV_D13_MARK, DV_D12_MARK, DV_D11_MARK, DV_D10_MARK, DV_D9_MARK, DV_D8_MARK, DV_D7_MARK, DV_D6_MARK, DV_D5_MARK, DV_D4_MARK, DV_D3_MARK, DV_D2_MARK, DV_D1_MARK, DV_D0_MARK, STATUS0_MARK, PDSTATUS_MARK, SIOF0_MCK_MARK, SIOF0_SCK_MARK, SIOF0_SYNC_MARK, SIOF0_SS1_MARK, SIOF0_SS2_MARK, SIOF0_TXD_MARK, SIOF0_RXD_MARK, SIOF1_MCK_MARK, SIOF1_SCK_MARK, SIOF1_SYNC_MARK, SIOF1_SS1_MARK, SIOF1_SS2_MARK, SIOF1_TXD_MARK, SIOF1_RXD_MARK, SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK, TS_SDAT_MARK, TS_SCK_MARK, TS_SDEN_MARK, TS_SPSYNC_MARK, IRDA_IN_MARK, IRDA_OUT_MARK, TPUTO_MARK, FCE_MARK, NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK, NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK, FCDE_MARK, FOE_MARK, FSC_MARK, FWE_MARK, FRB_MARK, KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK, KEYIN4_MARK, KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK, KEYOUT4_IN6_MARK, KEYOUT5_IN5_MARK, PINMUX_MARK_END, PINMUX_FUNCTION_BEGIN, VIO_D7_SCIF1_SCK, VIO_D6_SCIF1_RXD, VIO_D5_SCIF1_TXD, VIO_D4, VIO_D3, VIO_D2, VIO_D1, VIO_D0_LCDLCLK, HPD55, HPD54, HPD53, HPD52, HPD51, HPD50, HPD49, HPD48, IOIS16, HPDQM7, HPDQM6, HPDQM5, HPDQM4, SDHICD, SDHIWP, SDHID3, IRQ2_SDHID2, SDHID1, SDHID0, SDHICMD, SDHICLK, A25, A24, A23, A22, IRQ5, IRQ4_BS, PTF6, SIOSCK_SIUBOBT, SIOSTRB1_SIUBOLR, SIOSTRB0_SIUBIBT, SIOD_SIUBILR, SIORXD_SIUBISLD, SIOTXD_SIUBOSLD, AUDSYNC, AUDATA3, AUDATA2, AUDATA1, AUDATA0, LCDVCPWC_LCDVCPWC2, LCDVSYN2_DACK, LCDVSYN, LCDDISP_LCDRS, LCDHSYN_LCDCS, LCDDON_LCDDON2, LCDD17_DV_HSYNC, LCDD16_DV_VSYNC, 
STATUS0, PDSTATUS, IRQ1, IRQ0, SIUAILR_SIOF1_SS2, SIUAIBT_SIOF1_SS1, SIUAOLR_SIOF1_SYNC, SIUAOBT_SIOF1_SCK, SIUAISLD_SIOF1_RXD, SIUAOSLD_SIOF1_TXD, PTK0, LCDD15_DV_D15, LCDD14_DV_D14, LCDD13_DV_D13, LCDD12_DV_D12, LCDD11_DV_D11, LCDD10_DV_D10, LCDD9_DV_D9, LCDD8_DV_D8, LCDD7_DV_D7, LCDD6_DV_D6, LCDD5_DV_D5, LCDD4_DV_D4, LCDD3_DV_D3, LCDD2_DV_D2, LCDD1_DV_D1, LCDD0_DV_D0, HPD63, HPD62, HPD61, HPD60, HPD59, HPD58, HPD57, HPD56, SIOF0_SS2_SIM_RST, SIOF0_SS1_TS_SPSYNC, SIOF0_SYNC_TS_SDEN, SIOF0_SCK_TS_SCK, PTQ2, PTQ1, PTQ0, LCDRD, CS6B_CE1B_LCDCS2, WAIT, LCDDCK_LCDWR, LCDVEPWC_LCDVEPWC2, SCIF0_CTS_SIUAISPD, SCIF0_RTS_SIUAOSPD, SCIF0_SCK_TPUTO, SCIF0_RXD, SCIF0_TXD, FOE_VIO_VD2, FWE, FSC, DREQ0, FCDE, NAF2_VIO_D10, NAF1_VIO_D9, NAF0_VIO_D8, FRB_VIO_CLK2, FCE_VIO_HD2, NAF7_VIO_D15, NAF6_VIO_D14, NAF5_VIO_D13, NAF4_VIO_D12, NAF3_VIO_D11, VIO_FLD_SCIF2_CTS, VIO_CKO_SCIF2_RTS, VIO_STEX_SCIF2_SCK, VIO_STEM_SCIF2_TXD, VIO_HD_SCIF2_RXD, VIO_VD_SCIF1_CTS, VIO_CLK_SCIF1_RTS, CS6A_CE2B, LCDD23, LCDD22, LCDD21, LCDD20, LCDD19_DV_CLKI, LCDD18_DV_CLK, KEYOUT5_IN5, KEYOUT4_IN6, KEYOUT3, KEYOUT2, KEYOUT1, KEYOUT0, KEYIN4_IRQ7, KEYIN3, KEYIN2, KEYIN1, KEYIN0_IRQ6, PSA15_KEYIN0, PSA15_IRQ6, PSA14_KEYIN4, PSA14_IRQ7, PSA9_IRQ4, PSA9_BS, PSA4_IRQ2, PSA4_SDHID2, PSB15_SIOTXD, PSB15_SIUBOSLD, PSB14_SIORXD, PSB14_SIUBISLD, PSB13_SIOD, PSB13_SIUBILR, PSB12_SIOSTRB0, PSB12_SIUBIBT, PSB11_SIOSTRB1, PSB11_SIUBOLR, PSB10_SIOSCK, PSB10_SIUBOBT, PSB9_SIOMCK, PSB9_SIUMCKB, PSB8_SIOF0_MCK, PSB8_IRQ3, PSB7_SIOF0_TXD, PSB7_IRDA_OUT, PSB6_SIOF0_RXD, PSB6_IRDA_IN, PSB5_SIOF0_SCK, PSB5_TS_SCK, PSB4_SIOF0_SYNC, PSB4_TS_SDEN, PSB3_SIOF0_SS1, PSB3_TS_SPSYNC, PSB2_SIOF0_SS2, PSB2_SIM_RST, PSB1_SIUMCKA, PSB1_SIOF1_MCK, PSB0_SIUAOSLD, PSB0_SIOF1_TXD, PSC15_SIUAISLD, PSC15_SIOF1_RXD, PSC14_SIUAOBT, PSC14_SIOF1_SCK, PSC13_SIUAOLR, PSC13_SIOF1_SYNC, PSC12_SIUAIBT, PSC12_SIOF1_SS1, PSC11_SIUAILR, PSC11_SIOF1_SS2, PSC0_NAF, PSC0_VIO, PSD13_VIO, PSD13_SCIF2, PSD12_VIO, PSD12_SCIF1, PSD11_VIO, PSD11_SCIF1, 
PSD10_VIO_D0, PSD10_LCDLCLK, PSD9_SIOMCK_SIUMCKB, PSD9_SIUFCKB, PSD8_SCIF0_SCK, PSD8_TPUTO, PSD7_SCIF0_RTS, PSD7_SIUAOSPD, PSD6_SCIF0_CTS, PSD6_SIUAISPD, PSD5_CS6B_CE1B, PSD5_LCDCS2, PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2, PSD2_LCDDON, PSD2_LCDDON2, PSD0_LCDD19_LCDD0, PSD0_DV, PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D, PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK, PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT, PSE12_LCDVSYN2, PSE12_DACK, PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA, PSE3_FLCTL, PSE3_VIO, PSE2_NAF2, PSE2_VIO_D10, PSE1_NAF1, PSE1_VIO_D9, PSE0_NAF0, PSE0_VIO_D8, HIZA14_KEYSC, HIZA14_HIZ, HIZA10_NAF, HIZA10_HIZ, HIZA9_VIO, HIZA9_HIZ, HIZA8_LCDC, HIZA8_HIZ, HIZA7_LCDC, HIZA7_HIZ, HIZA6_LCDC, HIZA6_HIZ, HIZB4_SIUA, HIZB4_HIZ, HIZB1_VIO, HIZB1_HIZ, HIZB0_VIO, HIZB0_HIZ, HIZC15_IRQ7, HIZC15_HIZ, HIZC14_IRQ6, HIZC14_HIZ, HIZC13_IRQ5, HIZC13_HIZ, HIZC12_IRQ4, HIZC12_HIZ, HIZC11_IRQ3, HIZC11_HIZ, HIZC10_IRQ2, HIZC10_HIZ, HIZC9_IRQ1, HIZC9_HIZ, HIZC8_IRQ0, HIZC8_HIZ, MSELB9_VIO, MSELB9_VIO2, MSELB8_RGB, MSELB8_SYS, PINMUX_FUNCTION_END, }; static const u16 pinmux_data[] = { /* PTA */ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT), PINMUX_DATA(PTA6_DATA, PTA6_IN), PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT), PINMUX_DATA(PTA4_DATA, PTA4_IN), PINMUX_DATA(PTA3_DATA, PTA3_IN), PINMUX_DATA(PTA2_DATA, PTA2_IN), PINMUX_DATA(PTA1_DATA, PTA1_IN), PINMUX_DATA(PTA0_DATA, PTA0_IN), /* PTB */ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT), PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT), PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT), PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT), PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT), PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT), PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT), PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT), /* PTC */ PINMUX_DATA(PTC7_DATA, PTC7_IN), PINMUX_DATA(PTC5_DATA, PTC5_IN), PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT), PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT), PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT), PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT), /* PTD */ 
PINMUX_DATA(PTD7_DATA, PTD7_IN), PINMUX_DATA(PTD6_DATA, PTD6_OUT, PTD6_IN), PINMUX_DATA(PTD5_DATA, PTD5_OUT, PTD5_IN), PINMUX_DATA(PTD4_DATA, PTD4_OUT, PTD4_IN), PINMUX_DATA(PTD3_DATA, PTD3_OUT, PTD3_IN), PINMUX_DATA(PTD2_DATA, PTD2_OUT, PTD2_IN), PINMUX_DATA(PTD1_DATA, PTD1_OUT, PTD1_IN), PINMUX_DATA(PTD0_DATA, PTD0_OUT), /* PTE */ PINMUX_DATA(PTE7_DATA, PTE7_OUT, PTE7_IN), PINMUX_DATA(PTE6_DATA, PTE6_OUT, PTE6_IN), PINMUX_DATA(PTE5_DATA, PTE5_OUT, PTE5_IN), PINMUX_DATA(PTE4_DATA, PTE4_OUT, PTE4_IN), PINMUX_DATA(PTE1_DATA, PTE1_OUT, PTE1_IN), PINMUX_DATA(PTE0_DATA, PTE0_OUT, PTE0_IN), /* PTF */ PINMUX_DATA(PTF6_DATA, PTF6_OUT, PTF6_IN), PINMUX_DATA(PTF5_DATA, PTF5_OUT, PTF5_IN), PINMUX_DATA(PTF4_DATA, PTF4_OUT, PTF4_IN), PINMUX_DATA(PTF3_DATA, PTF3_OUT, PTF3_IN), PINMUX_DATA(PTF2_DATA, PTF2_OUT, PTF2_IN), PINMUX_DATA(PTF1_DATA, PTF1_IN), PINMUX_DATA(PTF0_DATA, PTF0_OUT), /* PTG */ PINMUX_DATA(PTG4_DATA, PTG4_OUT), PINMUX_DATA(PTG3_DATA, PTG3_OUT), PINMUX_DATA(PTG2_DATA, PTG2_OUT), PINMUX_DATA(PTG1_DATA, PTG1_OUT), PINMUX_DATA(PTG0_DATA, PTG0_OUT), /* PTH */ PINMUX_DATA(PTH7_DATA, PTH7_OUT), PINMUX_DATA(PTH6_DATA, PTH6_OUT, PTH6_IN), PINMUX_DATA(PTH5_DATA, PTH5_OUT, PTH5_IN), PINMUX_DATA(PTH4_DATA, PTH4_OUT), PINMUX_DATA(PTH3_DATA, PTH3_OUT), PINMUX_DATA(PTH2_DATA, PTH2_OUT), PINMUX_DATA(PTH1_DATA, PTH1_OUT, PTH1_IN), PINMUX_DATA(PTH0_DATA, PTH0_OUT, PTH0_IN), /* PTJ */ PINMUX_DATA(PTJ7_DATA, PTJ7_OUT), PINMUX_DATA(PTJ6_DATA, PTJ6_OUT), PINMUX_DATA(PTJ5_DATA, PTJ5_OUT), PINMUX_DATA(PTJ1_DATA, PTJ1_OUT, PTJ1_IN), PINMUX_DATA(PTJ0_DATA, PTJ0_OUT, PTJ0_IN), /* PTK */ PINMUX_DATA(PTK6_DATA, PTK6_OUT, PTK6_IN), PINMUX_DATA(PTK5_DATA, PTK5_OUT, PTK5_IN), PINMUX_DATA(PTK4_DATA, PTK4_OUT, PTK4_IN), PINMUX_DATA(PTK3_DATA, PTK3_OUT, PTK3_IN), PINMUX_DATA(PTK2_DATA, PTK2_IN), PINMUX_DATA(PTK1_DATA, PTK1_OUT), PINMUX_DATA(PTK0_DATA, PTK0_OUT, PTK0_IN), /* PTL */ PINMUX_DATA(PTL7_DATA, PTL7_OUT, PTL7_IN), PINMUX_DATA(PTL6_DATA, PTL6_OUT, PTL6_IN), PINMUX_DATA(PTL5_DATA, 
PTL5_OUT, PTL5_IN), PINMUX_DATA(PTL4_DATA, PTL4_OUT, PTL4_IN), PINMUX_DATA(PTL3_DATA, PTL3_OUT, PTL3_IN), PINMUX_DATA(PTL2_DATA, PTL2_OUT, PTL2_IN), PINMUX_DATA(PTL1_DATA, PTL1_OUT, PTL1_IN), PINMUX_DATA(PTL0_DATA, PTL0_OUT, PTL0_IN), /* PTM */ PINMUX_DATA(PTM7_DATA, PTM7_OUT, PTM7_IN), PINMUX_DATA(PTM6_DATA, PTM6_OUT, PTM6_IN), PINMUX_DATA(PTM5_DATA, PTM5_OUT, PTM5_IN), PINMUX_DATA(PTM4_DATA, PTM4_OUT, PTM4_IN), PINMUX_DATA(PTM3_DATA, PTM3_OUT, PTM3_IN), PINMUX_DATA(PTM2_DATA, PTM2_OUT, PTM2_IN), PINMUX_DATA(PTM1_DATA, PTM1_OUT, PTM1_IN), PINMUX_DATA(PTM0_DATA, PTM0_OUT, PTM0_IN), /* PTN */ PINMUX_DATA(PTN7_DATA, PTN7_OUT, PTN7_IN), PINMUX_DATA(PTN6_DATA, PTN6_OUT, PTN6_IN), PINMUX_DATA(PTN5_DATA, PTN5_OUT, PTN5_IN), PINMUX_DATA(PTN4_DATA, PTN4_OUT, PTN4_IN), PINMUX_DATA(PTN3_DATA, PTN3_OUT, PTN3_IN), PINMUX_DATA(PTN2_DATA, PTN2_OUT, PTN2_IN), PINMUX_DATA(PTN1_DATA, PTN1_OUT, PTN1_IN), PINMUX_DATA(PTN0_DATA, PTN0_OUT, PTN0_IN), /* PTQ */ PINMUX_DATA(PTQ6_DATA, PTQ6_OUT), PINMUX_DATA(PTQ5_DATA, PTQ5_OUT, PTQ5_IN), PINMUX_DATA(PTQ4_DATA, PTQ4_OUT, PTQ4_IN), PINMUX_DATA(PTQ3_DATA, PTQ3_OUT, PTQ3_IN), PINMUX_DATA(PTQ2_DATA, PTQ2_IN), PINMUX_DATA(PTQ1_DATA, PTQ1_OUT), PINMUX_DATA(PTQ0_DATA, PTQ0_OUT, PTQ0_IN), /* PTR */ PINMUX_DATA(PTR4_DATA, PTR4_OUT), PINMUX_DATA(PTR3_DATA, PTR3_OUT), PINMUX_DATA(PTR2_DATA, PTR2_IN), PINMUX_DATA(PTR1_DATA, PTR1_OUT), PINMUX_DATA(PTR0_DATA, PTR0_OUT), /* PTS */ PINMUX_DATA(PTS4_DATA, PTS4_IN), PINMUX_DATA(PTS3_DATA, PTS3_OUT), PINMUX_DATA(PTS2_DATA, PTS2_OUT, PTS2_IN), PINMUX_DATA(PTS1_DATA, PTS1_IN), PINMUX_DATA(PTS0_DATA, PTS0_OUT), /* PTT */ PINMUX_DATA(PTT4_DATA, PTT4_OUT, PTT4_IN), PINMUX_DATA(PTT3_DATA, PTT3_OUT, PTT3_IN), PINMUX_DATA(PTT2_DATA, PTT2_OUT, PTT2_IN), PINMUX_DATA(PTT1_DATA, PTT1_IN), PINMUX_DATA(PTT0_DATA, PTT0_OUT), /* PTU */ PINMUX_DATA(PTU4_DATA, PTU4_OUT, PTU4_IN), PINMUX_DATA(PTU3_DATA, PTU3_OUT, PTU3_IN), PINMUX_DATA(PTU2_DATA, PTU2_OUT, PTU2_IN), PINMUX_DATA(PTU1_DATA, PTU1_IN), PINMUX_DATA(PTU0_DATA, 
PTU0_OUT, PTU0_IN), /* PTV */ PINMUX_DATA(PTV4_DATA, PTV4_OUT, PTV4_IN), PINMUX_DATA(PTV3_DATA, PTV3_OUT, PTV3_IN), PINMUX_DATA(PTV2_DATA, PTV2_OUT, PTV2_IN), PINMUX_DATA(PTV1_DATA, PTV1_OUT, PTV1_IN), PINMUX_DATA(PTV0_DATA, PTV0_OUT, PTV0_IN), /* PTW */ PINMUX_DATA(PTW6_DATA, PTW6_IN), PINMUX_DATA(PTW5_DATA, PTW5_OUT), PINMUX_DATA(PTW4_DATA, PTW4_OUT, PTW4_IN), PINMUX_DATA(PTW3_DATA, PTW3_OUT, PTW3_IN), PINMUX_DATA(PTW2_DATA, PTW2_OUT, PTW2_IN), PINMUX_DATA(PTW1_DATA, PTW1_OUT, PTW1_IN), PINMUX_DATA(PTW0_DATA, PTW0_OUT, PTW0_IN), /* PTX */ PINMUX_DATA(PTX6_DATA, PTX6_OUT, PTX6_IN), PINMUX_DATA(PTX5_DATA, PTX5_OUT, PTX5_IN), PINMUX_DATA(PTX4_DATA, PTX4_OUT, PTX4_IN), PINMUX_DATA(PTX3_DATA, PTX3_OUT, PTX3_IN), PINMUX_DATA(PTX2_DATA, PTX2_OUT, PTX2_IN), PINMUX_DATA(PTX1_DATA, PTX1_OUT, PTX1_IN), PINMUX_DATA(PTX0_DATA, PTX0_OUT, PTX0_IN), /* PTY */ PINMUX_DATA(PTY5_DATA, PTY5_OUT, PTY5_IN), PINMUX_DATA(PTY4_DATA, PTY4_OUT, PTY4_IN), PINMUX_DATA(PTY3_DATA, PTY3_OUT, PTY3_IN), PINMUX_DATA(PTY2_DATA, PTY2_OUT, PTY2_IN), PINMUX_DATA(PTY1_DATA, PTY1_OUT), PINMUX_DATA(PTY0_DATA, PTY0_OUT, PTY0_IN), /* PTZ */ PINMUX_DATA(PTZ5_DATA, PTZ5_IN), PINMUX_DATA(PTZ4_DATA, PTZ4_IN), PINMUX_DATA(PTZ3_DATA, PTZ3_IN), PINMUX_DATA(PTZ2_DATA, PTZ2_IN), PINMUX_DATA(PTZ1_DATA, PTZ1_IN), /* SCIF0 */ PINMUX_DATA(SCIF0_TXD_MARK, SCIF0_TXD), PINMUX_DATA(SCIF0_RXD_MARK, SCIF0_RXD), PINMUX_DATA(SCIF0_RTS_MARK, PSD7_SCIF0_RTS, SCIF0_RTS_SIUAOSPD), PINMUX_DATA(SCIF0_CTS_MARK, PSD6_SCIF0_CTS, SCIF0_CTS_SIUAISPD), PINMUX_DATA(SCIF0_SCK_MARK, PSD8_SCIF0_SCK, SCIF0_SCK_TPUTO), /* SCIF1 */ PINMUX_DATA(SCIF1_TXD_MARK, PSD11_SCIF1, VIO_D5_SCIF1_TXD), PINMUX_DATA(SCIF1_RXD_MARK, PSD11_SCIF1, VIO_D6_SCIF1_RXD), PINMUX_DATA(SCIF1_RTS_MARK, PSD12_SCIF1, VIO_CLK_SCIF1_RTS), PINMUX_DATA(SCIF1_CTS_MARK, PSD12_SCIF1, VIO_VD_SCIF1_CTS), PINMUX_DATA(SCIF1_SCK_MARK, PSD11_SCIF1, VIO_D7_SCIF1_SCK), /* SCIF2 */ PINMUX_DATA(SCIF2_TXD_MARK, PSD13_SCIF2, VIO_STEM_SCIF2_TXD), PINMUX_DATA(SCIF2_RXD_MARK, PSD13_SCIF2, 
VIO_HD_SCIF2_RXD), PINMUX_DATA(SCIF2_RTS_MARK, PSD13_SCIF2, VIO_CKO_SCIF2_RTS), PINMUX_DATA(SCIF2_CTS_MARK, PSD13_SCIF2, VIO_FLD_SCIF2_CTS), PINMUX_DATA(SCIF2_SCK_MARK, PSD13_SCIF2, VIO_STEX_SCIF2_SCK), /* SIO */ PINMUX_DATA(SIOTXD_MARK, PSB15_SIOTXD, SIOTXD_SIUBOSLD), PINMUX_DATA(SIORXD_MARK, PSB14_SIORXD, SIORXD_SIUBISLD), PINMUX_DATA(SIOD_MARK, PSB13_SIOD, SIOD_SIUBILR), PINMUX_DATA(SIOSTRB0_MARK, PSB12_SIOSTRB0, SIOSTRB0_SIUBIBT), PINMUX_DATA(SIOSTRB1_MARK, PSB11_SIOSTRB1, SIOSTRB1_SIUBOLR), PINMUX_DATA(SIOSCK_MARK, PSB10_SIOSCK, SIOSCK_SIUBOBT), PINMUX_DATA(SIOMCK_MARK, PSD9_SIOMCK_SIUMCKB, PSB9_SIOMCK, PTF6), /* CEU */ PINMUX_DATA(VIO_D15_MARK, PSC0_VIO, HIZA10_NAF, NAF7_VIO_D15), PINMUX_DATA(VIO_D14_MARK, PSC0_VIO, HIZA10_NAF, NAF6_VIO_D14), PINMUX_DATA(VIO_D13_MARK, PSC0_VIO, HIZA10_NAF, NAF5_VIO_D13), PINMUX_DATA(VIO_D12_MARK, PSC0_VIO, HIZA10_NAF, NAF4_VIO_D12), PINMUX_DATA(VIO_D11_MARK, PSC0_VIO, HIZA10_NAF, NAF3_VIO_D11), PINMUX_DATA(VIO_D10_MARK, PSE2_VIO_D10, HIZB0_VIO, NAF2_VIO_D10), PINMUX_DATA(VIO_D9_MARK, PSE1_VIO_D9, HIZB0_VIO, NAF1_VIO_D9), PINMUX_DATA(VIO_D8_MARK, PSE0_VIO_D8, HIZB0_VIO, NAF0_VIO_D8), PINMUX_DATA(VIO_D7_MARK, PSD11_VIO, VIO_D7_SCIF1_SCK), PINMUX_DATA(VIO_D6_MARK, PSD11_VIO, VIO_D6_SCIF1_RXD), PINMUX_DATA(VIO_D5_MARK, PSD11_VIO, VIO_D5_SCIF1_TXD), PINMUX_DATA(VIO_D4_MARK, VIO_D4), PINMUX_DATA(VIO_D3_MARK, VIO_D3), PINMUX_DATA(VIO_D2_MARK, VIO_D2), PINMUX_DATA(VIO_D1_MARK, VIO_D1), PINMUX_DATA(VIO_D0_MARK, PSD10_VIO_D0, VIO_D0_LCDLCLK), PINMUX_DATA(VIO_CLK_MARK, PSD12_VIO, MSELB9_VIO, VIO_CLK_SCIF1_RTS), PINMUX_DATA(VIO_VD_MARK, PSD12_VIO, MSELB9_VIO, VIO_VD_SCIF1_CTS), PINMUX_DATA(VIO_HD_MARK, PSD13_VIO, MSELB9_VIO, VIO_HD_SCIF2_RXD), PINMUX_DATA(VIO_FLD_MARK, PSD13_VIO, HIZA9_VIO, VIO_FLD_SCIF2_CTS), PINMUX_DATA(VIO_CKO_MARK, PSD13_VIO, HIZA9_VIO, VIO_CKO_SCIF2_RTS), PINMUX_DATA(VIO_STEX_MARK, PSD13_VIO, HIZA9_VIO, VIO_STEX_SCIF2_SCK), PINMUX_DATA(VIO_STEM_MARK, PSD13_VIO, HIZA9_VIO, VIO_STEM_SCIF2_TXD), 
PINMUX_DATA(VIO_VD2_MARK, PSE3_VIO, MSELB9_VIO2, HIZB0_VIO, FOE_VIO_VD2), PINMUX_DATA(VIO_HD2_MARK, PSE3_VIO, MSELB9_VIO2, HIZB1_VIO, FCE_VIO_HD2), PINMUX_DATA(VIO_CLK2_MARK, PSE3_VIO, MSELB9_VIO2, HIZB1_VIO, FRB_VIO_CLK2), /* LCDC */ PINMUX_DATA(LCDD23_MARK, HIZA8_LCDC, LCDD23), PINMUX_DATA(LCDD22_MARK, HIZA8_LCDC, LCDD22), PINMUX_DATA(LCDD21_MARK, HIZA8_LCDC, LCDD21), PINMUX_DATA(LCDD20_MARK, HIZA8_LCDC, LCDD20), PINMUX_DATA(LCDD19_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD19_DV_CLKI), PINMUX_DATA(LCDD18_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD18_DV_CLK), PINMUX_DATA(LCDD17_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD17_DV_HSYNC), PINMUX_DATA(LCDD16_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD16_DV_VSYNC), PINMUX_DATA(LCDD15_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD15_DV_D15), PINMUX_DATA(LCDD14_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD14_DV_D14), PINMUX_DATA(LCDD13_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD13_DV_D13), PINMUX_DATA(LCDD12_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD12_DV_D12), PINMUX_DATA(LCDD11_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD11_DV_D11), PINMUX_DATA(LCDD10_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD10_DV_D10), PINMUX_DATA(LCDD9_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD9_DV_D9), PINMUX_DATA(LCDD8_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD8_DV_D8), PINMUX_DATA(LCDD7_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD7_DV_D7), PINMUX_DATA(LCDD6_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD6_DV_D6), PINMUX_DATA(LCDD5_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD5_DV_D5), PINMUX_DATA(LCDD4_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD4_DV_D4), PINMUX_DATA(LCDD3_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD3_DV_D3), PINMUX_DATA(LCDD2_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD2_DV_D2), PINMUX_DATA(LCDD1_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD1_DV_D1), PINMUX_DATA(LCDD0_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD0_DV_D0), PINMUX_DATA(LCDLCLK_MARK, PSD10_LCDLCLK, VIO_D0_LCDLCLK), /* Main LCD */ PINMUX_DATA(LCDDON_MARK, PSD2_LCDDON, HIZA7_LCDC, LCDDON_LCDDON2), 
PINMUX_DATA(LCDVCPWC_MARK, PSD3_LCDVEPWC_LCDVCPWC, HIZA6_LCDC, LCDVCPWC_LCDVCPWC2), PINMUX_DATA(LCDVEPWC_MARK, PSD3_LCDVEPWC_LCDVCPWC, HIZA6_LCDC, LCDVEPWC_LCDVEPWC2), PINMUX_DATA(LCDVSYN_MARK, HIZA7_LCDC, LCDVSYN), /* Main LCD - RGB Mode */ PINMUX_DATA(LCDDCK_MARK, MSELB8_RGB, HIZA8_LCDC, LCDDCK_LCDWR), PINMUX_DATA(LCDHSYN_MARK, MSELB8_RGB, HIZA7_LCDC, LCDHSYN_LCDCS), PINMUX_DATA(LCDDISP_MARK, MSELB8_RGB, HIZA7_LCDC, LCDDISP_LCDRS), /* Main LCD - SYS Mode */ PINMUX_DATA(LCDRS_MARK, MSELB8_SYS, HIZA7_LCDC, LCDDISP_LCDRS), PINMUX_DATA(LCDCS_MARK, MSELB8_SYS, HIZA7_LCDC, LCDHSYN_LCDCS), PINMUX_DATA(LCDWR_MARK, MSELB8_SYS, HIZA8_LCDC, LCDDCK_LCDWR), PINMUX_DATA(LCDRD_MARK, HIZA7_LCDC, LCDRD), /* Sub LCD - SYS Mode */ PINMUX_DATA(LCDDON2_MARK, PSD2_LCDDON2, HIZA7_LCDC, LCDDON_LCDDON2), PINMUX_DATA(LCDVCPWC2_MARK, PSD3_LCDVEPWC2_LCDVCPWC2, HIZA6_LCDC, LCDVCPWC_LCDVCPWC2), PINMUX_DATA(LCDVEPWC2_MARK, PSD3_LCDVEPWC2_LCDVCPWC2, HIZA6_LCDC, LCDVEPWC_LCDVEPWC2), PINMUX_DATA(LCDVSYN2_MARK, PSE12_LCDVSYN2, HIZA8_LCDC, LCDVSYN2_DACK), PINMUX_DATA(LCDCS2_MARK, PSD5_LCDCS2, CS6B_CE1B_LCDCS2), /* BSC */ PINMUX_DATA(IOIS16_MARK, IOIS16), PINMUX_DATA(A25_MARK, A25), PINMUX_DATA(A24_MARK, A24), PINMUX_DATA(A23_MARK, A23), PINMUX_DATA(A22_MARK, A22), PINMUX_DATA(BS_MARK, PSA9_BS, IRQ4_BS), PINMUX_DATA(CS6B_CE1B_MARK, PSD5_CS6B_CE1B, CS6B_CE1B_LCDCS2), PINMUX_DATA(WAIT_MARK, WAIT), PINMUX_DATA(CS6A_CE2B_MARK, CS6A_CE2B), /* SBSC */ PINMUX_DATA(HPD63_MARK, HPD63), PINMUX_DATA(HPD62_MARK, HPD62), PINMUX_DATA(HPD61_MARK, HPD61), PINMUX_DATA(HPD60_MARK, HPD60), PINMUX_DATA(HPD59_MARK, HPD59), PINMUX_DATA(HPD58_MARK, HPD58), PINMUX_DATA(HPD57_MARK, HPD57), PINMUX_DATA(HPD56_MARK, HPD56), PINMUX_DATA(HPD55_MARK, HPD55), PINMUX_DATA(HPD54_MARK, HPD54), PINMUX_DATA(HPD53_MARK, HPD53), PINMUX_DATA(HPD52_MARK, HPD52), PINMUX_DATA(HPD51_MARK, HPD51), PINMUX_DATA(HPD50_MARK, HPD50), PINMUX_DATA(HPD49_MARK, HPD49), PINMUX_DATA(HPD48_MARK, HPD48), PINMUX_DATA(HPDQM7_MARK, HPDQM7), 
PINMUX_DATA(HPDQM6_MARK, HPDQM6), PINMUX_DATA(HPDQM5_MARK, HPDQM5), PINMUX_DATA(HPDQM4_MARK, HPDQM4), /* IRQ */ PINMUX_DATA(IRQ0_MARK, HIZC8_IRQ0, IRQ0), PINMUX_DATA(IRQ1_MARK, HIZC9_IRQ1, IRQ1), PINMUX_DATA(IRQ2_MARK, PSA4_IRQ2, HIZC10_IRQ2, IRQ2_SDHID2), PINMUX_DATA(IRQ3_MARK, PSE15_SIOF0_MCK_IRQ3, PSB8_IRQ3, HIZC11_IRQ3, PTQ0), PINMUX_DATA(IRQ4_MARK, PSA9_IRQ4, HIZC12_IRQ4, IRQ4_BS), PINMUX_DATA(IRQ5_MARK, HIZC13_IRQ5, IRQ5), PINMUX_DATA(IRQ6_MARK, PSA15_IRQ6, HIZC14_IRQ6, KEYIN0_IRQ6), PINMUX_DATA(IRQ7_MARK, PSA14_IRQ7, HIZC15_IRQ7, KEYIN4_IRQ7), /* SDHI */ PINMUX_DATA(SDHICD_MARK, SDHICD), PINMUX_DATA(SDHIWP_MARK, SDHIWP), PINMUX_DATA(SDHID3_MARK, SDHID3), PINMUX_DATA(SDHID2_MARK, PSA4_SDHID2, IRQ2_SDHID2), PINMUX_DATA(SDHID1_MARK, SDHID1), PINMUX_DATA(SDHID0_MARK, SDHID0), PINMUX_DATA(SDHICMD_MARK, SDHICMD), PINMUX_DATA(SDHICLK_MARK, SDHICLK), /* SIU - Port A */ PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, HIZB4_SIUA, SIUAOLR_SIOF1_SYNC), PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, HIZB4_SIUA, SIUAOBT_SIOF1_SCK), PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, HIZB4_SIUA, SIUAISLD_SIOF1_RXD), PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, HIZB4_SIUA, SIUAILR_SIOF1_SS2), PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, HIZB4_SIUA, SIUAIBT_SIOF1_SS1), PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, HIZB4_SIUA, SIUAOSLD_SIOF1_TXD), PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, HIZB4_SIUA, PSB1_SIUMCKA, PTK0), PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, HIZB4_SIUA, PTK0), /* SIU - Port B */ PINMUX_DATA(SIUBOLR_MARK, PSB11_SIUBOLR, SIOSTRB1_SIUBOLR), PINMUX_DATA(SIUBOBT_MARK, PSB10_SIUBOBT, SIOSCK_SIUBOBT), PINMUX_DATA(SIUBISLD_MARK, PSB14_SIUBISLD, SIORXD_SIUBISLD), PINMUX_DATA(SIUBILR_MARK, PSB13_SIUBILR, SIOD_SIUBILR), PINMUX_DATA(SIUBIBT_MARK, PSB12_SIUBIBT, SIOSTRB0_SIUBIBT), PINMUX_DATA(SIUBOSLD_MARK, PSB15_SIUBOSLD, SIOTXD_SIUBOSLD), PINMUX_DATA(SIUMCKB_MARK, PSD9_SIOMCK_SIUMCKB, PSB9_SIUMCKB, PTF6), PINMUX_DATA(SIUFCKB_MARK, PSD9_SIUFCKB, PTF6), /* AUD */ PINMUX_DATA(AUDSYNC_MARK, 
AUDSYNC), PINMUX_DATA(AUDATA3_MARK, AUDATA3), PINMUX_DATA(AUDATA2_MARK, AUDATA2), PINMUX_DATA(AUDATA1_MARK, AUDATA1), PINMUX_DATA(AUDATA0_MARK, AUDATA0), /* DMAC */ PINMUX_DATA(DACK_MARK, PSE12_DACK, LCDVSYN2_DACK), PINMUX_DATA(DREQ0_MARK, DREQ0), /* VOU */ PINMUX_DATA(DV_CLKI_MARK, PSD0_DV, LCDD19_DV_CLKI), PINMUX_DATA(DV_CLK_MARK, PSD0_DV, LCDD18_DV_CLK), PINMUX_DATA(DV_HSYNC_MARK, PSD0_DV, LCDD17_DV_HSYNC), PINMUX_DATA(DV_VSYNC_MARK, PSD0_DV, LCDD16_DV_VSYNC), PINMUX_DATA(DV_D15_MARK, PSD0_DV, LCDD15_DV_D15), PINMUX_DATA(DV_D14_MARK, PSD0_DV, LCDD14_DV_D14), PINMUX_DATA(DV_D13_MARK, PSD0_DV, LCDD13_DV_D13), PINMUX_DATA(DV_D12_MARK, PSD0_DV, LCDD12_DV_D12), PINMUX_DATA(DV_D11_MARK, PSD0_DV, LCDD11_DV_D11), PINMUX_DATA(DV_D10_MARK, PSD0_DV, LCDD10_DV_D10), PINMUX_DATA(DV_D9_MARK, PSD0_DV, LCDD9_DV_D9), PINMUX_DATA(DV_D8_MARK, PSD0_DV, LCDD8_DV_D8), PINMUX_DATA(DV_D7_MARK, PSD0_DV, LCDD7_DV_D7), PINMUX_DATA(DV_D6_MARK, PSD0_DV, LCDD6_DV_D6), PINMUX_DATA(DV_D5_MARK, PSD0_DV, LCDD5_DV_D5), PINMUX_DATA(DV_D4_MARK, PSD0_DV, LCDD4_DV_D4), PINMUX_DATA(DV_D3_MARK, PSD0_DV, LCDD3_DV_D3), PINMUX_DATA(DV_D2_MARK, PSD0_DV, LCDD2_DV_D2), PINMUX_DATA(DV_D1_MARK, PSD0_DV, LCDD1_DV_D1), PINMUX_DATA(DV_D0_MARK, PSD0_DV, LCDD0_DV_D0), /* CPG */ PINMUX_DATA(STATUS0_MARK, STATUS0), PINMUX_DATA(PDSTATUS_MARK, PDSTATUS), /* SIOF0 */ PINMUX_DATA(SIOF0_MCK_MARK, PSE15_SIOF0_MCK_IRQ3, PSB8_SIOF0_MCK, PTQ0), PINMUX_DATA(SIOF0_SCK_MARK, PSB5_SIOF0_SCK, SIOF0_SCK_TS_SCK), PINMUX_DATA(SIOF0_SYNC_MARK, PSB4_SIOF0_SYNC, SIOF0_SYNC_TS_SDEN), PINMUX_DATA(SIOF0_SS1_MARK, PSB3_SIOF0_SS1, SIOF0_SS1_TS_SPSYNC), PINMUX_DATA(SIOF0_SS2_MARK, PSB2_SIOF0_SS2, SIOF0_SS2_SIM_RST), PINMUX_DATA(SIOF0_TXD_MARK, PSE14_SIOF0_TXD_IRDA_OUT, PSB7_SIOF0_TXD, PTQ1), PINMUX_DATA(SIOF0_RXD_MARK, PSE13_SIOF0_RXD_IRDA_IN, PSB6_SIOF0_RXD, PTQ2), /* SIOF1 */ PINMUX_DATA(SIOF1_MCK_MARK, PSE11_SIUMCKA_SIOF1_MCK, PSB1_SIOF1_MCK, PTK0), PINMUX_DATA(SIOF1_SCK_MARK, PSC14_SIOF1_SCK, SIUAOBT_SIOF1_SCK), 
PINMUX_DATA(SIOF1_SYNC_MARK, PSC13_SIOF1_SYNC, SIUAOLR_SIOF1_SYNC), PINMUX_DATA(SIOF1_SS1_MARK, PSC12_SIOF1_SS1, SIUAIBT_SIOF1_SS1), PINMUX_DATA(SIOF1_SS2_MARK, PSC11_SIOF1_SS2, SIUAILR_SIOF1_SS2), PINMUX_DATA(SIOF1_TXD_MARK, PSB0_SIOF1_TXD, SIUAOSLD_SIOF1_TXD), PINMUX_DATA(SIOF1_RXD_MARK, PSC15_SIOF1_RXD, SIUAISLD_SIOF1_RXD), /* SIM */ PINMUX_DATA(SIM_D_MARK, PSE15_SIM_D, PTQ0), PINMUX_DATA(SIM_CLK_MARK, PSE14_SIM_CLK, PTQ1), PINMUX_DATA(SIM_RST_MARK, PSB2_SIM_RST, SIOF0_SS2_SIM_RST), /* TSIF */ PINMUX_DATA(TS_SDAT_MARK, PSE13_TS_SDAT, PTQ2), PINMUX_DATA(TS_SCK_MARK, PSB5_TS_SCK, SIOF0_SCK_TS_SCK), PINMUX_DATA(TS_SDEN_MARK, PSB4_TS_SDEN, SIOF0_SYNC_TS_SDEN), PINMUX_DATA(TS_SPSYNC_MARK, PSB3_TS_SPSYNC, SIOF0_SS1_TS_SPSYNC), /* IRDA */ PINMUX_DATA(IRDA_IN_MARK, PSE13_SIOF0_RXD_IRDA_IN, PSB6_IRDA_IN, PTQ2), PINMUX_DATA(IRDA_OUT_MARK, PSE14_SIOF0_TXD_IRDA_OUT, PSB7_IRDA_OUT, PTQ1), /* TPU */ PINMUX_DATA(TPUTO_MARK, PSD8_TPUTO, SCIF0_SCK_TPUTO), /* FLCTL */ PINMUX_DATA(FCE_MARK, PSE3_FLCTL, FCE_VIO_HD2), PINMUX_DATA(NAF7_MARK, PSC0_NAF, HIZA10_NAF, NAF7_VIO_D15), PINMUX_DATA(NAF6_MARK, PSC0_NAF, HIZA10_NAF, NAF6_VIO_D14), PINMUX_DATA(NAF5_MARK, PSC0_NAF, HIZA10_NAF, NAF5_VIO_D13), PINMUX_DATA(NAF4_MARK, PSC0_NAF, HIZA10_NAF, NAF4_VIO_D12), PINMUX_DATA(NAF3_MARK, PSC0_NAF, HIZA10_NAF, NAF3_VIO_D11), PINMUX_DATA(NAF2_MARK, PSE2_NAF2, HIZB0_VIO, NAF2_VIO_D10), PINMUX_DATA(NAF1_MARK, PSE1_NAF1, HIZB0_VIO, NAF1_VIO_D9), PINMUX_DATA(NAF0_MARK, PSE0_NAF0, HIZB0_VIO, NAF0_VIO_D8), PINMUX_DATA(FCDE_MARK, FCDE), PINMUX_DATA(FOE_MARK, PSE3_FLCTL, HIZB0_VIO, FOE_VIO_VD2), PINMUX_DATA(FSC_MARK, FSC), PINMUX_DATA(FWE_MARK, FWE), PINMUX_DATA(FRB_MARK, PSE3_FLCTL, FRB_VIO_CLK2), /* KEYSC */ PINMUX_DATA(KEYIN0_MARK, PSA15_KEYIN0, HIZC14_IRQ6, KEYIN0_IRQ6), PINMUX_DATA(KEYIN1_MARK, HIZA14_KEYSC, KEYIN1), PINMUX_DATA(KEYIN2_MARK, HIZA14_KEYSC, KEYIN2), PINMUX_DATA(KEYIN3_MARK, HIZA14_KEYSC, KEYIN3), PINMUX_DATA(KEYIN4_MARK, PSA14_KEYIN4, HIZC15_IRQ7, KEYIN4_IRQ7), 
PINMUX_DATA(KEYOUT0_MARK, HIZA14_KEYSC, KEYOUT0), PINMUX_DATA(KEYOUT1_MARK, HIZA14_KEYSC, KEYOUT1), PINMUX_DATA(KEYOUT2_MARK, HIZA14_KEYSC, KEYOUT2), PINMUX_DATA(KEYOUT3_MARK, HIZA14_KEYSC, KEYOUT3), PINMUX_DATA(KEYOUT4_IN6_MARK, HIZA14_KEYSC, KEYOUT4_IN6), PINMUX_DATA(KEYOUT5_IN5_MARK, HIZA14_KEYSC, KEYOUT5_IN5), }; static const struct sh_pfc_pin pinmux_pins[] = { /* PTA */ PINMUX_GPIO(PTA7), PINMUX_GPIO(PTA6), PINMUX_GPIO(PTA5), PINMUX_GPIO(PTA4), PINMUX_GPIO(PTA3), PINMUX_GPIO(PTA2), PINMUX_GPIO(PTA1), PINMUX_GPIO(PTA0), /* PTB */ PINMUX_GPIO(PTB7), PINMUX_GPIO(PTB6), PINMUX_GPIO(PTB5), PINMUX_GPIO(PTB4), PINMUX_GPIO(PTB3), PINMUX_GPIO(PTB2), PINMUX_GPIO(PTB1), PINMUX_GPIO(PTB0), /* PTC */ PINMUX_GPIO(PTC7), PINMUX_GPIO(PTC5), PINMUX_GPIO(PTC4), PINMUX_GPIO(PTC3), PINMUX_GPIO(PTC2), PINMUX_GPIO(PTC0), /* PTD */ PINMUX_GPIO(PTD7), PINMUX_GPIO(PTD6), PINMUX_GPIO(PTD5), PINMUX_GPIO(PTD4), PINMUX_GPIO(PTD3), PINMUX_GPIO(PTD2), PINMUX_GPIO(PTD1), PINMUX_GPIO(PTD0), /* PTE */ PINMUX_GPIO(PTE7), PINMUX_GPIO(PTE6), PINMUX_GPIO(PTE5), PINMUX_GPIO(PTE4), PINMUX_GPIO(PTE1), PINMUX_GPIO(PTE0), /* PTF */ PINMUX_GPIO(PTF6), PINMUX_GPIO(PTF5), PINMUX_GPIO(PTF4), PINMUX_GPIO(PTF3), PINMUX_GPIO(PTF2), PINMUX_GPIO(PTF1), PINMUX_GPIO(PTF0), /* PTG */ PINMUX_GPIO(PTG4), PINMUX_GPIO(PTG3), PINMUX_GPIO(PTG2), PINMUX_GPIO(PTG1), PINMUX_GPIO(PTG0), /* PTH */ PINMUX_GPIO(PTH7), PINMUX_GPIO(PTH6), PINMUX_GPIO(PTH5), PINMUX_GPIO(PTH4), PINMUX_GPIO(PTH3), PINMUX_GPIO(PTH2), PINMUX_GPIO(PTH1), PINMUX_GPIO(PTH0), /* PTJ */ PINMUX_GPIO(PTJ7), PINMUX_GPIO(PTJ6), PINMUX_GPIO(PTJ5), PINMUX_GPIO(PTJ1), PINMUX_GPIO(PTJ0), /* PTK */ PINMUX_GPIO(PTK6), PINMUX_GPIO(PTK5), PINMUX_GPIO(PTK4), PINMUX_GPIO(PTK3), PINMUX_GPIO(PTK2), PINMUX_GPIO(PTK1), PINMUX_GPIO(PTK0), /* PTL */ PINMUX_GPIO(PTL7), PINMUX_GPIO(PTL6), PINMUX_GPIO(PTL5), PINMUX_GPIO(PTL4), PINMUX_GPIO(PTL3), PINMUX_GPIO(PTL2), PINMUX_GPIO(PTL1), PINMUX_GPIO(PTL0), /* PTM */ PINMUX_GPIO(PTM7), PINMUX_GPIO(PTM6), PINMUX_GPIO(PTM5), 
PINMUX_GPIO(PTM4), PINMUX_GPIO(PTM3), PINMUX_GPIO(PTM2), PINMUX_GPIO(PTM1), PINMUX_GPIO(PTM0), /* PTN */ PINMUX_GPIO(PTN7), PINMUX_GPIO(PTN6), PINMUX_GPIO(PTN5), PINMUX_GPIO(PTN4), PINMUX_GPIO(PTN3), PINMUX_GPIO(PTN2), PINMUX_GPIO(PTN1), PINMUX_GPIO(PTN0), /* PTQ */ PINMUX_GPIO(PTQ6), PINMUX_GPIO(PTQ5), PINMUX_GPIO(PTQ4), PINMUX_GPIO(PTQ3), PINMUX_GPIO(PTQ2), PINMUX_GPIO(PTQ1), PINMUX_GPIO(PTQ0), /* PTR */ PINMUX_GPIO(PTR4), PINMUX_GPIO(PTR3), PINMUX_GPIO(PTR2), PINMUX_GPIO(PTR1), PINMUX_GPIO(PTR0), /* PTS */ PINMUX_GPIO(PTS4), PINMUX_GPIO(PTS3), PINMUX_GPIO(PTS2), PINMUX_GPIO(PTS1), PINMUX_GPIO(PTS0), /* PTT */ PINMUX_GPIO(PTT4), PINMUX_GPIO(PTT3), PINMUX_GPIO(PTT2), PINMUX_GPIO(PTT1), PINMUX_GPIO(PTT0), /* PTU */ PINMUX_GPIO(PTU4), PINMUX_GPIO(PTU3), PINMUX_GPIO(PTU2), PINMUX_GPIO(PTU1), PINMUX_GPIO(PTU0), /* PTV */ PINMUX_GPIO(PTV4), PINMUX_GPIO(PTV3), PINMUX_GPIO(PTV2), PINMUX_GPIO(PTV1), PINMUX_GPIO(PTV0), /* PTW */ PINMUX_GPIO(PTW6), PINMUX_GPIO(PTW5), PINMUX_GPIO(PTW4), PINMUX_GPIO(PTW3), PINMUX_GPIO(PTW2), PINMUX_GPIO(PTW1), PINMUX_GPIO(PTW0), /* PTX */ PINMUX_GPIO(PTX6), PINMUX_GPIO(PTX5), PINMUX_GPIO(PTX4), PINMUX_GPIO(PTX3), PINMUX_GPIO(PTX2), PINMUX_GPIO(PTX1), PINMUX_GPIO(PTX0), /* PTY */ PINMUX_GPIO(PTY5), PINMUX_GPIO(PTY4), PINMUX_GPIO(PTY3), PINMUX_GPIO(PTY2), PINMUX_GPIO(PTY1), PINMUX_GPIO(PTY0), /* PTZ */ PINMUX_GPIO(PTZ5), PINMUX_GPIO(PTZ4), PINMUX_GPIO(PTZ3), PINMUX_GPIO(PTZ2), PINMUX_GPIO(PTZ1), }; #define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins) static const struct pinmux_func pinmux_func_gpios[] = { /* SCIF0 */ GPIO_FN(SCIF0_TXD), GPIO_FN(SCIF0_RXD), GPIO_FN(SCIF0_RTS), GPIO_FN(SCIF0_CTS), GPIO_FN(SCIF0_SCK), /* SCIF1 */ GPIO_FN(SCIF1_TXD), GPIO_FN(SCIF1_RXD), GPIO_FN(SCIF1_RTS), GPIO_FN(SCIF1_CTS), GPIO_FN(SCIF1_SCK), /* SCIF2 */ GPIO_FN(SCIF2_TXD), GPIO_FN(SCIF2_RXD), GPIO_FN(SCIF2_RTS), GPIO_FN(SCIF2_CTS), GPIO_FN(SCIF2_SCK), /* SIO */ GPIO_FN(SIOTXD), GPIO_FN(SIORXD), GPIO_FN(SIOD), GPIO_FN(SIOSTRB0), GPIO_FN(SIOSTRB1), GPIO_FN(SIOSCK), 
GPIO_FN(SIOMCK), /* CEU */ GPIO_FN(VIO_D15), GPIO_FN(VIO_D14), GPIO_FN(VIO_D13), GPIO_FN(VIO_D12), GPIO_FN(VIO_D11), GPIO_FN(VIO_D10), GPIO_FN(VIO_D9), GPIO_FN(VIO_D8), GPIO_FN(VIO_D7), GPIO_FN(VIO_D6), GPIO_FN(VIO_D5), GPIO_FN(VIO_D4), GPIO_FN(VIO_D3), GPIO_FN(VIO_D2), GPIO_FN(VIO_D1), GPIO_FN(VIO_D0), GPIO_FN(VIO_CLK), GPIO_FN(VIO_VD), GPIO_FN(VIO_HD), GPIO_FN(VIO_FLD), GPIO_FN(VIO_CKO), GPIO_FN(VIO_STEX), GPIO_FN(VIO_STEM), GPIO_FN(VIO_VD2), GPIO_FN(VIO_HD2), GPIO_FN(VIO_CLK2), /* LCDC */ GPIO_FN(LCDD23), GPIO_FN(LCDD22), GPIO_FN(LCDD21), GPIO_FN(LCDD20), GPIO_FN(LCDD19), GPIO_FN(LCDD18), GPIO_FN(LCDD17), GPIO_FN(LCDD16), GPIO_FN(LCDD15), GPIO_FN(LCDD14), GPIO_FN(LCDD13), GPIO_FN(LCDD12), GPIO_FN(LCDD11), GPIO_FN(LCDD10), GPIO_FN(LCDD9), GPIO_FN(LCDD8), GPIO_FN(LCDD7), GPIO_FN(LCDD6), GPIO_FN(LCDD5), GPIO_FN(LCDD4), GPIO_FN(LCDD3), GPIO_FN(LCDD2), GPIO_FN(LCDD1), GPIO_FN(LCDD0), GPIO_FN(LCDLCLK), /* Main LCD */ GPIO_FN(LCDDON), GPIO_FN(LCDVCPWC), GPIO_FN(LCDVEPWC), GPIO_FN(LCDVSYN), /* Main LCD - RGB Mode */ GPIO_FN(LCDDCK), GPIO_FN(LCDHSYN), GPIO_FN(LCDDISP), /* Main LCD - SYS Mode */ GPIO_FN(LCDRS), GPIO_FN(LCDCS), GPIO_FN(LCDWR), GPIO_FN(LCDRD), /* Sub LCD - SYS Mode */ GPIO_FN(LCDDON2), GPIO_FN(LCDVCPWC2), GPIO_FN(LCDVEPWC2), GPIO_FN(LCDVSYN2), GPIO_FN(LCDCS2), /* BSC */ GPIO_FN(IOIS16), GPIO_FN(A25), GPIO_FN(A24), GPIO_FN(A23), GPIO_FN(A22), GPIO_FN(BS), GPIO_FN(CS6B_CE1B), GPIO_FN(WAIT), GPIO_FN(CS6A_CE2B), /* SBSC */ GPIO_FN(HPD63), GPIO_FN(HPD62), GPIO_FN(HPD61), GPIO_FN(HPD60), GPIO_FN(HPD59), GPIO_FN(HPD58), GPIO_FN(HPD57), GPIO_FN(HPD56), GPIO_FN(HPD55), GPIO_FN(HPD54), GPIO_FN(HPD53), GPIO_FN(HPD52), GPIO_FN(HPD51), GPIO_FN(HPD50), GPIO_FN(HPD49), GPIO_FN(HPD48), GPIO_FN(HPDQM7), GPIO_FN(HPDQM6), GPIO_FN(HPDQM5), GPIO_FN(HPDQM4), /* IRQ */ GPIO_FN(IRQ0), GPIO_FN(IRQ1), GPIO_FN(IRQ2), GPIO_FN(IRQ3), GPIO_FN(IRQ4), GPIO_FN(IRQ5), GPIO_FN(IRQ6), GPIO_FN(IRQ7), /* SDHI */ GPIO_FN(SDHICD), GPIO_FN(SDHIWP), GPIO_FN(SDHID3), GPIO_FN(SDHID2), 
GPIO_FN(SDHID1), GPIO_FN(SDHID0), GPIO_FN(SDHICMD), GPIO_FN(SDHICLK), /* SIU - Port A */ GPIO_FN(SIUAOLR), GPIO_FN(SIUAOBT), GPIO_FN(SIUAISLD), GPIO_FN(SIUAILR), GPIO_FN(SIUAIBT), GPIO_FN(SIUAOSLD), GPIO_FN(SIUMCKA), GPIO_FN(SIUFCKA), /* SIU - Port B */ GPIO_FN(SIUBOLR), GPIO_FN(SIUBOBT), GPIO_FN(SIUBISLD), GPIO_FN(SIUBILR), GPIO_FN(SIUBIBT), GPIO_FN(SIUBOSLD), GPIO_FN(SIUMCKB), GPIO_FN(SIUFCKB), /* AUD */ GPIO_FN(AUDSYNC), GPIO_FN(AUDATA3), GPIO_FN(AUDATA2), GPIO_FN(AUDATA1), GPIO_FN(AUDATA0), /* DMAC */ GPIO_FN(DACK), GPIO_FN(DREQ0), /* VOU */ GPIO_FN(DV_CLKI), GPIO_FN(DV_CLK), GPIO_FN(DV_HSYNC), GPIO_FN(DV_VSYNC), GPIO_FN(DV_D15), GPIO_FN(DV_D14), GPIO_FN(DV_D13), GPIO_FN(DV_D12), GPIO_FN(DV_D11), GPIO_FN(DV_D10), GPIO_FN(DV_D9), GPIO_FN(DV_D8), GPIO_FN(DV_D7), GPIO_FN(DV_D6), GPIO_FN(DV_D5), GPIO_FN(DV_D4), GPIO_FN(DV_D3), GPIO_FN(DV_D2), GPIO_FN(DV_D1), GPIO_FN(DV_D0), /* CPG */ GPIO_FN(STATUS0), GPIO_FN(PDSTATUS), /* SIOF0 */ GPIO_FN(SIOF0_MCK), GPIO_FN(SIOF0_SCK), GPIO_FN(SIOF0_SYNC), GPIO_FN(SIOF0_SS1), GPIO_FN(SIOF0_SS2), GPIO_FN(SIOF0_TXD), GPIO_FN(SIOF0_RXD), /* SIOF1 */ GPIO_FN(SIOF1_MCK), GPIO_FN(SIOF1_SCK), GPIO_FN(SIOF1_SYNC), GPIO_FN(SIOF1_SS1), GPIO_FN(SIOF1_SS2), GPIO_FN(SIOF1_TXD), GPIO_FN(SIOF1_RXD), /* SIM */ GPIO_FN(SIM_D), GPIO_FN(SIM_CLK), GPIO_FN(SIM_RST), /* TSIF */ GPIO_FN(TS_SDAT), GPIO_FN(TS_SCK), GPIO_FN(TS_SDEN), GPIO_FN(TS_SPSYNC), /* IRDA */ GPIO_FN(IRDA_IN), GPIO_FN(IRDA_OUT), /* TPU */ GPIO_FN(TPUTO), /* FLCTL */ GPIO_FN(FCE), GPIO_FN(NAF7), GPIO_FN(NAF6), GPIO_FN(NAF5), GPIO_FN(NAF4), GPIO_FN(NAF3), GPIO_FN(NAF2), GPIO_FN(NAF1), GPIO_FN(NAF0), GPIO_FN(FCDE), GPIO_FN(FOE), GPIO_FN(FSC), GPIO_FN(FWE), GPIO_FN(FRB), /* KEYSC */ GPIO_FN(KEYIN0), GPIO_FN(KEYIN1), GPIO_FN(KEYIN2), GPIO_FN(KEYIN3), GPIO_FN(KEYIN4), GPIO_FN(KEYOUT0), GPIO_FN(KEYOUT1), GPIO_FN(KEYOUT2), GPIO_FN(KEYOUT3), GPIO_FN(KEYOUT4_IN6), GPIO_FN(KEYOUT5_IN5), }; static const struct pinmux_cfg_reg pinmux_config_regs[] = { { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) { 
VIO_D7_SCIF1_SCK, PTA7_OUT, 0, PTA7_IN, VIO_D6_SCIF1_RXD, 0, 0, PTA6_IN, VIO_D5_SCIF1_TXD, PTA5_OUT, 0, PTA5_IN, VIO_D4, 0, 0, PTA4_IN, VIO_D3, 0, 0, PTA3_IN, VIO_D2, 0, 0, PTA2_IN, VIO_D1, 0, 0, PTA1_IN, VIO_D0_LCDLCLK, 0, 0, PTA0_IN } }, { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) { HPD55, PTB7_OUT, 0, PTB7_IN, HPD54, PTB6_OUT, 0, PTB6_IN, HPD53, PTB5_OUT, 0, PTB5_IN, HPD52, PTB4_OUT, 0, PTB4_IN, HPD51, PTB3_OUT, 0, PTB3_IN, HPD50, PTB2_OUT, 0, PTB2_IN, HPD49, PTB1_OUT, 0, PTB1_IN, HPD48, PTB0_OUT, 0, PTB0_IN } }, { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) { 0, 0, 0, PTC7_IN, 0, 0, 0, 0, IOIS16, 0, 0, PTC5_IN, HPDQM7, PTC4_OUT, 0, PTC4_IN, HPDQM6, PTC3_OUT, 0, PTC3_IN, HPDQM5, PTC2_OUT, 0, PTC2_IN, 0, 0, 0, 0, HPDQM4, PTC0_OUT, 0, PTC0_IN } }, { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) { SDHICD, 0, 0, PTD7_IN, SDHIWP, PTD6_OUT, 0, PTD6_IN, SDHID3, PTD5_OUT, 0, PTD5_IN, IRQ2_SDHID2, PTD4_OUT, 0, PTD4_IN, SDHID1, PTD3_OUT, 0, PTD3_IN, SDHID0, PTD2_OUT, 0, PTD2_IN, SDHICMD, PTD1_OUT, 0, PTD1_IN, SDHICLK, PTD0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) { A25, PTE7_OUT, 0, PTE7_IN, A24, PTE6_OUT, 0, PTE6_IN, A23, PTE5_OUT, 0, PTE5_IN, A22, PTE4_OUT, 0, PTE4_IN, 0, 0, 0, 0, 0, 0, 0, 0, IRQ5, PTE1_OUT, 0, PTE1_IN, IRQ4_BS, PTE0_OUT, 0, PTE0_IN } }, { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) { 0, 0, 0, 0, PTF6, PTF6_OUT, 0, PTF6_IN, SIOSCK_SIUBOBT, PTF5_OUT, 0, PTF5_IN, SIOSTRB1_SIUBOLR, PTF4_OUT, 0, PTF4_IN, SIOSTRB0_SIUBIBT, PTF3_OUT, 0, PTF3_IN, SIOD_SIUBILR, PTF2_OUT, 0, PTF2_IN, SIORXD_SIUBISLD, 0, 0, PTF1_IN, SIOTXD_SIUBOSLD, PTF0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, AUDSYNC, PTG4_OUT, 0, 0, AUDATA3, PTG3_OUT, 0, 0, AUDATA2, PTG2_OUT, 0, 0, AUDATA1, PTG1_OUT, 0, 0, AUDATA0, PTG0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) { LCDVCPWC_LCDVCPWC2, PTH7_OUT, 0, 0, LCDVSYN2_DACK, PTH6_OUT, 0, PTH6_IN, LCDVSYN, PTH5_OUT, 0, PTH5_IN, LCDDISP_LCDRS, PTH4_OUT, 0, 0, LCDHSYN_LCDCS, 
PTH3_OUT, 0, 0, LCDDON_LCDDON2, PTH2_OUT, 0, 0, LCDD17_DV_HSYNC, PTH1_OUT, 0, PTH1_IN, LCDD16_DV_VSYNC, PTH0_OUT, 0, PTH0_IN } }, { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) { STATUS0, PTJ7_OUT, 0, 0, 0, PTJ6_OUT, 0, 0, PDSTATUS, PTJ5_OUT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, IRQ1, PTJ1_OUT, 0, PTJ1_IN, IRQ0, PTJ0_OUT, 0, PTJ0_IN } }, { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) { 0, 0, 0, 0, SIUAILR_SIOF1_SS2, PTK6_OUT, 0, PTK6_IN, SIUAIBT_SIOF1_SS1, PTK5_OUT, 0, PTK5_IN, SIUAOLR_SIOF1_SYNC, PTK4_OUT, 0, PTK4_IN, SIUAOBT_SIOF1_SCK, PTK3_OUT, 0, PTK3_IN, SIUAISLD_SIOF1_RXD, 0, 0, PTK2_IN, SIUAOSLD_SIOF1_TXD, PTK1_OUT, 0, 0, PTK0, PTK0_OUT, 0, PTK0_IN } }, { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) { LCDD15_DV_D15, PTL7_OUT, 0, PTL7_IN, LCDD14_DV_D14, PTL6_OUT, 0, PTL6_IN, LCDD13_DV_D13, PTL5_OUT, 0, PTL5_IN, LCDD12_DV_D12, PTL4_OUT, 0, PTL4_IN, LCDD11_DV_D11, PTL3_OUT, 0, PTL3_IN, LCDD10_DV_D10, PTL2_OUT, 0, PTL2_IN, LCDD9_DV_D9, PTL1_OUT, 0, PTL1_IN, LCDD8_DV_D8, PTL0_OUT, 0, PTL0_IN } }, { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) { LCDD7_DV_D7, PTM7_OUT, 0, PTM7_IN, LCDD6_DV_D6, PTM6_OUT, 0, PTM6_IN, LCDD5_DV_D5, PTM5_OUT, 0, PTM5_IN, LCDD4_DV_D4, PTM4_OUT, 0, PTM4_IN, LCDD3_DV_D3, PTM3_OUT, 0, PTM3_IN, LCDD2_DV_D2, PTM2_OUT, 0, PTM2_IN, LCDD1_DV_D1, PTM1_OUT, 0, PTM1_IN, LCDD0_DV_D0, PTM0_OUT, 0, PTM0_IN } }, { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) { HPD63, PTN7_OUT, 0, PTN7_IN, HPD62, PTN6_OUT, 0, PTN6_IN, HPD61, PTN5_OUT, 0, PTN5_IN, HPD60, PTN4_OUT, 0, PTN4_IN, HPD59, PTN3_OUT, 0, PTN3_IN, HPD58, PTN2_OUT, 0, PTN2_IN, HPD57, PTN1_OUT, 0, PTN1_IN, HPD56, PTN0_OUT, 0, PTN0_IN } }, { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) { 0, 0, 0, 0, SIOF0_SS2_SIM_RST, PTQ6_OUT, 0, 0, SIOF0_SS1_TS_SPSYNC, PTQ5_OUT, 0, PTQ5_IN, SIOF0_SYNC_TS_SDEN, PTQ4_OUT, 0, PTQ4_IN, SIOF0_SCK_TS_SCK, PTQ3_OUT, 0, PTQ3_IN, PTQ2, 0, 0, PTQ2_IN, PTQ1, PTQ1_OUT, 0, 0, PTQ0, PTQ0_OUT, 0, PTQ0_IN } }, { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
LCDRD, PTR4_OUT, 0, 0, CS6B_CE1B_LCDCS2, PTR3_OUT, 0, 0, WAIT, 0, 0, PTR2_IN, LCDDCK_LCDWR, PTR1_OUT, 0, 0, LCDVEPWC_LCDVEPWC2, PTR0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, SCIF0_CTS_SIUAISPD, 0, 0, PTS4_IN, SCIF0_RTS_SIUAOSPD, PTS3_OUT, 0, 0, SCIF0_SCK_TPUTO, PTS2_OUT, 0, PTS2_IN, SCIF0_RXD, 0, 0, PTS1_IN, SCIF0_TXD, PTS0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, FOE_VIO_VD2, PTT4_OUT, 0, PTT4_IN, FWE, PTT3_OUT, 0, PTT3_IN, FSC, PTT2_OUT, 0, PTT2_IN, DREQ0, 0, 0, PTT1_IN, FCDE, PTT0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NAF2_VIO_D10, PTU4_OUT, 0, PTU4_IN, NAF1_VIO_D9, PTU3_OUT, 0, PTU3_IN, NAF0_VIO_D8, PTU2_OUT, 0, PTU2_IN, FRB_VIO_CLK2, 0, 0, PTU1_IN, FCE_VIO_HD2, PTU0_OUT, 0, PTU0_IN } }, { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NAF7_VIO_D15, PTV4_OUT, 0, PTV4_IN, NAF6_VIO_D14, PTV3_OUT, 0, PTV3_IN, NAF5_VIO_D13, PTV2_OUT, 0, PTV2_IN, NAF4_VIO_D12, PTV1_OUT, 0, PTV1_IN, NAF3_VIO_D11, PTV0_OUT, 0, PTV0_IN } }, { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) { 0, 0, 0, 0, VIO_FLD_SCIF2_CTS, 0, 0, PTW6_IN, VIO_CKO_SCIF2_RTS, PTW5_OUT, 0, 0, VIO_STEX_SCIF2_SCK, PTW4_OUT, 0, PTW4_IN, VIO_STEM_SCIF2_TXD, PTW3_OUT, 0, PTW3_IN, VIO_HD_SCIF2_RXD, PTW2_OUT, 0, PTW2_IN, VIO_VD_SCIF1_CTS, PTW1_OUT, 0, PTW1_IN, VIO_CLK_SCIF1_RTS, PTW0_OUT, 0, PTW0_IN } }, { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) { 0, 0, 0, 0, CS6A_CE2B, PTX6_OUT, 0, PTX6_IN, LCDD23, PTX5_OUT, 0, PTX5_IN, LCDD22, PTX4_OUT, 0, PTX4_IN, LCDD21, PTX3_OUT, 0, PTX3_IN, LCDD20, PTX2_OUT, 0, PTX2_IN, LCDD19_DV_CLKI, PTX1_OUT, 0, PTX1_IN, LCDD18_DV_CLK, PTX0_OUT, 0, PTX0_IN } }, { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, KEYOUT5_IN5, PTY5_OUT, 0, PTY5_IN, KEYOUT4_IN6, PTY4_OUT, 0, PTY4_IN, KEYOUT3, PTY3_OUT, 0, PTY3_IN, KEYOUT2, PTY2_OUT, 0, PTY2_IN, KEYOUT1, PTY1_OUT, 0, 0, KEYOUT0, 
PTY0_OUT, 0, PTY0_IN } }, { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, KEYIN4_IRQ7, 0, 0, PTZ5_IN, KEYIN3, 0, 0, PTZ4_IN, KEYIN2, 0, 0, PTZ3_IN, KEYIN1, 0, 0, PTZ2_IN, KEYIN0_IRQ6, 0, 0, PTZ1_IN, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) { PSA15_KEYIN0, PSA15_IRQ6, PSA14_KEYIN4, PSA14_IRQ7, 0, 0, 0, 0, 0, 0, 0, 0, PSA9_IRQ4, PSA9_BS, 0, 0, 0, 0, 0, 0, 0, 0, PSA4_IRQ2, PSA4_SDHID2, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1) { PSB15_SIOTXD, PSB15_SIUBOSLD, PSB14_SIORXD, PSB14_SIUBISLD, PSB13_SIOD, PSB13_SIUBILR, PSB12_SIOSTRB0, PSB12_SIUBIBT, PSB11_SIOSTRB1, PSB11_SIUBOLR, PSB10_SIOSCK, PSB10_SIUBOBT, PSB9_SIOMCK, PSB9_SIUMCKB, PSB8_SIOF0_MCK, PSB8_IRQ3, PSB7_SIOF0_TXD, PSB7_IRDA_OUT, PSB6_SIOF0_RXD, PSB6_IRDA_IN, PSB5_SIOF0_SCK, PSB5_TS_SCK, PSB4_SIOF0_SYNC, PSB4_TS_SDEN, PSB3_SIOF0_SS1, PSB3_TS_SPSYNC, PSB2_SIOF0_SS2, PSB2_SIM_RST, PSB1_SIUMCKA, PSB1_SIOF1_MCK, PSB0_SIUAOSLD, PSB0_SIOF1_TXD } }, { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1) { PSC15_SIUAISLD, PSC15_SIOF1_RXD, PSC14_SIUAOBT, PSC14_SIOF1_SCK, PSC13_SIUAOLR, PSC13_SIOF1_SYNC, PSC12_SIUAIBT, PSC12_SIOF1_SS1, PSC11_SIUAILR, PSC11_SIOF1_SS2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PSC0_NAF, PSC0_VIO } }, { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1) { 0, 0, 0, 0, PSD13_VIO, PSD13_SCIF2, PSD12_VIO, PSD12_SCIF1, PSD11_VIO, PSD11_SCIF1, PSD10_VIO_D0, PSD10_LCDLCLK, PSD9_SIOMCK_SIUMCKB, PSD9_SIUFCKB, PSD8_SCIF0_SCK, PSD8_TPUTO, PSD7_SCIF0_RTS, PSD7_SIUAOSPD, PSD6_SCIF0_CTS, PSD6_SIUAISPD, PSD5_CS6B_CE1B, PSD5_LCDCS2, 0, 0, PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2, PSD2_LCDDON, PSD2_LCDDON2, 0, 0, PSD0_LCDD19_LCDD0, PSD0_DV } }, { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1) { PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D, PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK, PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT, PSE12_LCDVSYN2, PSE12_DACK, PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PSE3_FLCTL, 
PSE3_VIO, PSE2_NAF2, PSE2_VIO_D10, PSE1_NAF1, PSE1_VIO_D9, PSE0_NAF0, PSE0_VIO_D8 } }, { PINMUX_CFG_REG("HIZCRA", 0xa4050158, 16, 1) { 0, 0, HIZA14_KEYSC, HIZA14_HIZ, 0, 0, 0, 0, 0, 0, HIZA10_NAF, HIZA10_HIZ, HIZA9_VIO, HIZA9_HIZ, HIZA8_LCDC, HIZA8_HIZ, HIZA7_LCDC, HIZA7_HIZ, HIZA6_LCDC, HIZA6_HIZ, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("HIZCRB", 0xa405015a, 16, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, HIZB4_SIUA, HIZB4_HIZ, 0, 0, 0, 0, HIZB1_VIO, HIZB1_HIZ, HIZB0_VIO, HIZB0_HIZ } }, { PINMUX_CFG_REG("HIZCRC", 0xa405015c, 16, 1) { HIZC15_IRQ7, HIZC15_HIZ, HIZC14_IRQ6, HIZC14_HIZ, HIZC13_IRQ5, HIZC13_HIZ, HIZC12_IRQ4, HIZC12_HIZ, HIZC11_IRQ3, HIZC11_HIZ, HIZC10_IRQ2, HIZC10_HIZ, HIZC9_IRQ1, HIZC9_HIZ, HIZC8_IRQ0, HIZC8_HIZ, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("MSELCRB", 0xa4050182, 16, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MSELB9_VIO, MSELB9_VIO2, MSELB8_RGB, MSELB8_SYS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, {} }; static const struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PADR", 0xa4050120, 8) { PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA } }, { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) { PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA } }, { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) { PTC7_DATA, 0, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, 0, PTC0_DATA } }, { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) { PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA } }, { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) { PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, 0, 0, PTE1_DATA, PTE0_DATA } }, { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) { 0, PTF6_DATA, PTF5_DATA, PTF4_DATA, PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA } }, { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) { 0, 0, 0, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA } }, { PINMUX_DATA_REG("PHDR", 
0xa405012e, 8) { PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA } }, { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) { PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, 0, 0, 0, PTJ1_DATA, PTJ0_DATA } }, { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) { 0, PTK6_DATA, PTK5_DATA, PTK4_DATA, PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA } }, { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) { PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA } }, { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) { PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA, PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA } }, { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) { PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA, PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA } }, { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) { 0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA, PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA } }, { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) { 0, 0, 0, PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA } }, { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) { 0, 0, 0, PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA } }, { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) { 0, 0, 0, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA } }, { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) { 0, 0, 0, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA } }, { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) { 0, 0, 0, PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA } }, { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) { 0, PTW6_DATA, PTW5_DATA, PTW4_DATA, PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA } }, { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) { 0, PTX6_DATA, PTX5_DATA, PTX4_DATA, PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA } }, { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) { 0, PTY6_DATA, PTY5_DATA, PTY4_DATA, PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA } }, { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) { 0, 0, PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA } }, { }, }; const struct sh_pfc_soc_info sh7722_pinmux_info = { .name 
= "sh7722_pfc", .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .pins = pinmux_pins, .nr_pins = ARRAY_SIZE(pinmux_pins), .func_gpios = pinmux_func_gpios, .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios), .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), };
gpl-2.0
HyochanPyo/kernel_3.18.9
arch/arm/plat-versatile/platsmp.c
1854
2165
/*
 *  linux/arch/arm/plat-versatile/platsmp.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 *
 * The store is ordered with smp_wmb() and then pushed out with
 * sync_cache_w() so that a secondary CPU spinning with its caches
 * disabled (not yet coherent) still sees the new value.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	sync_cache_w(&pen_release);
}

/* Serialises the primary/secondary boot handshake below. */
static DEFINE_SPINLOCK(boot_lock);

/*
 * Runs on the freshly-booted secondary CPU: signal the primary that we
 * have left the holding pen, then rendezvous with the boot thread.
 */
void versatile_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 * Taking and immediately dropping boot_lock blocks until
	 * versatile_boot_secondary() on the primary has released it.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

/*
 * Runs on the primary CPU: release @cpu from the holding pen and wait
 * (up to 1 second) for it to acknowledge by resetting pen_release.
 *
 * Returns 0 on success, -ENOSYS if the secondary never left the pen.
 */
int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them.  However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	write_pen_release(cpu_logical_map(cpu));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/* Poll for the secondary's acknowledgement for at most 1 HZ. */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();	/* pairs with smp_wmb() in write_pen_release() */
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
gpl-2.0
Silentlys/android_kernel_cyanogen_msm8916
arch/arm/mach-kirkwood/db88f6281-bp-setup.c
2110
2500
/* * arch/arm/mach-kirkwood/db88f6281-bp-setup.c * * Marvell DB-88F6281-BP Development Board Setup * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/sizes.h> #include <linux/platform_device.h> #include <linux/mtd/partitions.h> #include <linux/ata_platform.h> #include <linux/mv643xx_eth.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/kirkwood.h> #include <linux/platform_data/mmc-mvsdio.h> #include "common.h" #include "mpp.h" static struct mtd_partition db88f6281_nand_parts[] = { { .name = "u-boot", .offset = 0, .size = SZ_1M }, { .name = "uImage", .offset = MTDPART_OFS_NXTBLK, .size = SZ_4M }, { .name = "root", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL }, }; static struct mv643xx_eth_platform_data db88f6281_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; static struct mv_sata_platform_data db88f6281_sata_data = { .n_ports = 2, }; static struct mvsdio_platform_data db88f6281_mvsdio_data = { .gpio_write_protect = 37, .gpio_card_detect = 38, }; static unsigned int db88f6281_mpp_config[] __initdata = { MPP0_NF_IO2, MPP1_NF_IO3, MPP2_NF_IO4, MPP3_NF_IO5, MPP4_NF_IO6, MPP5_NF_IO7, MPP18_NF_IO0, MPP19_NF_IO1, MPP37_GPIO, MPP38_GPIO, 0 }; static void __init db88f6281_init(void) { /* * Basic setup. Needs to be called early. 
*/ kirkwood_init(); kirkwood_mpp_conf(db88f6281_mpp_config); kirkwood_nand_init(ARRAY_AND_SIZE(db88f6281_nand_parts), 25); kirkwood_ehci_init(); kirkwood_ge00_init(&db88f6281_ge00_data); kirkwood_sata_init(&db88f6281_sata_data); kirkwood_uart0_init(); kirkwood_sdio_init(&db88f6281_mvsdio_data); } static int __init db88f6281_pci_init(void) { if (machine_is_db88f6281_bp()) { u32 dev, rev; kirkwood_pcie_id(&dev, &rev); if (dev == MV88F6282_DEV_ID) kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0); else kirkwood_pcie_init(KW_PCIE0); } return 0; } subsys_initcall(db88f6281_pci_init); MACHINE_START(DB88F6281_BP, "Marvell DB-88F6281-BP Development Board") /* Maintainer: Saeed Bishara <saeed@marvell.com> */ .atag_offset = 0x100, .init_machine = db88f6281_init, .map_io = kirkwood_map_io, .init_early = kirkwood_init_early, .init_irq = kirkwood_init_irq, .init_time = kirkwood_timer_init, .restart = kirkwood_restart, MACHINE_END
gpl-2.0
CyanogenMod/android_kernel_samsung_smdk4210
drivers/staging/westbridge/astoria/block/cyasblkdev_block.c
2366
40870
/* cyanblkdev_block.c - West Bridge Linux Block Driver source file ## =========================== ## Copyright (C) 2010 Cypress Semiconductor ## ## This program is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License ## as published by the Free Software Foundation; either version 2 ## of the License, or (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., 51 Franklin Street, Fifth Floor ## Boston, MA 02110-1301, USA. ## =========================== */ /* * Linux block driver implementation for Cypress West Bridge. * Based on the mmc block driver implementation by Andrew Christian * for the linux 2.6.26 kernel. * mmc_block.c, 5/28/2002 */ /* * Block driver for media (i.e., flash cards) * * Copyright 2002 Hewlett-Packard Company * * Use consistent with the GNU GPL is permitted, * provided that this copyright notice is * preserved in its entirety in all copies and derived works. * * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS * FITNESS FOR ANY PARTICULAR PURPOSE. * * Many thanks to Alessandro Rubini and Jonathan Corbet! 
* * Author: Andrew Christian * 28 May 2002 */ #include <linux/moduleparam.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/hdreg.h> #include <linux/kdev_t.h> #include <linux/blkdev.h> #include <asm/system.h> #include <linux/uaccess.h> #include <linux/scatterlist.h> #include <linux/time.h> #include <linux/signal.h> #include <linux/delay.h> #include "cyasblkdev_queue.h" #define CYASBLKDEV_SHIFT 0 /* Only a single partition. */ #define CYASBLKDEV_MAX_REQ_LEN (256) #define CYASBLKDEV_NUM_MINORS (256 >> CYASBLKDEV_SHIFT) #define CY_AS_TEST_NUM_BLOCKS (64) #define CYASBLKDEV_MINOR_0 1 #define CYASBLKDEV_MINOR_1 2 #define CYASBLKDEV_MINOR_2 3 static int major; module_param(major, int, 0444); MODULE_PARM_DESC(major, "specify the major device number for cyasblkdev block driver"); /* parameters passed from the user space */ static int vfat_search; module_param(vfat_search, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(vfat_search, "dynamically find the location of the first sector"); static int private_partition_bus = -1; module_param(private_partition_bus, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(private_partition_bus, "bus number for private partition"); static int private_partition_size = -1; module_param(private_partition_size, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(private_partition_size, "size of the private partition"); /* * There is one cyasblkdev_blk_data per slot. 
*/ struct cyasblkdev_blk_data { spinlock_t lock; int media_count[2]; const struct block_device_operations *blkops; unsigned int usage; unsigned int suspended; /* handle to the west bridge device this handle, typdefed as *void */ cy_as_device_handle dev_handle; /* our custom structure, in addition to request queue, * adds lock & semaphore items*/ struct cyasblkdev_queue queue; /* 16 entries is enough given max request size * 16 * 4K (64 K per request)*/ struct scatterlist sg[16]; /* non-zero enables printk of executed reqests */ unsigned int dbgprn_flags; /*gen_disk for private, system disk */ struct gendisk *system_disk; cy_as_media_type system_disk_type; cy_bool system_disk_read_only; cy_bool system_disk_bus_num; /* sector size for the medium */ unsigned int system_disk_blk_size; unsigned int system_disk_first_sector; unsigned int system_disk_unit_no; /*gen_disk for bus 0 */ struct gendisk *user_disk_0; cy_as_media_type user_disk_0_type; cy_bool user_disk_0_read_only; cy_bool user_disk_0_bus_num; /* sector size for the medium */ unsigned int user_disk_0_blk_size; unsigned int user_disk_0_first_sector; unsigned int user_disk_0_unit_no; /*gen_disk for bus 1 */ struct gendisk *user_disk_1; cy_as_media_type user_disk_1_type; cy_bool user_disk_1_read_only; cy_bool user_disk_1_bus_num; /* sector size for the medium */ unsigned int user_disk_1_blk_size; unsigned int user_disk_1_first_sector; unsigned int user_disk_1_unit_no; }; /* pointer to west bridge block data device superstructure */ static struct cyasblkdev_blk_data *gl_bd; static DEFINE_SEMAPHORE(open_lock); /* local forwardd declarationss */ static cy_as_device_handle *cyas_dev_handle; static void cyasblkdev_blk_deinit(struct cyasblkdev_blk_data *bd); /*change debug print options */ #define DBGPRN_RD_RQ (1 < 0) #define DBGPRN_WR_RQ (1 < 1) #define DBGPRN_RQ_END (1 < 2) int blkdev_ctl_dbgprn( int prn_flags ) { int cur_options = gl_bd->dbgprn_flags; DBGPRN_FUNC_NAME; /* set new debug print options */ 
gl_bd->dbgprn_flags = prn_flags; /* return previous */ return cur_options; } EXPORT_SYMBOL(blkdev_ctl_dbgprn); static struct cyasblkdev_blk_data *cyasblkdev_blk_get( struct gendisk *disk ) { struct cyasblkdev_blk_data *bd; DBGPRN_FUNC_NAME; down(&open_lock); bd = disk->private_data; if (bd && (bd->usage == 0)) bd = NULL; if (bd) { bd->usage++; #ifndef NBDEBUG cy_as_hal_print_message( "cyasblkdev_blk_get: usage = %d\n", bd->usage); #endif } up(&open_lock); return bd; } static void cyasblkdev_blk_put( struct cyasblkdev_blk_data *bd ) { DBGPRN_FUNC_NAME; down(&open_lock); if (bd) { bd->usage--; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( " cyasblkdev_blk_put , bd->usage= %d\n", bd->usage); #endif } else { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "cyasblkdev: blk_put(bd) on bd = NULL!: usage = %d\n", bd->usage); #endif up(&open_lock); return; } if (bd->usage == 0) { put_disk(bd->user_disk_0); put_disk(bd->user_disk_1); put_disk(bd->system_disk); cyasblkdev_cleanup_queue(&bd->queue); if (CY_AS_ERROR_SUCCESS != cy_as_storage_release(bd->dev_handle, 0, 0, 0, 0)) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "cyasblkdev: cannot release bus 0\n"); #endif } if (CY_AS_ERROR_SUCCESS != cy_as_storage_release(bd->dev_handle, 1, 0, 0, 0)) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "cyasblkdev: cannot release bus 1\n"); #endif } if (CY_AS_ERROR_SUCCESS != cy_as_storage_stop(bd->dev_handle, 0, 0)) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "cyasblkdev: cannot stop storage stack\n"); #endif } #ifdef __CY_ASTORIA_SCM_KERNEL_HAL__ /* If the SCM Kernel HAL is being used, disable the use * of scatter/gather lists at the end of block driver usage. 
*/ cy_as_hal_disable_scatter_list(cyasdevice_gethaltag()); #endif /*ptr to global struct cyasblkdev_blk_data */ gl_bd = NULL; kfree(bd); } #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "cyasblkdev (blk_put): usage = %d\n", bd->usage); #endif up(&open_lock); } static int cyasblkdev_blk_open( struct block_device *bdev, fmode_t mode ) { struct cyasblkdev_blk_data *bd = cyasblkdev_blk_get(bdev->bd_disk); int ret = -ENXIO; DBGPRN_FUNC_NAME; if (bd) { if (bd->usage == 2) check_disk_change(bdev); ret = 0; if (bdev->bd_disk == bd->user_disk_0) { if ((mode & FMODE_WRITE) && bd->user_disk_0_read_only) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "device marked as readonly " "and write requested\n"); #endif cyasblkdev_blk_put(bd); ret = -EROFS; } } else if (bdev->bd_disk == bd->user_disk_1) { if ((mode & FMODE_WRITE) && bd->user_disk_1_read_only) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "device marked as readonly " "and write requested\n"); #endif cyasblkdev_blk_put(bd); ret = -EROFS; } } else if (bdev->bd_disk == bd->system_disk) { if ((mode & FMODE_WRITE) && bd->system_disk_read_only) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "device marked as readonly " "and write requested\n"); #endif cyasblkdev_blk_put(bd); ret = -EROFS; } } } return ret; } static int cyasblkdev_blk_release( struct gendisk *disk, fmode_t mode ) { struct cyasblkdev_blk_data *bd = disk->private_data; DBGPRN_FUNC_NAME; cyasblkdev_blk_put(bd); return 0; } static int cyasblkdev_blk_ioctl( struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg ) { DBGPRN_FUNC_NAME; if (cmd == HDIO_GETGEO) { /*for now we only process geometry IOCTL*/ struct hd_geometry geo; memset(&geo, 0, sizeof(struct hd_geometry)); geo.cylinders = get_capacity(bdev->bd_disk) / (4 * 16); geo.heads = 4; geo.sectors = 16; geo.start = get_start_sect(bdev); /* copy to user space */ return copy_to_user((void __user *)arg, &geo, sizeof(geo)) ? 
-EFAULT : 0; } return -ENOTTY; } /* check_events block_device opp * this one is called by kernel to confirm if the media really changed * as we indicated by issuing check_disk_change() call */ unsigned int cyasblkdev_check_events(struct gendisk *gd, unsigned int clearing) { struct cyasblkdev_blk_data *bd; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("cyasblkdev_media_changed() is called\n"); #endif if (gd) bd = gd->private_data; else { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "cyasblkdev_media_changed() is called, " "but gd is null\n"); #endif } /* return media change state - DISK_EVENT_MEDIA_CHANGE yes, 0 no */ return 0; } /* this one called by kernel to give us a chence * to prep the new media before it starts to rescaning * of the newlly inserted SD media */ int cyasblkdev_revalidate_disk(struct gendisk *gd) { /*int (*revalidate_disk) (struct gendisk *); */ #ifndef WESTBRIDGE_NDEBUG if (gd) cy_as_hal_print_message( "cyasblkdev_revalidate_disk() is called, " "(gl_bd->usage:%d)\n", gl_bd->usage); #endif /* 0 means ok, kern can go ahead with partition rescan */ return 0; } /*standard block device driver interface */ static struct block_device_operations cyasblkdev_bdops = { .open = cyasblkdev_blk_open, .release = cyasblkdev_blk_release, .ioctl = cyasblkdev_blk_ioctl, /* .getgeo = cyasblkdev_blk_getgeo, */ /* added to support media removal( real and simulated) media */ .check_events = cyasblkdev_check_events, /* added to support media removal( real and simulated) media */ .revalidate_disk = cyasblkdev_revalidate_disk, .owner = THIS_MODULE, }; /* west bridge block device prep request function */ static int cyasblkdev_blk_prep_rq( struct cyasblkdev_queue *bq, struct request *req ) { struct cyasblkdev_blk_data *bd = bq->data; int stat = BLKPREP_OK; DBGPRN_FUNC_NAME; /* If we have no device, we haven't finished initialising. 
*/ if (!bd || !bd->dev_handle) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message(KERN_ERR "cyasblkdev %s: killing request - no device/host\n", req->rq_disk->disk_name); #endif stat = BLKPREP_KILL; } if (bd->suspended) { blk_plug_device(bd->queue.queue); stat = BLKPREP_DEFER; } /* Check for excessive requests.*/ if (blk_rq_pos(req) + blk_rq_sectors(req) > get_capacity(req->rq_disk)) { cy_as_hal_print_message("cyasblkdev: bad request address\n"); stat = BLKPREP_KILL; } return stat; } /*west bridge storage async api on_completed callback */ static void cyasblkdev_issuecallback( /* Handle to the device completing the storage operation */ cy_as_device_handle handle, /* The media type completing the operation */ cy_as_media_type type, /* The device completing the operation */ uint32_t device, /* The unit completing the operation */ uint32_t unit, /* The block number of the completed operation */ uint32_t block_number, /* The type of operation */ cy_as_oper_type op, /* The error status */ cy_as_return_status_t status ) { int retry_cnt = 0; DBGPRN_FUNC_NAME; if (status != CY_AS_ERROR_SUCCESS) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: async r/w: op:%d failed with error %d at address %d\n", __func__, op, status, block_number); #endif } #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s calling blk_end_request from issue_callback " "req=0x%x, status=0x%x, nr_sectors=0x%x\n", __func__, (unsigned int) gl_bd->queue.req, status, (unsigned int) blk_rq_sectors(gl_bd->queue.req)); #endif /* note: blk_end_request w/o __ prefix should * not require spinlocks on the queue*/ while (blk_end_request(gl_bd->queue.req, status, blk_rq_sectors(gl_bd->queue.req)*512)) { retry_cnt++; } #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s blkdev_callback: ended rq on %d sectors, " "with err:%d, n:%d times\n", __func__, (int)blk_rq_sectors(gl_bd->queue.req), status, retry_cnt ); #endif spin_lock_irq(&gl_bd->lock); /*elevate next request, if there is one*/ if 
(!blk_queue_plugged(gl_bd->queue.queue)) { /* queue is not plugged */ gl_bd->queue.req = blk_fetch_request(gl_bd->queue.queue); #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s blkdev_callback: " "blk_fetch_request():%p\n", __func__, gl_bd->queue.req); #endif } if (gl_bd->queue.req) { spin_unlock_irq(&gl_bd->lock); #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s blkdev_callback: about to " "call issue_fn:%p\n", __func__, gl_bd->queue.req); #endif gl_bd->queue.issue_fn(&gl_bd->queue, gl_bd->queue.req); } else { spin_unlock_irq(&gl_bd->lock); } } /* issue astoria blkdev request (issue_fn) */ static int cyasblkdev_blk_issue_rq( struct cyasblkdev_queue *bq, struct request *req ) { struct cyasblkdev_blk_data *bd = bq->data; int index = 0; int ret = CY_AS_ERROR_SUCCESS; uint32_t req_sector = 0; uint32_t req_nr_sectors = 0; int bus_num = 0; int lcl_unit_no = 0; DBGPRN_FUNC_NAME; /* * will construct a scatterlist for the given request; * the return value is the number of actually used * entries in the resulting list. Then, this scatterlist * can be used for the actual DMA prep operation. 
*/ spin_lock_irq(&bd->lock); index = blk_rq_map_sg(bq->queue, req, bd->sg); if (req->rq_disk == bd->user_disk_0) { bus_num = bd->user_disk_0_bus_num; req_sector = blk_rq_pos(req) + gl_bd->user_disk_0_first_sector; req_nr_sectors = blk_rq_sectors(req); lcl_unit_no = gl_bd->user_disk_0_unit_no; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: request made to disk 0 " "for sector=%d, num_sectors=%d, unit_no=%d\n", __func__, req_sector, (int) blk_rq_sectors(req), lcl_unit_no); #endif } else if (req->rq_disk == bd->user_disk_1) { bus_num = bd->user_disk_1_bus_num; req_sector = blk_rq_pos(req) + gl_bd->user_disk_1_first_sector; /*SECT_NUM_TRANSLATE(blk_rq_sectors(req));*/ req_nr_sectors = blk_rq_sectors(req); lcl_unit_no = gl_bd->user_disk_1_unit_no; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: request made to disk 1 for " "sector=%d, num_sectors=%d, unit_no=%d\n", __func__, req_sector, (int) blk_rq_sectors(req), lcl_unit_no); #endif } else if (req->rq_disk == bd->system_disk) { bus_num = bd->system_disk_bus_num; req_sector = blk_rq_pos(req) + gl_bd->system_disk_first_sector; req_nr_sectors = blk_rq_sectors(req); lcl_unit_no = gl_bd->system_disk_unit_no; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: request made to system disk " "for sector=%d, num_sectors=%d, unit_no=%d\n", __func__, req_sector, (int) blk_rq_sectors(req), lcl_unit_no); #endif } #ifndef WESTBRIDGE_NDEBUG else { cy_as_hal_print_message( "%s: invalid disk used for request\n", __func__); } #endif spin_unlock_irq(&bd->lock); if (rq_data_dir(req) == READ) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: calling readasync() " "req_sector=0x%x, req_nr_sectors=0x%x, bd->sg:%x\n\n", __func__, req_sector, req_nr_sectors, (uint32_t)bd->sg); #endif ret = cy_as_storage_read_async(bd->dev_handle, bus_num, 0, lcl_unit_no, req_sector, bd->sg, req_nr_sectors, (cy_as_storage_callback)cyasblkdev_issuecallback); if (ret != CY_AS_ERROR_SUCCESS) { #ifndef WESTBRIDGE_NDEBUG 
cy_as_hal_print_message("%s:readasync() error %d at " "address %ld, unit no %d\n", __func__, ret, blk_rq_pos(req), lcl_unit_no); cy_as_hal_print_message("%s:ending i/o request " "on reg:%x\n", __func__, (uint32_t)req); #endif while (blk_end_request(req, (ret == CY_AS_ERROR_SUCCESS), req_nr_sectors*512)) ; bq->req = NULL; } } else { ret = cy_as_storage_write_async(bd->dev_handle, bus_num, 0, lcl_unit_no, req_sector, bd->sg, req_nr_sectors, (cy_as_storage_callback)cyasblkdev_issuecallback); if (ret != CY_AS_ERROR_SUCCESS) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: write failed with " "error %d at address %ld, unit no %d\n", __func__, ret, blk_rq_pos(req), lcl_unit_no); #endif /*end IO op on this request(does both * end_that_request_... _first & _last) */ while (blk_end_request(req, (ret == CY_AS_ERROR_SUCCESS), req_nr_sectors*512)) ; bq->req = NULL; } } return ret; } static unsigned long dev_use[CYASBLKDEV_NUM_MINORS / (8 * sizeof(unsigned long))]; /* storage event callback (note: called in astoria isr context) */ static void cyasblkdev_storage_callback( cy_as_device_handle dev_h, cy_as_bus_number_t bus, uint32_t device, cy_as_storage_event evtype, void *evdata ) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: bus:%d, device:%d, evtype:%d, " "evdata:%p\n ", __func__, bus, device, evtype, evdata); #endif switch (evtype) { case cy_as_storage_processor: break; case cy_as_storage_removed: break; case cy_as_storage_inserted: break; default: break; } } #define SECTORS_TO_SCAN 4096 uint32_t cyasblkdev_get_vfat_offset(int bus_num, int unit_no) { /* * for sd media, vfat partition boot record is not always * located at sector it greatly depends on the system and * software that was used to format the sd however, linux * fs layer always expects it at sector 0, this function * finds the offset and then uses it in all media r/w * operations */ int sect_no, stat; uint8_t *sect_buf; bool br_found = false; DBGPRN_FUNC_NAME; sect_buf = kmalloc(1024, 
GFP_KERNEL); /* since HAL layer always uses sg lists instead of the * buffer (for hw dmas) we need to initialize the sg list * for local buffer*/ sg_init_one(gl_bd->sg, sect_buf, 512); /* * Check MPR partition table 1st, then try to scan through * 1st 384 sectors until BR signature(intel JMP istruction * code and ,0x55AA) is found */ #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s scanning media for vfat partition...\n", __func__); #endif for (sect_no = 0; sect_no < SECTORS_TO_SCAN; sect_no++) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s before cyasstorageread " "gl_bd->sg addr=0x%x\n", __func__, (unsigned int) gl_bd->sg); #endif stat = cy_as_storage_read( /* Handle to the device of interest */ gl_bd->dev_handle, /* The bus to access */ bus_num, /* The device to access */ 0, /* The unit to access */ unit_no, /* absolute sector number */ sect_no, /* sg structure */ gl_bd->sg, /* The number of blocks to be read */ 1 ); /* try only sectors with boot signature */ if ((sect_buf[510] == 0x55) && (sect_buf[511] == 0xaa)) { /* vfat boot record may also be located at * sector 0, check it first */ if (sect_buf[0] == 0xEB) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s vfat partition found " "at sector:%d\n", __func__, sect_no); #endif br_found = true; break; } } if (stat != 0) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s sector scan error\n", __func__); #endif break; } } kfree(sect_buf); if (br_found) { return sect_no; } else { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s vfat partition is not found, using 0 offset\n", __func__); #endif return 0; } } cy_as_storage_query_device_data dev_data = {0}; static int cyasblkdev_add_disks(int bus_num, struct cyasblkdev_blk_data *bd, int total_media_count, int devidx) { int ret = 0; uint64_t disk_cap; int lcl_unit_no; cy_as_storage_query_unit_data unit_data = {0}; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s:query device: " "type:%d, removable:%d, writable:%d, " "blksize %d, 
units:%d, locked:%d, " "erase_sz:%d\n", __func__, dev_data.desc_p.type, dev_data.desc_p.removable, dev_data.desc_p.writeable, dev_data.desc_p.block_size, dev_data.desc_p.number_units, dev_data.desc_p.locked, dev_data.desc_p.erase_unit_size ); #endif /* make sure that device is not locked */ if (dev_data.desc_p.locked) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: device is locked\n", __func__); #endif ret = cy_as_storage_release( bd->dev_handle, bus_num, 0, 0, 0); if (ret != CY_AS_ERROR_SUCCESS) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s cannot release" " storage\n", __func__); #endif goto out; } goto out; } unit_data.device = 0; unit_data.unit = 0; unit_data.bus = bus_num; ret = cy_as_storage_query_unit(bd->dev_handle, &unit_data, 0, 0); if (ret != CY_AS_ERROR_SUCCESS) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: cannot query " "%d device unit - reason code %d\n", __func__, bus_num, ret); #endif goto out; } if (private_partition_bus == bus_num) { if (private_partition_size > 0) { ret = cy_as_storage_create_p_partition( bd->dev_handle, bus_num, 0, private_partition_size, 0, 0); if ((ret != CY_AS_ERROR_SUCCESS) && (ret != CY_AS_ERROR_ALREADY_PARTITIONED)) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: cy_as_storage_" "create_p_partition after size > 0 check " "failed with error code %d\n", __func__, ret); #endif disk_cap = (uint64_t) (unit_data.desc_p.unit_size); lcl_unit_no = 0; } else if (ret == CY_AS_ERROR_ALREADY_PARTITIONED) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: cy_as_storage_create_p_partition " "indicates memory already partitioned\n", __func__); #endif /*check to see that partition * matches size */ if (unit_data.desc_p.unit_size != private_partition_size) { ret = cy_as_storage_remove_p_partition( bd->dev_handle, bus_num, 0, 0, 0); if (ret == CY_AS_ERROR_SUCCESS) { ret = cy_as_storage_create_p_partition( bd->dev_handle, bus_num, 0, private_partition_size, 0, 0); if (ret == 
CY_AS_ERROR_SUCCESS) { unit_data.bus = bus_num; unit_data.device = 0; unit_data.unit = 1; } else { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: cy_as_storage_create_p_partition " "after removal unexpectedly failed " "with error %d\n", __func__, ret); #endif /* need to requery bus * seeing as delete * successful and create * failed we have changed * the disk properties */ unit_data.bus = bus_num; unit_data.device = 0; unit_data.unit = 0; } ret = cy_as_storage_query_unit( bd->dev_handle, &unit_data, 0, 0); if (ret != CY_AS_ERROR_SUCCESS) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: cannot query %d " "device unit - reason code %d\n", __func__, bus_num, ret); #endif goto out; } else { disk_cap = (uint64_t) (unit_data.desc_p.unit_size); lcl_unit_no = unit_data.unit; } } else { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: cy_as_storage_remove_p_partition " "failed with error %d\n", __func__, ret); #endif unit_data.bus = bus_num; unit_data.device = 0; unit_data.unit = 1; ret = cy_as_storage_query_unit( bd->dev_handle, &unit_data, 0, 0); if (ret != CY_AS_ERROR_SUCCESS) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: cannot query %d " "device unit - reason " "code %d\n", __func__, bus_num, ret); #endif goto out; } disk_cap = (uint64_t) (unit_data.desc_p.unit_size); lcl_unit_no = unit_data.unit; } } else { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: partition " "exists and sizes equal\n", __func__); #endif /*partition already existed, * need to query second unit*/ unit_data.bus = bus_num; unit_data.device = 0; unit_data.unit = 1; ret = cy_as_storage_query_unit( bd->dev_handle, &unit_data, 0, 0); if (ret != CY_AS_ERROR_SUCCESS) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: cannot query %d " "device unit " "- reason code %d\n", __func__, bus_num, ret); #endif goto out; } else { disk_cap = (uint64_t) (unit_data.desc_p.unit_size); lcl_unit_no = unit_data.unit; } } } else { #ifndef WESTBRIDGE_NDEBUG 
cy_as_hal_print_message( "%s: cy_as_storage_create_p_partition " "created successfully\n", __func__); #endif disk_cap = (uint64_t) (unit_data.desc_p.unit_size - private_partition_size); lcl_unit_no = 1; } } #ifndef WESTBRIDGE_NDEBUG else { cy_as_hal_print_message( "%s: invalid partition_size%d\n", __func__, private_partition_size); disk_cap = (uint64_t) (unit_data.desc_p.unit_size); lcl_unit_no = 0; } #endif } else { disk_cap = (uint64_t) (unit_data.desc_p.unit_size); lcl_unit_no = 0; } if ((bus_num == 0) || (total_media_count == 1)) { sprintf(bd->user_disk_0->disk_name, "cyasblkdevblk%d", devidx); #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: disk unit_sz:%lu blk_sz:%d, " "start_blk:%lu, capacity:%llu\n", __func__, (unsigned long) unit_data.desc_p.unit_size, unit_data.desc_p.block_size, (unsigned long) unit_data.desc_p.start_block, (uint64_t)disk_cap ); #endif #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: setting gendisk disk " "capacity to %d\n", __func__, (int) disk_cap); #endif /* initializing bd->queue */ #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: init bd->queue\n", __func__); #endif /* this will create a * queue kernel thread */ cyasblkdev_init_queue( &bd->queue, &bd->lock); bd->queue.prep_fn = cyasblkdev_blk_prep_rq; bd->queue.issue_fn = cyasblkdev_blk_issue_rq; bd->queue.data = bd; /*blk_size should always * be a multiple of 512, * set to the max to ensure * that all accesses aligned * to the greatest multiple, * can adjust request to * smaller block sizes * dynamically*/ bd->user_disk_0_read_only = !dev_data.desc_p.writeable; bd->user_disk_0_blk_size = dev_data.desc_p.block_size; bd->user_disk_0_type = dev_data.desc_p.type; bd->user_disk_0_bus_num = bus_num; bd->user_disk_0->major = major; bd->user_disk_0->first_minor = devidx << CYASBLKDEV_SHIFT; bd->user_disk_0->minors = 8; bd->user_disk_0->fops = &cyasblkdev_bdops; bd->user_disk_0->events = DISK_EVENT_MEDIA_CHANGE; bd->user_disk_0->private_data = bd; bd->user_disk_0->queue 
= bd->queue.queue; bd->dbgprn_flags = DBGPRN_RD_RQ; bd->user_disk_0_unit_no = lcl_unit_no; blk_queue_logical_block_size(bd->queue.queue, bd->user_disk_0_blk_size); set_capacity(bd->user_disk_0, disk_cap); #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: returned from set_capacity %d\n", __func__, (int) disk_cap); #endif /* need to start search from * public partition beginning */ if (vfat_search) { bd->user_disk_0_first_sector = cyasblkdev_get_vfat_offset( bd->user_disk_0_bus_num, bd->user_disk_0_unit_no); } else { bd->user_disk_0_first_sector = 0; } #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: set user_disk_0_first " "sector to %d\n", __func__, bd->user_disk_0_first_sector); cy_as_hal_print_message( "%s: add_disk: disk->major=0x%x\n", __func__, bd->user_disk_0->major); cy_as_hal_print_message( "%s: add_disk: " "disk->first_minor=0x%x\n", __func__, bd->user_disk_0->first_minor); cy_as_hal_print_message( "%s: add_disk: " "disk->minors=0x%x\n", __func__, bd->user_disk_0->minors); cy_as_hal_print_message( "%s: add_disk: " "disk->disk_name=%s\n", __func__, bd->user_disk_0->disk_name); cy_as_hal_print_message( "%s: add_disk: " "disk->part_tbl=0x%x\n", __func__, (unsigned int) bd->user_disk_0->part_tbl); cy_as_hal_print_message( "%s: add_disk: " "disk->queue=0x%x\n", __func__, (unsigned int) bd->user_disk_0->queue); cy_as_hal_print_message( "%s: add_disk: " "disk->flags=0x%x\n", __func__, (unsigned int) bd->user_disk_0->flags); cy_as_hal_print_message( "%s: add_disk: " "disk->driverfs_dev=0x%x\n", __func__, (unsigned int) bd->user_disk_0->driverfs_dev); cy_as_hal_print_message( "%s: add_disk: " "disk->slave_dir=0x%x\n", __func__, (unsigned int) bd->user_disk_0->slave_dir); cy_as_hal_print_message( "%s: add_disk: " "disk->random=0x%x\n", __func__, (unsigned int) bd->user_disk_0->random); cy_as_hal_print_message( "%s: add_disk: " "disk->node_id=0x%x\n", __func__, (unsigned int) bd->user_disk_0->node_id); #endif add_disk(bd->user_disk_0); } else if 
((bus_num == 1) && (total_media_count == 2)) { bd->user_disk_1_read_only = !dev_data.desc_p.writeable; bd->user_disk_1_blk_size = dev_data.desc_p.block_size; bd->user_disk_1_type = dev_data.desc_p.type; bd->user_disk_1_bus_num = bus_num; bd->user_disk_1->major = major; bd->user_disk_1->first_minor = (devidx + 1) << CYASBLKDEV_SHIFT; bd->user_disk_1->minors = 8; bd->user_disk_1->fops = &cyasblkdev_bdops; bd->user_disk_1->events = DISK_EVENT_MEDIA_CHANGE; bd->user_disk_1->private_data = bd; bd->user_disk_1->queue = bd->queue.queue; bd->dbgprn_flags = DBGPRN_RD_RQ; bd->user_disk_1_unit_no = lcl_unit_no; sprintf(bd->user_disk_1->disk_name, "cyasblkdevblk%d", (devidx + 1)); #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: disk unit_sz:%lu " "blk_sz:%d, " "start_blk:%lu, " "capacity:%llu\n", __func__, (unsigned long) unit_data.desc_p.unit_size, unit_data.desc_p.block_size, (unsigned long) unit_data.desc_p.start_block, (uint64_t)disk_cap ); #endif /*blk_size should always be a * multiple of 512, set to the max * to ensure that all accesses * aligned to the greatest multiple, * can adjust request to smaller * block sizes dynamically*/ if (bd->user_disk_0_blk_size > bd->user_disk_1_blk_size) { blk_queue_logical_block_size(bd->queue.queue, bd->user_disk_0_blk_size); #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: set hard sect_sz:%d\n", __func__, bd->user_disk_0_blk_size); #endif } else { blk_queue_logical_block_size(bd->queue.queue, bd->user_disk_1_blk_size); #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: set hard sect_sz:%d\n", __func__, bd->user_disk_1_blk_size); #endif } set_capacity(bd->user_disk_1, disk_cap); if (vfat_search) { bd->user_disk_1_first_sector = cyasblkdev_get_vfat_offset( bd->user_disk_1_bus_num, bd->user_disk_1_unit_no); } else { bd->user_disk_1_first_sector = 0; } add_disk(bd->user_disk_1); } if (lcl_unit_no > 0) { if (bd->system_disk == NULL) { bd->system_disk = alloc_disk(8); if (bd->system_disk == NULL) { kfree(bd); bd = 
ERR_PTR(-ENOMEM); return bd; } disk_cap = (uint64_t) (private_partition_size); /* set properties of * system disk */ bd->system_disk_read_only = !dev_data.desc_p.writeable; bd->system_disk_blk_size = dev_data.desc_p.block_size; bd->system_disk_bus_num = bus_num; bd->system_disk->major = major; bd->system_disk->first_minor = (devidx + 2) << CYASBLKDEV_SHIFT; bd->system_disk->minors = 8; bd->system_disk->fops = &cyasblkdev_bdops; bd->system_disk->events = DISK_EVENT_MEDIA_CHANGE; bd->system_disk->private_data = bd; bd->system_disk->queue = bd->queue.queue; /* don't search for vfat * with system disk */ bd->system_disk_first_sector = 0; sprintf( bd->system_disk->disk_name, "cyasblkdevblk%d", (devidx + 2)); set_capacity(bd->system_disk, disk_cap); add_disk(bd->system_disk); } #ifndef WESTBRIDGE_NDEBUG else { cy_as_hal_print_message( "%s: system disk already allocated %d\n", __func__, bus_num); } #endif } out: return ret; } static struct cyasblkdev_blk_data *cyasblkdev_blk_alloc(void) { struct cyasblkdev_blk_data *bd; int ret = 0; cy_as_return_status_t stat = -1; int bus_num = 0; int total_media_count = 0; int devidx = 0; DBGPRN_FUNC_NAME; total_media_count = 0; devidx = find_first_zero_bit(dev_use, CYASBLKDEV_NUM_MINORS); if (devidx >= CYASBLKDEV_NUM_MINORS) return ERR_PTR(-ENOSPC); __set_bit(devidx, dev_use); __set_bit(devidx + 1, dev_use); bd = kzalloc(sizeof(struct cyasblkdev_blk_data), GFP_KERNEL); if (bd) { gl_bd = bd; spin_lock_init(&bd->lock); bd->usage = 1; /* setup the block_dev_ops pointer*/ bd->blkops = &cyasblkdev_bdops; /* Get the device handle */ bd->dev_handle = cyasdevice_getdevhandle(); if (0 == bd->dev_handle) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: get device failed\n", __func__); #endif ret = ENODEV; goto out; } #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s west bridge device handle:%x\n", __func__, (uint32_t)bd->dev_handle); #endif /* start the storage api and get a handle to the * device we are interested in. 
*/ /* Error code to use if the conditions are not satisfied. */ ret = ENOMEDIUM; stat = cy_as_misc_release_resource(bd->dev_handle, cy_as_bus_0); if ((stat != CY_AS_ERROR_SUCCESS) && (stat != CY_AS_ERROR_RESOURCE_NOT_OWNED)) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: cannot release " "resource bus 0 - reason code %d\n", __func__, stat); #endif } stat = cy_as_misc_release_resource(bd->dev_handle, cy_as_bus_1); if ((stat != CY_AS_ERROR_SUCCESS) && (stat != CY_AS_ERROR_RESOURCE_NOT_OWNED)) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: cannot release " "resource bus 0 - reason code %d\n", __func__, stat); #endif } /* start storage stack*/ stat = cy_as_storage_start(bd->dev_handle, 0, 0x101); if (stat != CY_AS_ERROR_SUCCESS) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: cannot start storage " "stack - reason code %d\n", __func__, stat); #endif goto out; } #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: storage started:%d ok\n", __func__, stat); #endif stat = cy_as_storage_register_callback(bd->dev_handle, cyasblkdev_storage_callback); if (stat != CY_AS_ERROR_SUCCESS) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: cannot register callback " "- reason code %d\n", __func__, stat); #endif goto out; } for (bus_num = 0; bus_num < 2; bus_num++) { stat = cy_as_storage_query_bus(bd->dev_handle, bus_num, &bd->media_count[bus_num], 0, 0); if (stat == CY_AS_ERROR_SUCCESS) { total_media_count = total_media_count + bd->media_count[bus_num]; } else { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: cannot query %d, " "reason code: %d\n", __func__, bus_num, stat); #endif goto out; } } if (total_media_count == 0) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: no storage media was found\n", __func__); #endif goto out; } else if (total_media_count >= 1) { if (bd->user_disk_0 == NULL) { bd->user_disk_0 = alloc_disk(8); if (bd->user_disk_0 == NULL) { kfree(bd); bd = ERR_PTR(-ENOMEM); return bd; } } #ifndef 
WESTBRIDGE_NDEBUG else { cy_as_hal_print_message("%s: no available " "gen_disk for disk 0, " "physically inconsistent\n", __func__); } #endif } if (total_media_count == 2) { if (bd->user_disk_1 == NULL) { bd->user_disk_1 = alloc_disk(8); if (bd->user_disk_1 == NULL) { kfree(bd); bd = ERR_PTR(-ENOMEM); return bd; } } #ifndef WESTBRIDGE_NDEBUG else { cy_as_hal_print_message("%s: no available " "gen_disk for media, " "physically inconsistent\n", __func__); } #endif } #ifndef WESTBRIDGE_NDEBUG else if (total_media_count > 2) { cy_as_hal_print_message("%s: count corrupted = 0x%d\n", __func__, total_media_count); } #endif #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: %d device(s) found\n", __func__, total_media_count); #endif for (bus_num = 0; bus_num <= 1; bus_num++) { /*claim storage for cpu */ stat = cy_as_storage_claim(bd->dev_handle, bus_num, 0, 0, 0); if (stat != CY_AS_ERROR_SUCCESS) { cy_as_hal_print_message("%s: cannot claim " "%d bus - reason code %d\n", __func__, bus_num, stat); goto out; } dev_data.bus = bus_num; dev_data.device = 0; stat = cy_as_storage_query_device(bd->dev_handle, &dev_data, 0, 0); if (stat == CY_AS_ERROR_SUCCESS) { cyasblkdev_add_disks(bus_num, bd, total_media_count, devidx); } else if (stat == CY_AS_ERROR_NO_SUCH_DEVICE) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: no device on bus %d\n", __func__, bus_num); #endif } else { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: cannot query %d device " "- reason code %d\n", __func__, bus_num, stat); #endif goto out; } } /* end for (bus_num = 0; bus_num <= 1; bus_num++)*/ return bd; } out: #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s: bd failed to initialize\n", __func__); #endif kfree(bd); bd = ERR_PTR(-ret); return bd; } /*init west bridge block device */ static int cyasblkdev_blk_initialize(void) { struct cyasblkdev_blk_data *bd; int res; DBGPRN_FUNC_NAME; res = register_blkdev(major, "cyasblkdev"); if (res < 0) { #ifndef WESTBRIDGE_NDEBUG 
cy_as_hal_print_message(KERN_WARNING "%s unable to get major %d for cyasblkdev media: %d\n", __func__, major, res); #endif return res; } if (major == 0) major = res; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "%s cyasblkdev registered with major number: %d\n", __func__, major); #endif bd = cyasblkdev_blk_alloc(); if (IS_ERR(bd)) return PTR_ERR(bd); return 0; } /* start block device */ static int __init cyasblkdev_blk_init(void) { int res = -ENOMEM; DBGPRN_FUNC_NAME; /* get the cyasdev handle for future use*/ cyas_dev_handle = cyasdevice_getdevhandle(); if (cyasblkdev_blk_initialize() == 0) return 0; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("cyasblkdev init error:%d\n", res); #endif return res; } static void cyasblkdev_blk_deinit(struct cyasblkdev_blk_data *bd) { DBGPRN_FUNC_NAME; if (bd) { int devidx; if (bd->user_disk_0 != NULL) { del_gendisk(bd->user_disk_0); devidx = bd->user_disk_0->first_minor >> CYASBLKDEV_SHIFT; __clear_bit(devidx, dev_use); } if (bd->user_disk_1 != NULL) { del_gendisk(bd->user_disk_1); devidx = bd->user_disk_1->first_minor >> CYASBLKDEV_SHIFT; __clear_bit(devidx, dev_use); } if (bd->system_disk != NULL) { del_gendisk(bd->system_disk); devidx = bd->system_disk->first_minor >> CYASBLKDEV_SHIFT; __clear_bit(devidx, dev_use); } cyasblkdev_blk_put(bd); } } /* block device exit */ static void __exit cyasblkdev_blk_exit(void) { DBGPRN_FUNC_NAME; cyasblkdev_blk_deinit(gl_bd); unregister_blkdev(major, "cyasblkdev"); } module_init(cyasblkdev_blk_init); module_exit(cyasblkdev_blk_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("antioch (cyasblkdev) block device driver"); MODULE_AUTHOR("cypress semiconductor"); /*[]*/
gpl-2.0
hwoarang/linux
fs/ntfs/namei.c
3390
14444
/* * namei.c - NTFS kernel directory inode operations. Part of the Linux-NTFS * project. * * Copyright (c) 2001-2006 Anton Altaparmakov * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/dcache.h> #include <linux/exportfs.h> #include <linux/security.h> #include <linux/slab.h> #include "attrib.h" #include "debug.h" #include "dir.h" #include "mft.h" #include "ntfs.h" /** * ntfs_lookup - find the inode represented by a dentry in a directory inode * @dir_ino: directory inode in which to look for the inode * @dent: dentry representing the inode to look for * @nd: lookup nameidata * * In short, ntfs_lookup() looks for the inode represented by the dentry @dent * in the directory inode @dir_ino and if found attaches the inode to the * dentry @dent. * * In more detail, the dentry @dent specifies which inode to look for by * supplying the name of the inode in @dent->d_name.name. ntfs_lookup() * converts the name to Unicode and walks the contents of the directory inode * @dir_ino looking for the converted Unicode name. If the name is found in the * directory, the corresponding inode is loaded by calling ntfs_iget() on its * inode number and the inode is associated with the dentry @dent via a call to * d_splice_alias(). 
* * If the name is not found in the directory, a NULL inode is inserted into the * dentry @dent via a call to d_add(). The dentry is then termed a negative * dentry. * * Only if an actual error occurs, do we return an error via ERR_PTR(). * * In order to handle the case insensitivity issues of NTFS with regards to the * dcache and the dcache requiring only one dentry per directory, we deal with * dentry aliases that only differ in case in ->ntfs_lookup() while maintaining * a case sensitive dcache. This means that we get the full benefit of dcache * speed when the file/directory is looked up with the same case as returned by * ->ntfs_readdir() but that a lookup for any other case (or for the short file * name) will not find anything in dcache and will enter ->ntfs_lookup() * instead, where we search the directory for a fully matching file name * (including case) and if that is not found, we search for a file name that * matches with different case and if that has non-POSIX semantics we return * that. We actually do only one search (case sensitive) and keep tabs on * whether we have found a case insensitive match in the process. * * To simplify matters for us, we do not treat the short vs long filenames as * two hard links but instead if the lookup matches a short filename, we * return the dentry for the corresponding long filename instead. * * There are three cases we need to distinguish here: * * 1) @dent perfectly matches (i.e. including case) a directory entry with a * file name in the WIN32 or POSIX namespaces. In this case * ntfs_lookup_inode_by_name() will return with name set to NULL and we * just d_splice_alias() @dent. * 2) @dent matches (not including case) a directory entry with a file name in * the WIN32 namespace. In this case ntfs_lookup_inode_by_name() will return * with name set to point to a kmalloc()ed ntfs_name structure containing * the properly cased little endian Unicode name. 
We convert the name to the * current NLS code page, search if a dentry with this name already exists * and if so return that instead of @dent. At this point things are * complicated by the possibility of 'disconnected' dentries due to NFS * which we deal with appropriately (see the code comments). The VFS will * then destroy the old @dent and use the one we returned. If a dentry is * not found, we allocate a new one, d_splice_alias() it, and return it as * above. * 3) @dent matches either perfectly or not (i.e. we don't care about case) a * directory entry with a file name in the DOS namespace. In this case * ntfs_lookup_inode_by_name() will return with name set to point to a * kmalloc()ed ntfs_name structure containing the mft reference (cpu endian) * of the inode. We use the mft reference to read the inode and to find the * file name in the WIN32 namespace corresponding to the matched short file * name. We then convert the name to the current NLS code page, and proceed * searching for a dentry with this name, etc, as in case 2), above. * * Locking: Caller must hold i_mutex on the directory. */ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent, unsigned int flags) { ntfs_volume *vol = NTFS_SB(dir_ino->i_sb); struct inode *dent_inode; ntfschar *uname; ntfs_name *name = NULL; MFT_REF mref; unsigned long dent_ino; int uname_len; ntfs_debug("Looking up %s in directory inode 0x%lx.", dent->d_name.name, dir_ino->i_ino); /* Convert the name of the dentry to Unicode. */ uname_len = ntfs_nlstoucs(vol, dent->d_name.name, dent->d_name.len, &uname); if (uname_len < 0) { if (uname_len != -ENAMETOOLONG) ntfs_error(vol->sb, "Failed to convert name to " "Unicode."); return ERR_PTR(uname_len); } mref = ntfs_lookup_inode_by_name(NTFS_I(dir_ino), uname, uname_len, &name); kmem_cache_free(ntfs_name_cache, uname); if (!IS_ERR_MREF(mref)) { dent_ino = MREF(mref); ntfs_debug("Found inode 0x%lx. 
Calling ntfs_iget.", dent_ino); dent_inode = ntfs_iget(vol->sb, dent_ino); if (likely(!IS_ERR(dent_inode))) { /* Consistency check. */ if (is_bad_inode(dent_inode) || MSEQNO(mref) == NTFS_I(dent_inode)->seq_no || dent_ino == FILE_MFT) { /* Perfect WIN32/POSIX match. -- Case 1. */ if (!name) { ntfs_debug("Done. (Case 1.)"); return d_splice_alias(dent_inode, dent); } /* * We are too indented. Handle imperfect * matches and short file names further below. */ goto handle_name; } ntfs_error(vol->sb, "Found stale reference to inode " "0x%lx (reference sequence number = " "0x%x, inode sequence number = 0x%x), " "returning -EIO. Run chkdsk.", dent_ino, MSEQNO(mref), NTFS_I(dent_inode)->seq_no); iput(dent_inode); dent_inode = ERR_PTR(-EIO); } else ntfs_error(vol->sb, "ntfs_iget(0x%lx) failed with " "error code %li.", dent_ino, PTR_ERR(dent_inode)); kfree(name); /* Return the error code. */ return (struct dentry *)dent_inode; } /* It is guaranteed that @name is no longer allocated at this point. */ if (MREF_ERR(mref) == -ENOENT) { ntfs_debug("Entry was not found, adding negative dentry."); /* The dcache will handle negative entries. */ d_add(dent, NULL); ntfs_debug("Done."); return NULL; } ntfs_error(vol->sb, "ntfs_lookup_ino_by_name() failed with error " "code %i.", -MREF_ERR(mref)); return ERR_PTR(MREF_ERR(mref)); // TODO: Consider moving this lot to a separate function! (AIA) handle_name: { MFT_RECORD *m; ntfs_attr_search_ctx *ctx; ntfs_inode *ni = NTFS_I(dent_inode); int err; struct qstr nls_name; nls_name.name = NULL; if (name->type != FILE_NAME_DOS) { /* Case 2. */ ntfs_debug("Case 2."); nls_name.len = (unsigned)ntfs_ucstonls(vol, (ntfschar*)&name->name, name->len, (unsigned char**)&nls_name.name, 0); kfree(name); } else /* if (name->type == FILE_NAME_DOS) */ { /* Case 3. */ FILE_NAME_ATTR *fn; ntfs_debug("Case 3."); kfree(name); /* Find the WIN32 name corresponding to the matched DOS name. 
*/ ni = NTFS_I(dent_inode); m = map_mft_record(ni); if (IS_ERR(m)) { err = PTR_ERR(m); m = NULL; ctx = NULL; goto err_out; } ctx = ntfs_attr_get_search_ctx(ni, m); if (unlikely(!ctx)) { err = -ENOMEM; goto err_out; } do { ATTR_RECORD *a; u32 val_len; err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, 0, 0, NULL, 0, ctx); if (unlikely(err)) { ntfs_error(vol->sb, "Inode corrupt: No WIN32 " "namespace counterpart to DOS " "file name. Run chkdsk."); if (err == -ENOENT) err = -EIO; goto err_out; } /* Consistency checks. */ a = ctx->attr; if (a->non_resident || a->flags) goto eio_err_out; val_len = le32_to_cpu(a->data.resident.value_length); if (le16_to_cpu(a->data.resident.value_offset) + val_len > le32_to_cpu(a->length)) goto eio_err_out; fn = (FILE_NAME_ATTR*)((u8*)ctx->attr + le16_to_cpu( ctx->attr->data.resident.value_offset)); if ((u32)(fn->file_name_length * sizeof(ntfschar) + sizeof(FILE_NAME_ATTR)) > val_len) goto eio_err_out; } while (fn->file_name_type != FILE_NAME_WIN32); /* Convert the found WIN32 name to current NLS code page. */ nls_name.len = (unsigned)ntfs_ucstonls(vol, (ntfschar*)&fn->file_name, fn->file_name_length, (unsigned char**)&nls_name.name, 0); ntfs_attr_put_search_ctx(ctx); unmap_mft_record(ni); } m = NULL; ctx = NULL; /* Check if a conversion error occurred. */ if ((signed)nls_name.len < 0) { err = (signed)nls_name.len; goto err_out; } nls_name.hash = full_name_hash(nls_name.name, nls_name.len); dent = d_add_ci(dent, dent_inode, &nls_name); kfree(nls_name.name); return dent; eio_err_out: ntfs_error(vol->sb, "Illegal file name attribute. Run chkdsk."); err = -EIO; err_out: if (ctx) ntfs_attr_put_search_ctx(ctx); if (m) unmap_mft_record(ni); iput(dent_inode); ntfs_error(vol->sb, "Failed, returning error code %i.", err); return ERR_PTR(err); } } /** * Inode operations for directories. */ const struct inode_operations ntfs_dir_inode_ops = { .lookup = ntfs_lookup, /* VFS: Lookup directory. 
*/ }; /** * ntfs_get_parent - find the dentry of the parent of a given directory dentry * @child_dent: dentry of the directory whose parent directory to find * * Find the dentry for the parent directory of the directory specified by the * dentry @child_dent. This function is called from * fs/exportfs/expfs.c::find_exported_dentry() which in turn is called from the * default ->decode_fh() which is export_decode_fh() in the same file. * * The code is based on the ext3 ->get_parent() implementation found in * fs/ext3/namei.c::ext3_get_parent(). * * Note: ntfs_get_parent() is called with @child_dent->d_inode->i_mutex down. * * Return the dentry of the parent directory on success or the error code on * error (IS_ERR() is true). */ static struct dentry *ntfs_get_parent(struct dentry *child_dent) { struct inode *vi = child_dent->d_inode; ntfs_inode *ni = NTFS_I(vi); MFT_RECORD *mrec; ntfs_attr_search_ctx *ctx; ATTR_RECORD *attr; FILE_NAME_ATTR *fn; unsigned long parent_ino; int err; ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); /* Get the mft record of the inode belonging to the child dentry. */ mrec = map_mft_record(ni); if (IS_ERR(mrec)) return (struct dentry *)mrec; /* Find the first file name attribute in the mft record. */ ctx = ntfs_attr_get_search_ctx(ni, mrec); if (unlikely(!ctx)) { unmap_mft_record(ni); return ERR_PTR(-ENOMEM); } try_next: err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, CASE_SENSITIVE, 0, NULL, 0, ctx); if (unlikely(err)) { ntfs_attr_put_search_ctx(ctx); unmap_mft_record(ni); if (err == -ENOENT) ntfs_error(vi->i_sb, "Inode 0x%lx does not have a " "file name attribute. Run chkdsk.", vi->i_ino); return ERR_PTR(err); } attr = ctx->attr; if (unlikely(attr->non_resident)) goto try_next; fn = (FILE_NAME_ATTR *)((u8 *)attr + le16_to_cpu(attr->data.resident.value_offset)); if (unlikely((u8 *)fn + le32_to_cpu(attr->data.resident.value_length) > (u8*)attr + le32_to_cpu(attr->length))) goto try_next; /* Get the inode number of the parent directory. 
*/ parent_ino = MREF_LE(fn->parent_directory); /* Release the search context and the mft record of the child. */ ntfs_attr_put_search_ctx(ctx); unmap_mft_record(ni); return d_obtain_alias(ntfs_iget(vi->i_sb, parent_ino)); } static struct inode *ntfs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct inode *inode; inode = ntfs_iget(sb, ino); if (!IS_ERR(inode)) { if (is_bad_inode(inode) || inode->i_generation != generation) { iput(inode); inode = ERR_PTR(-ESTALE); } } return inode; } static struct dentry *ntfs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ntfs_nfs_get_inode); } static struct dentry *ntfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, ntfs_nfs_get_inode); } /** * Export operations allowing NFS exporting of mounted NTFS partitions. * * We use the default ->encode_fh() for now. Note that they * use 32 bits to store the inode number which is an unsigned long so on 64-bit * architectures is usually 64 bits so it would all fail horribly on huge * volumes. I guess we need to define our own encode and decode fh functions * that store 64-bit inode numbers at some point but for now we will ignore the * problem... * * We also use the default ->get_name() helper (used by ->decode_fh() via * fs/exportfs/expfs.c::find_exported_dentry()) as that is completely fs * independent. * * The default ->get_parent() just returns -EACCES so we have to provide our * own and the default ->get_dentry() is incompatible with NTFS due to not * allowing the inode number 0 which is used in NTFS for the system file $MFT * and due to using iget() whereas NTFS needs ntfs_iget(). */ const struct export_operations ntfs_export_ops = { .get_parent = ntfs_get_parent, /* Find the parent of a given directory. */ .fh_to_dentry = ntfs_fh_to_dentry, .fh_to_parent = ntfs_fh_to_parent, };
gpl-2.0
LokiWuh/android_kernel_asus_grouper
drivers/s390/block/dcssblk.c
3902
27424
/* * dcssblk.c -- the S/390 block driver for dcss memory * * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer */ #define KMSG_COMPONENT "dcssblk" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ctype.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/completion.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <asm/extmem.h> #include <asm/io.h> #define DCSSBLK_NAME "dcssblk" #define DCSSBLK_MINORS_PER_DISK 1 #define DCSSBLK_PARM_LEN 400 #define DCSS_BUS_ID_SIZE 20 static int dcssblk_open(struct block_device *bdev, fmode_t mode); static int dcssblk_release(struct gendisk *disk, fmode_t mode); static int dcssblk_make_request(struct request_queue *q, struct bio *bio); static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum, void **kaddr, unsigned long *pfn); static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; static int dcssblk_major; static const struct block_device_operations dcssblk_devops = { .owner = THIS_MODULE, .open = dcssblk_open, .release = dcssblk_release, .direct_access = dcssblk_direct_access, }; struct dcssblk_dev_info { struct list_head lh; struct device dev; char segment_name[DCSS_BUS_ID_SIZE]; atomic_t use_count; struct gendisk *gd; unsigned long start; unsigned long end; int segment_type; unsigned char save_pending; unsigned char is_shared; struct request_queue *dcssblk_queue; int num_of_segments; struct list_head seg_list; }; struct segment_info { struct list_head lh; char segment_name[DCSS_BUS_ID_SIZE]; unsigned long start; unsigned long end; int segment_type; }; static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count); static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count); static ssize_t dcssblk_save_store(struct device * dev, struct 
device_attribute *attr, const char * buf, size_t count); static ssize_t dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t dcssblk_shared_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count); static ssize_t dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t dcssblk_seglist_show(struct device *dev, struct device_attribute *attr, char *buf); static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store); static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store); static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show, dcssblk_save_store); static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show, dcssblk_shared_store); static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL); static struct device *dcssblk_root_dev; static LIST_HEAD(dcssblk_devices); static struct rw_semaphore dcssblk_devices_sem; /* * release function for segment device. */ static void dcssblk_release_segment(struct device *dev) { struct dcssblk_dev_info *dev_info; struct segment_info *entry, *temp; dev_info = container_of(dev, struct dcssblk_dev_info, dev); list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) { list_del(&entry->lh); kfree(entry); } kfree(dev_info); module_put(THIS_MODULE); } /* * get a minor number. needs to be called with * down_write(&dcssblk_devices_sem) and the * device needs to be enqueued before the semaphore is * freed. 
*/ static int dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) { int minor, found; struct dcssblk_dev_info *entry; if (dev_info == NULL) return -EINVAL; for (minor = 0; minor < (1<<MINORBITS); minor++) { found = 0; // test if minor available list_for_each_entry(entry, &dcssblk_devices, lh) if (minor == entry->gd->first_minor) found++; if (!found) break; // got unused minor } if (found) return -EBUSY; dev_info->gd->first_minor = minor; return 0; } /* * get the struct dcssblk_dev_info from dcssblk_devices * for the given name. * down_read(&dcssblk_devices_sem) must be held. */ static struct dcssblk_dev_info * dcssblk_get_device_by_name(char *name) { struct dcssblk_dev_info *entry; list_for_each_entry(entry, &dcssblk_devices, lh) { if (!strcmp(name, entry->segment_name)) { return entry; } } return NULL; } /* * get the struct segment_info from seg_list * for the given name. * down_read(&dcssblk_devices_sem) must be held. */ static struct segment_info * dcssblk_get_segment_by_name(char *name) { struct dcssblk_dev_info *dev_info; struct segment_info *entry; list_for_each_entry(dev_info, &dcssblk_devices, lh) { list_for_each_entry(entry, &dev_info->seg_list, lh) { if (!strcmp(name, entry->segment_name)) return entry; } } return NULL; } /* * get the highest address of the multi-segment block. */ static unsigned long dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info) { unsigned long highest_addr; struct segment_info *entry; highest_addr = 0; list_for_each_entry(entry, &dev_info->seg_list, lh) { if (highest_addr < entry->end) highest_addr = entry->end; } return highest_addr; } /* * get the lowest address of the multi-segment block. 
*/ static unsigned long dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info) { int set_first; unsigned long lowest_addr; struct segment_info *entry; set_first = 0; lowest_addr = 0; list_for_each_entry(entry, &dev_info->seg_list, lh) { if (set_first == 0) { lowest_addr = entry->start; set_first = 1; } else { if (lowest_addr > entry->start) lowest_addr = entry->start; } } return lowest_addr; } /* * Check continuity of segments. */ static int dcssblk_is_continuous(struct dcssblk_dev_info *dev_info) { int i, j, rc; struct segment_info *sort_list, *entry, temp; if (dev_info->num_of_segments <= 1) return 0; sort_list = kzalloc( sizeof(struct segment_info) * dev_info->num_of_segments, GFP_KERNEL); if (sort_list == NULL) return -ENOMEM; i = 0; list_for_each_entry(entry, &dev_info->seg_list, lh) { memcpy(&sort_list[i], entry, sizeof(struct segment_info)); i++; } /* sort segments */ for (i = 0; i < dev_info->num_of_segments; i++) for (j = 0; j < dev_info->num_of_segments; j++) if (sort_list[j].start > sort_list[i].start) { memcpy(&temp, &sort_list[i], sizeof(struct segment_info)); memcpy(&sort_list[i], &sort_list[j], sizeof(struct segment_info)); memcpy(&sort_list[j], &temp, sizeof(struct segment_info)); } /* check continuity */ for (i = 0; i < dev_info->num_of_segments - 1; i++) { if ((sort_list[i].end + 1) != sort_list[i+1].start) { pr_err("Adjacent DCSSs %s and %s are not " "contiguous\n", sort_list[i].segment_name, sort_list[i+1].segment_name); rc = -EINVAL; goto out; } /* EN and EW are allowed in a block device */ if (sort_list[i].segment_type != sort_list[i+1].segment_type) { if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) || (sort_list[i].segment_type == SEG_TYPE_ER) || !(sort_list[i+1].segment_type & SEGMENT_EXCLUSIVE) || (sort_list[i+1].segment_type == SEG_TYPE_ER)) { pr_err("DCSS %s and DCSS %s have " "incompatible types\n", sort_list[i].segment_name, sort_list[i+1].segment_name); rc = -EINVAL; goto out; } } } rc = 0; out: kfree(sort_list); return rc; } 
/* * Load a segment */ static int dcssblk_load_segment(char *name, struct segment_info **seg_info) { int rc; /* already loaded? */ down_read(&dcssblk_devices_sem); *seg_info = dcssblk_get_segment_by_name(name); up_read(&dcssblk_devices_sem); if (*seg_info != NULL) return -EEXIST; /* get a struct segment_info */ *seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL); if (*seg_info == NULL) return -ENOMEM; strcpy((*seg_info)->segment_name, name); /* load the segment */ rc = segment_load(name, SEGMENT_SHARED, &(*seg_info)->start, &(*seg_info)->end); if (rc < 0) { segment_warning(rc, (*seg_info)->segment_name); kfree(*seg_info); } else { INIT_LIST_HEAD(&(*seg_info)->lh); (*seg_info)->segment_type = rc; } return rc; } static void dcssblk_unregister_callback(struct device *dev) { device_unregister(dev); put_device(dev); } /* * device attribute for switching shared/nonshared (exclusive) * operation (show + store) */ static ssize_t dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf) { struct dcssblk_dev_info *dev_info; dev_info = container_of(dev, struct dcssblk_dev_info, dev); return sprintf(buf, dev_info->is_shared ? 
"1\n" : "0\n"); } static ssize_t dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count) { struct dcssblk_dev_info *dev_info; struct segment_info *entry, *temp; int rc; if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) return -EINVAL; down_write(&dcssblk_devices_sem); dev_info = container_of(dev, struct dcssblk_dev_info, dev); if (atomic_read(&dev_info->use_count)) { rc = -EBUSY; goto out; } if (inbuf[0] == '1') { /* reload segments in shared mode */ list_for_each_entry(entry, &dev_info->seg_list, lh) { rc = segment_modify_shared(entry->segment_name, SEGMENT_SHARED); if (rc < 0) { BUG_ON(rc == -EINVAL); if (rc != -EAGAIN) goto removeseg; } } dev_info->is_shared = 1; switch (dev_info->segment_type) { case SEG_TYPE_SR: case SEG_TYPE_ER: case SEG_TYPE_SC: set_disk_ro(dev_info->gd, 1); } } else if (inbuf[0] == '0') { /* reload segments in exclusive mode */ if (dev_info->segment_type == SEG_TYPE_SC) { pr_err("DCSS %s is of type SC and cannot be " "loaded as exclusive-writable\n", dev_info->segment_name); rc = -EINVAL; goto out; } list_for_each_entry(entry, &dev_info->seg_list, lh) { rc = segment_modify_shared(entry->segment_name, SEGMENT_EXCLUSIVE); if (rc < 0) { BUG_ON(rc == -EINVAL); if (rc != -EAGAIN) goto removeseg; } } dev_info->is_shared = 0; set_disk_ro(dev_info->gd, 0); } else { rc = -EINVAL; goto out; } rc = count; goto out; removeseg: pr_err("DCSS device %s is removed after a failed access mode " "change\n", dev_info->segment_name); temp = entry; list_for_each_entry(entry, &dev_info->seg_list, lh) { if (entry != temp) segment_unload(entry->segment_name); } list_del(&dev_info->lh); del_gendisk(dev_info->gd); blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; put_disk(dev_info->gd); rc = device_schedule_callback(dev, dcssblk_unregister_callback); out: up_write(&dcssblk_devices_sem); return rc; } /* * device attribute for save operation on current copy * of the segment. 
If the segment is busy, saving will * become pending until it gets released, which can be * undone by storing a non-true value to this entry. * (show + store) */ static ssize_t dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf) { struct dcssblk_dev_info *dev_info; dev_info = container_of(dev, struct dcssblk_dev_info, dev); return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n"); } static ssize_t dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count) { struct dcssblk_dev_info *dev_info; struct segment_info *entry; if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) return -EINVAL; dev_info = container_of(dev, struct dcssblk_dev_info, dev); down_write(&dcssblk_devices_sem); if (inbuf[0] == '1') { if (atomic_read(&dev_info->use_count) == 0) { // device is idle => we save immediately pr_info("All DCSSs that map to device %s are " "saved\n", dev_info->segment_name); list_for_each_entry(entry, &dev_info->seg_list, lh) { segment_save(entry->segment_name); } } else { // device is busy => we save it when it becomes // idle in dcssblk_release pr_info("Device %s is in use, its DCSSs will be " "saved when it becomes idle\n", dev_info->segment_name); dev_info->save_pending = 1; } } else if (inbuf[0] == '0') { if (dev_info->save_pending) { // device is busy & the user wants to undo his save // request dev_info->save_pending = 0; pr_info("A pending save request for device %s " "has been canceled\n", dev_info->segment_name); } } else { up_write(&dcssblk_devices_sem); return -EINVAL; } up_write(&dcssblk_devices_sem); return count; } /* * device attribute for showing all segments in a device */ static ssize_t dcssblk_seglist_show(struct device *dev, struct device_attribute *attr, char *buf) { int i; struct dcssblk_dev_info *dev_info; struct segment_info *entry; down_read(&dcssblk_devices_sem); dev_info = container_of(dev, struct dcssblk_dev_info, dev); i = 0; buf[0] = '\0'; 
list_for_each_entry(entry, &dev_info->seg_list, lh) { strcpy(&buf[i], entry->segment_name); i += strlen(entry->segment_name); buf[i] = '\n'; i++; } up_read(&dcssblk_devices_sem); return i; } /* * device attribute for adding devices */ static ssize_t dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int rc, i, j, num_of_segments; struct dcssblk_dev_info *dev_info; struct segment_info *seg_info, *temp; char *local_buf; unsigned long seg_byte_size; dev_info = NULL; seg_info = NULL; if (dev != dcssblk_root_dev) { rc = -EINVAL; goto out_nobuf; } if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) { rc = -ENAMETOOLONG; goto out_nobuf; } local_buf = kmalloc(count + 1, GFP_KERNEL); if (local_buf == NULL) { rc = -ENOMEM; goto out_nobuf; } /* * parse input */ num_of_segments = 0; for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) { for (j = i; (buf[j] != ':') && (buf[j] != '\0') && (buf[j] != '\n') && j < count; j++) { local_buf[j-i] = toupper(buf[j]); } local_buf[j-i] = '\0'; if (((j - i) == 0) || ((j - i) > 8)) { rc = -ENAMETOOLONG; goto seg_list_del; } rc = dcssblk_load_segment(local_buf, &seg_info); if (rc < 0) goto seg_list_del; /* * get a struct dcssblk_dev_info */ if (num_of_segments == 0) { dev_info = kzalloc(sizeof(struct dcssblk_dev_info), GFP_KERNEL); if (dev_info == NULL) { rc = -ENOMEM; goto out; } strcpy(dev_info->segment_name, local_buf); dev_info->segment_type = seg_info->segment_type; INIT_LIST_HEAD(&dev_info->seg_list); } list_add_tail(&seg_info->lh, &dev_info->seg_list); num_of_segments++; i = j; if ((buf[j] == '\0') || (buf[j] == '\n')) break; } /* no trailing colon at the end of the input */ if ((i > 0) && (buf[i-1] == ':')) { rc = -ENAMETOOLONG; goto seg_list_del; } strlcpy(local_buf, buf, i + 1); dev_info->num_of_segments = num_of_segments; rc = dcssblk_is_continuous(dev_info); if (rc < 0) goto seg_list_del; dev_info->start = dcssblk_find_lowest_addr(dev_info); dev_info->end = 
dcssblk_find_highest_addr(dev_info); dev_set_name(&dev_info->dev, dev_info->segment_name); dev_info->dev.release = dcssblk_release_segment; INIT_LIST_HEAD(&dev_info->lh); dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK); if (dev_info->gd == NULL) { rc = -ENOMEM; goto seg_list_del; } dev_info->gd->major = dcssblk_major; dev_info->gd->fops = &dcssblk_devops; dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL); dev_info->gd->queue = dev_info->dcssblk_queue; dev_info->gd->private_data = dev_info; dev_info->gd->driverfs_dev = &dev_info->dev; blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); seg_byte_size = (dev_info->end - dev_info->start + 1); set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors pr_info("Loaded %s with total size %lu bytes and capacity %lu " "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9); dev_info->save_pending = 0; dev_info->is_shared = 1; dev_info->dev.parent = dcssblk_root_dev; /* *get minor, add to list */ down_write(&dcssblk_devices_sem); if (dcssblk_get_segment_by_name(local_buf)) { rc = -EEXIST; goto release_gd; } rc = dcssblk_assign_free_minor(dev_info); if (rc) goto release_gd; sprintf(dev_info->gd->disk_name, "dcssblk%d", dev_info->gd->first_minor); list_add_tail(&dev_info->lh, &dcssblk_devices); if (!try_module_get(THIS_MODULE)) { rc = -ENODEV; goto dev_list_del; } /* * register the device */ rc = device_register(&dev_info->dev); if (rc) { module_put(THIS_MODULE); goto dev_list_del; } get_device(&dev_info->dev); rc = device_create_file(&dev_info->dev, &dev_attr_shared); if (rc) goto unregister_dev; rc = device_create_file(&dev_info->dev, &dev_attr_save); if (rc) goto unregister_dev; rc = device_create_file(&dev_info->dev, &dev_attr_seglist); if (rc) goto unregister_dev; add_disk(dev_info->gd); switch (dev_info->segment_type) { case SEG_TYPE_SR: case SEG_TYPE_ER: case SEG_TYPE_SC: set_disk_ro(dev_info->gd,1); break; default: 
set_disk_ro(dev_info->gd,0); break; } up_write(&dcssblk_devices_sem); rc = count; goto out; unregister_dev: list_del(&dev_info->lh); blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; put_disk(dev_info->gd); device_unregister(&dev_info->dev); list_for_each_entry(seg_info, &dev_info->seg_list, lh) { segment_unload(seg_info->segment_name); } put_device(&dev_info->dev); up_write(&dcssblk_devices_sem); goto out; dev_list_del: list_del(&dev_info->lh); release_gd: blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; put_disk(dev_info->gd); up_write(&dcssblk_devices_sem); seg_list_del: if (dev_info == NULL) goto out; list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) { list_del(&seg_info->lh); segment_unload(seg_info->segment_name); kfree(seg_info); } kfree(dev_info); out: kfree(local_buf); out_nobuf: return rc; } /* * device attribute for removing devices */ static ssize_t dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct dcssblk_dev_info *dev_info; struct segment_info *entry; int rc, i; char *local_buf; if (dev != dcssblk_root_dev) { return -EINVAL; } local_buf = kmalloc(count + 1, GFP_KERNEL); if (local_buf == NULL) { return -ENOMEM; } /* * parse input */ for (i = 0; ((*(buf+i)!='\0') && (*(buf+i)!='\n') && i < count); i++) { local_buf[i] = toupper(buf[i]); } local_buf[i] = '\0'; if ((i == 0) || (i > 8)) { rc = -ENAMETOOLONG; goto out_buf; } down_write(&dcssblk_devices_sem); dev_info = dcssblk_get_device_by_name(local_buf); if (dev_info == NULL) { up_write(&dcssblk_devices_sem); pr_warning("Device %s cannot be removed because it is not a " "known device\n", local_buf); rc = -ENODEV; goto out_buf; } if (atomic_read(&dev_info->use_count) != 0) { up_write(&dcssblk_devices_sem); pr_warning("Device %s cannot be removed while it is in " "use\n", local_buf); rc = -EBUSY; goto out_buf; } list_del(&dev_info->lh); del_gendisk(dev_info->gd); 
blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; put_disk(dev_info->gd); device_unregister(&dev_info->dev); /* unload all related segments */ list_for_each_entry(entry, &dev_info->seg_list, lh) segment_unload(entry->segment_name); put_device(&dev_info->dev); up_write(&dcssblk_devices_sem); rc = count; out_buf: kfree(local_buf); return rc; } static int dcssblk_open(struct block_device *bdev, fmode_t mode) { struct dcssblk_dev_info *dev_info; int rc; dev_info = bdev->bd_disk->private_data; if (NULL == dev_info) { rc = -ENODEV; goto out; } atomic_inc(&dev_info->use_count); bdev->bd_block_size = 4096; rc = 0; out: return rc; } static int dcssblk_release(struct gendisk *disk, fmode_t mode) { struct dcssblk_dev_info *dev_info = disk->private_data; struct segment_info *entry; int rc; if (!dev_info) { rc = -ENODEV; goto out; } down_write(&dcssblk_devices_sem); if (atomic_dec_and_test(&dev_info->use_count) && (dev_info->save_pending)) { pr_info("Device %s has become idle and is being saved " "now\n", dev_info->segment_name); list_for_each_entry(entry, &dev_info->seg_list, lh) { segment_save(entry->segment_name); } dev_info->save_pending = 0; } up_write(&dcssblk_devices_sem); rc = 0; out: return rc; } static int dcssblk_make_request(struct request_queue *q, struct bio *bio) { struct dcssblk_dev_info *dev_info; struct bio_vec *bvec; unsigned long index; unsigned long page_addr; unsigned long source_addr; unsigned long bytes_done; int i; bytes_done = 0; dev_info = bio->bi_bdev->bd_disk->private_data; if (dev_info == NULL) goto fail; if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) /* Request is not page-aligned. */ goto fail; if (((bio->bi_size >> 9) + bio->bi_sector) > get_capacity(bio->bi_bdev->bd_disk)) { /* Request beyond end of DCSS segment. 
*/ goto fail; } /* verify data transfer direction */ if (dev_info->is_shared) { switch (dev_info->segment_type) { case SEG_TYPE_SR: case SEG_TYPE_ER: case SEG_TYPE_SC: /* cannot write to these segments */ if (bio_data_dir(bio) == WRITE) { pr_warning("Writing to %s failed because it " "is a read-only device\n", dev_name(&dev_info->dev)); goto fail; } } } index = (bio->bi_sector >> 3); bio_for_each_segment(bvec, bio, i) { page_addr = (unsigned long) page_address(bvec->bv_page) + bvec->bv_offset; source_addr = dev_info->start + (index<<12) + bytes_done; if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0) // More paranoia. goto fail; if (bio_data_dir(bio) == READ) { memcpy((void*)page_addr, (void*)source_addr, bvec->bv_len); } else { memcpy((void*)source_addr, (void*)page_addr, bvec->bv_len); } bytes_done += bvec->bv_len; } bio_endio(bio, 0); return 0; fail: bio_io_error(bio); return 0; } static int dcssblk_direct_access (struct block_device *bdev, sector_t secnum, void **kaddr, unsigned long *pfn) { struct dcssblk_dev_info *dev_info; unsigned long pgoff; dev_info = bdev->bd_disk->private_data; if (!dev_info) return -ENODEV; if (secnum % (PAGE_SIZE/512)) return -EINVAL; pgoff = secnum / (PAGE_SIZE / 512); if ((pgoff+1)*PAGE_SIZE-1 > dev_info->end - dev_info->start) return -ERANGE; *kaddr = (void *) (dev_info->start+pgoff*PAGE_SIZE); *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT; return 0; } static void dcssblk_check_params(void) { int rc, i, j, k; char buf[DCSSBLK_PARM_LEN + 1]; struct dcssblk_dev_info *dev_info; for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0'); i++) { for (j = i; (dcssblk_segments[j] != ',') && (dcssblk_segments[j] != '\0') && (dcssblk_segments[j] != '(') && (j < DCSSBLK_PARM_LEN); j++) { buf[j-i] = dcssblk_segments[j]; } buf[j-i] = '\0'; rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i); if ((rc >= 0) && (dcssblk_segments[j] == '(')) { for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++) buf[k] = 
toupper(buf[k]); buf[k] = '\0'; if (!strncmp(&dcssblk_segments[j], "(local)", 7)) { down_read(&dcssblk_devices_sem); dev_info = dcssblk_get_device_by_name(buf); up_read(&dcssblk_devices_sem); if (dev_info) dcssblk_shared_store(&dev_info->dev, NULL, "0\n", 2); } } while ((dcssblk_segments[j] != ',') && (dcssblk_segments[j] != '\0')) { j++; } if (dcssblk_segments[j] == '\0') break; i = j; } } /* * Suspend / Resume */ static int dcssblk_freeze(struct device *dev) { struct dcssblk_dev_info *dev_info; int rc = 0; list_for_each_entry(dev_info, &dcssblk_devices, lh) { switch (dev_info->segment_type) { case SEG_TYPE_SR: case SEG_TYPE_ER: case SEG_TYPE_SC: if (!dev_info->is_shared) rc = -EINVAL; break; default: rc = -EINVAL; break; } if (rc) break; } if (rc) pr_err("Suspending the system failed because DCSS device %s " "is writable\n", dev_info->segment_name); return rc; } static int dcssblk_restore(struct device *dev) { struct dcssblk_dev_info *dev_info; struct segment_info *entry; unsigned long start, end; int rc = 0; list_for_each_entry(dev_info, &dcssblk_devices, lh) { list_for_each_entry(entry, &dev_info->seg_list, lh) { segment_unload(entry->segment_name); rc = segment_load(entry->segment_name, SEGMENT_SHARED, &start, &end); if (rc < 0) { // TODO in_use check ? segment_warning(rc, entry->segment_name); goto out_panic; } if (start != entry->start || end != entry->end) { pr_err("The address range of DCSS %s changed " "while the system was suspended\n", entry->segment_name); goto out_panic; } } } return 0; out_panic: panic("fatal dcssblk resume error\n"); } static int dcssblk_thaw(struct device *dev) { return 0; } static const struct dev_pm_ops dcssblk_pm_ops = { .freeze = dcssblk_freeze, .thaw = dcssblk_thaw, .restore = dcssblk_restore, }; static struct platform_driver dcssblk_pdrv = { .driver = { .name = "dcssblk", .owner = THIS_MODULE, .pm = &dcssblk_pm_ops, }, }; static struct platform_device *dcssblk_pdev; /* * The init/exit functions. 
*/ static void __exit dcssblk_exit(void) { platform_device_unregister(dcssblk_pdev); platform_driver_unregister(&dcssblk_pdrv); root_device_unregister(dcssblk_root_dev); unregister_blkdev(dcssblk_major, DCSSBLK_NAME); } static int __init dcssblk_init(void) { int rc; rc = platform_driver_register(&dcssblk_pdrv); if (rc) return rc; dcssblk_pdev = platform_device_register_simple("dcssblk", -1, NULL, 0); if (IS_ERR(dcssblk_pdev)) { rc = PTR_ERR(dcssblk_pdev); goto out_pdrv; } dcssblk_root_dev = root_device_register("dcssblk"); if (IS_ERR(dcssblk_root_dev)) { rc = PTR_ERR(dcssblk_root_dev); goto out_pdev; } rc = device_create_file(dcssblk_root_dev, &dev_attr_add); if (rc) goto out_root; rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); if (rc) goto out_root; rc = register_blkdev(0, DCSSBLK_NAME); if (rc < 0) goto out_root; dcssblk_major = rc; init_rwsem(&dcssblk_devices_sem); dcssblk_check_params(); return 0; out_root: root_device_unregister(dcssblk_root_dev); out_pdev: platform_device_unregister(dcssblk_pdev); out_pdrv: platform_driver_unregister(&dcssblk_pdrv); return rc; } module_init(dcssblk_init); module_exit(dcssblk_exit); module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444); MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, " "comma-separated list, names in each set separated " "by commas are separated by colons, each set contains " "names of contiguous segments and each name max. 8 chars.\n" "Adding \"(local)\" to the end of each set equals echoing 0 " "to /sys/devices/dcssblk/<device name>/shared after loading " "the contiguous segments - \n" "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\""); MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/android_kernel_samsung_trlte
fs/nilfs2/the_nilfs.c
4158
21033
/* * the_nilfs.c - the_nilfs shared structure. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Written by Ryusuke Konishi <ryusuke@osrg.net> * */ #include <linux/buffer_head.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/random.h> #include <linux/crc32.h> #include "nilfs.h" #include "segment.h" #include "alloc.h" #include "cpfile.h" #include "sufile.h" #include "dat.h" #include "segbuf.h" static int nilfs_valid_sb(struct nilfs_super_block *sbp); void nilfs_set_last_segment(struct the_nilfs *nilfs, sector_t start_blocknr, u64 seq, __u64 cno) { spin_lock(&nilfs->ns_last_segment_lock); nilfs->ns_last_pseg = start_blocknr; nilfs->ns_last_seq = seq; nilfs->ns_last_cno = cno; if (!nilfs_sb_dirty(nilfs)) { if (nilfs->ns_prev_seq == nilfs->ns_last_seq) goto stay_cursor; set_nilfs_sb_dirty(nilfs); } nilfs->ns_prev_seq = nilfs->ns_last_seq; stay_cursor: spin_unlock(&nilfs->ns_last_segment_lock); } /** * alloc_nilfs - allocate a nilfs object * @bdev: block device to which the_nilfs is related * * Return Value: On success, pointer to the_nilfs is returned. * On error, NULL is returned. 
*/ struct the_nilfs *alloc_nilfs(struct block_device *bdev) { struct the_nilfs *nilfs; nilfs = kzalloc(sizeof(*nilfs), GFP_KERNEL); if (!nilfs) return NULL; nilfs->ns_bdev = bdev; atomic_set(&nilfs->ns_ndirtyblks, 0); init_rwsem(&nilfs->ns_sem); mutex_init(&nilfs->ns_snapshot_mount_mutex); INIT_LIST_HEAD(&nilfs->ns_dirty_files); INIT_LIST_HEAD(&nilfs->ns_gc_inodes); spin_lock_init(&nilfs->ns_inode_lock); spin_lock_init(&nilfs->ns_next_gen_lock); spin_lock_init(&nilfs->ns_last_segment_lock); nilfs->ns_cptree = RB_ROOT; spin_lock_init(&nilfs->ns_cptree_lock); init_rwsem(&nilfs->ns_segctor_sem); return nilfs; } /** * destroy_nilfs - destroy nilfs object * @nilfs: nilfs object to be released */ void destroy_nilfs(struct the_nilfs *nilfs) { might_sleep(); if (nilfs_init(nilfs)) { brelse(nilfs->ns_sbh[0]); brelse(nilfs->ns_sbh[1]); } kfree(nilfs); } static int nilfs_load_super_root(struct the_nilfs *nilfs, struct super_block *sb, sector_t sr_block) { struct buffer_head *bh_sr; struct nilfs_super_root *raw_sr; struct nilfs_super_block **sbp = nilfs->ns_sbp; struct nilfs_inode *rawi; unsigned dat_entry_size, segment_usage_size, checkpoint_size; unsigned inode_size; int err; err = nilfs_read_super_root_block(nilfs, sr_block, &bh_sr, 1); if (unlikely(err)) return err; down_read(&nilfs->ns_sem); dat_entry_size = le16_to_cpu(sbp[0]->s_dat_entry_size); checkpoint_size = le16_to_cpu(sbp[0]->s_checkpoint_size); segment_usage_size = le16_to_cpu(sbp[0]->s_segment_usage_size); up_read(&nilfs->ns_sem); inode_size = nilfs->ns_inode_size; rawi = (void *)bh_sr->b_data + NILFS_SR_DAT_OFFSET(inode_size); err = nilfs_dat_read(sb, dat_entry_size, rawi, &nilfs->ns_dat); if (err) goto failed; rawi = (void *)bh_sr->b_data + NILFS_SR_CPFILE_OFFSET(inode_size); err = nilfs_cpfile_read(sb, checkpoint_size, rawi, &nilfs->ns_cpfile); if (err) goto failed_dat; rawi = (void *)bh_sr->b_data + NILFS_SR_SUFILE_OFFSET(inode_size); err = nilfs_sufile_read(sb, segment_usage_size, rawi, &nilfs->ns_sufile); 
if (err) goto failed_cpfile; raw_sr = (struct nilfs_super_root *)bh_sr->b_data; nilfs->ns_nongc_ctime = le64_to_cpu(raw_sr->sr_nongc_ctime); failed: brelse(bh_sr); return err; failed_cpfile: iput(nilfs->ns_cpfile); failed_dat: iput(nilfs->ns_dat); goto failed; } static void nilfs_init_recovery_info(struct nilfs_recovery_info *ri) { memset(ri, 0, sizeof(*ri)); INIT_LIST_HEAD(&ri->ri_used_segments); } static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri) { nilfs_dispose_segment_list(&ri->ri_used_segments); } /** * nilfs_store_log_cursor - load log cursor from a super block * @nilfs: nilfs object * @sbp: buffer storing super block to be read * * nilfs_store_log_cursor() reads the last position of the log * containing a super root from a given super block, and initializes * relevant information on the nilfs object preparatory for log * scanning and recovery. */ static int nilfs_store_log_cursor(struct the_nilfs *nilfs, struct nilfs_super_block *sbp) { int ret = 0; nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg); nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno); nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq); nilfs->ns_prev_seq = nilfs->ns_last_seq; nilfs->ns_seg_seq = nilfs->ns_last_seq; nilfs->ns_segnum = nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg); nilfs->ns_cno = nilfs->ns_last_cno + 1; if (nilfs->ns_segnum >= nilfs->ns_nsegments) { printk(KERN_ERR "NILFS invalid last segment number.\n"); ret = -EINVAL; } return ret; } /** * load_nilfs - load and recover the nilfs * @nilfs: the_nilfs structure to be released * @sb: super block isntance used to recover past segment * * load_nilfs() searches and load the latest super root, * attaches the last segment, and does recovery if needed. * The caller must call this exclusively for simultaneous mounts. 
*/ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) { struct nilfs_recovery_info ri; unsigned int s_flags = sb->s_flags; int really_read_only = bdev_read_only(nilfs->ns_bdev); int valid_fs = nilfs_valid_fs(nilfs); int err; if (!valid_fs) { printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n"); if (s_flags & MS_RDONLY) { printk(KERN_INFO "NILFS: INFO: recovery " "required for readonly filesystem.\n"); printk(KERN_INFO "NILFS: write access will " "be enabled during recovery.\n"); } } nilfs_init_recovery_info(&ri); err = nilfs_search_super_root(nilfs, &ri); if (unlikely(err)) { struct nilfs_super_block **sbp = nilfs->ns_sbp; int blocksize; if (err != -EINVAL) goto scan_error; if (!nilfs_valid_sb(sbp[1])) { printk(KERN_WARNING "NILFS warning: unable to fall back to spare" "super block\n"); goto scan_error; } printk(KERN_INFO "NILFS: try rollback from an earlier position\n"); /* * restore super block with its spare and reconfigure * relevant states of the nilfs object. 
*/ memcpy(sbp[0], sbp[1], nilfs->ns_sbsize); nilfs->ns_crc_seed = le32_to_cpu(sbp[0]->s_crc_seed); nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime); /* verify consistency between two super blocks */ blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size); if (blocksize != nilfs->ns_blocksize) { printk(KERN_WARNING "NILFS warning: blocksize differs between " "two super blocks (%d != %d)\n", blocksize, nilfs->ns_blocksize); goto scan_error; } err = nilfs_store_log_cursor(nilfs, sbp[0]); if (err) goto scan_error; /* drop clean flag to allow roll-forward and recovery */ nilfs->ns_mount_state &= ~NILFS_VALID_FS; valid_fs = 0; err = nilfs_search_super_root(nilfs, &ri); if (err) goto scan_error; } err = nilfs_load_super_root(nilfs, sb, ri.ri_super_root); if (unlikely(err)) { printk(KERN_ERR "NILFS: error loading super root.\n"); goto failed; } if (valid_fs) goto skip_recovery; if (s_flags & MS_RDONLY) { __u64 features; if (nilfs_test_opt(nilfs, NORECOVERY)) { printk(KERN_INFO "NILFS: norecovery option specified. 
" "skipping roll-forward recovery\n"); goto skip_recovery; } features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) & ~NILFS_FEATURE_COMPAT_RO_SUPP; if (features) { printk(KERN_ERR "NILFS: couldn't proceed with " "recovery because of unsupported optional " "features (%llx)\n", (unsigned long long)features); err = -EROFS; goto failed_unload; } if (really_read_only) { printk(KERN_ERR "NILFS: write access " "unavailable, cannot proceed.\n"); err = -EROFS; goto failed_unload; } sb->s_flags &= ~MS_RDONLY; } else if (nilfs_test_opt(nilfs, NORECOVERY)) { printk(KERN_ERR "NILFS: recovery cancelled because norecovery " "option was specified for a read/write mount\n"); err = -EINVAL; goto failed_unload; } err = nilfs_salvage_orphan_logs(nilfs, sb, &ri); if (err) goto failed_unload; down_write(&nilfs->ns_sem); nilfs->ns_mount_state |= NILFS_VALID_FS; /* set "clean" flag */ err = nilfs_cleanup_super(sb); up_write(&nilfs->ns_sem); if (err) { printk(KERN_ERR "NILFS: failed to update super block. 
" "recovery unfinished.\n"); goto failed_unload; } printk(KERN_INFO "NILFS: recovery complete.\n"); skip_recovery: nilfs_clear_recovery_info(&ri); sb->s_flags = s_flags; return 0; scan_error: printk(KERN_ERR "NILFS: error searching super root.\n"); goto failed; failed_unload: iput(nilfs->ns_cpfile); iput(nilfs->ns_sufile); iput(nilfs->ns_dat); failed: nilfs_clear_recovery_info(&ri); sb->s_flags = s_flags; return err; } static unsigned long long nilfs_max_size(unsigned int blkbits) { unsigned int max_bits; unsigned long long res = MAX_LFS_FILESIZE; /* page cache limit */ max_bits = blkbits + NILFS_BMAP_KEY_BIT; /* bmap size limit */ if (max_bits < 64) res = min_t(unsigned long long, res, (1ULL << max_bits) - 1); return res; } /** * nilfs_nrsvsegs - calculate the number of reserved segments * @nilfs: nilfs object * @nsegs: total number of segments */ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs) { return max_t(unsigned long, NILFS_MIN_NRSVSEGS, DIV_ROUND_UP(nsegs * nilfs->ns_r_segments_percentage, 100)); } void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs) { nilfs->ns_nsegments = nsegs; nilfs->ns_nrsvsegs = nilfs_nrsvsegs(nilfs, nsegs); } static int nilfs_store_disk_layout(struct the_nilfs *nilfs, struct nilfs_super_block *sbp) { if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) { printk(KERN_ERR "NILFS: unsupported revision " "(superblock rev.=%d.%d, current rev.=%d.%d). 
" "Please check the version of mkfs.nilfs.\n", le32_to_cpu(sbp->s_rev_level), le16_to_cpu(sbp->s_minor_rev_level), NILFS_CURRENT_REV, NILFS_MINOR_REV); return -EINVAL; } nilfs->ns_sbsize = le16_to_cpu(sbp->s_bytes); if (nilfs->ns_sbsize > BLOCK_SIZE) return -EINVAL; nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size); nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino); nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment); if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) { printk(KERN_ERR "NILFS: too short segment.\n"); return -EINVAL; } nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block); nilfs->ns_r_segments_percentage = le32_to_cpu(sbp->s_r_segments_percentage); if (nilfs->ns_r_segments_percentage < 1 || nilfs->ns_r_segments_percentage > 99) { printk(KERN_ERR "NILFS: invalid reserved segments percentage.\n"); return -EINVAL; } nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments)); nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed); return 0; } static int nilfs_valid_sb(struct nilfs_super_block *sbp) { static unsigned char sum[4]; const int sumoff = offsetof(struct nilfs_super_block, s_sum); size_t bytes; u32 crc; if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC) return 0; bytes = le16_to_cpu(sbp->s_bytes); if (bytes > BLOCK_SIZE) return 0; crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp, sumoff); crc = crc32_le(crc, sum, 4); crc = crc32_le(crc, (unsigned char *)sbp + sumoff + 4, bytes - sumoff - 4); return crc == le32_to_cpu(sbp->s_sum); } static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset) { return offset < ((le64_to_cpu(sbp->s_nsegments) * le32_to_cpu(sbp->s_blocks_per_segment)) << (le32_to_cpu(sbp->s_log_block_size) + 10)); } static void nilfs_release_super_block(struct the_nilfs *nilfs) { int i; for (i = 0; i < 2; i++) { if (nilfs->ns_sbp[i]) { brelse(nilfs->ns_sbh[i]); nilfs->ns_sbh[i] = NULL; nilfs->ns_sbp[i] = NULL; } } } void nilfs_fall_back_super_block(struct 
the_nilfs *nilfs) { brelse(nilfs->ns_sbh[0]); nilfs->ns_sbh[0] = nilfs->ns_sbh[1]; nilfs->ns_sbp[0] = nilfs->ns_sbp[1]; nilfs->ns_sbh[1] = NULL; nilfs->ns_sbp[1] = NULL; } void nilfs_swap_super_block(struct the_nilfs *nilfs) { struct buffer_head *tsbh = nilfs->ns_sbh[0]; struct nilfs_super_block *tsbp = nilfs->ns_sbp[0]; nilfs->ns_sbh[0] = nilfs->ns_sbh[1]; nilfs->ns_sbp[0] = nilfs->ns_sbp[1]; nilfs->ns_sbh[1] = tsbh; nilfs->ns_sbp[1] = tsbp; } static int nilfs_load_super_block(struct the_nilfs *nilfs, struct super_block *sb, int blocksize, struct nilfs_super_block **sbpp) { struct nilfs_super_block **sbp = nilfs->ns_sbp; struct buffer_head **sbh = nilfs->ns_sbh; u64 sb2off = NILFS_SB2_OFFSET_BYTES(nilfs->ns_bdev->bd_inode->i_size); int valid[2], swp = 0; sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize, &sbh[0]); sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]); if (!sbp[0]) { if (!sbp[1]) { printk(KERN_ERR "NILFS: unable to read superblock\n"); return -EIO; } printk(KERN_WARNING "NILFS warning: unable to read primary superblock " "(blocksize = %d)\n", blocksize); } else if (!sbp[1]) { printk(KERN_WARNING "NILFS warning: unable to read secondary superblock " "(blocksize = %d)\n", blocksize); } /* * Compare two super blocks and set 1 in swp if the secondary * super block is valid and newer. Otherwise, set 0 in swp. */ valid[0] = nilfs_valid_sb(sbp[0]); valid[1] = nilfs_valid_sb(sbp[1]); swp = valid[1] && (!valid[0] || le64_to_cpu(sbp[1]->s_last_cno) > le64_to_cpu(sbp[0]->s_last_cno)); if (valid[swp] && nilfs_sb2_bad_offset(sbp[swp], sb2off)) { brelse(sbh[1]); sbh[1] = NULL; sbp[1] = NULL; valid[1] = 0; swp = 0; } if (!valid[swp]) { nilfs_release_super_block(nilfs); printk(KERN_ERR "NILFS: Can't find nilfs on dev %s.\n", sb->s_id); return -EINVAL; } if (!valid[!swp]) printk(KERN_WARNING "NILFS warning: broken superblock. 
" "using spare superblock (blocksize = %d).\n", blocksize); if (swp) nilfs_swap_super_block(nilfs); nilfs->ns_sbwcount = 0; nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime); nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq); *sbpp = sbp[0]; return 0; } /** * init_nilfs - initialize a NILFS instance. * @nilfs: the_nilfs structure * @sb: super block * @data: mount options * * init_nilfs() performs common initialization per block device (e.g. * reading the super block, getting disk layout information, initializing * shared fields in the_nilfs). * * Return Value: On success, 0 is returned. On error, a negative error * code is returned. */ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data) { struct nilfs_super_block *sbp; int blocksize; int err; down_write(&nilfs->ns_sem); blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE); if (!blocksize) { printk(KERN_ERR "NILFS: unable to set blocksize\n"); err = -EINVAL; goto out; } err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp); if (err) goto out; err = nilfs_store_magic_and_option(sb, sbp, data); if (err) goto failed_sbh; err = nilfs_check_feature_compatibility(sb, sbp); if (err) goto failed_sbh; blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); if (blocksize < NILFS_MIN_BLOCK_SIZE || blocksize > NILFS_MAX_BLOCK_SIZE) { printk(KERN_ERR "NILFS: couldn't mount because of unsupported " "filesystem blocksize %d\n", blocksize); err = -EINVAL; goto failed_sbh; } if (sb->s_blocksize != blocksize) { int hw_blocksize = bdev_logical_block_size(sb->s_bdev); if (blocksize < hw_blocksize) { printk(KERN_ERR "NILFS: blocksize %d too small for device " "(sector-size = %d).\n", blocksize, hw_blocksize); err = -EINVAL; goto failed_sbh; } nilfs_release_super_block(nilfs); sb_set_blocksize(sb, blocksize); err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp); if (err) goto out; /* not failed_sbh; sbh is released automatically when reloading fails. 
*/ } nilfs->ns_blocksize_bits = sb->s_blocksize_bits; nilfs->ns_blocksize = blocksize; get_random_bytes(&nilfs->ns_next_generation, sizeof(nilfs->ns_next_generation)); err = nilfs_store_disk_layout(nilfs, sbp); if (err) goto failed_sbh; sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits); nilfs->ns_mount_state = le16_to_cpu(sbp->s_state); err = nilfs_store_log_cursor(nilfs, sbp); if (err) goto failed_sbh; set_nilfs_init(nilfs); err = 0; out: up_write(&nilfs->ns_sem); return err; failed_sbh: nilfs_release_super_block(nilfs); goto out; } int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump, size_t nsegs) { sector_t seg_start, seg_end; sector_t start = 0, nblocks = 0; unsigned int sects_per_block; __u64 *sn; int ret = 0; sects_per_block = (1 << nilfs->ns_blocksize_bits) / bdev_logical_block_size(nilfs->ns_bdev); for (sn = segnump; sn < segnump + nsegs; sn++) { nilfs_get_segment_range(nilfs, *sn, &seg_start, &seg_end); if (!nblocks) { start = seg_start; nblocks = seg_end - seg_start + 1; } else if (start + nblocks == seg_start) { nblocks += seg_end - seg_start + 1; } else { ret = blkdev_issue_discard(nilfs->ns_bdev, start * sects_per_block, nblocks * sects_per_block, GFP_NOFS, 0); if (ret < 0) return ret; nblocks = 0; } } if (nblocks) ret = blkdev_issue_discard(nilfs->ns_bdev, start * sects_per_block, nblocks * sects_per_block, GFP_NOFS, 0); return ret; } int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks) { unsigned long ncleansegs; down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile); up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment; return 0; } int nilfs_near_disk_full(struct the_nilfs *nilfs) { unsigned long ncleansegs, nincsegs; ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile); nincsegs = atomic_read(&nilfs->ns_ndirtyblks) / nilfs->ns_blocks_per_segment + 1; return ncleansegs <= nilfs->ns_nrsvsegs + nincsegs; } 
struct nilfs_root *nilfs_lookup_root(struct the_nilfs *nilfs, __u64 cno) { struct rb_node *n; struct nilfs_root *root; spin_lock(&nilfs->ns_cptree_lock); n = nilfs->ns_cptree.rb_node; while (n) { root = rb_entry(n, struct nilfs_root, rb_node); if (cno < root->cno) { n = n->rb_left; } else if (cno > root->cno) { n = n->rb_right; } else { atomic_inc(&root->count); spin_unlock(&nilfs->ns_cptree_lock); return root; } } spin_unlock(&nilfs->ns_cptree_lock); return NULL; } struct nilfs_root * nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno) { struct rb_node **p, *parent; struct nilfs_root *root, *new; root = nilfs_lookup_root(nilfs, cno); if (root) return root; new = kmalloc(sizeof(*root), GFP_KERNEL); if (!new) return NULL; spin_lock(&nilfs->ns_cptree_lock); p = &nilfs->ns_cptree.rb_node; parent = NULL; while (*p) { parent = *p; root = rb_entry(parent, struct nilfs_root, rb_node); if (cno < root->cno) { p = &(*p)->rb_left; } else if (cno > root->cno) { p = &(*p)->rb_right; } else { atomic_inc(&root->count); spin_unlock(&nilfs->ns_cptree_lock); kfree(new); return root; } } new->cno = cno; new->ifile = NULL; new->nilfs = nilfs; atomic_set(&new->count, 1); atomic_set(&new->inodes_count, 0); atomic_set(&new->blocks_count, 0); rb_link_node(&new->rb_node, parent, p); rb_insert_color(&new->rb_node, &nilfs->ns_cptree); spin_unlock(&nilfs->ns_cptree_lock); return new; } void nilfs_put_root(struct nilfs_root *root) { if (atomic_dec_and_test(&root->count)) { struct the_nilfs *nilfs = root->nilfs; spin_lock(&nilfs->ns_cptree_lock); rb_erase(&root->rb_node, &nilfs->ns_cptree); spin_unlock(&nilfs->ns_cptree_lock); if (root->ifile) iput(root->ifile); kfree(root); } }
gpl-2.0
bigbiff/android_kernel_samsung_n900
fs/hostfs/hostfs_kern.c
4414
21069
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL * * Ported the filesystem routines to 2.5. * 2003-02-10 Petr Baudis <pasky@ucw.cz> */ #include <linux/fs.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/statfs.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/mount.h> #include <linux/namei.h> #include "hostfs.h" #include "init.h" #include "kern.h" struct hostfs_inode_info { int fd; fmode_t mode; struct inode vfs_inode; }; static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode) { return list_entry(inode, struct hostfs_inode_info, vfs_inode); } #define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_path.dentry->d_inode) static int hostfs_d_delete(const struct dentry *dentry) { return 1; } static const struct dentry_operations hostfs_dentry_ops = { .d_delete = hostfs_d_delete, }; /* Changed in hostfs_args before the kernel starts running */ static char *root_ino = ""; static int append = 0; #define HOSTFS_SUPER_MAGIC 0x00c0ffee static const struct inode_operations hostfs_iops; static const struct inode_operations hostfs_dir_iops; static const struct inode_operations hostfs_link_iops; #ifndef MODULE static int __init hostfs_args(char *options, int *add) { char *ptr; ptr = strchr(options, ','); if (ptr != NULL) *ptr++ = '\0'; if (*options != '\0') root_ino = options; options = ptr; while (options) { ptr = strchr(options, ','); if (ptr != NULL) *ptr++ = '\0'; if (*options != '\0') { if (!strcmp(options, "append")) append = 1; else printf("hostfs_args - unsupported option - %s\n", options); } options = ptr; } return 0; } __uml_setup("hostfs=", hostfs_args, "hostfs=<root dir>,<flags>,...\n" " This is used to set hostfs parameters. The root directory argument\n" " is used to confine all hostfs mounts to within the specified directory\n" " tree on the host. 
If this isn't specified, then a user inside UML can\n" " mount anything on the host that's accessible to the user that's running\n" " it.\n" " The only flag currently supported is 'append', which specifies that all\n" " files opened by hostfs will be opened in append mode.\n\n" ); #endif static char *__dentry_name(struct dentry *dentry, char *name) { char *p = dentry_path_raw(dentry, name, PATH_MAX); char *root; size_t len; root = dentry->d_sb->s_fs_info; len = strlen(root); if (IS_ERR(p)) { __putname(name); return NULL; } strlcpy(name, root, PATH_MAX); if (len > p - name) { __putname(name); return NULL; } if (p > name + len) { char *s = name + len; while ((*s++ = *p++) != '\0') ; } return name; } static char *dentry_name(struct dentry *dentry) { char *name = __getname(); if (!name) return NULL; return __dentry_name(dentry, name); /* will unlock */ } static char *inode_name(struct inode *ino) { struct dentry *dentry; char *name; dentry = d_find_alias(ino); if (!dentry) return NULL; name = dentry_name(dentry); dput(dentry); return name; } static char *follow_link(char *link) { int len, n; char *name, *resolved, *end; len = 64; while (1) { n = -ENOMEM; name = kmalloc(len, GFP_KERNEL); if (name == NULL) goto out; n = hostfs_do_readlink(link, name, len); if (n < len) break; len *= 2; kfree(name); } if (n < 0) goto out_free; if (*name == '/') return name; end = strrchr(link, '/'); if (end == NULL) return name; *(end + 1) = '\0'; len = strlen(link) + strlen(name) + 1; resolved = kmalloc(len, GFP_KERNEL); if (resolved == NULL) { n = -ENOMEM; goto out_free; } sprintf(resolved, "%s%s", link, name); kfree(name); kfree(link); return resolved; out_free: kfree(name); out: return ERR_PTR(n); } static struct inode *hostfs_iget(struct super_block *sb) { struct inode *inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); return inode; } int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf) { /* * do_statfs uses struct statfs64 internally, but the linux kernel * struct 
statfs still has 32-bit versions for most of these fields, * so we convert them here */ int err; long long f_blocks; long long f_bfree; long long f_bavail; long long f_files; long long f_ffree; err = do_statfs(dentry->d_sb->s_fs_info, &sf->f_bsize, &f_blocks, &f_bfree, &f_bavail, &f_files, &f_ffree, &sf->f_fsid, sizeof(sf->f_fsid), &sf->f_namelen); if (err) return err; sf->f_blocks = f_blocks; sf->f_bfree = f_bfree; sf->f_bavail = f_bavail; sf->f_files = f_files; sf->f_ffree = f_ffree; sf->f_type = HOSTFS_SUPER_MAGIC; return 0; } static struct inode *hostfs_alloc_inode(struct super_block *sb) { struct hostfs_inode_info *hi; hi = kzalloc(sizeof(*hi), GFP_KERNEL); if (hi == NULL) return NULL; hi->fd = -1; inode_init_once(&hi->vfs_inode); return &hi->vfs_inode; } static void hostfs_evict_inode(struct inode *inode) { truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); if (HOSTFS_I(inode)->fd != -1) { close_file(&HOSTFS_I(inode)->fd); HOSTFS_I(inode)->fd = -1; } } static void hostfs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kfree(HOSTFS_I(inode)); } static void hostfs_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, hostfs_i_callback); } static int hostfs_show_options(struct seq_file *seq, struct dentry *root) { const char *root_path = root->d_sb->s_fs_info; size_t offset = strlen(root_ino) + 1; if (strlen(root_path) > offset) seq_printf(seq, ",%s", root_path + offset); return 0; } static const struct super_operations hostfs_sbops = { .alloc_inode = hostfs_alloc_inode, .destroy_inode = hostfs_destroy_inode, .evict_inode = hostfs_evict_inode, .statfs = hostfs_statfs, .show_options = hostfs_show_options, }; int hostfs_readdir(struct file *file, void *ent, filldir_t filldir) { void *dir; char *name; unsigned long long next, ino; int error, len; unsigned int type; name = dentry_name(file->f_path.dentry); if (name == NULL) return -ENOMEM; dir = open_dir(name, &error); __putname(name); if (dir == 
NULL) return -error; next = file->f_pos; while ((name = read_dir(dir, &next, &ino, &len, &type)) != NULL) { error = (*filldir)(ent, name, len, file->f_pos, ino, type); if (error) break; file->f_pos = next; } close_dir(dir); return 0; } int hostfs_file_open(struct inode *ino, struct file *file) { static DEFINE_MUTEX(open_mutex); char *name; fmode_t mode = 0; int err; int r = 0, w = 0, fd; mode = file->f_mode & (FMODE_READ | FMODE_WRITE); if ((mode & HOSTFS_I(ino)->mode) == mode) return 0; mode |= HOSTFS_I(ino)->mode; retry: if (mode & FMODE_READ) r = 1; if (mode & FMODE_WRITE) w = 1; if (w) r = 1; name = dentry_name(file->f_path.dentry); if (name == NULL) return -ENOMEM; fd = open_file(name, r, w, append); __putname(name); if (fd < 0) return fd; mutex_lock(&open_mutex); /* somebody else had handled it first? */ if ((mode & HOSTFS_I(ino)->mode) == mode) { mutex_unlock(&open_mutex); return 0; } if ((mode | HOSTFS_I(ino)->mode) != mode) { mode |= HOSTFS_I(ino)->mode; mutex_unlock(&open_mutex); close_file(&fd); goto retry; } if (HOSTFS_I(ino)->fd == -1) { HOSTFS_I(ino)->fd = fd; } else { err = replace_file(fd, HOSTFS_I(ino)->fd); close_file(&fd); if (err < 0) { mutex_unlock(&open_mutex); return err; } } HOSTFS_I(ino)->mode = mode; mutex_unlock(&open_mutex); return 0; } int hostfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; int ret; ret = filemap_write_and_wait_range(inode->i_mapping, start, end); if (ret) return ret; mutex_lock(&inode->i_mutex); ret = fsync_file(HOSTFS_I(inode)->fd, datasync); mutex_unlock(&inode->i_mutex); return ret; } static const struct file_operations hostfs_file_fops = { .llseek = generic_file_llseek, .read = do_sync_read, .splice_read = generic_file_splice_read, .aio_read = generic_file_aio_read, .aio_write = generic_file_aio_write, .write = do_sync_write, .mmap = generic_file_mmap, .open = hostfs_file_open, .release = NULL, .fsync = hostfs_fsync, }; static const struct 
file_operations hostfs_dir_fops = { .llseek = generic_file_llseek, .readdir = hostfs_readdir, .read = generic_read_dir, }; int hostfs_writepage(struct page *page, struct writeback_control *wbc) { struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; char *buffer; unsigned long long base; int count = PAGE_CACHE_SIZE; int end_index = inode->i_size >> PAGE_CACHE_SHIFT; int err; if (page->index >= end_index) count = inode->i_size & (PAGE_CACHE_SIZE-1); buffer = kmap(page); base = ((unsigned long long) page->index) << PAGE_CACHE_SHIFT; err = write_file(HOSTFS_I(inode)->fd, &base, buffer, count); if (err != count) { ClearPageUptodate(page); goto out; } if (base > inode->i_size) inode->i_size = base; if (PageError(page)) ClearPageError(page); err = 0; out: kunmap(page); unlock_page(page); return err; } int hostfs_readpage(struct file *file, struct page *page) { char *buffer; long long start; int err = 0; start = (long long) page->index << PAGE_CACHE_SHIFT; buffer = kmap(page); err = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer, PAGE_CACHE_SIZE); if (err < 0) goto out; memset(&buffer[err], 0, PAGE_CACHE_SIZE - err); flush_dcache_page(page); SetPageUptodate(page); if (PageError(page)) ClearPageError(page); err = 0; out: kunmap(page); unlock_page(page); return err; } int hostfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { pgoff_t index = pos >> PAGE_CACHE_SHIFT; *pagep = grab_cache_page_write_begin(mapping, index, flags); if (!*pagep) return -ENOMEM; return 0; } int hostfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = mapping->host; void *buffer; unsigned from = pos & (PAGE_CACHE_SIZE - 1); int err; buffer = kmap(page); err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied); kunmap(page); if (!PageUptodate(page) && 
err == PAGE_CACHE_SIZE) SetPageUptodate(page); /* * If err > 0, write_file has added err to pos, so we are comparing * i_size against the last byte written. */ if (err > 0 && (pos > inode->i_size)) inode->i_size = pos; unlock_page(page); page_cache_release(page); return err; } static const struct address_space_operations hostfs_aops = { .writepage = hostfs_writepage, .readpage = hostfs_readpage, .set_page_dirty = __set_page_dirty_nobuffers, .write_begin = hostfs_write_begin, .write_end = hostfs_write_end, }; static int read_name(struct inode *ino, char *name) { dev_t rdev; struct hostfs_stat st; int err = stat_file(name, &st, -1); if (err) return err; /* Reencode maj and min with the kernel encoding.*/ rdev = MKDEV(st.maj, st.min); switch (st.mode & S_IFMT) { case S_IFLNK: ino->i_op = &hostfs_link_iops; break; case S_IFDIR: ino->i_op = &hostfs_dir_iops; ino->i_fop = &hostfs_dir_fops; break; case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: init_special_inode(ino, st.mode & S_IFMT, rdev); ino->i_op = &hostfs_iops; break; default: ino->i_op = &hostfs_iops; ino->i_fop = &hostfs_file_fops; ino->i_mapping->a_ops = &hostfs_aops; } ino->i_ino = st.ino; ino->i_mode = st.mode; set_nlink(ino, st.nlink); ino->i_uid = st.uid; ino->i_gid = st.gid; ino->i_atime = st.atime; ino->i_mtime = st.mtime; ino->i_ctime = st.ctime; ino->i_size = st.size; ino->i_blocks = st.blocks; return 0; } int hostfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { struct inode *inode; char *name; int error, fd; inode = hostfs_iget(dir->i_sb); if (IS_ERR(inode)) { error = PTR_ERR(inode); goto out; } error = -ENOMEM; name = dentry_name(dentry); if (name == NULL) goto out_put; fd = file_create(name, mode & S_IRUSR, mode & S_IWUSR, mode & S_IXUSR, mode & S_IRGRP, mode & S_IWGRP, mode & S_IXGRP, mode & S_IROTH, mode & S_IWOTH, mode & S_IXOTH); if (fd < 0) error = fd; else error = read_name(inode, name); __putname(name); if (error) goto out_put; 
HOSTFS_I(inode)->fd = fd; HOSTFS_I(inode)->mode = FMODE_READ | FMODE_WRITE; d_instantiate(dentry, inode); return 0; out_put: iput(inode); out: return error; } struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry, struct nameidata *nd) { struct inode *inode; char *name; int err; inode = hostfs_iget(ino->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out; } err = -ENOMEM; name = dentry_name(dentry); if (name == NULL) goto out_put; err = read_name(inode, name); __putname(name); if (err == -ENOENT) { iput(inode); inode = NULL; } else if (err) goto out_put; d_add(dentry, inode); return NULL; out_put: iput(inode); out: return ERR_PTR(err); } int hostfs_link(struct dentry *to, struct inode *ino, struct dentry *from) { char *from_name, *to_name; int err; if ((from_name = dentry_name(from)) == NULL) return -ENOMEM; to_name = dentry_name(to); if (to_name == NULL) { __putname(from_name); return -ENOMEM; } err = link_file(to_name, from_name); __putname(from_name); __putname(to_name); return err; } int hostfs_unlink(struct inode *ino, struct dentry *dentry) { char *file; int err; if (append) return -EPERM; if ((file = dentry_name(dentry)) == NULL) return -ENOMEM; err = unlink_file(file); __putname(file); return err; } int hostfs_symlink(struct inode *ino, struct dentry *dentry, const char *to) { char *file; int err; if ((file = dentry_name(dentry)) == NULL) return -ENOMEM; err = make_symlink(file, to); __putname(file); return err; } int hostfs_mkdir(struct inode *ino, struct dentry *dentry, umode_t mode) { char *file; int err; if ((file = dentry_name(dentry)) == NULL) return -ENOMEM; err = do_mkdir(file, mode); __putname(file); return err; } int hostfs_rmdir(struct inode *ino, struct dentry *dentry) { char *file; int err; if ((file = dentry_name(dentry)) == NULL) return -ENOMEM; err = do_rmdir(file); __putname(file); return err; } static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { struct inode *inode; char 
*name; int err; inode = hostfs_iget(dir->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out; } err = -ENOMEM; name = dentry_name(dentry); if (name == NULL) goto out_put; init_special_inode(inode, mode, dev); err = do_mknod(name, mode, MAJOR(dev), MINOR(dev)); if (!err) goto out_free; err = read_name(inode, name); __putname(name); if (err) goto out_put; if (err) goto out_put; d_instantiate(dentry, inode); return 0; out_free: __putname(name); out_put: iput(inode); out: return err; } int hostfs_rename(struct inode *from_ino, struct dentry *from, struct inode *to_ino, struct dentry *to) { char *from_name, *to_name; int err; if ((from_name = dentry_name(from)) == NULL) return -ENOMEM; if ((to_name = dentry_name(to)) == NULL) { __putname(from_name); return -ENOMEM; } err = rename_file(from_name, to_name); __putname(from_name); __putname(to_name); return err; } int hostfs_permission(struct inode *ino, int desired) { char *name; int r = 0, w = 0, x = 0, err; if (desired & MAY_NOT_BLOCK) return -ECHILD; if (desired & MAY_READ) r = 1; if (desired & MAY_WRITE) w = 1; if (desired & MAY_EXEC) x = 1; name = inode_name(ino); if (name == NULL) return -ENOMEM; if (S_ISCHR(ino->i_mode) || S_ISBLK(ino->i_mode) || S_ISFIFO(ino->i_mode) || S_ISSOCK(ino->i_mode)) err = 0; else err = access_file(name, r, w, x); __putname(name); if (!err) err = generic_permission(ino, desired); return err; } int hostfs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; struct hostfs_iattr attrs; char *name; int err; int fd = HOSTFS_I(inode)->fd; err = inode_change_ok(inode, attr); if (err) return err; if (append) attr->ia_valid &= ~ATTR_SIZE; attrs.ia_valid = 0; if (attr->ia_valid & ATTR_MODE) { attrs.ia_valid |= HOSTFS_ATTR_MODE; attrs.ia_mode = attr->ia_mode; } if (attr->ia_valid & ATTR_UID) { attrs.ia_valid |= HOSTFS_ATTR_UID; attrs.ia_uid = attr->ia_uid; } if (attr->ia_valid & ATTR_GID) { attrs.ia_valid |= HOSTFS_ATTR_GID; attrs.ia_gid = attr->ia_gid; } 
if (attr->ia_valid & ATTR_SIZE) { attrs.ia_valid |= HOSTFS_ATTR_SIZE; attrs.ia_size = attr->ia_size; } if (attr->ia_valid & ATTR_ATIME) { attrs.ia_valid |= HOSTFS_ATTR_ATIME; attrs.ia_atime = attr->ia_atime; } if (attr->ia_valid & ATTR_MTIME) { attrs.ia_valid |= HOSTFS_ATTR_MTIME; attrs.ia_mtime = attr->ia_mtime; } if (attr->ia_valid & ATTR_CTIME) { attrs.ia_valid |= HOSTFS_ATTR_CTIME; attrs.ia_ctime = attr->ia_ctime; } if (attr->ia_valid & ATTR_ATIME_SET) { attrs.ia_valid |= HOSTFS_ATTR_ATIME_SET; } if (attr->ia_valid & ATTR_MTIME_SET) { attrs.ia_valid |= HOSTFS_ATTR_MTIME_SET; } name = dentry_name(dentry); if (name == NULL) return -ENOMEM; err = set_attr(name, &attrs, fd); __putname(name); if (err) return err; if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { int error; error = vmtruncate(inode, attr->ia_size); if (err) return err; } setattr_copy(inode, attr); mark_inode_dirty(inode); return 0; } static const struct inode_operations hostfs_iops = { .create = hostfs_create, .link = hostfs_link, .unlink = hostfs_unlink, .symlink = hostfs_symlink, .mkdir = hostfs_mkdir, .rmdir = hostfs_rmdir, .mknod = hostfs_mknod, .rename = hostfs_rename, .permission = hostfs_permission, .setattr = hostfs_setattr, }; static const struct inode_operations hostfs_dir_iops = { .create = hostfs_create, .lookup = hostfs_lookup, .link = hostfs_link, .unlink = hostfs_unlink, .symlink = hostfs_symlink, .mkdir = hostfs_mkdir, .rmdir = hostfs_rmdir, .mknod = hostfs_mknod, .rename = hostfs_rename, .permission = hostfs_permission, .setattr = hostfs_setattr, }; static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd) { char *link = __getname(); if (link) { char *path = dentry_name(dentry); int err = -ENOMEM; if (path) { err = hostfs_do_readlink(path, link, PATH_MAX); if (err == PATH_MAX) err = -E2BIG; __putname(path); } if (err < 0) { __putname(link); link = ERR_PTR(err); } } else { link = ERR_PTR(-ENOMEM); } nd_set_link(nd, link); return NULL; } 
/* Release the buffer that hostfs_follow_link() stashed in the nameidata. */
static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd,
			    void *cookie)
{
	char *s = nd_get_link(nd);
	if (!IS_ERR(s))
		__putname(s);
}

static const struct inode_operations hostfs_link_iops = {
	.readlink	= generic_readlink,
	.follow_link	= hostfs_follow_link,
	.put_link	= hostfs_put_link,
};

/*
 * Fill in a hostfs superblock: build the host-side root path from the
 * compile-time root_ino plus the mount option string, stat it to set up
 * the root inode (following a symlinked root once), and hang the root
 * dentry off the superblock.  @d is the raw mount data (the requested
 * root subdirectory, possibly NULL).
 */
static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
{
	struct inode *root_inode;
	char *host_root_path, *req_root = d;
	int err;

	sb->s_blocksize = 1024;
	sb->s_blocksize_bits = 10;
	sb->s_magic = HOSTFS_SUPER_MAGIC;
	sb->s_op = &hostfs_sbops;
	sb->s_d_op = &hostfs_dentry_ops;
	sb->s_maxbytes = MAX_LFS_FILESIZE;

	/* NULL is printed as <NULL> by sprintf: avoid that. */
	if (req_root == NULL)
		req_root = "";

	err = -ENOMEM;
	/* s_fs_info keeps the full host path; freed in hostfs_kill_sb(). */
	sb->s_fs_info = host_root_path =
		kmalloc(strlen(root_ino) + strlen(req_root) + 2, GFP_KERNEL);
	if (host_root_path == NULL)
		goto out;

	sprintf(host_root_path, "%s/%s", root_ino, req_root);

	root_inode = new_inode(sb);
	if (!root_inode)
		goto out;

	err = read_name(root_inode, host_root_path);
	if (err)
		goto out_put;

	/* A symlinked root is resolved once and re-stat'ed. */
	if (S_ISLNK(root_inode->i_mode)) {
		char *name = follow_link(host_root_path);
		if (IS_ERR(name))
			err = PTR_ERR(name);
		else
			err = read_name(root_inode, name);
		kfree(name);
		if (err)
			goto out_put;
	}

	err = -ENOMEM;
	/* d_make_root() consumes root_inode even on failure, hence "out". */
	sb->s_root = d_make_root(root_inode);
	if (sb->s_root == NULL)
		goto out;

	return 0;

out_put:
	iput(root_inode);
out:
	return err;
}

static struct dentry *hostfs_read_sb(struct file_system_type *type,
				     int flags, const char *dev_name,
				     void *data)
{
	return mount_nodev(type, flags, data, hostfs_fill_sb_common);
}

/* Tear down the superblock and free the host root path in s_fs_info. */
static void hostfs_kill_sb(struct super_block *s)
{
	kill_anon_super(s);
	kfree(s->s_fs_info);
}

static struct file_system_type hostfs_type = {
	.owner		= THIS_MODULE,
	.name		= "hostfs",
	.mount		= hostfs_read_sb,
	.kill_sb	= hostfs_kill_sb,
	.fs_flags	= 0,
};

static int __init init_hostfs(void)
{
	return register_filesystem(&hostfs_type);
}

static void __exit exit_hostfs(void)
{
	unregister_filesystem(&hostfs_type);
}

module_init(init_hostfs)
module_exit(exit_hostfs)
MODULE_LICENSE("GPL");
gpl-2.0
IOKP-kitkat/kernel_samsung_jf
arch/x86/kernel/acpi/sleep.c
4670
3855
/*
 * sleep.c - x86-specific ACPI sleep support.
 *
 *  Copyright (C) 2001-2003 Patrick Mochel
 *  Copyright (C) 2001-2003 Pavel Machek <pavel@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/dmi.h>
#include <linux/cpumask.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

#include "realmode/wakeup.h"
#include "sleep.h"

/* Bit flags set by the acpi_sleep= command line option (see below). */
unsigned long acpi_realmode_flags;

#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
/* Scratch stack used by the boot CPU while resuming on 64-bit SMP. */
static char temp_stack[4096];
#endif

/* Enter ACPI sleep state S3 (suspend-to-RAM). */
asmlinkage void acpi_enter_s3(void)
{
	acpi_enter_sleep_state(3, wake_sleep_flags);
}

/**
 * acpi_suspend_lowlevel - save kernel state
 *
 * Create an identity mapped page table and copy the wakeup routine to
 * low memory.
 */
int acpi_suspend_lowlevel(void)
{
	struct wakeup_header *header;
	/* address in low memory of the wakeup routine. */
	char *acpi_realmode;

	acpi_realmode = TRAMPOLINE_SYM(acpi_wakeup_code);
	header = (struct wakeup_header *)(acpi_realmode + WAKEUP_HEADER_OFFSET);
	if (header->signature != WAKEUP_HEADER_SIGNATURE) {
		printk(KERN_ERR "wakeup header does not match\n");
		return -EINVAL;
	}

	header->video_mode = saved_video_mode;

	header->wakeup_jmp_seg = acpi_wakeup_address >> 4;

	/*
	 * Set up the wakeup GDT.  We set these up as Big Real Mode,
	 * that is, with limits set to 4 GB.  At least the Lenovo
	 * Thinkpad X61 is known to need this for the video BIOS
	 * initialization quirk to work; this is likely to also
	 * be the case for other laptops or integrated video devices.
	 */

	/* GDT[0]: GDT self-pointer */
	header->wakeup_gdt[0] =
		(u64)(sizeof(header->wakeup_gdt) - 1) +
		((u64)__pa(&header->wakeup_gdt) << 16);
	/* GDT[1]: big real mode-like code segment */
	header->wakeup_gdt[1] =
		GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
	/* GDT[2]: big real mode-like data segment */
	header->wakeup_gdt[2] =
		GDT_ENTRY(0x8093, acpi_wakeup_address, 0xfffff);

#ifndef CONFIG_64BIT
	store_gdt((struct desc_ptr *)&header->pmode_gdt);

	/* Save EFER so the wakeup code can restore it; 0 if unreadable. */
	if (rdmsr_safe(MSR_EFER, &header->pmode_efer_low,
		       &header->pmode_efer_high))
		header->pmode_efer_low = header->pmode_efer_high = 0;
#endif /* !CONFIG_64BIT */

	header->pmode_cr0 = read_cr0();
	header->pmode_cr4 = read_cr4_safe();
	header->pmode_behavior = 0;
	/* Only ask the wakeup code to restore MISC_ENABLE if we could read it. */
	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
			&header->pmode_misc_en_low,
			&header->pmode_misc_en_high))
		header->pmode_behavior |=
			(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
	header->realmode_flags = acpi_realmode_flags;
	header->real_magic = 0x12345678;

#ifndef CONFIG_64BIT
	header->pmode_entry = (u32)&wakeup_pmode_return;
	header->pmode_cr3 = (u32)__pa(&initial_page_table);
	saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
	header->trampoline_segment = trampoline_address() >> 4;
#ifdef CONFIG_SMP
	/* Resume path reuses the secondary-CPU startup globals. */
	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
	early_gdt_descr.address =
			(unsigned long)get_cpu_gdt_table(smp_processor_id());
	initial_gs = per_cpu_offset(smp_processor_id());
#endif
	initial_code = (unsigned long)wakeup_long64;
	saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */

	do_suspend_lowlevel();
	return 0;
}

/*
 * Parse the acpi_sleep= command line option.  Accepts a comma-separated
 * list of: s3_bios, s3_mode, s3_beep, s4_nohwsig, nonvs, old_ordering.
 */
static int __init acpi_sleep_setup(char *str)
{
	while ((str != NULL) && (*str != '\0')) {
		if (strncmp(str, "s3_bios", 7) == 0)
			acpi_realmode_flags |= 1;
		if (strncmp(str, "s3_mode", 7) == 0)
			acpi_realmode_flags |= 2;
		if (strncmp(str, "s3_beep", 7) == 0)
			acpi_realmode_flags |= 4;
#ifdef CONFIG_HIBERNATION
		if (strncmp(str, "s4_nohwsig", 10) == 0)
			acpi_no_s4_hw_signature();
#endif
		if (strncmp(str, "nonvs", 5) == 0)
			acpi_nvs_nosave();
		if (strncmp(str, "old_ordering", 12) == 0)
			acpi_old_suspend_ordering();
		str = strchr(str, ',');
		if (str != NULL)
			str += strspn(str, ", \t");
	}
	return 1;
}

__setup("acpi_sleep=", acpi_sleep_setup);
gpl-2.0
nunogia/Z7Max_NX505J_H129_kernel
drivers/net/ppp/ppp_deflate.c
6974
18298
/*
 * ppp_deflate.c - interface the zlib procedures for Deflate compression
 * and decompression (as used by gzip) to the PPP code.
 *
 * Copyright 1994-1998 Paul Mackerras.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/string.h>

#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>

#include <linux/zlib.h>
#include <asm/unaligned.h>

/*
 * State for a Deflate (de)compressor.
 */
struct ppp_deflate_state {
    int		seqno;		/* next transmit/expected receive sequence # */
    int		w_size;		/* negotiated deflate window size (log2) */
    int		unit;		/* PPP unit number, for diagnostics */
    int		mru;		/* max decompressed packet length */
    int		debug;		/* non-zero => print debug messages */
    z_stream	strm;		/* zlib (de)compression stream */
    struct compstat stats;	/* byte/packet counters */
};

#define DEFLATE_OVHD	2		/* Deflate overhead/packet */

static void	*z_comp_alloc(unsigned char *options, int opt_len);
static void	*z_decomp_alloc(unsigned char *options, int opt_len);
static void	z_comp_free(void *state);
static void	z_decomp_free(void *state);
static int	z_comp_init(void *state, unsigned char *options,
				 int opt_len,
				 int unit, int hdrlen, int debug);
static int	z_decomp_init(void *state, unsigned char *options,
				   int opt_len,
				   int unit, int hdrlen, int mru, int debug);
static int	z_compress(void *state, unsigned char *rptr,
				unsigned char *obuf,
				int isize, int osize);
static void	z_incomp(void *state, unsigned char *ibuf, int icnt);
static int	z_decompress(void *state, unsigned char *ibuf,
				int isize, unsigned char *obuf, int osize);
static void	z_comp_reset(void *state);
static void	z_decomp_reset(void *state);
static void	z_comp_stats(void *state, struct compstat *stats);

/**
 *	z_comp_free - free the memory used by a compressor
 *	@arg:	pointer to the private state for the compressor.
 */
static void z_comp_free(void *arg)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;

	if (state) {
		zlib_deflateEnd(&state->strm);
		vfree(state->strm.workspace);
		kfree(state);
	}
}

/**
 *	z_comp_alloc - allocate space for a compressor.
 *	@options: pointer to CCP option data
 *	@opt_len: length of the CCP option at @options.
 *
 *	The @options pointer points to the a buffer containing the
 *	CCP option data for the compression being negotiated.  It is
 *	formatted according to RFC1979, and describes the window
 *	size that the peer is requesting that we use in compressing
 *	data to be sent to it.
 *
 *	Returns the pointer to the private state for the compressor,
 *	or NULL if we could not allocate enough memory.
 */
static void *z_comp_alloc(unsigned char *options, int opt_len)
{
	struct ppp_deflate_state *state;
	int w_size;

	if (opt_len != CILEN_DEFLATE ||
	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
	    options[1] != CILEN_DEFLATE ||
	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
	    options[3] != DEFLATE_CHK_SEQUENCE)
		return NULL;
	w_size = DEFLATE_SIZE(options[2]);
	if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
		return NULL;

	state = kzalloc(sizeof(*state),
						     GFP_KERNEL);
	if (state == NULL)
		return NULL;

	state->strm.next_in   = NULL;
	state->w_size         = w_size;
	/* negative windowBits => raw deflate, no zlib header (per zlib API) */
	state->strm.workspace = vmalloc(zlib_deflate_workspacesize(-w_size, 8));
	if (state->strm.workspace == NULL)
		goto out_free;

	if (zlib_deflateInit2(&state->strm, Z_DEFAULT_COMPRESSION,
			 DEFLATE_METHOD_VAL, -w_size, 8, Z_DEFAULT_STRATEGY)
	    != Z_OK)
		goto out_free;
	return (void *) state;

out_free:
	z_comp_free(state);
	return NULL;
}

/**
 *	z_comp_init - initialize a previously-allocated compressor.
 *	@arg:	pointer to the private state for the compressor
 *	@options: pointer to the CCP option data describing the
 *		compression that was negotiated with the peer
 *	@opt_len: length of the CCP option data at @options
 *	@unit:	PPP unit number for diagnostic messages
 *	@hdrlen: ignored (present for backwards compatibility)
 *	@debug:	debug flag; if non-zero, debug messages are printed.
 *
 *	The CCP options described by @options must match the options
 *	specified when the compressor was allocated.  The compressor
 *	history is reset.  Returns 0 for failure (CCP options don't
 *	match) or 1 for success.
 */
static int z_comp_init(void *arg, unsigned char *options, int opt_len,
		       int unit, int hdrlen, int debug)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;

	if (opt_len < CILEN_DEFLATE ||
	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
	    options[1] != CILEN_DEFLATE ||
	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
	    DEFLATE_SIZE(options[2]) != state->w_size ||
	    options[3] != DEFLATE_CHK_SEQUENCE)
		return 0;

	state->seqno = 0;
	state->unit  = unit;
	state->debug = debug;

	zlib_deflateReset(&state->strm);

	return 1;
}

/**
 *	z_comp_reset - reset a previously-allocated compressor.
 *	@arg:	pointer to private state for the compressor.
 *
 *	This clears the history for the compressor and makes it
 *	ready to start emitting a new compressed stream.
 */
static void z_comp_reset(void *arg)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;

	state->seqno = 0;
	zlib_deflateReset(&state->strm);
}

/**
 *	z_compress - compress a PPP packet with Deflate compression.
 *	@arg:	pointer to private state for the compressor
 *	@rptr:	uncompressed packet (input)
 *	@obuf:	compressed packet (output)
 *	@isize:	size of uncompressed packet
 *	@osize:	space available at @obuf
 *
 *	Returns the length of the compressed packet, or 0 if the
 *	packet is incompressible.
 */
static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
	       int isize, int osize)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
	int r, proto, off, olen, oavail;
	unsigned char *wptr;

	/*
	 * Check that the protocol is in the range we handle.
	 */
	proto = PPP_PROTOCOL(rptr);
	if (proto > 0x3fff || proto == 0xfd || proto == 0xfb)
		return 0;

	/* Don't generate compressed packets which are larger than
	   the uncompressed packet. */
	if (osize > isize)
		osize = isize;

	wptr = obuf;

	/*
	 * Copy over the PPP header and store the 2-byte sequence number.
	 */
	wptr[0] = PPP_ADDRESS(rptr);
	wptr[1] = PPP_CONTROL(rptr);
	put_unaligned_be16(PPP_COMP, wptr + 2);
	wptr += PPP_HDRLEN;
	put_unaligned_be16(state->seqno, wptr);
	wptr += DEFLATE_OVHD;
	olen = PPP_HDRLEN + DEFLATE_OVHD;
	state->strm.next_out = wptr;
	state->strm.avail_out = oavail = osize - olen;
	++state->seqno;

	off = (proto > 0xff) ? 2 : 3;	/* skip 1st proto byte if 0 */
	rptr += off;
	state->strm.next_in = rptr;
	state->strm.avail_in = (isize - off);

	for (;;) {
		r = zlib_deflate(&state->strm, Z_PACKET_FLUSH);
		if (r != Z_OK) {
			if (state->debug)
				printk(KERN_ERR
				       "z_compress: deflate returned %d\n", r);
			break;
		}
		if (state->strm.avail_out == 0) {
			/*
			 * Output didn't fit; keep deflating into nowhere so
			 * the history stays in sync, and count the overflow
			 * so the "incompressible" branch below is taken.
			 */
			olen += oavail;
			state->strm.next_out = NULL;
			state->strm.avail_out = oavail = 1000000;
		} else {
			break;		/* all done */
		}
	}
	olen += oavail - state->strm.avail_out;

	/*
	 * See if we managed to reduce the size of the packet.
	 */
	if (olen < isize) {
		state->stats.comp_bytes += olen;
		state->stats.comp_packets++;
	} else {
		state->stats.inc_bytes += isize;
		state->stats.inc_packets++;
		olen = 0;	/* caller sends the packet uncompressed */
	}
	state->stats.unc_bytes += isize;
	state->stats.unc_packets++;

	return olen;
}

/**
 *	z_comp_stats - return compression statistics for a compressor
 *		or decompressor.
 *	@arg:	pointer to private space for the (de)compressor
 *	@stats:	pointer to a struct compstat to receive the result.
 */
static void z_comp_stats(void *arg, struct compstat *stats)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;

	*stats = state->stats;
}

/**
 *	z_decomp_free - Free the memory used by a decompressor.
 *	@arg:	pointer to private space for the decompressor.
 */
static void z_decomp_free(void *arg)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;

	if (state) {
		zlib_inflateEnd(&state->strm);
		vfree(state->strm.workspace);
		kfree(state);
	}
}

/**
 *	z_decomp_alloc - allocate space for a decompressor.
 *	@options: pointer to CCP option data
 *	@opt_len: length of the CCP option at @options.
 *
 *	The @options pointer points to the a buffer containing the
 *	CCP option data for the compression being negotiated.  It is
 *	formatted according to RFC1979, and describes the window
 *	size that we are requesting the peer to use in compressing
 *	data to be sent to us.
 *
 *	Returns the pointer to the private state for the decompressor,
 *	or NULL if we could not allocate enough memory.
 */
static void *z_decomp_alloc(unsigned char *options, int opt_len)
{
	struct ppp_deflate_state *state;
	int w_size;

	if (opt_len != CILEN_DEFLATE ||
	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
	    options[1] != CILEN_DEFLATE ||
	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
	    options[3] != DEFLATE_CHK_SEQUENCE)
		return NULL;
	w_size = DEFLATE_SIZE(options[2]);
	if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return NULL;

	state->w_size         = w_size;
	state->strm.next_out  = NULL;
	state->strm.workspace = vmalloc(zlib_inflate_workspacesize());
	if (state->strm.workspace == NULL)
		goto out_free;

	/* negative windowBits => raw inflate, no zlib header (per zlib API) */
	if (zlib_inflateInit2(&state->strm, -w_size) != Z_OK)
		goto out_free;
	return (void *) state;

out_free:
	z_decomp_free(state);
	return NULL;
}

/**
 *	z_decomp_init - initialize a previously-allocated decompressor.
 *	@arg:	pointer to the private state for the decompressor
 *	@options: pointer to the CCP option data describing the
 *		compression that was negotiated with the peer
 *	@opt_len: length of the CCP option data at @options
 *	@unit:	PPP unit number for diagnostic messages
 *	@hdrlen: ignored (present for backwards compatibility)
 *	@mru:	maximum length of decompressed packets
 *	@debug:	debug flag; if non-zero, debug messages are printed.
 *
 *	The CCP options described by @options must match the options
 *	specified when the decompressor was allocated.  The decompressor
 *	history is reset.  Returns 0 for failure (CCP options don't
 *	match) or 1 for success.
 */
static int z_decomp_init(void *arg, unsigned char *options, int opt_len,
			 int unit, int hdrlen, int mru, int debug)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;

	if (opt_len < CILEN_DEFLATE ||
	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
	    options[1] != CILEN_DEFLATE ||
	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
	    DEFLATE_SIZE(options[2]) != state->w_size ||
	    options[3] != DEFLATE_CHK_SEQUENCE)
		return 0;

	state->seqno = 0;
	state->unit  = unit;
	state->debug = debug;
	state->mru   = mru;

	zlib_inflateReset(&state->strm);

	return 1;
}

/**
 *	z_decomp_reset - reset a previously-allocated decompressor.
 *	@arg:	pointer to private state for the decompressor.
 *
 *	This clears the history for the decompressor and makes it
 *	ready to receive a new compressed stream.
 */
static void z_decomp_reset(void *arg)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;

	state->seqno = 0;
	zlib_inflateReset(&state->strm);
}

/**
 *	z_decompress - decompress a Deflate-compressed packet.
 *	@arg:	pointer to private state for the decompressor
 *	@ibuf:	pointer to input (compressed) packet data
 *	@isize:	length of input packet
 *	@obuf:	pointer to space for output (decompressed) packet
 *	@osize:	amount of space available at @obuf
 *
 * Because of patent problems, we return DECOMP_ERROR for errors
 * found by inspecting the input data and for system problems, but
 * DECOMP_FATALERROR for any errors which could possibly be said to
 * be being detected "after" decompression.  For DECOMP_ERROR,
 * we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be
 * infringing a patent of Motorola's if we do, so we take CCP down
 * instead.
 *
 * Given that the frame has the correct sequence number and a good FCS,
 * errors such as invalid codes in the input most likely indicate a
 * bug, so we return DECOMP_FATALERROR for them in order to turn off
 * compression, even though they are detected by inspecting the input.
 */
static int z_decompress(void *arg, unsigned char *ibuf, int isize,
		 unsigned char *obuf, int osize)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
	int olen, seq, r;
	int decode_proto, overflow;
	unsigned char overflow_buf[1];

	if (isize <= PPP_HDRLEN + DEFLATE_OVHD) {
		if (state->debug)
			printk(KERN_DEBUG "z_decompress%d: short pkt (%d)\n",
			       state->unit, isize);
		return DECOMP_ERROR;
	}

	/* Check the sequence number. */
	seq = get_unaligned_be16(ibuf + PPP_HDRLEN);
	if (seq != (state->seqno & 0xffff)) {
		if (state->debug)
			printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
			       state->unit, seq, state->seqno & 0xffff);
		return DECOMP_ERROR;
	}
	++state->seqno;

	/*
	 * Fill in the first part of the PPP header.  The protocol field
	 * comes from the decompressed data.
	 */
	obuf[0] = PPP_ADDRESS(ibuf);
	obuf[1] = PPP_CONTROL(ibuf);
	obuf[2] = 0;

	/*
	 * Set up to call inflate.  We set avail_out to 1 initially so we can
	 * look at the first byte of the output and decide whether we have
	 * a 1-byte or 2-byte protocol field.
	 */
	state->strm.next_in = ibuf + PPP_HDRLEN + DEFLATE_OVHD;
	state->strm.avail_in = isize - (PPP_HDRLEN + DEFLATE_OVHD);
	state->strm.next_out = obuf + 3;
	state->strm.avail_out = 1;
	decode_proto = 1;
	overflow = 0;

	/*
	 * Call inflate, supplying more input or output as needed.
	 */
	for (;;) {
		r = zlib_inflate(&state->strm, Z_PACKET_FLUSH);
		if (r != Z_OK) {
			if (state->debug)
				printk(KERN_DEBUG "z_decompress%d: inflate returned %d (%s)\n",
				       state->unit, r, (state->strm.msg? state->strm.msg: ""));
			return DECOMP_FATALERROR;
		}
		if (state->strm.avail_out != 0)
			break;		/* all done */
		if (decode_proto) {
			/* First output byte produced: widen avail_out now
			 * that we know whether the proto field is 1 or 2
			 * bytes (LSB set => compressed 1-byte proto). */
			state->strm.avail_out = osize - PPP_HDRLEN;
			if ((obuf[3] & 1) == 0) {
				/* 2-byte protocol field */
				obuf[2] = obuf[3];
				--state->strm.next_out;
				++state->strm.avail_out;
			}
			decode_proto = 0;
		} else if (!overflow) {
			/*
			 * We've filled up the output buffer; the only way to
			 * find out whether inflate has any more characters
			 * left is to give it another byte of output space.
			 */
			state->strm.next_out = overflow_buf;
			state->strm.avail_out = 1;
			overflow = 1;
		} else {
			if (state->debug)
				printk(KERN_DEBUG "z_decompress%d: ran out of mru\n",
				       state->unit);
			return DECOMP_FATALERROR;
		}
	}

	if (decode_proto) {
		if (state->debug)
			printk(KERN_DEBUG "z_decompress%d: didn't get proto\n",
			       state->unit);
		return DECOMP_ERROR;
	}

	olen = osize + overflow - state->strm.avail_out;
	state->stats.unc_bytes += olen;
	state->stats.unc_packets++;
	state->stats.comp_bytes += isize;
	state->stats.comp_packets++;

	return olen;
}

/**
 *	z_incomp - add incompressible input data to the history.
 *	@arg:	pointer to private state for the decompressor
 *	@ibuf:	pointer to input packet data
 *	@icnt:	length of input data.
 */
static void z_incomp(void *arg, unsigned char *ibuf, int icnt)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
	int proto, r;

	/*
	 * Check that the protocol is one we handle.
	 */
	proto = PPP_PROTOCOL(ibuf);
	if (proto > 0x3fff || proto == 0xfd || proto == 0xfb)
		return;

	++state->seqno;

	/*
	 * We start at the either the 1st or 2nd byte of the protocol field,
	 * depending on whether the protocol value is compressible.
	 */
	state->strm.next_in = ibuf + 3;
	state->strm.avail_in = icnt - 3;
	if (proto > 0xff) {
		--state->strm.next_in;
		++state->strm.avail_in;
	}

	r = zlib_inflateIncomp(&state->strm);
	if (r != Z_OK) {
		/* gak! */
		if (state->debug) {
			printk(KERN_DEBUG "z_incomp%d: inflateIncomp returned %d (%s)\n",
			       state->unit, r, (state->strm.msg? state->strm.msg: ""));
		}
		return;
	}

	/*
	 * Update stats.
	 */
	state->stats.inc_bytes += icnt;
	state->stats.inc_packets++;
	state->stats.unc_bytes += icnt;
	state->stats.unc_packets++;
}

/*************************************************************
 * Module interface table
 *************************************************************/

/* These are in ppp_generic.c */
extern int  ppp_register_compressor   (struct compressor *cp);
extern void ppp_unregister_compressor (struct compressor *cp);

/*
 * Procedures exported to if_ppp.c.
*/ static struct compressor ppp_deflate = { .compress_proto = CI_DEFLATE, .comp_alloc = z_comp_alloc, .comp_free = z_comp_free, .comp_init = z_comp_init, .comp_reset = z_comp_reset, .compress = z_compress, .comp_stat = z_comp_stats, .decomp_alloc = z_decomp_alloc, .decomp_free = z_decomp_free, .decomp_init = z_decomp_init, .decomp_reset = z_decomp_reset, .decompress = z_decompress, .incomp = z_incomp, .decomp_stat = z_comp_stats, .owner = THIS_MODULE }; static struct compressor ppp_deflate_draft = { .compress_proto = CI_DEFLATE_DRAFT, .comp_alloc = z_comp_alloc, .comp_free = z_comp_free, .comp_init = z_comp_init, .comp_reset = z_comp_reset, .compress = z_compress, .comp_stat = z_comp_stats, .decomp_alloc = z_decomp_alloc, .decomp_free = z_decomp_free, .decomp_init = z_decomp_init, .decomp_reset = z_decomp_reset, .decompress = z_decompress, .incomp = z_incomp, .decomp_stat = z_comp_stats, .owner = THIS_MODULE }; static int __init deflate_init(void) { int answer = ppp_register_compressor(&ppp_deflate); if (answer == 0) printk(KERN_INFO "PPP Deflate Compression module registered\n"); ppp_register_compressor(&ppp_deflate_draft); return answer; } static void __exit deflate_cleanup(void) { ppp_unregister_compressor(&ppp_deflate); ppp_unregister_compressor(&ppp_deflate_draft); } module_init(deflate_init); module_exit(deflate_cleanup); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE)); MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE_DRAFT));
gpl-2.0
redglasses/linux-yocto-3.10
arch/sh/boot/compressed/misc.c
11326
2699
/* * arch/sh/boot/compressed/misc.c * * This is a collection of several routines from gzip-1.0.3 * adapted for Linux. * * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 * * Adapted for SH by Stuart Menefy, Aug 1999 * * Modified to use standard LinuxSH BIOS by Greg Banks 7Jul2000 */ #include <asm/uaccess.h> #include <asm/addrspace.h> #include <asm/page.h> /* * gzip declarations */ #define STATIC static #undef memset #undef memcpy #define memzero(s, n) memset ((s), 0, (n)) /* cache.c */ #define CACHE_ENABLE 0 #define CACHE_DISABLE 1 int cache_control(unsigned int command); extern char input_data[]; extern int input_len; static unsigned char *output; static void error(char *m); int puts(const char *); extern int _text; /* Defined in vmlinux.lds.S */ extern int _end; static unsigned long free_mem_ptr; static unsigned long free_mem_end_ptr; #ifdef CONFIG_HAVE_KERNEL_BZIP2 #define HEAP_SIZE 0x400000 #else #define HEAP_SIZE 0x10000 #endif #ifdef CONFIG_KERNEL_GZIP #include "../../../../lib/decompress_inflate.c" #endif #ifdef CONFIG_KERNEL_BZIP2 #include "../../../../lib/decompress_bunzip2.c" #endif #ifdef CONFIG_KERNEL_LZMA #include "../../../../lib/decompress_unlzma.c" #endif #ifdef CONFIG_KERNEL_XZ #include "../../../../lib/decompress_unxz.c" #endif #ifdef CONFIG_KERNEL_LZO #include "../../../../lib/decompress_unlzo.c" #endif int puts(const char *s) { /* This should be updated to use the sh-sci routines */ return 0; } void* memset(void* s, int c, size_t n) { int i; char *ss = (char*)s; for (i=0;i<n;i++) ss[i] = c; return s; } void* memcpy(void* __dest, __const void* __src, size_t __n) { int i; char *d = (char *)__dest, *s = (char *)__src; for (i=0;i<__n;i++) d[i] = s[i]; return __dest; } static void error(char *x) { puts("\n\n"); puts(x); puts("\n\n -- System halted"); while(1); /* Halt */ } #ifdef CONFIG_SUPERH64 #define stackalign 8 #else #define stackalign 4 #endif #define STACK_SIZE (4096) long __attribute__ ((aligned(stackalign))) user_stack[STACK_SIZE]; 
long *stack_start = &user_stack[STACK_SIZE]; void decompress_kernel(void) { unsigned long output_addr; #ifdef CONFIG_SUPERH64 output_addr = (CONFIG_MEMORY_START + 0x2000); #else output_addr = __pa((unsigned long)&_text+PAGE_SIZE); #if defined(CONFIG_29BIT) output_addr |= P2SEG; #endif #endif output = (unsigned char *)output_addr; free_mem_ptr = (unsigned long)&_end; free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; puts("Uncompressing Linux... "); cache_control(CACHE_ENABLE); decompress(input_data, input_len, NULL, NULL, output, NULL, error); cache_control(CACHE_DISABLE); puts("Ok, booting the kernel.\n"); }
gpl-2.0
Krabappel2548/android_kernel_sony_msm8974-kexec
fs/quota/compat.c
13886
3302
#include <linux/syscalls.h> #include <linux/compat.h> #include <linux/quotaops.h> /* * This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64) * and is necessary due to alignment problems. */ struct compat_if_dqblk { compat_u64 dqb_bhardlimit; compat_u64 dqb_bsoftlimit; compat_u64 dqb_curspace; compat_u64 dqb_ihardlimit; compat_u64 dqb_isoftlimit; compat_u64 dqb_curinodes; compat_u64 dqb_btime; compat_u64 dqb_itime; compat_uint_t dqb_valid; }; /* XFS structures */ struct compat_fs_qfilestat { compat_u64 dqb_bhardlimit; compat_u64 qfs_nblks; compat_uint_t qfs_nextents; }; struct compat_fs_quota_stat { __s8 qs_version; __u16 qs_flags; __s8 qs_pad; struct compat_fs_qfilestat qs_uquota; struct compat_fs_qfilestat qs_gquota; compat_uint_t qs_incoredqs; compat_int_t qs_btimelimit; compat_int_t qs_itimelimit; compat_int_t qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; }; asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, qid_t id, void __user *addr) { unsigned int cmds; struct if_dqblk __user *dqblk; struct compat_if_dqblk __user *compat_dqblk; struct fs_quota_stat __user *fsqstat; struct compat_fs_quota_stat __user *compat_fsqstat; compat_uint_t data; u16 xdata; long ret; cmds = cmd >> SUBCMDSHIFT; switch (cmds) { case Q_GETQUOTA: dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); compat_dqblk = addr; ret = sys_quotactl(cmd, special, id, dqblk); if (ret) break; if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) || get_user(data, &dqblk->dqb_valid) || put_user(data, &compat_dqblk->dqb_valid)) ret = -EFAULT; break; case Q_SETQUOTA: dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); compat_dqblk = addr; ret = -EFAULT; if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) || get_user(data, &compat_dqblk->dqb_valid) || put_user(data, &dqblk->dqb_valid)) break; ret = sys_quotactl(cmd, special, id, dqblk); break; case Q_XGETQSTAT: fsqstat = compat_alloc_user_space(sizeof(struct 
fs_quota_stat)); compat_fsqstat = addr; ret = sys_quotactl(cmd, special, id, fsqstat); if (ret) break; ret = -EFAULT; /* Copying qs_version, qs_flags, qs_pad */ if (copy_in_user(compat_fsqstat, fsqstat, offsetof(struct compat_fs_quota_stat, qs_uquota))) break; /* Copying qs_uquota */ if (copy_in_user(&compat_fsqstat->qs_uquota, &fsqstat->qs_uquota, sizeof(compat_fsqstat->qs_uquota)) || get_user(data, &fsqstat->qs_uquota.qfs_nextents) || put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents)) break; /* Copying qs_gquota */ if (copy_in_user(&compat_fsqstat->qs_gquota, &fsqstat->qs_gquota, sizeof(compat_fsqstat->qs_gquota)) || get_user(data, &fsqstat->qs_gquota.qfs_nextents) || put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents)) break; /* Copying the rest */ if (copy_in_user(&compat_fsqstat->qs_incoredqs, &fsqstat->qs_incoredqs, sizeof(struct compat_fs_quota_stat) - offsetof(struct compat_fs_quota_stat, qs_incoredqs)) || get_user(xdata, &fsqstat->qs_iwarnlimit) || put_user(xdata, &compat_fsqstat->qs_iwarnlimit)) break; ret = 0; break; default: ret = sys_quotactl(cmd, special, id, addr); } return ret; }
gpl-2.0
VincentS/glibc
sysdeps/ieee754/ldbl-96/s_sinl.c
63
2321
/* s_sinl.c -- long double version of s_sin.c. * Conversion to long double by Ulrich Drepper, * Cygnus Support, drepper@cygnus.com. */ /* * ==================================================== * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. * * Developed at SunPro, a Sun Microsystems, Inc. business. * Permission to use, copy, modify, and distribute this * software is freely granted, provided that this notice * is preserved. * ==================================================== */ #if defined(LIBM_SCCS) && !defined(lint) static char rcsid[] = "$NetBSD: $"; #endif /* sinl(x) * Return sine function of x. * * kernel function: * __kernel_sinl ... sine function on [-pi/4,pi/4] * __kernel_cosl ... cose function on [-pi/4,pi/4] * __ieee754_rem_pio2l ... argument reduction routine * * Method. * Let S,C and T denote the sin, cos and tan respectively on * [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2 * in [-pi/4 , +pi/4], and let n = k mod 4. * We have * * n sin(x) cos(x) tan(x) * ---------------------------------------------------------- * 0 S C T * 1 C -S -1/T * 2 -S -C T * 3 -C S -1/T * ---------------------------------------------------------- * * Special cases: * Let trig be any of sin, cos, or tan. * trig(+-INF) is NaN, with signals; * trig(NaN) is that NaN; * * Accuracy: * TRIG(x) returns trig(x) nearly rounded */ #include <errno.h> #include <math.h> #include <math_private.h> long double __sinl(long double x) { long double y[2],z=0.0; int32_t n, se, i0, i1; /* High word of x. 
*/ GET_LDOUBLE_WORDS(se,i0,i1,x); /* |x| ~< pi/4 */ se &= 0x7fff; if(se < 0x3ffe || (se == 0x3ffe && i0 <= 0xc90fdaa2)) return __kernel_sinl(x,z,0); /* sin(Inf or NaN) is NaN */ else if (se==0x7fff) { if (i1 == 0 && i0 == 0x80000000) __set_errno (EDOM); return x-x; } /* argument reduction needed */ else { n = __ieee754_rem_pio2l(x,y); switch(n&3) { case 0: return __kernel_sinl(y[0],y[1],1); case 1: return __kernel_cosl(y[0],y[1]); case 2: return -__kernel_sinl(y[0],y[1],1); default: return -__kernel_cosl(y[0],y[1]); } } } weak_alias (__sinl, sinl)
gpl-2.0
scriptZilla/linux
drivers/pinctrl/nomadik/pinctrl-nomadik.c
63
55553
/* * Generic GPIO driver for logic cells found in the Nomadik SoC * * Copyright (C) 2008,2009 STMicroelectronics * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it> * Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com> * Copyright (C) 2011-2013 Linus Walleij <linus.walleij@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/of_device.h> #include <linux/of_address.h> #include <linux/pinctrl/machine.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> #include <linux/pinctrl/pinconf.h> /* Since we request GPIOs from ourself */ #include <linux/pinctrl/consumer.h> #include "pinctrl-nomadik.h" #include "../core.h" #include "../pinctrl-utils.h" /* * The GPIO module in the Nomadik family of Systems-on-Chip is an * AMBA device, managing 32 pins and alternate functions. The logic block * is currently used in the Nomadik and ux500. * * Symbols in this file are called "nmk_gpio" for "nomadik gpio" */ /* * pin configurations are represented by 32-bit integers: * * bit 0.. 
8 - Pin Number (512 Pins Maximum) * bit 9..10 - Alternate Function Selection * bit 11..12 - Pull up/down state * bit 13 - Sleep mode behaviour * bit 14 - Direction * bit 15 - Value (if output) * bit 16..18 - SLPM pull up/down state * bit 19..20 - SLPM direction * bit 21..22 - SLPM Value (if output) * bit 23..25 - PDIS value (if input) * bit 26 - Gpio mode * bit 27 - Sleep mode * * to facilitate the definition, the following macros are provided * * PIN_CFG_DEFAULT - default config (0): * pull up/down = disabled * sleep mode = input/wakeup * direction = input * value = low * SLPM direction = same as normal * SLPM pull = same as normal * SLPM value = same as normal * * PIN_CFG - default config with alternate function */ typedef unsigned long pin_cfg_t; #define PIN_NUM_MASK 0x1ff #define PIN_NUM(x) ((x) & PIN_NUM_MASK) #define PIN_ALT_SHIFT 9 #define PIN_ALT_MASK (0x3 << PIN_ALT_SHIFT) #define PIN_ALT(x) (((x) & PIN_ALT_MASK) >> PIN_ALT_SHIFT) #define PIN_GPIO (NMK_GPIO_ALT_GPIO << PIN_ALT_SHIFT) #define PIN_ALT_A (NMK_GPIO_ALT_A << PIN_ALT_SHIFT) #define PIN_ALT_B (NMK_GPIO_ALT_B << PIN_ALT_SHIFT) #define PIN_ALT_C (NMK_GPIO_ALT_C << PIN_ALT_SHIFT) #define PIN_PULL_SHIFT 11 #define PIN_PULL_MASK (0x3 << PIN_PULL_SHIFT) #define PIN_PULL(x) (((x) & PIN_PULL_MASK) >> PIN_PULL_SHIFT) #define PIN_PULL_NONE (NMK_GPIO_PULL_NONE << PIN_PULL_SHIFT) #define PIN_PULL_UP (NMK_GPIO_PULL_UP << PIN_PULL_SHIFT) #define PIN_PULL_DOWN (NMK_GPIO_PULL_DOWN << PIN_PULL_SHIFT) #define PIN_SLPM_SHIFT 13 #define PIN_SLPM_MASK (0x1 << PIN_SLPM_SHIFT) #define PIN_SLPM(x) (((x) & PIN_SLPM_MASK) >> PIN_SLPM_SHIFT) #define PIN_SLPM_MAKE_INPUT (NMK_GPIO_SLPM_INPUT << PIN_SLPM_SHIFT) #define PIN_SLPM_NOCHANGE (NMK_GPIO_SLPM_NOCHANGE << PIN_SLPM_SHIFT) /* These two replace the above in DB8500v2+ */ #define PIN_SLPM_WAKEUP_ENABLE (NMK_GPIO_SLPM_WAKEUP_ENABLE << PIN_SLPM_SHIFT) #define PIN_SLPM_WAKEUP_DISABLE (NMK_GPIO_SLPM_WAKEUP_DISABLE << PIN_SLPM_SHIFT) #define PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP 
PIN_SLPM_WAKEUP_DISABLE #define PIN_SLPM_GPIO PIN_SLPM_WAKEUP_ENABLE /* In SLPM, pin is a gpio */ #define PIN_SLPM_ALTFUNC PIN_SLPM_WAKEUP_DISABLE /* In SLPM, pin is altfunc */ #define PIN_DIR_SHIFT 14 #define PIN_DIR_MASK (0x1 << PIN_DIR_SHIFT) #define PIN_DIR(x) (((x) & PIN_DIR_MASK) >> PIN_DIR_SHIFT) #define PIN_DIR_INPUT (0 << PIN_DIR_SHIFT) #define PIN_DIR_OUTPUT (1 << PIN_DIR_SHIFT) #define PIN_VAL_SHIFT 15 #define PIN_VAL_MASK (0x1 << PIN_VAL_SHIFT) #define PIN_VAL(x) (((x) & PIN_VAL_MASK) >> PIN_VAL_SHIFT) #define PIN_VAL_LOW (0 << PIN_VAL_SHIFT) #define PIN_VAL_HIGH (1 << PIN_VAL_SHIFT) #define PIN_SLPM_PULL_SHIFT 16 #define PIN_SLPM_PULL_MASK (0x7 << PIN_SLPM_PULL_SHIFT) #define PIN_SLPM_PULL(x) \ (((x) & PIN_SLPM_PULL_MASK) >> PIN_SLPM_PULL_SHIFT) #define PIN_SLPM_PULL_NONE \ ((1 + NMK_GPIO_PULL_NONE) << PIN_SLPM_PULL_SHIFT) #define PIN_SLPM_PULL_UP \ ((1 + NMK_GPIO_PULL_UP) << PIN_SLPM_PULL_SHIFT) #define PIN_SLPM_PULL_DOWN \ ((1 + NMK_GPIO_PULL_DOWN) << PIN_SLPM_PULL_SHIFT) #define PIN_SLPM_DIR_SHIFT 19 #define PIN_SLPM_DIR_MASK (0x3 << PIN_SLPM_DIR_SHIFT) #define PIN_SLPM_DIR(x) \ (((x) & PIN_SLPM_DIR_MASK) >> PIN_SLPM_DIR_SHIFT) #define PIN_SLPM_DIR_INPUT ((1 + 0) << PIN_SLPM_DIR_SHIFT) #define PIN_SLPM_DIR_OUTPUT ((1 + 1) << PIN_SLPM_DIR_SHIFT) #define PIN_SLPM_VAL_SHIFT 21 #define PIN_SLPM_VAL_MASK (0x3 << PIN_SLPM_VAL_SHIFT) #define PIN_SLPM_VAL(x) \ (((x) & PIN_SLPM_VAL_MASK) >> PIN_SLPM_VAL_SHIFT) #define PIN_SLPM_VAL_LOW ((1 + 0) << PIN_SLPM_VAL_SHIFT) #define PIN_SLPM_VAL_HIGH ((1 + 1) << PIN_SLPM_VAL_SHIFT) #define PIN_SLPM_PDIS_SHIFT 23 #define PIN_SLPM_PDIS_MASK (0x3 << PIN_SLPM_PDIS_SHIFT) #define PIN_SLPM_PDIS(x) \ (((x) & PIN_SLPM_PDIS_MASK) >> PIN_SLPM_PDIS_SHIFT) #define PIN_SLPM_PDIS_NO_CHANGE (0 << PIN_SLPM_PDIS_SHIFT) #define PIN_SLPM_PDIS_DISABLED (1 << PIN_SLPM_PDIS_SHIFT) #define PIN_SLPM_PDIS_ENABLED (2 << PIN_SLPM_PDIS_SHIFT) #define PIN_LOWEMI_SHIFT 25 #define PIN_LOWEMI_MASK (0x1 << PIN_LOWEMI_SHIFT) #define PIN_LOWEMI(x) 
(((x) & PIN_LOWEMI_MASK) >> PIN_LOWEMI_SHIFT) #define PIN_LOWEMI_DISABLED (0 << PIN_LOWEMI_SHIFT) #define PIN_LOWEMI_ENABLED (1 << PIN_LOWEMI_SHIFT) #define PIN_GPIOMODE_SHIFT 26 #define PIN_GPIOMODE_MASK (0x1 << PIN_GPIOMODE_SHIFT) #define PIN_GPIOMODE(x) (((x) & PIN_GPIOMODE_MASK) >> PIN_GPIOMODE_SHIFT) #define PIN_GPIOMODE_DISABLED (0 << PIN_GPIOMODE_SHIFT) #define PIN_GPIOMODE_ENABLED (1 << PIN_GPIOMODE_SHIFT) #define PIN_SLEEPMODE_SHIFT 27 #define PIN_SLEEPMODE_MASK (0x1 << PIN_SLEEPMODE_SHIFT) #define PIN_SLEEPMODE(x) (((x) & PIN_SLEEPMODE_MASK) >> PIN_SLEEPMODE_SHIFT) #define PIN_SLEEPMODE_DISABLED (0 << PIN_SLEEPMODE_SHIFT) #define PIN_SLEEPMODE_ENABLED (1 << PIN_SLEEPMODE_SHIFT) /* Shortcuts. Use these instead of separate DIR, PULL, and VAL. */ #define PIN_INPUT_PULLDOWN (PIN_DIR_INPUT | PIN_PULL_DOWN) #define PIN_INPUT_PULLUP (PIN_DIR_INPUT | PIN_PULL_UP) #define PIN_INPUT_NOPULL (PIN_DIR_INPUT | PIN_PULL_NONE) #define PIN_OUTPUT_LOW (PIN_DIR_OUTPUT | PIN_VAL_LOW) #define PIN_OUTPUT_HIGH (PIN_DIR_OUTPUT | PIN_VAL_HIGH) #define PIN_SLPM_INPUT_PULLDOWN (PIN_SLPM_DIR_INPUT | PIN_SLPM_PULL_DOWN) #define PIN_SLPM_INPUT_PULLUP (PIN_SLPM_DIR_INPUT | PIN_SLPM_PULL_UP) #define PIN_SLPM_INPUT_NOPULL (PIN_SLPM_DIR_INPUT | PIN_SLPM_PULL_NONE) #define PIN_SLPM_OUTPUT_LOW (PIN_SLPM_DIR_OUTPUT | PIN_SLPM_VAL_LOW) #define PIN_SLPM_OUTPUT_HIGH (PIN_SLPM_DIR_OUTPUT | PIN_SLPM_VAL_HIGH) #define PIN_CFG_DEFAULT (0) #define PIN_CFG(num, alt) \ (PIN_CFG_DEFAULT |\ (PIN_NUM(num) | PIN_##alt)) #define PIN_CFG_INPUT(num, alt, pull) \ (PIN_CFG_DEFAULT |\ (PIN_NUM(num) | PIN_##alt | PIN_INPUT_##pull)) #define PIN_CFG_OUTPUT(num, alt, val) \ (PIN_CFG_DEFAULT |\ (PIN_NUM(num) | PIN_##alt | PIN_OUTPUT_##val)) /* * "nmk_gpio" and "NMK_GPIO" stand for "Nomadik GPIO", leaving * the "gpio" namespace for generic and cross-machine functions */ #define GPIO_BLOCK_SHIFT 5 #define NMK_GPIO_PER_CHIP (1 << GPIO_BLOCK_SHIFT) #define NMK_MAX_BANKS DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP) /* 
Register in the logic block */ #define NMK_GPIO_DAT 0x00 #define NMK_GPIO_DATS 0x04 #define NMK_GPIO_DATC 0x08 #define NMK_GPIO_PDIS 0x0c #define NMK_GPIO_DIR 0x10 #define NMK_GPIO_DIRS 0x14 #define NMK_GPIO_DIRC 0x18 #define NMK_GPIO_SLPC 0x1c #define NMK_GPIO_AFSLA 0x20 #define NMK_GPIO_AFSLB 0x24 #define NMK_GPIO_LOWEMI 0x28 #define NMK_GPIO_RIMSC 0x40 #define NMK_GPIO_FIMSC 0x44 #define NMK_GPIO_IS 0x48 #define NMK_GPIO_IC 0x4c #define NMK_GPIO_RWIMSC 0x50 #define NMK_GPIO_FWIMSC 0x54 #define NMK_GPIO_WKS 0x58 /* These appear in DB8540 and later ASICs */ #define NMK_GPIO_EDGELEVEL 0x5C #define NMK_GPIO_LEVEL 0x60 /* Pull up/down values */ enum nmk_gpio_pull { NMK_GPIO_PULL_NONE, NMK_GPIO_PULL_UP, NMK_GPIO_PULL_DOWN, }; /* Sleep mode */ enum nmk_gpio_slpm { NMK_GPIO_SLPM_INPUT, NMK_GPIO_SLPM_WAKEUP_ENABLE = NMK_GPIO_SLPM_INPUT, NMK_GPIO_SLPM_NOCHANGE, NMK_GPIO_SLPM_WAKEUP_DISABLE = NMK_GPIO_SLPM_NOCHANGE, }; struct nmk_gpio_chip { struct gpio_chip chip; struct irq_chip irqchip; void __iomem *addr; struct clk *clk; unsigned int bank; unsigned int parent_irq; int latent_parent_irq; u32 (*get_latent_status)(unsigned int bank); void (*set_ioforce)(bool enable); spinlock_t lock; bool sleepmode; /* Keep track of configured edges */ u32 edge_rising; u32 edge_falling; u32 real_wake; u32 rwimsc; u32 fwimsc; u32 rimsc; u32 fimsc; u32 pull_up; u32 lowemi; }; /** * struct nmk_pinctrl - state container for the Nomadik pin controller * @dev: containing device pointer * @pctl: corresponding pin controller device * @soc: SoC data for this specific chip * @prcm_base: PRCM register range virtual base */ struct nmk_pinctrl { struct device *dev; struct pinctrl_dev *pctl; const struct nmk_pinctrl_soc_data *soc; void __iomem *prcm_base; }; static struct nmk_gpio_chip *nmk_gpio_chips[NMK_MAX_BANKS]; static DEFINE_SPINLOCK(nmk_gpio_slpm_lock); #define NUM_BANKS ARRAY_SIZE(nmk_gpio_chips) static void __nmk_gpio_set_mode(struct nmk_gpio_chip *nmk_chip, unsigned offset, int gpio_mode) { 
u32 bit = 1 << offset; u32 afunc, bfunc; afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & ~bit; bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & ~bit; if (gpio_mode & NMK_GPIO_ALT_A) afunc |= bit; if (gpio_mode & NMK_GPIO_ALT_B) bfunc |= bit; writel(afunc, nmk_chip->addr + NMK_GPIO_AFSLA); writel(bfunc, nmk_chip->addr + NMK_GPIO_AFSLB); } static void __nmk_gpio_set_slpm(struct nmk_gpio_chip *nmk_chip, unsigned offset, enum nmk_gpio_slpm mode) { u32 bit = 1 << offset; u32 slpm; slpm = readl(nmk_chip->addr + NMK_GPIO_SLPC); if (mode == NMK_GPIO_SLPM_NOCHANGE) slpm |= bit; else slpm &= ~bit; writel(slpm, nmk_chip->addr + NMK_GPIO_SLPC); } static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip, unsigned offset, enum nmk_gpio_pull pull) { u32 bit = 1 << offset; u32 pdis; pdis = readl(nmk_chip->addr + NMK_GPIO_PDIS); if (pull == NMK_GPIO_PULL_NONE) { pdis |= bit; nmk_chip->pull_up &= ~bit; } else { pdis &= ~bit; } writel(pdis, nmk_chip->addr + NMK_GPIO_PDIS); if (pull == NMK_GPIO_PULL_UP) { nmk_chip->pull_up |= bit; writel(bit, nmk_chip->addr + NMK_GPIO_DATS); } else if (pull == NMK_GPIO_PULL_DOWN) { nmk_chip->pull_up &= ~bit; writel(bit, nmk_chip->addr + NMK_GPIO_DATC); } } static void __nmk_gpio_set_lowemi(struct nmk_gpio_chip *nmk_chip, unsigned offset, bool lowemi) { u32 bit = BIT(offset); bool enabled = nmk_chip->lowemi & bit; if (lowemi == enabled) return; if (lowemi) nmk_chip->lowemi |= bit; else nmk_chip->lowemi &= ~bit; writel_relaxed(nmk_chip->lowemi, nmk_chip->addr + NMK_GPIO_LOWEMI); } static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip, unsigned offset) { writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC); } static void __nmk_gpio_set_output(struct nmk_gpio_chip *nmk_chip, unsigned offset, int val) { if (val) writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATS); else writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATC); } static void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip, unsigned offset, int val) { writel(1 << offset, 
nmk_chip->addr + NMK_GPIO_DIRS); __nmk_gpio_set_output(nmk_chip, offset, val); } static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip, unsigned offset, int gpio_mode, bool glitch) { u32 rwimsc = nmk_chip->rwimsc; u32 fwimsc = nmk_chip->fwimsc; if (glitch && nmk_chip->set_ioforce) { u32 bit = BIT(offset); /* Prevent spurious wakeups */ writel(rwimsc & ~bit, nmk_chip->addr + NMK_GPIO_RWIMSC); writel(fwimsc & ~bit, nmk_chip->addr + NMK_GPIO_FWIMSC); nmk_chip->set_ioforce(true); } __nmk_gpio_set_mode(nmk_chip, offset, gpio_mode); if (glitch && nmk_chip->set_ioforce) { nmk_chip->set_ioforce(false); writel(rwimsc, nmk_chip->addr + NMK_GPIO_RWIMSC); writel(fwimsc, nmk_chip->addr + NMK_GPIO_FWIMSC); } } static void nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset) { u32 falling = nmk_chip->fimsc & BIT(offset); u32 rising = nmk_chip->rimsc & BIT(offset); int gpio = nmk_chip->chip.base + offset; int irq = irq_find_mapping(nmk_chip->chip.irqdomain, offset); struct irq_data *d = irq_get_irq_data(irq); if (!rising && !falling) return; if (!d || !irqd_irq_disabled(d)) return; if (rising) { nmk_chip->rimsc &= ~BIT(offset); writel_relaxed(nmk_chip->rimsc, nmk_chip->addr + NMK_GPIO_RIMSC); } if (falling) { nmk_chip->fimsc &= ~BIT(offset); writel_relaxed(nmk_chip->fimsc, nmk_chip->addr + NMK_GPIO_FIMSC); } dev_dbg(nmk_chip->chip.dev, "%d: clearing interrupt mask\n", gpio); } static void nmk_write_masked(void __iomem *reg, u32 mask, u32 value) { u32 val; val = readl(reg); val = ((val & ~mask) | (value & mask)); writel(val, reg); } static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct, unsigned offset, unsigned alt_num) { int i; u16 reg; u8 bit; u8 alt_index; const struct prcm_gpiocr_altcx_pin_desc *pin_desc; const u16 *gpiocr_regs; if (!npct->prcm_base) return; if (alt_num > PRCM_IDX_GPIOCR_ALTC_MAX) { dev_err(npct->dev, "PRCM GPIOCR: alternate-C%i is invalid\n", alt_num); return; } for (i = 0 ; i < npct->soc->npins_altcx ; i++) { if 
(npct->soc->altcx_pins[i].pin == offset) break; } if (i == npct->soc->npins_altcx) { dev_dbg(npct->dev, "PRCM GPIOCR: pin %i is not found\n", offset); return; } pin_desc = npct->soc->altcx_pins + i; gpiocr_regs = npct->soc->prcm_gpiocr_registers; /* * If alt_num is NULL, just clear current ALTCx selection * to make sure we come back to a pure ALTC selection */ if (!alt_num) { for (i = 0 ; i < PRCM_IDX_GPIOCR_ALTC_MAX ; i++) { if (pin_desc->altcx[i].used == true) { reg = gpiocr_regs[pin_desc->altcx[i].reg_index]; bit = pin_desc->altcx[i].control_bit; if (readl(npct->prcm_base + reg) & BIT(bit)) { nmk_write_masked(npct->prcm_base + reg, BIT(bit), 0); dev_dbg(npct->dev, "PRCM GPIOCR: pin %i: alternate-C%i has been disabled\n", offset, i+1); } } } return; } alt_index = alt_num - 1; if (pin_desc->altcx[alt_index].used == false) { dev_warn(npct->dev, "PRCM GPIOCR: pin %i: alternate-C%i does not exist\n", offset, alt_num); return; } /* * Check if any other ALTCx functions are activated on this pin * and disable it first. 
*/ for (i = 0 ; i < PRCM_IDX_GPIOCR_ALTC_MAX ; i++) { if (i == alt_index) continue; if (pin_desc->altcx[i].used == true) { reg = gpiocr_regs[pin_desc->altcx[i].reg_index]; bit = pin_desc->altcx[i].control_bit; if (readl(npct->prcm_base + reg) & BIT(bit)) { nmk_write_masked(npct->prcm_base + reg, BIT(bit), 0); dev_dbg(npct->dev, "PRCM GPIOCR: pin %i: alternate-C%i has been disabled\n", offset, i+1); } } } reg = gpiocr_regs[pin_desc->altcx[alt_index].reg_index]; bit = pin_desc->altcx[alt_index].control_bit; dev_dbg(npct->dev, "PRCM GPIOCR: pin %i: alternate-C%i has been selected\n", offset, alt_index+1); nmk_write_masked(npct->prcm_base + reg, BIT(bit), BIT(bit)); } /* * Safe sequence used to switch IOs between GPIO and Alternate-C mode: * - Save SLPM registers * - Set SLPM=0 for the IOs you want to switch and others to 1 * - Configure the GPIO registers for the IOs that are being switched * - Set IOFORCE=1 * - Modify the AFLSA/B registers for the IOs that are being switched * - Set IOFORCE=0 * - Restore SLPM registers * - Any spurious wake up event during switch sequence to be ignored and * cleared */ static void nmk_gpio_glitch_slpm_init(unsigned int *slpm) { int i; for (i = 0; i < NUM_BANKS; i++) { struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; unsigned int temp = slpm[i]; if (!chip) break; clk_enable(chip->clk); slpm[i] = readl(chip->addr + NMK_GPIO_SLPC); writel(temp, chip->addr + NMK_GPIO_SLPC); } } static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm) { int i; for (i = 0; i < NUM_BANKS; i++) { struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; if (!chip) break; writel(slpm[i], chip->addr + NMK_GPIO_SLPC); clk_disable(chip->clk); } } static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio) { int i; u16 reg; u8 bit; struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); const struct prcm_gpiocr_altcx_pin_desc *pin_desc; const u16 *gpiocr_regs; if (!npct->prcm_base) return NMK_GPIO_ALT_C; for (i = 0; i < 
npct->soc->npins_altcx; i++) { if (npct->soc->altcx_pins[i].pin == gpio) break; } if (i == npct->soc->npins_altcx) return NMK_GPIO_ALT_C; pin_desc = npct->soc->altcx_pins + i; gpiocr_regs = npct->soc->prcm_gpiocr_registers; for (i = 0; i < PRCM_IDX_GPIOCR_ALTC_MAX; i++) { if (pin_desc->altcx[i].used == true) { reg = gpiocr_regs[pin_desc->altcx[i].reg_index]; bit = pin_desc->altcx[i].control_bit; if (readl(npct->prcm_base + reg) & BIT(bit)) return NMK_GPIO_ALT_C+i+1; } } return NMK_GPIO_ALT_C; } int nmk_gpio_get_mode(int gpio) { struct nmk_gpio_chip *nmk_chip; u32 afunc, bfunc, bit; nmk_chip = nmk_gpio_chips[gpio / NMK_GPIO_PER_CHIP]; if (!nmk_chip) return -EINVAL; bit = 1 << (gpio % NMK_GPIO_PER_CHIP); clk_enable(nmk_chip->clk); afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & bit; bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & bit; clk_disable(nmk_chip->clk); return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0); } EXPORT_SYMBOL(nmk_gpio_get_mode); /* IRQ functions */ static inline int nmk_gpio_get_bitmask(int gpio) { return 1 << (gpio % NMK_GPIO_PER_CHIP); } static void nmk_gpio_irq_ack(struct irq_data *d) { struct gpio_chip *chip = irq_data_get_irq_chip_data(d); struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); clk_enable(nmk_chip->clk); writel(nmk_gpio_get_bitmask(d->hwirq), nmk_chip->addr + NMK_GPIO_IC); clk_disable(nmk_chip->clk); } enum nmk_gpio_irq_type { NORMAL, WAKE, }; static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip, int gpio, enum nmk_gpio_irq_type which, bool enable) { u32 bitmask = nmk_gpio_get_bitmask(gpio); u32 *rimscval; u32 *fimscval; u32 rimscreg; u32 fimscreg; if (which == NORMAL) { rimscreg = NMK_GPIO_RIMSC; fimscreg = NMK_GPIO_FIMSC; rimscval = &nmk_chip->rimsc; fimscval = &nmk_chip->fimsc; } else { rimscreg = NMK_GPIO_RWIMSC; fimscreg = NMK_GPIO_FWIMSC; rimscval = &nmk_chip->rwimsc; fimscval = &nmk_chip->fwimsc; } /* we must individually set/clear the two edges */ if 
(nmk_chip->edge_rising & bitmask) { if (enable) *rimscval |= bitmask; else *rimscval &= ~bitmask; writel(*rimscval, nmk_chip->addr + rimscreg); } if (nmk_chip->edge_falling & bitmask) { if (enable) *fimscval |= bitmask; else *fimscval &= ~bitmask; writel(*fimscval, nmk_chip->addr + fimscreg); } } static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip, int gpio, bool on) { /* * Ensure WAKEUP_ENABLE is on. No need to disable it if wakeup is * disabled, since setting SLPM to 1 increases power consumption, and * wakeup is anyhow controlled by the RIMSC and FIMSC registers. */ if (nmk_chip->sleepmode && on) { __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP, NMK_GPIO_SLPM_WAKEUP_ENABLE); } __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on); } static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable) { struct nmk_gpio_chip *nmk_chip; unsigned long flags; u32 bitmask; nmk_chip = irq_data_get_irq_chip_data(d); bitmask = nmk_gpio_get_bitmask(d->hwirq); if (!nmk_chip) return -EINVAL; clk_enable(nmk_chip->clk); spin_lock_irqsave(&nmk_gpio_slpm_lock, flags); spin_lock(&nmk_chip->lock); __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, enable); if (!(nmk_chip->real_wake & bitmask)) __nmk_gpio_set_wake(nmk_chip, d->hwirq, enable); spin_unlock(&nmk_chip->lock); spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags); clk_disable(nmk_chip->clk); return 0; } static void nmk_gpio_irq_mask(struct irq_data *d) { nmk_gpio_irq_maskunmask(d, false); } static void nmk_gpio_irq_unmask(struct irq_data *d) { nmk_gpio_irq_maskunmask(d, true); } static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on) { struct nmk_gpio_chip *nmk_chip; unsigned long flags; u32 bitmask; nmk_chip = irq_data_get_irq_chip_data(d); if (!nmk_chip) return -EINVAL; bitmask = nmk_gpio_get_bitmask(d->hwirq); clk_enable(nmk_chip->clk); spin_lock_irqsave(&nmk_gpio_slpm_lock, flags); spin_lock(&nmk_chip->lock); if (irqd_irq_disabled(d)) __nmk_gpio_set_wake(nmk_chip, d->hwirq, on); if (on) 
nmk_chip->real_wake |= bitmask; else nmk_chip->real_wake &= ~bitmask; spin_unlock(&nmk_chip->lock); spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags); clk_disable(nmk_chip->clk); return 0; } static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type) { bool enabled = !irqd_irq_disabled(d); bool wake = irqd_is_wakeup_set(d); struct nmk_gpio_chip *nmk_chip; unsigned long flags; u32 bitmask; nmk_chip = irq_data_get_irq_chip_data(d); bitmask = nmk_gpio_get_bitmask(d->hwirq); if (!nmk_chip) return -EINVAL; if (type & IRQ_TYPE_LEVEL_HIGH) return -EINVAL; if (type & IRQ_TYPE_LEVEL_LOW) return -EINVAL; clk_enable(nmk_chip->clk); spin_lock_irqsave(&nmk_chip->lock, flags); if (enabled) __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, false); if (enabled || wake) __nmk_gpio_irq_modify(nmk_chip, d->hwirq, WAKE, false); nmk_chip->edge_rising &= ~bitmask; if (type & IRQ_TYPE_EDGE_RISING) nmk_chip->edge_rising |= bitmask; nmk_chip->edge_falling &= ~bitmask; if (type & IRQ_TYPE_EDGE_FALLING) nmk_chip->edge_falling |= bitmask; if (enabled) __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, true); if (enabled || wake) __nmk_gpio_irq_modify(nmk_chip, d->hwirq, WAKE, true); spin_unlock_irqrestore(&nmk_chip->lock, flags); clk_disable(nmk_chip->clk); return 0; } static unsigned int nmk_gpio_irq_startup(struct irq_data *d) { struct nmk_gpio_chip *nmk_chip = irq_data_get_irq_chip_data(d); clk_enable(nmk_chip->clk); nmk_gpio_irq_unmask(d); return 0; } static void nmk_gpio_irq_shutdown(struct irq_data *d) { struct nmk_gpio_chip *nmk_chip = irq_data_get_irq_chip_data(d); nmk_gpio_irq_mask(d); clk_disable(nmk_chip->clk); } static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status) { struct irq_chip *host_chip = irq_desc_get_chip(desc); struct gpio_chip *chip = irq_desc_get_handler_data(desc); chained_irq_enter(host_chip, desc); while (status) { int bit = __ffs(status); generic_handle_irq(irq_find_mapping(chip->irqdomain, bit)); status &= ~BIT(bit); } 
chained_irq_exit(host_chip, desc); } static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) { struct gpio_chip *chip = irq_desc_get_handler_data(desc); struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); u32 status; clk_enable(nmk_chip->clk); status = readl(nmk_chip->addr + NMK_GPIO_IS); clk_disable(nmk_chip->clk); __nmk_gpio_irq_handler(desc, status); } static void nmk_gpio_latent_irq_handler(unsigned int irq, struct irq_desc *desc) { struct gpio_chip *chip = irq_desc_get_handler_data(desc); struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); u32 status = nmk_chip->get_latent_status(nmk_chip->bank); __nmk_gpio_irq_handler(desc, status); } /* I/O Functions */ static int nmk_gpio_request(struct gpio_chip *chip, unsigned offset) { /* * Map back to global GPIO space and request muxing, the direction * parameter does not matter for this controller. */ int gpio = chip->base + offset; return pinctrl_request_gpio(gpio); } static void nmk_gpio_free(struct gpio_chip *chip, unsigned offset) { int gpio = chip->base + offset; pinctrl_free_gpio(gpio); } static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset) { struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); clk_enable(nmk_chip->clk); writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC); clk_disable(nmk_chip->clk); return 0; } static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned offset) { struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); u32 bit = 1 << offset; int value; clk_enable(nmk_chip->clk); value = (readl(nmk_chip->addr + NMK_GPIO_DAT) & bit) != 0; clk_disable(nmk_chip->clk); return value; } static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned offset, int val) { struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); clk_enable(nmk_chip->clk); __nmk_gpio_set_output(nmk_chip, offset, val); clk_disable(nmk_chip->clk); } 
static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset, int val) { struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); clk_enable(nmk_chip->clk); __nmk_gpio_make_output(nmk_chip, offset, val); clk_disable(nmk_chip->clk); return 0; } #ifdef CONFIG_DEBUG_FS #include <linux/seq_file.h> static void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev, struct gpio_chip *chip, unsigned offset, unsigned gpio) { const char *label = gpiochip_is_requested(chip, offset); struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); int mode; bool is_out; bool data_out; bool pull; u32 bit = 1 << offset; const char *modes[] = { [NMK_GPIO_ALT_GPIO] = "gpio", [NMK_GPIO_ALT_A] = "altA", [NMK_GPIO_ALT_B] = "altB", [NMK_GPIO_ALT_C] = "altC", [NMK_GPIO_ALT_C+1] = "altC1", [NMK_GPIO_ALT_C+2] = "altC2", [NMK_GPIO_ALT_C+3] = "altC3", [NMK_GPIO_ALT_C+4] = "altC4", }; const char *pulls[] = { "none ", "pull down", "pull up ", }; clk_enable(nmk_chip->clk); is_out = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & bit); pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & bit); data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & bit); mode = nmk_gpio_get_mode(gpio); if ((mode == NMK_GPIO_ALT_C) && pctldev) mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio); if (is_out) { seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s", gpio, label ?: "(none)", data_out ? "hi" : "lo", (mode < 0) ? "unknown" : modes[mode]); } else { int irq = gpio_to_irq(gpio); struct irq_desc *desc = irq_to_desc(irq); int pullidx = 0; int val; if (pull) pullidx = data_out ? 1 : 2; seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s", gpio, label ?: "(none)", pulls[pullidx], (mode < 0) ? "unknown" : modes[mode]); val = nmk_gpio_get_input(chip, offset); seq_printf(s, " VAL %d", val); /* * This races with request_irq(), set_irq_type(), * and set_irq_wake() ... but those are "rare". 
*/ if (irq > 0 && desc && desc->action) { char *trigger; u32 bitmask = nmk_gpio_get_bitmask(gpio); if (nmk_chip->edge_rising & bitmask) trigger = "edge-rising"; else if (nmk_chip->edge_falling & bitmask) trigger = "edge-falling"; else trigger = "edge-undefined"; seq_printf(s, " irq-%d %s%s", irq, trigger, irqd_is_wakeup_set(&desc->irq_data) ? " wakeup" : ""); } } clk_disable(nmk_chip->clk); } static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) { unsigned i; unsigned gpio = chip->base; for (i = 0; i < chip->ngpio; i++, gpio++) { nmk_gpio_dbg_show_one(s, NULL, chip, i, gpio); seq_printf(s, "\n"); } } #else static inline void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev, struct gpio_chip *chip, unsigned offset, unsigned gpio) { } #define nmk_gpio_dbg_show NULL #endif void nmk_gpio_clocks_enable(void) { int i; for (i = 0; i < NUM_BANKS; i++) { struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; if (!chip) continue; clk_enable(chip->clk); } } void nmk_gpio_clocks_disable(void) { int i; for (i = 0; i < NUM_BANKS; i++) { struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; if (!chip) continue; clk_disable(chip->clk); } } /* * Called from the suspend/resume path to only keep the real wakeup interrupts * (those that have had set_irq_wake() called on them) as wakeup interrupts, * and not the rest of the interrupts which we needed to have as wakeups for * cpuidle. * * PM ops are not used since this needs to be done at the end, after all the * other drivers are done with their suspend callbacks. 
*/ void nmk_gpio_wakeups_suspend(void) { int i; for (i = 0; i < NUM_BANKS; i++) { struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; if (!chip) break; clk_enable(chip->clk); writel(chip->rwimsc & chip->real_wake, chip->addr + NMK_GPIO_RWIMSC); writel(chip->fwimsc & chip->real_wake, chip->addr + NMK_GPIO_FWIMSC); clk_disable(chip->clk); } } void nmk_gpio_wakeups_resume(void) { int i; for (i = 0; i < NUM_BANKS; i++) { struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; if (!chip) break; clk_enable(chip->clk); writel(chip->rwimsc, chip->addr + NMK_GPIO_RWIMSC); writel(chip->fwimsc, chip->addr + NMK_GPIO_FWIMSC); clk_disable(chip->clk); } } /* * Read the pull up/pull down status. * A bit set in 'pull_up' means that pull up * is selected if pull is enabled in PDIS register. * Note: only pull up/down set via this driver can * be detected due to HW limitations. */ void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up) { if (gpio_bank < NUM_BANKS) { struct nmk_gpio_chip *chip = nmk_gpio_chips[gpio_bank]; if (!chip) return; *pull_up = chip->pull_up; } } /* * We will allocate memory for the state container using devm* allocators * binding to the first device reaching this point, it doesn't matter if * it is the pin controller or GPIO driver. However we need to use the right * platform device when looking up resources so pay attention to pdev. */ static struct nmk_gpio_chip *nmk_gpio_populate_chip(struct device_node *np, struct platform_device *pdev) { struct nmk_gpio_chip *nmk_chip; struct platform_device *gpio_pdev; struct gpio_chip *chip; struct resource *res; struct clk *clk; void __iomem *base; u32 id; gpio_pdev = of_find_device_by_node(np); if (!gpio_pdev) { pr_err("populate \"%s\": device not found\n", np->name); return ERR_PTR(-ENODEV); } if (of_property_read_u32(np, "gpio-bank", &id)) { dev_err(&pdev->dev, "populate: gpio-bank property not found\n"); return ERR_PTR(-EINVAL); } /* Already populated? 
*/ nmk_chip = nmk_gpio_chips[id]; if (nmk_chip) return nmk_chip; nmk_chip = devm_kzalloc(&pdev->dev, sizeof(*nmk_chip), GFP_KERNEL); if (!nmk_chip) return ERR_PTR(-ENOMEM); nmk_chip->bank = id; chip = &nmk_chip->chip; chip->base = id * NMK_GPIO_PER_CHIP; chip->ngpio = NMK_GPIO_PER_CHIP; chip->label = dev_name(&gpio_pdev->dev); chip->dev = &gpio_pdev->dev; res = platform_get_resource(gpio_pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return base; nmk_chip->addr = base; clk = clk_get(&gpio_pdev->dev, NULL); if (IS_ERR(clk)) return (void *) clk; clk_prepare(clk); nmk_chip->clk = clk; BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips)); nmk_gpio_chips[id] = nmk_chip; return nmk_chip; } static int nmk_gpio_probe(struct platform_device *dev) { struct device_node *np = dev->dev.of_node; struct nmk_gpio_chip *nmk_chip; struct gpio_chip *chip; struct irq_chip *irqchip; int latent_irq; bool supports_sleepmode; int irq; int ret; nmk_chip = nmk_gpio_populate_chip(np, dev); if (IS_ERR(nmk_chip)) { dev_err(&dev->dev, "could not populate nmk chip struct\n"); return PTR_ERR(nmk_chip); } if (of_get_property(np, "st,supports-sleepmode", NULL)) supports_sleepmode = true; else supports_sleepmode = false; /* Correct platform device ID */ dev->id = nmk_chip->bank; irq = platform_get_irq(dev, 0); if (irq < 0) return irq; /* It's OK for this IRQ not to be present */ latent_irq = platform_get_irq(dev, 1); /* * The virt address in nmk_chip->addr is in the nomadik register space, * so we can simply convert the resource address, without remapping */ nmk_chip->parent_irq = irq; nmk_chip->latent_parent_irq = latent_irq; nmk_chip->sleepmode = supports_sleepmode; spin_lock_init(&nmk_chip->lock); chip = &nmk_chip->chip; chip->request = nmk_gpio_request; chip->free = nmk_gpio_free; chip->direction_input = nmk_gpio_make_input; chip->get = nmk_gpio_get_input; chip->direction_output = nmk_gpio_make_output; chip->set = nmk_gpio_set_output; chip->dbg_show = 
nmk_gpio_dbg_show; chip->can_sleep = false; chip->owner = THIS_MODULE; irqchip = &nmk_chip->irqchip; irqchip->irq_ack = nmk_gpio_irq_ack; irqchip->irq_mask = nmk_gpio_irq_mask; irqchip->irq_unmask = nmk_gpio_irq_unmask; irqchip->irq_set_type = nmk_gpio_irq_set_type; irqchip->irq_set_wake = nmk_gpio_irq_set_wake; irqchip->irq_startup = nmk_gpio_irq_startup; irqchip->irq_shutdown = nmk_gpio_irq_shutdown; irqchip->flags = IRQCHIP_MASK_ON_SUSPEND; irqchip->name = kasprintf(GFP_KERNEL, "nmk%u-%u-%u", dev->id, chip->base, chip->base + chip->ngpio - 1); clk_enable(nmk_chip->clk); nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI); clk_disable(nmk_chip->clk); chip->of_node = np; ret = gpiochip_add(chip); if (ret) return ret; platform_set_drvdata(dev, nmk_chip); /* * Let the generic code handle this edge IRQ, the the chained * handler will perform the actual work of handling the parent * interrupt. */ ret = gpiochip_irqchip_add(chip, irqchip, 0, handle_edge_irq, IRQ_TYPE_EDGE_FALLING); if (ret) { dev_err(&dev->dev, "could not add irqchip\n"); gpiochip_remove(&nmk_chip->chip); return -ENODEV; } /* Then register the chain on the parent IRQ */ gpiochip_set_chained_irqchip(chip, irqchip, nmk_chip->parent_irq, nmk_gpio_irq_handler); if (nmk_chip->latent_parent_irq > 0) gpiochip_set_chained_irqchip(chip, irqchip, nmk_chip->latent_parent_irq, nmk_gpio_latent_irq_handler); dev_info(&dev->dev, "at address %p\n", nmk_chip->addr); return 0; } static int nmk_get_groups_cnt(struct pinctrl_dev *pctldev) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); return npct->soc->ngroups; } static const char *nmk_get_group_name(struct pinctrl_dev *pctldev, unsigned selector) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); return npct->soc->groups[selector].name; } static int nmk_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector, const unsigned **pins, unsigned *num_pins) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); *pins = 
npct->soc->groups[selector].pins; *num_pins = npct->soc->groups[selector].npins; return 0; } static struct nmk_gpio_chip *find_nmk_gpio_from_pin(unsigned pin) { int i; struct nmk_gpio_chip *nmk_gpio; for(i = 0; i < NMK_MAX_BANKS; i++) { nmk_gpio = nmk_gpio_chips[i]; if (!nmk_gpio) continue; if (pin >= nmk_gpio->chip.base && pin < nmk_gpio->chip.base + nmk_gpio->chip.ngpio) return nmk_gpio; } return NULL; } static struct gpio_chip *find_gc_from_pin(unsigned pin) { struct nmk_gpio_chip *nmk_gpio = find_nmk_gpio_from_pin(pin); if (nmk_gpio) return &nmk_gpio->chip; return NULL; } static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned offset) { struct gpio_chip *chip = find_gc_from_pin(offset); if (!chip) { seq_printf(s, "invalid pin offset"); return; } nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset); } static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps, unsigned *num_maps, const char *group, const char *function) { if (*num_maps == *reserved_maps) return -ENOSPC; (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP; (*map)[*num_maps].data.mux.group = group; (*map)[*num_maps].data.mux.function = function; (*num_maps)++; return 0; } static int nmk_dt_add_map_configs(struct pinctrl_map **map, unsigned *reserved_maps, unsigned *num_maps, const char *group, unsigned long *configs, unsigned num_configs) { unsigned long *dup_configs; if (*num_maps == *reserved_maps) return -ENOSPC; dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs), GFP_KERNEL); if (!dup_configs) return -ENOMEM; (*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_PIN; (*map)[*num_maps].data.configs.group_or_pin = group; (*map)[*num_maps].data.configs.configs = dup_configs; (*map)[*num_maps].data.configs.num_configs = num_configs; (*num_maps)++; return 0; } #define NMK_CONFIG_PIN(x, y) { .property = x, .config = y, } #define NMK_CONFIG_PIN_ARRAY(x, y) { .property = x, .choice = y, \ .size = ARRAY_SIZE(y), } static const 
unsigned long nmk_pin_input_modes[] = { PIN_INPUT_NOPULL, PIN_INPUT_PULLUP, PIN_INPUT_PULLDOWN, }; static const unsigned long nmk_pin_output_modes[] = { PIN_OUTPUT_LOW, PIN_OUTPUT_HIGH, PIN_DIR_OUTPUT, }; static const unsigned long nmk_pin_sleep_modes[] = { PIN_SLEEPMODE_DISABLED, PIN_SLEEPMODE_ENABLED, }; static const unsigned long nmk_pin_sleep_input_modes[] = { PIN_SLPM_INPUT_NOPULL, PIN_SLPM_INPUT_PULLUP, PIN_SLPM_INPUT_PULLDOWN, PIN_SLPM_DIR_INPUT, }; static const unsigned long nmk_pin_sleep_output_modes[] = { PIN_SLPM_OUTPUT_LOW, PIN_SLPM_OUTPUT_HIGH, PIN_SLPM_DIR_OUTPUT, }; static const unsigned long nmk_pin_sleep_wakeup_modes[] = { PIN_SLPM_WAKEUP_DISABLE, PIN_SLPM_WAKEUP_ENABLE, }; static const unsigned long nmk_pin_gpio_modes[] = { PIN_GPIOMODE_DISABLED, PIN_GPIOMODE_ENABLED, }; static const unsigned long nmk_pin_sleep_pdis_modes[] = { PIN_SLPM_PDIS_DISABLED, PIN_SLPM_PDIS_ENABLED, }; struct nmk_cfg_param { const char *property; unsigned long config; const unsigned long *choice; int size; }; static const struct nmk_cfg_param nmk_cfg_params[] = { NMK_CONFIG_PIN_ARRAY("ste,input", nmk_pin_input_modes), NMK_CONFIG_PIN_ARRAY("ste,output", nmk_pin_output_modes), NMK_CONFIG_PIN_ARRAY("ste,sleep", nmk_pin_sleep_modes), NMK_CONFIG_PIN_ARRAY("ste,sleep-input", nmk_pin_sleep_input_modes), NMK_CONFIG_PIN_ARRAY("ste,sleep-output", nmk_pin_sleep_output_modes), NMK_CONFIG_PIN_ARRAY("ste,sleep-wakeup", nmk_pin_sleep_wakeup_modes), NMK_CONFIG_PIN_ARRAY("ste,gpio", nmk_pin_gpio_modes), NMK_CONFIG_PIN_ARRAY("ste,sleep-pull-disable", nmk_pin_sleep_pdis_modes), }; static int nmk_dt_pin_config(int index, int val, unsigned long *config) { int ret = 0; if (nmk_cfg_params[index].choice == NULL) *config = nmk_cfg_params[index].config; else { /* test if out of range */ if (val < nmk_cfg_params[index].size) { *config = nmk_cfg_params[index].config | nmk_cfg_params[index].choice[val]; } } return ret; } static const char *nmk_find_pin_name(struct pinctrl_dev *pctldev, const char 
*pin_name) { int i, pin_number; struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); if (sscanf((char *)pin_name, "GPIO%d", &pin_number) == 1) for (i = 0; i < npct->soc->npins; i++) if (npct->soc->pins[i].number == pin_number) return npct->soc->pins[i].name; return NULL; } static bool nmk_pinctrl_dt_get_config(struct device_node *np, unsigned long *configs) { bool has_config = 0; unsigned long cfg = 0; int i, val, ret; for (i = 0; i < ARRAY_SIZE(nmk_cfg_params); i++) { ret = of_property_read_u32(np, nmk_cfg_params[i].property, &val); if (ret != -EINVAL) { if (nmk_dt_pin_config(i, val, &cfg) == 0) { *configs |= cfg; has_config = 1; } } } return has_config; } static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, struct device_node *np, struct pinctrl_map **map, unsigned *reserved_maps, unsigned *num_maps) { int ret; const char *function = NULL; unsigned long configs = 0; bool has_config = 0; struct property *prop; struct device_node *np_config; ret = of_property_read_string(np, "function", &function); if (ret >= 0) { const char *group; ret = of_property_count_strings(np, "groups"); if (ret < 0) goto exit; ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps, ret); if (ret < 0) goto exit; of_property_for_each_string(np, "groups", prop, group) { ret = nmk_dt_add_map_mux(map, reserved_maps, num_maps, group, function); if (ret < 0) goto exit; } } has_config = nmk_pinctrl_dt_get_config(np, &configs); np_config = of_parse_phandle(np, "ste,config", 0); if (np_config) has_config |= nmk_pinctrl_dt_get_config(np_config, &configs); if (has_config) { const char *gpio_name; const char *pin; ret = of_property_count_strings(np, "pins"); if (ret < 0) goto exit; ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps, ret); if (ret < 0) goto exit; of_property_for_each_string(np, "pins", prop, pin) { gpio_name = nmk_find_pin_name(pctldev, pin); ret = nmk_dt_add_map_configs(map, reserved_maps, num_maps, gpio_name, &configs, 1); if 
(ret < 0) goto exit; } } exit: return ret; } static int nmk_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, unsigned *num_maps) { unsigned reserved_maps; struct device_node *np; int ret; reserved_maps = 0; *map = NULL; *num_maps = 0; for_each_child_of_node(np_config, np) { ret = nmk_pinctrl_dt_subnode_to_map(pctldev, np, map, &reserved_maps, num_maps); if (ret < 0) { pinctrl_utils_dt_free_map(pctldev, *map, *num_maps); return ret; } } return 0; } static const struct pinctrl_ops nmk_pinctrl_ops = { .get_groups_count = nmk_get_groups_cnt, .get_group_name = nmk_get_group_name, .get_group_pins = nmk_get_group_pins, .pin_dbg_show = nmk_pin_dbg_show, .dt_node_to_map = nmk_pinctrl_dt_node_to_map, .dt_free_map = pinctrl_utils_dt_free_map, }; static int nmk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); return npct->soc->nfunctions; } static const char *nmk_pmx_get_func_name(struct pinctrl_dev *pctldev, unsigned function) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); return npct->soc->functions[function].name; } static int nmk_pmx_get_func_groups(struct pinctrl_dev *pctldev, unsigned function, const char * const **groups, unsigned * const num_groups) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); *groups = npct->soc->functions[function].groups; *num_groups = npct->soc->functions[function].ngroups; return 0; } static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function, unsigned group) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); const struct nmk_pingroup *g; static unsigned int slpm[NUM_BANKS]; unsigned long flags = 0; bool glitch; int ret = -EINVAL; int i; g = &npct->soc->groups[group]; if (g->altsetting < 0) return -EINVAL; dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins); /* * If we're setting altfunc C by setting both AFSLA and AFSLB to 1, * we may pass through an 
undesired state. In this case we take * some extra care. * * Safe sequence used to switch IOs between GPIO and Alternate-C mode: * - Save SLPM registers (since we have a shadow register in the * nmk_chip we're using that as backup) * - Set SLPM=0 for the IOs you want to switch and others to 1 * - Configure the GPIO registers for the IOs that are being switched * - Set IOFORCE=1 * - Modify the AFLSA/B registers for the IOs that are being switched * - Set IOFORCE=0 * - Restore SLPM registers * - Any spurious wake up event during switch sequence to be ignored * and cleared * * We REALLY need to save ALL slpm registers, because the external * IOFORCE will switch *all* ports to their sleepmode setting to as * to avoid glitches. (Not just one port!) */ glitch = ((g->altsetting & NMK_GPIO_ALT_C) == NMK_GPIO_ALT_C); if (glitch) { spin_lock_irqsave(&nmk_gpio_slpm_lock, flags); /* Initially don't put any pins to sleep when switching */ memset(slpm, 0xff, sizeof(slpm)); /* * Then mask the pins that need to be sleeping now when we're * switching to the ALT C function. */ for (i = 0; i < g->npins; i++) slpm[g->pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(g->pins[i]); nmk_gpio_glitch_slpm_init(slpm); } for (i = 0; i < g->npins; i++) { struct nmk_gpio_chip *nmk_chip; unsigned bit; nmk_chip = find_nmk_gpio_from_pin(g->pins[i]); if (!nmk_chip) { dev_err(npct->dev, "invalid pin offset %d in group %s at index %d\n", g->pins[i], g->name, i); goto out_glitch; } dev_dbg(npct->dev, "setting pin %d to altsetting %d\n", g->pins[i], g->altsetting); clk_enable(nmk_chip->clk); bit = g->pins[i] % NMK_GPIO_PER_CHIP; /* * If the pin is switching to altfunc, and there was an * interrupt installed on it which has been lazy disabled, * actually mask the interrupt to prevent spurious interrupts * that would occur while the pin is under control of the * peripheral. Only SKE does this. 
*/ nmk_gpio_disable_lazy_irq(nmk_chip, bit); __nmk_gpio_set_mode_safe(nmk_chip, bit, (g->altsetting & NMK_GPIO_ALT_C), glitch); clk_disable(nmk_chip->clk); /* * Call PRCM GPIOCR config function in case ALTC * has been selected: * - If selection is a ALTCx, some bits in PRCM GPIOCR registers * must be set. * - If selection is pure ALTC and previous selection was ALTCx, * then some bits in PRCM GPIOCR registers must be cleared. */ if ((g->altsetting & NMK_GPIO_ALT_C) == NMK_GPIO_ALT_C) nmk_prcm_altcx_set_mode(npct, g->pins[i], g->altsetting >> NMK_GPIO_ALT_CX_SHIFT); } /* When all pins are successfully reconfigured we get here */ ret = 0; out_glitch: if (glitch) { nmk_gpio_glitch_slpm_restore(slpm); spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags); } return ret; } static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned offset) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); struct nmk_gpio_chip *nmk_chip; struct gpio_chip *chip; unsigned bit; if (!range) { dev_err(npct->dev, "invalid range\n"); return -EINVAL; } if (!range->gc) { dev_err(npct->dev, "missing GPIO chip in range\n"); return -EINVAL; } chip = range->gc; nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); dev_dbg(npct->dev, "enable pin %u as GPIO\n", offset); clk_enable(nmk_chip->clk); bit = offset % NMK_GPIO_PER_CHIP; /* There is no glitch when converting any pin to GPIO */ __nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO); clk_disable(nmk_chip->clk); return 0; } static void nmk_gpio_disable_free(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned offset) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); dev_dbg(npct->dev, "disable pin %u as GPIO\n", offset); /* Set the pin to some default state, GPIO is usually default */ } static const struct pinmux_ops nmk_pinmux_ops = { .get_functions_count = nmk_pmx_get_funcs_cnt, .get_function_name = nmk_pmx_get_func_name, .get_function_groups = 
nmk_pmx_get_func_groups, .set_mux = nmk_pmx_set, .gpio_request_enable = nmk_gpio_request_enable, .gpio_disable_free = nmk_gpio_disable_free, .strict = true, }; static int nmk_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin, unsigned long *config) { /* Not implemented */ return -EINVAL; } static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin, unsigned long *configs, unsigned num_configs) { static const char *pullnames[] = { [NMK_GPIO_PULL_NONE] = "none", [NMK_GPIO_PULL_UP] = "up", [NMK_GPIO_PULL_DOWN] = "down", [3] /* illegal */ = "??" }; static const char *slpmnames[] = { [NMK_GPIO_SLPM_INPUT] = "input/wakeup", [NMK_GPIO_SLPM_NOCHANGE] = "no-change/no-wakeup", }; struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); struct nmk_gpio_chip *nmk_chip; unsigned bit; pin_cfg_t cfg; int pull, slpm, output, val, i; bool lowemi, gpiomode, sleep; nmk_chip = find_nmk_gpio_from_pin(pin); if (!nmk_chip) { dev_err(npct->dev, "invalid pin offset %d\n", pin); return -EINVAL; } for (i = 0; i < num_configs; i++) { /* * The pin config contains pin number and altfunction fields, * here we just ignore that part. It's being handled by the * framework and pinmux callback respectively. */ cfg = (pin_cfg_t) configs[i]; pull = PIN_PULL(cfg); slpm = PIN_SLPM(cfg); output = PIN_DIR(cfg); val = PIN_VAL(cfg); lowemi = PIN_LOWEMI(cfg); gpiomode = PIN_GPIOMODE(cfg); sleep = PIN_SLEEPMODE(cfg); if (sleep) { int slpm_pull = PIN_SLPM_PULL(cfg); int slpm_output = PIN_SLPM_DIR(cfg); int slpm_val = PIN_SLPM_VAL(cfg); /* All pins go into GPIO mode at sleep */ gpiomode = true; /* * The SLPM_* values are normal values + 1 to allow zero * to mean "same as normal". */ if (slpm_pull) pull = slpm_pull - 1; if (slpm_output) output = slpm_output - 1; if (slpm_val) val = slpm_val - 1; dev_dbg(nmk_chip->chip.dev, "pin %d: sleep pull %s, dir %s, val %s\n", pin, slpm_pull ? pullnames[pull] : "same", slpm_output ? (output ? "output" : "input") : "same", slpm_val ? (val ? 
"high" : "low") : "same"); } dev_dbg(nmk_chip->chip.dev, "pin %d [%#lx]: pull %s, slpm %s (%s%s), lowemi %s\n", pin, cfg, pullnames[pull], slpmnames[slpm], output ? "output " : "input", output ? (val ? "high" : "low") : "", lowemi ? "on" : "off"); clk_enable(nmk_chip->clk); bit = pin % NMK_GPIO_PER_CHIP; if (gpiomode) /* No glitch when going to GPIO mode */ __nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO); if (output) __nmk_gpio_make_output(nmk_chip, bit, val); else { __nmk_gpio_make_input(nmk_chip, bit); __nmk_gpio_set_pull(nmk_chip, bit, pull); } /* TODO: isn't this only applicable on output pins? */ __nmk_gpio_set_lowemi(nmk_chip, bit, lowemi); __nmk_gpio_set_slpm(nmk_chip, bit, slpm); clk_disable(nmk_chip->clk); } /* for each config */ return 0; } static const struct pinconf_ops nmk_pinconf_ops = { .pin_config_get = nmk_pin_config_get, .pin_config_set = nmk_pin_config_set, }; static struct pinctrl_desc nmk_pinctrl_desc = { .name = "pinctrl-nomadik", .pctlops = &nmk_pinctrl_ops, .pmxops = &nmk_pinmux_ops, .confops = &nmk_pinconf_ops, .owner = THIS_MODULE, }; static const struct of_device_id nmk_pinctrl_match[] = { { .compatible = "stericsson,stn8815-pinctrl", .data = (void *)PINCTRL_NMK_STN8815, }, { .compatible = "stericsson,db8500-pinctrl", .data = (void *)PINCTRL_NMK_DB8500, }, { .compatible = "stericsson,db8540-pinctrl", .data = (void *)PINCTRL_NMK_DB8540, }, {}, }; #ifdef CONFIG_PM_SLEEP static int nmk_pinctrl_suspend(struct device *dev) { struct nmk_pinctrl *npct; npct = dev_get_drvdata(dev); if (!npct) return -EINVAL; return pinctrl_force_sleep(npct->pctl); } static int nmk_pinctrl_resume(struct device *dev) { struct nmk_pinctrl *npct; npct = dev_get_drvdata(dev); if (!npct) return -EINVAL; return pinctrl_force_default(npct->pctl); } #endif static int nmk_pinctrl_probe(struct platform_device *pdev) { const struct of_device_id *match; struct device_node *np = pdev->dev.of_node; struct device_node *prcm_np; struct nmk_pinctrl *npct; unsigned int 
version = 0; int i; npct = devm_kzalloc(&pdev->dev, sizeof(*npct), GFP_KERNEL); if (!npct) return -ENOMEM; match = of_match_device(nmk_pinctrl_match, &pdev->dev); if (!match) return -ENODEV; version = (unsigned int) match->data; /* Poke in other ASIC variants here */ if (version == PINCTRL_NMK_STN8815) nmk_pinctrl_stn8815_init(&npct->soc); if (version == PINCTRL_NMK_DB8500) nmk_pinctrl_db8500_init(&npct->soc); if (version == PINCTRL_NMK_DB8540) nmk_pinctrl_db8540_init(&npct->soc); /* * Since we depend on the GPIO chips to provide clock and register base * for the pin control operations, make sure that we have these * populated before we continue. Follow the phandles to instantiate * them. The GPIO portion of the actual hardware may be probed before * or after this point: it shouldn't matter as the APIs are orthogonal. */ for (i = 0; i < NMK_MAX_BANKS; i++) { struct device_node *gpio_np; struct nmk_gpio_chip *nmk_chip; gpio_np = of_parse_phandle(np, "nomadik-gpio-chips", i); if (gpio_np) { dev_info(&pdev->dev, "populate NMK GPIO %d \"%s\"\n", i, gpio_np->name); nmk_chip = nmk_gpio_populate_chip(gpio_np, pdev); if (IS_ERR(nmk_chip)) dev_err(&pdev->dev, "could not populate nmk chip struct " "- continue anyway\n"); of_node_put(gpio_np); } } prcm_np = of_parse_phandle(np, "prcm", 0); if (prcm_np) npct->prcm_base = of_iomap(prcm_np, 0); if (!npct->prcm_base) { if (version == PINCTRL_NMK_STN8815) { dev_info(&pdev->dev, "No PRCM base, " "assuming no ALT-Cx control is available\n"); } else { dev_err(&pdev->dev, "missing PRCM base address\n"); return -EINVAL; } } nmk_pinctrl_desc.pins = npct->soc->pins; nmk_pinctrl_desc.npins = npct->soc->npins; npct->dev = &pdev->dev; npct->pctl = pinctrl_register(&nmk_pinctrl_desc, &pdev->dev, npct); if (IS_ERR(npct->pctl)) { dev_err(&pdev->dev, "could not register Nomadik pinctrl driver\n"); return PTR_ERR(npct->pctl); } platform_set_drvdata(pdev, npct); dev_info(&pdev->dev, "initialized Nomadik pin control driver\n"); return 0; } static 
const struct of_device_id nmk_gpio_match[] = { { .compatible = "st,nomadik-gpio", }, {} }; static struct platform_driver nmk_gpio_driver = { .driver = { .name = "gpio", .of_match_table = nmk_gpio_match, }, .probe = nmk_gpio_probe, }; static SIMPLE_DEV_PM_OPS(nmk_pinctrl_pm_ops, nmk_pinctrl_suspend, nmk_pinctrl_resume); static struct platform_driver nmk_pinctrl_driver = { .driver = { .name = "pinctrl-nomadik", .of_match_table = nmk_pinctrl_match, .pm = &nmk_pinctrl_pm_ops, }, .probe = nmk_pinctrl_probe, }; static int __init nmk_gpio_init(void) { return platform_driver_register(&nmk_gpio_driver); } subsys_initcall(nmk_gpio_init); static int __init nmk_pinctrl_init(void) { return platform_driver_register(&nmk_pinctrl_driver); } core_initcall(nmk_pinctrl_init); MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini"); MODULE_DESCRIPTION("Nomadik GPIO Driver"); MODULE_LICENSE("GPL");
gpl-2.0
LibiSC/GTabPro10
arch/arm/mach-exynos/board-smdk5410-clock.c
63
13242
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS5410 - PLL support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>

#include <plat/clock.h>
#include <plat/cpu.h>
#include <media/exynos_camera.h>

#include "board-smdk5410.h"

/*
 * Reparent clock @child_name under @parent_name and, when @parent_rate is
 * non-zero, set that rate (in Hz) on the parent afterwards.
 *
 * Both clocks are always released again before returning; this helper only
 * performs one-shot board-level setup.  Returns 0 on success or a negative
 * errno (never PTR_ERR() of a valid pointer, which the old open-coded
 * versions of this pattern used to return).
 */
static int exynos5_reparent_clock(const char *child_name,
				  const char *parent_name,
				  unsigned long parent_rate)
{
	struct clk *child;
	struct clk *parent;
	int ret = 0;

	child = clk_get(NULL, child_name);
	if (IS_ERR(child)) {
		pr_err("failed to get %s clock\n", child_name);
		return PTR_ERR(child);
	}

	parent = clk_get(NULL, parent_name);
	if (IS_ERR(parent)) {
		pr_err("failed to get %s clock\n", parent_name);
		ret = PTR_ERR(parent);
		goto put_child;
	}

	if (clk_set_parent(child, parent)) {
		pr_err("Unable to set parent %s of clock %s.\n",
		       parent_name, child_name);
		ret = -EINVAL;
		goto put_parent;
	}

	if (parent_rate)
		clk_set_rate(parent, parent_rate);

put_parent:
	clk_put(parent);
put_child:
	clk_put(child);
	return ret;
}

/* Route the GScaler buses and set the camera sensor source clocks. */
static int exynos5_gsc_clock_init(void)
{
	struct clk *clk_isp_sensor;
	char sensor_name[20];
	int i;
	int ret;

	ret = exynos5_reparent_clock("aclk_300_gscl",
				     "dout_aclk_300_gscl", 300000000);
	if (ret)
		return ret;

	/* Set MIPI-CSI source clock */
	ret = exynos5_reparent_clock("aclk_333_432_gscl",
				     "dout_aclk_333_432_gscl", 0);
	if (ret)
		return ret;

	/* Set camera sensor source clocks to 24 MHz */
	for (i = 0; i < MAX_CAM_NUM; i++) {
		snprintf(sensor_name, sizeof(sensor_name),
			 "sclk_isp_sensor%d", i);
		clk_isp_sensor = clk_get(NULL, sensor_name);
		if (IS_ERR(clk_isp_sensor)) {
			pr_err("failed to get sclk_isp_sensor%d clock\n", i);
			/* was PTR_ERR(clk_child): a stale, already-put clock */
			return PTR_ERR(clk_isp_sensor);
		}
		clk_set_rate(clk_isp_sensor, 24000000);
		clk_put(clk_isp_sensor);
	}

	return 0;
}

/* Chain aclk_300_disp1 <- dout_aclk_300_disp1 <- mout_dpll and set 300 MHz. */
static int exynos5_aclk_300_disp1_init(void)
{
	struct clk *aclk_300_disp1;
	struct clk *dout_disp1;
	struct clk *mout_dpll;
	int ret = -EINVAL;

	aclk_300_disp1 = clk_get(NULL, "aclk_300_disp1");
	if (IS_ERR(aclk_300_disp1)) {
		pr_err("failed to get aclk for disp1\n");
		return -EINVAL;
	}

	dout_disp1 = clk_get(NULL, "dout_aclk_300_disp1");
	if (IS_ERR(dout_disp1)) {
		pr_err("failed to get dout_disp1 for disp1\n");
		goto err_aclk;
	}

	if (clk_set_parent(aclk_300_disp1, dout_disp1) < 0) {
		pr_err("failed to clk_set_parent for disp1\n");
		goto err_dout;
	}

	mout_dpll = clk_get(NULL, "mout_dpll");
	if (IS_ERR(mout_dpll)) {
		pr_err("failed to get mout_dpll for disp1\n");
		goto err_dout;
	}

	if (clk_set_parent(dout_disp1, mout_dpll) < 0) {
		pr_err("failed to clk_set_parent for disp1\n");
		goto err_dpll;
	}

	if (clk_set_rate(dout_disp1, 300*1000*1000) < 0) {
		pr_err("failed to clk_set_rate of aclk_300_disp1 for disp1\n");
		goto err_dpll;
	}

	ret = 0;
	/*
	 * Success and failure share the put ladder: the old code leaked
	 * dout_disp1 on error and could clk_put() a never-obtained mout_dpll.
	 */
err_dpll:
	clk_put(mout_dpll);
err_dout:
	clk_put(dout_disp1);
err_aclk:
	clk_put(aclk_300_disp1);
	return ret;
}

/* MFC bus clock: aclk_333 <- aclk_333_pre <- mout_cpll at 83.25 MHz. */
static int exynos5_mfc_clock_init(void)
{
	struct clk *clk;
	int ret;

	ret = exynos5_reparent_clock("aclk_333_pre", "mout_cpll", 0);
	if (ret)
		return ret;

	ret = exynos5_reparent_clock("aclk_333", "aclk_333_pre", 83250000);
	if (ret)
		return ret;

	/* FIXME: W/A for MFC clock source setting */
	clk = clk_get(NULL, "aclk_333");
	if (IS_ERR(clk)) {
		pr_err("failed to get %s clock\n", "aclk_333");
		return PTR_ERR(clk);
	}
	clk_enable(clk);
	clk_disable(clk);
	clk_put(clk);

	return 0;
}

/* Legacy JPEG IP source clock: 166.5 MHz. */
static int exynos5_jpeg_clock_init(void)
{
	struct clk *sclk;
	int ret = 0;

	sclk = clk_get(NULL, "sclk_jpeg");
	if (IS_ERR(sclk))
		return PTR_ERR(sclk);

	if (clk_set_rate(sclk, 166500000UL)) {
		pr_err("%s rate change failed: %lu\n", sclk->name,
		       166500000UL);
		/* was PTR_ERR(sclk) on a valid pointer (garbage value) */
		ret = -EINVAL;
	}

	clk_put(sclk);
	return ret;
}

/* JPEG-HX: aclk_300_jpeg <- dout_aclk_300_jpeg <- mout_dpll at 300 MHz. */
static int exynos5_jpeg_hx_clock_init(void)
{
	struct clk *sclk;
	struct clk *dout_jpeg;
	struct clk *mout_dpll;
	int ret = -EINVAL;

	sclk = clk_get(NULL, "aclk_300_jpeg");
	if (IS_ERR(sclk)) {
		pr_err("failed to get aclk for jpeg\n");
		return -EINVAL;
	}

	dout_jpeg = clk_get(NULL, "dout_aclk_300_jpeg");
	if (IS_ERR(dout_jpeg)) {
		pr_err("failed to get dout_jpeg for jpeg\n");
		goto err_sclk;
	}

	if (clk_set_parent(sclk, dout_jpeg) < 0) {
		pr_err("failed to clk_set_parent for jpeg\n");
		goto err_dout;
	}

	mout_dpll = clk_get(NULL, "mout_dpll");
	if (IS_ERR(mout_dpll)) {
		pr_err("failed to get mout_dpll for jpeg\n");
		goto err_dout;
	}

	if (clk_set_parent(dout_jpeg, mout_dpll) < 0) {
		pr_err("failed to clk_set_parent for jpeg\n");
		goto err_dpll;
	}

	if (clk_set_rate(dout_jpeg, 300 * MHZ) < 0) {
		pr_err("failed to clk_set_rate of sclk for jpeg\n");
		goto err_dpll;
	}

	ret = 0;
err_dpll:
	clk_put(mout_dpll);
err_dout:
	clk_put(dout_jpeg);
err_sclk:
	clk_put(sclk);
	return ret;
}

/* disp1 200 MHz bus: reparent aclk_200_disp1 under aclk_200. */
static int exynos5_aclk_200_disp1_init(void)
{
	/*
	 * The old open-coded version also passed the pr_err() arguments in
	 * (child, parent) order to a "parent %s of clock %s" format string;
	 * the helper logs them correctly.
	 */
	return exynos5_reparent_clock("aclk_200_disp1", "aclk_200",
				      200*1000*1000);
}

/* PCM audio source clocks: 4 MHz each. */
static int exynos5_pcm_clock_init(void)
{
	char pcm_name[16];
	struct clk *sclk_pcm;
	int i;

	/* One get/set/put per clock so an error cannot leak earlier gets. */
	for (i = 0; i < 3; i++) {
		snprintf(pcm_name, sizeof(pcm_name), "sclk_pcm%d", i);
		sclk_pcm = clk_get(NULL, pcm_name);
		if (IS_ERR(sclk_pcm)) {
			pr_err("failed to get %s clock\n", pcm_name);
			return PTR_ERR(sclk_pcm);
		}
		clk_set_rate(sclk_pcm, 4*1000*1000);
		clk_put(sclk_pcm);
	}

	return 0;
}

/* SPI controllers 0-2: source from CPLL, 80 MHz divider output. */
static int exynos5_spi_clock_init(void)
{
	struct clk *child_clk;
	struct clk *parent_clk;
	char clk_name[16];
	int i;

	for (i = 0; i < 3; i++) {
		snprintf(clk_name, sizeof(clk_name), "dout_spi%d", i);

		child_clk = clk_get(NULL, clk_name);
		if (IS_ERR(child_clk)) {
			pr_err("Failed to get %s clk\n", clk_name);
			return PTR_ERR(child_clk);
		}

		parent_clk = clk_get(NULL, "mout_cpll");
		if (IS_ERR(parent_clk)) {
			clk_put(child_clk);
			pr_err("Failed to get mout_cpll clk\n");
			return PTR_ERR(parent_clk);
		}

		if (clk_set_parent(child_clk, parent_clk)) {
			pr_err("Unable to set parent %s of clock %s\n",
			       parent_clk->name, child_clk->name);
			clk_put(child_clk);
			clk_put(parent_clk);
			/* was PTR_ERR(child_clk) on a valid pointer */
			return -EINVAL;
		}

		clk_set_rate(child_clk, 80 * 1000 * 1000);

		clk_put(parent_clk);
		clk_put(child_clk);
	}

	return 0;
}

/* MMC: source the three hosts from CPLL and run controller clocks at 640 MHz. */
static int exynos5_mmc_clock_init(void)
{
	struct clk *sclk_mmc;
	struct clk *dw_mmc;
	struct clk *mout_cpll;
	char sclk_name[16];
	char dev_id[16];
	int i;

	mout_cpll = clk_get(NULL, "mout_cpll");
	if (IS_ERR(mout_cpll)) {
		pr_err("failed to get %s clock\n", "mout_cpll");
		return PTR_ERR(mout_cpll);
	}

	for (i = 0; i < 3; i++) {
		snprintf(sclk_name, sizeof(sclk_name), "sclk_mmc%d", i);
		snprintf(dev_id, sizeof(dev_id), "dw_mmc.%d", i);

		sclk_mmc = clk_get(NULL, sclk_name);
		if (IS_ERR(sclk_mmc)) {
			pr_err("failed to get %s clock\n", sclk_name);
			continue;
		}
		if (clk_set_parent(sclk_mmc, mout_cpll))
			pr_err("Unable to set parent %s of clock %s.\n",
			       mout_cpll->name, sclk_mmc->name);
		clk_set_rate(sclk_mmc, 640 * MHZ);
		clk_put(sclk_mmc);

		dw_mmc = clk_get_sys(dev_id, "sclk_dwmci");
		if (IS_ERR(dw_mmc)) {
			pr_err("failed to get %s sclk_dwmci clock\n", dev_id);
			continue;
		}
		clk_set_rate(dw_mmc, 640 * MHZ);
		clk_put(dw_mmc);
	}

	clk_put(mout_cpll);
	return 0;
}

/* Select mout_mpll_user as parent of mout_mpll_bpll. */
static int exynos5_mpll_bpll_clock_init(void)
{
	struct clk *mout_mpll_bpll;
	struct clk *mout_mpll_user;
	int ret = 0;

	mout_mpll_bpll = clk_get(NULL, "mout_mpll_bpll");
	if (IS_ERR(mout_mpll_bpll)) {
		pr_err("failed to get %s clock\n", "mout_mpll_bpll");
		return PTR_ERR(mout_mpll_bpll);
	}

	mout_mpll_user = clk_get(NULL, "mout_mpll_user");
	if (IS_ERR(mout_mpll_user)) {
		pr_err("failed to get %s clock\n", "mout_mpll_user");
		clk_put(mout_mpll_bpll);
		return PTR_ERR(mout_mpll_user);
	}

	if (clk_set_parent(mout_mpll_bpll, mout_mpll_user)) {
		pr_err("Unable to set parent %s of clock %s.\n",
		       mout_mpll_user->name, mout_mpll_bpll->name);
		ret = -EINVAL;
	}

	clk_put(mout_mpll_bpll);
	clk_put(mout_mpll_user);
	return ret;
}

/* MIPI-HSI TX base clock: 100 MHz. */
static int exynos5_mipi_clock_init(void)
{
	struct clk *mipihsi_txbase;

	mipihsi_txbase = clk_get(NULL, "exynos5_clk_mipihsi");
	if (IS_ERR(mipihsi_txbase)) {
		pr_err("failed to get %s clock\n", "exynos5_clk_mipihsi");
		return PTR_ERR(mipihsi_txbase);
	}

	clk_set_rate(mipihsi_txbase, 100 * MHZ);
	clk_put(mipihsi_txbase);
	return 0;
}

/* ACP bus/peripheral clocks: 267 / 134 MHz. */
static int exynos5_acp_clock_init(void)
{
	struct clk *aclk_acp;
	struct clk *pclk_acp;

	aclk_acp = clk_get(NULL, "aclk_acp");
	if (IS_ERR(aclk_acp)) {
		pr_err("failed to get %s clock\n", "aclk_acp");
		return PTR_ERR(aclk_acp);
	}

	pclk_acp = clk_get(NULL, "pclk_acp");
	if (IS_ERR(pclk_acp)) {
		pr_err("failed to get %s clock\n", "pclk_acp");
		clk_put(aclk_acp);
		return PTR_ERR(pclk_acp);
	}

	clk_set_rate(aclk_acp, 267000000);
	clk_set_rate(pclk_acp, 134000000);

	clk_put(aclk_acp);
	clk_put(pclk_acp);
	return 0;
}

/* UART0-3: source from CPLL and run uclk1 at 107 MHz. */
static int exynos5_uart_clock_init(void)
{
	struct clk *uart;
	struct clk *mout_cpll;
	char dev_id[16];
	int i;

	mout_cpll = clk_get(NULL, "mout_cpll");
	if (IS_ERR(mout_cpll)) {
		pr_err("failed to get %s clock\n", "mout_cpll");
		return PTR_ERR(mout_cpll);
	}

	for (i = 0; i < 4; i++) {
		snprintf(dev_id, sizeof(dev_id), "s5pv210-uart.%d", i);

		uart = clk_get_sys(dev_id, "uclk1");
		if (IS_ERR(uart)) {
			pr_err("failed to get %s uclk1 clock\n", dev_id);
			continue;
		}

		if (clk_set_parent(uart, mout_cpll))
			pr_err("Unable to set parent %s of clock %s.\n",
			       mout_cpll->name, uart->name);
		else
			clk_set_rate(uart, 107 * MHZ);

		clk_put(uart);
	}

	clk_put(mout_cpll);
	return 0;
}

/* Board-level clock setup entry point; each failure is logged but non-fatal. */
void __init exynos5_smdk5410_clock_init(void)
{
	if (exynos5_uart_clock_init())
		pr_err("failed to init uart clock init\n");

	if (exynos5_mipi_clock_init())
		pr_err("failed to init mipi clock init\n");

	if (exynos5_mpll_bpll_clock_init())
		pr_err("failed to init mpll_bpll clock init\n");

	if (exynos5_mmc_clock_init())
		pr_err("failed to init emmc clock init\n");

	if (exynos5_gsc_clock_init())
		pr_err("failed to gscaler clock init\n");

	if (exynos5_mfc_clock_init())
		pr_err("failed to MFC clock init\n");

	if (exynos5_jpeg_clock_init())
		pr_err("failed to jpeg clock init\n");

	if (exynos5_jpeg_hx_clock_init())
		pr_err("failed to jpeg-hx clock init\n");

	if (exynos5_aclk_300_disp1_init())
		pr_err("failed to init aclk_300_disp1\n");

	if (exynos5_aclk_200_disp1_init())
		pr_err("failed to init aclk_200_disp1\n");

	if (exynos5_pcm_clock_init())
		pr_err("failed to init pcm clock init\n");

	if (exynos5_spi_clock_init())
		pr_err("failed to init spi clock init\n");

	if (exynos5_acp_clock_init())
		pr_err("failed to init acp clock init\n");
}
gpl-2.0
qnhoang81/Intercept_Kernel
drivers/net/wireless/ath/ath5k/dma.c
1343
18721
/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/*************************************\
* DMA and interrupt masking functions *
\*************************************/

/*
 * dma.c - DMA and interrupt masking functions
 *
 * Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and
 * handle queue setup for 5210 chipset (rest are handled on qcu.c).
 * Also we setup interrupt mask register (IMR) and read the various iterrupt
 * status registers (ISR).
 *
 * TODO: Handle SISR on 5211+ and introduce a function to return the queue
 * number that resulted the interrupt.
 */

#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "base.h"

/*********\
* Receive *
\*********/

/**
 * ath5k_hw_start_rx_dma - Start DMA receive
 *
 * @ah: The &struct ath5k_hw
 */
void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
	ath5k_hw_reg_read(ah, AR5K_CR);	/* flush the write */
}

/**
 * ath5k_hw_stop_rx_dma - Stop DMA receive
 *
 * @ah: The &struct ath5k_hw
 *
 * Returns 0 once the receive unit reports idle, -EBUSY on timeout.
 */
int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
	unsigned int i;

	ATH5K_TRACE(ah->ah_sc);
	ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);

	/*
	 * It may take some time to disable the DMA receive unit
	 */
	for (i = 1000; i > 0 &&
			(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
			i--)
		udelay(10);

	return i ? 0 : -EBUSY;
}

/**
 * ath5k_hw_get_rxdp - Get RX Descriptor's address
 *
 * @ah: The &struct ath5k_hw
 */
u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
	return ath5k_hw_reg_read(ah, AR5K_RXDP);
}

/**
 * ath5k_hw_set_rxdp - Set RX Descriptor's address
 *
 * @ah: The &struct ath5k_hw
 * @phys_addr: RX descriptor address
 *
 * XXX: Should we check if rx is enabled before setting rxdp ?
 */
void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
	ATH5K_TRACE(ah->ah_sc);
	ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
}

/**********\
* Transmit *
\**********/

/**
 * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Start DMA transmit for a specific queue and since 5210 doesn't have
 * QCU/DCU, set up queue parameters for 5210 here based on queue type (one
 * queue for normal data and one queue for beacons). For queue setup
 * on newer chips check out qcu.c. Returns -EINVAL if queue number is out
 * of range or if queue is already disabled.
 *
 * NOTE: Must be called after setting up tx control descriptor for that
 * queue (see below).
 */
int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	u32 tx_queue;

	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EIO;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set the queue by type on 5210
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
			break;
		case AR5K_TX_QUEUE_BEACON:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
					AR5K_BSR);
			break;
		case AR5K_TX_QUEUE_CAB:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
					AR5K_BCR_BDMAE, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}

		/* Start queue */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {
		/* Return if queue is disabled */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
			return -EIO;

		/* Start queue */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
	}

	return 0;
}

/**
 * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Stop DMA transmit on a specific hw queue and drain queue so we don't
 * have any pending frames. Returns -EBUSY if we still have pending frames,
 * -EINVAL if queue number is out of range.
 */
int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	unsigned int i = 40;
	u32 tx_queue, pending;

	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EIO;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set by queue type
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			/*
			 * Was "AR5K_CR_TXD1 & ~AR5K_CR_TXD1", which is
			 * identically zero (the "XXX Fix me" no-op); mirror
			 * the DATA case / start_tx_dma and actually assert
			 * the disable bit for queue 1.
			 */
			tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXE1;
			ath5k_hw_reg_write(ah, 0, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}

		/* Stop queue */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {
		/*
		 * Schedule TX disable and wait until queue is empty
		 */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);

		/* Check for pending frames */
		do {
			pending = ath5k_hw_reg_read(ah,
				AR5K_QUEUE_STATUS(queue)) &
				AR5K_QCU_STS_FRMPENDCNT;
			udelay(100);
		} while (--i && pending);

		/* For 2413+ order PCU to drop packets using
		 * QUIET mechanism */
		if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
		    pending) {
			/* Set periodicity and duration */
			ath5k_hw_reg_write(ah,
				AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER) |
				AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
				AR5K_QUIET_CTL2);

			/* Enable quiet period for current TSF */
			ath5k_hw_reg_write(ah,
				AR5K_QUIET_CTL1_QT_EN |
				AR5K_REG_SM(ath5k_hw_reg_read(ah,
						AR5K_TSF_L32_5211) >> 10,
					AR5K_QUIET_CTL1_NEXT_QT_TSF),
				AR5K_QUIET_CTL1);

			/* Force channel idle high */
			AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
					AR5K_DIAG_SW_CHANEL_IDLE_HIGH);

			/* Wait a while and disable mechanism */
			udelay(200);
			AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
						AR5K_QUIET_CTL1_QT_EN);

			/* Re-check for pending frames */
			i = 40;
			do {
				pending = ath5k_hw_reg_read(ah,
					AR5K_QUEUE_STATUS(queue)) &
					AR5K_QCU_STS_FRMPENDCNT;
				udelay(100);
			} while (--i && pending);

			AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
					AR5K_DIAG_SW_CHANEL_IDLE_HIGH);
		}

		/* Clear register */
		ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
		if (pending)
			return -EBUSY;
	}

	/* TODO: Check for success on 5210 else return error */
	return 0;
}

/**
 * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Get TX descriptor's address for a specific queue. For 5210 we ignore
 * the queue number and use tx queue type since we only have 2 queues.
 * We use TXDP0 for normal data queue and TXDP1 for beacon queue.
 * For newer chips with QCU/DCU we just read the corresponding TXDP register.
 *
 * XXX: Is TXDP read and clear ?
 */
u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
	u16 tx_reg;

	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/*
	 * Get the transmit queue descriptor pointer from the selected queue
	 */
	/* 5210 doesn't have QCU */
	if (ah->ah_version == AR5K_AR5210) {
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_reg = AR5K_NOQCU_TXDP0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			tx_reg = AR5K_NOQCU_TXDP1;
			break;
		default:
			return 0xffffffff;
		}
	} else {
		tx_reg = AR5K_QUEUE_TXDP(queue);
	}

	return ath5k_hw_reg_read(ah, tx_reg);
}

/**
 * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 * @phys_addr: The physical address of the first TX descriptor
 *
 * Set TX descriptor's address for a specific queue. For 5210 we ignore
 * the queue number and we use tx queue type since we only have 2 queues
 * so as above we use TXDP0 for normal data queue and TXDP1 for beacon queue.
 * For newer chips with QCU/DCU we just set the corresponding TXDP register.
 * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
 * active.
 */
int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
	u16 tx_reg;

	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/*
	 * Set the transmit queue descriptor pointer register by type
	 * on 5210
	 */
	if (ah->ah_version == AR5K_AR5210) {
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_reg = AR5K_NOQCU_TXDP0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			tx_reg = AR5K_NOQCU_TXDP1;
			break;
		default:
			return -EINVAL;
		}
	} else {
		/*
		 * Set the transmit queue descriptor pointer for
		 * the selected queue on QCU for 5211+
		 * (this won't work if the queue is still active)
		 */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
			return -EIO;

		tx_reg = AR5K_QUEUE_TXDP(queue);
	}

	/* Set descriptor pointer */
	ath5k_hw_reg_write(ah, phys_addr, tx_reg);

	return 0;
}

/**
 * ath5k_hw_update_tx_triglevel - Update tx trigger level
 *
 * @ah: The &struct ath5k_hw
 * @increase: Flag to force increase of trigger level
 *
 * This function increases/decreases the tx trigger level for the tx fifo
 * buffer (aka FIFO threshold) that is used to indicate when PCU flushes
 * the buffer and transmits it's data. Lowering this results sending small
 * frames more quickly but can lead to tx underruns, raising it a lot can
 * result other problems (i think bmiss is related). Right now we start with
 * the lowest possible (64Bytes) and if we get tx underrun we increase it using
 * the increase flag. Returns -EIO if we have have reached maximum/minimum.
 *
 * XXX: Link this with tx DMA size ?
 * XXX: Use it to save interrupts ?
 * TODO: Needs testing, i think it's related to bmiss...
 */
int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
	u32 trigger_level, imr;
	int ret = -EIO;

	ATH5K_TRACE(ah->ah_sc);

	/*
	 * Disable interrupts by setting the mask
	 */
	imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

	trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
			AR5K_TXCFG_TXFULL);

	if (!increase) {
		if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
			goto done;
	} else
		trigger_level +=
			((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);

	/*
	 * Update trigger level on success
	 */
	if (ah->ah_version == AR5K_AR5210)
		ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
	else
		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
				AR5K_TXCFG_TXFULL, trigger_level);

	ret = 0;

done:
	/*
	 * Restore interrupt mask
	 */
	ath5k_hw_set_imr(ah, imr);

	return ret;
}

/*******************\
* Interrupt masking *
\*******************/

/**
 * ath5k_hw_is_intr_pending - Check if we have pending interrupts
 *
 * @ah: The &struct ath5k_hw
 *
 * Check if we have pending interrupts to process. Returns 1 if we
 * have pending interrupts and 0 if we haven't.
 */
bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}

/**
 * ath5k_hw_get_isr - Get interrupt status
 *
 * @ah: The @struct ath5k_hw
 * @interrupt_mask: Driver's interrupt mask used to filter out
 * interrupts in sw.
 *
 * This function is used inside our interrupt handler to determine the reason
 * for the interrupt by reading Primary Interrupt Status Register. Returns an
 * abstract interrupt status mask which is mostly ISR with some uncommon bits
 * being mapped on some standard non hw-specific positions
 * (check out &ath5k_int).
 *
 * NOTE: We use read-and-clear register, so after this function is called ISR
 * is zeroed.
 */
int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
	u32 data;

	ATH5K_TRACE(ah->ah_sc);

	/*
	 * Read interrupt status from the Interrupt Status register
	 * on 5210
	 */
	if (ah->ah_version == AR5K_AR5210) {
		data = ath5k_hw_reg_read(ah, AR5K_ISR);
		if (unlikely(data == AR5K_INT_NOCARD)) {
			*interrupt_mask = data;
			return -ENODEV;
		}
	} else {
		/*
		 * Read interrupt status from Interrupt
		 * Status Register shadow copy (Read And Clear)
		 *
		 * Note: PISR/SISR Not available on 5210
		 */
		data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
		if (unlikely(data == AR5K_INT_NOCARD)) {
			*interrupt_mask = data;
			return -ENODEV;
		}
	}

	/*
	 * Get abstract interrupt mask (driver-compatible)
	 */
	*interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;

	if (ah->ah_version != AR5K_AR5210) {
		u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);

		/* HIU = Host Interface Unit (PCI etc) */
		if (unlikely(data & (AR5K_ISR_HIUERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		/* Beacon Not Ready */
		if (unlikely(data & (AR5K_ISR_BNR)))
			*interrupt_mask |= AR5K_INT_BNR;

		if (unlikely(sisr2 & (AR5K_SISR2_SSERR |
					AR5K_SISR2_DPERR |
					AR5K_SISR2_MCABT)))
			*interrupt_mask |= AR5K_INT_FATAL;

		if (data & AR5K_ISR_TIM)
			*interrupt_mask |= AR5K_INT_TIM;

		if (data & AR5K_ISR_BCNMISC) {
			if (sisr2 & AR5K_SISR2_TIM)
				*interrupt_mask |= AR5K_INT_TIM;
			if (sisr2 & AR5K_SISR2_DTIM)
				*interrupt_mask |= AR5K_INT_DTIM;
			if (sisr2 & AR5K_SISR2_DTIM_SYNC)
				*interrupt_mask |= AR5K_INT_DTIM_SYNC;
			if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
				*interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
			if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
				*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
		}

		if (data & AR5K_ISR_RXDOPPLER)
			*interrupt_mask |= AR5K_INT_RX_DOPPLER;
		if (data & AR5K_ISR_QCBRORN) {
			*interrupt_mask |= AR5K_INT_QCBRORN;
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
					AR5K_SISR3_QCBRORN);
		}
		if (data & AR5K_ISR_QCBRURN) {
			*interrupt_mask |= AR5K_INT_QCBRURN;
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
					AR5K_SISR3_QCBRURN);
		}
		if (data & AR5K_ISR_QTRIG) {
			*interrupt_mask |= AR5K_INT_QTRIG;
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
					AR5K_SISR4_QTRIG);
		}

		if (data & AR5K_ISR_TXOK)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
					AR5K_SISR0_QCU_TXOK);

		if (data & AR5K_ISR_TXDESC)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
					AR5K_SISR0_QCU_TXDESC);

		if (data & AR5K_ISR_TXERR)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
					AR5K_SISR1_QCU_TXERR);

		if (data & AR5K_ISR_TXEOL)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
					AR5K_SISR1_QCU_TXEOL);

		if (data & AR5K_ISR_TXURN)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
					AR5K_SISR2_QCU_TXURN);
	} else {
		if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT |
				AR5K_ISR_HIUERR | AR5K_ISR_DPERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		/*
		 * XXX: BMISS interrupts may occur after association.
		 * I found this on 5210 code but it needs testing. If this is
		 * true we should disable them before assoc and re-enable them
		 * after a successful assoc + some jiffies.
			interrupt_mask &= ~AR5K_INT_BMISS;
		 */
	}

	/*
	 * In case we didn't handle anything,
	 * print the register value.
	 */
	if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
		ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);

	return 0;
}

/**
 * ath5k_hw_set_imr - Set interrupt mask
 *
 * @ah: The &struct ath5k_hw
 * @new_mask: The new interrupt mask to be set
 *
 * Set the interrupt mask in hw to save interrupts. We do that by mapping
 * ath5k_int bits to hw-specific bits to remove abstraction and writing
 * Interrupt Mask Register.
 */
enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
	enum ath5k_int old_mask, int_mask;

	old_mask = ah->ah_imr;

	/*
	 * Disable card interrupts to prevent any race conditions
	 * (they will be re-enabled afterwards if AR5K_INT GLOBAL
	 * is set again on the new mask).
	 */
	if (old_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);
	}

	/*
	 * Add additional, chipset-dependent interrupt mask flags
	 * and write them to the IMR (interrupt mask register).
	 */
	int_mask = new_mask & AR5K_INT_COMMON;

	if (ah->ah_version != AR5K_AR5210) {
		/* Preserve per queue TXURN interrupt mask */
		u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
				& AR5K_SIMR2_QCU_TXURN;

		if (new_mask & AR5K_INT_FATAL) {
			int_mask |= AR5K_IMR_HIUERR;
			simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
				| AR5K_SIMR2_DPERR);
		}

		/* Beacon Not Ready */
		if (new_mask & AR5K_INT_BNR)
			int_mask |= AR5K_INT_BNR;

		if (new_mask & AR5K_INT_TIM)
			int_mask |= AR5K_IMR_TIM;

		if (new_mask & AR5K_INT_TIM)
			simr2 |= AR5K_SISR2_TIM;
		if (new_mask & AR5K_INT_DTIM)
			simr2 |= AR5K_SISR2_DTIM;
		if (new_mask & AR5K_INT_DTIM_SYNC)
			simr2 |= AR5K_SISR2_DTIM_SYNC;
		if (new_mask & AR5K_INT_BCN_TIMEOUT)
			simr2 |= AR5K_SISR2_BCN_TIMEOUT;
		if (new_mask & AR5K_INT_CAB_TIMEOUT)
			simr2 |= AR5K_SISR2_CAB_TIMEOUT;

		if (new_mask & AR5K_INT_RX_DOPPLER)
			int_mask |= AR5K_IMR_RXDOPPLER;

		/* Note: Per queue interrupt masks
		 * are set via reset_tx_queue (qcu.c) */
		ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
		ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);

	} else {
		if (new_mask & AR5K_INT_FATAL)
			int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
				| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);

		ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
	}

	/* If RXNOFRM interrupt is masked disable it
	 * by setting AR5K_RXNOFRM to zero */
	if (!(new_mask & AR5K_INT_RXNOFRM))
		ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);

	/* Store new interrupt mask */
	ah->ah_imr = new_mask;

	/* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
	if (new_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);
	}

	return old_mask;
}
gpl-2.0
V-KING/g3_kernel
drivers/gpio/gpio-timberdale.c
1599
9032
/* * Timberdale FPGA GPIO driver * Copyright (c) 2009 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Supports: * Timberdale FPGA GPIO */ #include <linux/module.h> #include <linux/gpio.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/timb_gpio.h> #include <linux/interrupt.h> #include <linux/slab.h> #define DRIVER_NAME "timb-gpio" #define TGPIOVAL 0x00 #define TGPIODIR 0x04 #define TGPIO_IER 0x08 #define TGPIO_ISR 0x0c #define TGPIO_IPR 0x10 #define TGPIO_ICR 0x14 #define TGPIO_FLR 0x18 #define TGPIO_LVR 0x1c #define TGPIO_VER 0x20 #define TGPIO_BFLR 0x24 struct timbgpio { void __iomem *membase; spinlock_t lock; /* mutual exclusion */ struct gpio_chip gpio; int irq_base; unsigned long last_ier; }; static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index, unsigned offset, bool enabled) { struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio); u32 reg; spin_lock(&tgpio->lock); reg = ioread32(tgpio->membase + offset); if (enabled) reg |= (1 << index); else reg &= ~(1 << index); iowrite32(reg, tgpio->membase + offset); spin_unlock(&tgpio->lock); return 0; } static int timbgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr) { return timbgpio_update_bit(gpio, nr, TGPIODIR, true); } static int timbgpio_gpio_get(struct gpio_chip *gpio, unsigned nr) { struct timbgpio *tgpio = container_of(gpio, struct 
timbgpio, gpio); u32 value; value = ioread32(tgpio->membase + TGPIOVAL); return (value & (1 << nr)) ? 1 : 0; } static int timbgpio_gpio_direction_output(struct gpio_chip *gpio, unsigned nr, int val) { return timbgpio_update_bit(gpio, nr, TGPIODIR, false); } static void timbgpio_gpio_set(struct gpio_chip *gpio, unsigned nr, int val) { timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0); } static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset) { struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio); if (tgpio->irq_base <= 0) return -EINVAL; return tgpio->irq_base + offset; } /* * GPIO IRQ */ static void timbgpio_irq_disable(struct irq_data *d) { struct timbgpio *tgpio = irq_data_get_irq_chip_data(d); int offset = d->irq - tgpio->irq_base; unsigned long flags; spin_lock_irqsave(&tgpio->lock, flags); tgpio->last_ier &= ~(1UL << offset); iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER); spin_unlock_irqrestore(&tgpio->lock, flags); } static void timbgpio_irq_enable(struct irq_data *d) { struct timbgpio *tgpio = irq_data_get_irq_chip_data(d); int offset = d->irq - tgpio->irq_base; unsigned long flags; spin_lock_irqsave(&tgpio->lock, flags); tgpio->last_ier |= 1UL << offset; iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER); spin_unlock_irqrestore(&tgpio->lock, flags); } static int timbgpio_irq_type(struct irq_data *d, unsigned trigger) { struct timbgpio *tgpio = irq_data_get_irq_chip_data(d); int offset = d->irq - tgpio->irq_base; unsigned long flags; u32 lvr, flr, bflr = 0; u32 ver; int ret = 0; if (offset < 0 || offset > tgpio->gpio.ngpio) return -EINVAL; ver = ioread32(tgpio->membase + TGPIO_VER); spin_lock_irqsave(&tgpio->lock, flags); lvr = ioread32(tgpio->membase + TGPIO_LVR); flr = ioread32(tgpio->membase + TGPIO_FLR); if (ver > 2) bflr = ioread32(tgpio->membase + TGPIO_BFLR); if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { bflr &= ~(1 << offset); flr &= ~(1 << offset); if (trigger & IRQ_TYPE_LEVEL_HIGH) lvr |= 1 << 
offset; else lvr &= ~(1 << offset); } if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) { if (ver < 3) { ret = -EINVAL; goto out; } else { flr |= 1 << offset; bflr |= 1 << offset; } } else { bflr &= ~(1 << offset); flr |= 1 << offset; if (trigger & IRQ_TYPE_EDGE_FALLING) lvr &= ~(1 << offset); else lvr |= 1 << offset; } iowrite32(lvr, tgpio->membase + TGPIO_LVR); iowrite32(flr, tgpio->membase + TGPIO_FLR); if (ver > 2) iowrite32(bflr, tgpio->membase + TGPIO_BFLR); iowrite32(1 << offset, tgpio->membase + TGPIO_ICR); out: spin_unlock_irqrestore(&tgpio->lock, flags); return ret; } static void timbgpio_irq(unsigned int irq, struct irq_desc *desc) { struct timbgpio *tgpio = irq_get_handler_data(irq); unsigned long ipr; int offset; desc->irq_data.chip->irq_ack(irq_get_irq_data(irq)); ipr = ioread32(tgpio->membase + TGPIO_IPR); iowrite32(ipr, tgpio->membase + TGPIO_ICR); /* * Some versions of the hardware trash the IER register if more than * one interrupt is received simultaneously. */ iowrite32(0, tgpio->membase + TGPIO_IER); for_each_set_bit(offset, &ipr, tgpio->gpio.ngpio) generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset)); iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER); } static struct irq_chip timbgpio_irqchip = { .name = "GPIO", .irq_enable = timbgpio_irq_enable, .irq_disable = timbgpio_irq_disable, .irq_set_type = timbgpio_irq_type, }; static int __devinit timbgpio_probe(struct platform_device *pdev) { int err, i; struct gpio_chip *gc; struct timbgpio *tgpio; struct resource *iomem; struct timbgpio_platform_data *pdata = pdev->dev.platform_data; int irq = platform_get_irq(pdev, 0); if (!pdata || pdata->nr_pins > 32) { err = -EINVAL; goto err_mem; } iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iomem) { err = -EINVAL; goto err_mem; } tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL); if (!tgpio) { err = -EINVAL; goto err_mem; } tgpio->irq_base = pdata->irq_base; spin_lock_init(&tgpio->lock); if (!request_mem_region(iomem->start, 
resource_size(iomem), DRIVER_NAME)) { err = -EBUSY; goto err_request; } tgpio->membase = ioremap(iomem->start, resource_size(iomem)); if (!tgpio->membase) { err = -ENOMEM; goto err_ioremap; } gc = &tgpio->gpio; gc->label = dev_name(&pdev->dev); gc->owner = THIS_MODULE; gc->dev = &pdev->dev; gc->direction_input = timbgpio_gpio_direction_input; gc->get = timbgpio_gpio_get; gc->direction_output = timbgpio_gpio_direction_output; gc->set = timbgpio_gpio_set; gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL; gc->dbg_show = NULL; gc->base = pdata->gpio_base; gc->ngpio = pdata->nr_pins; gc->can_sleep = 0; err = gpiochip_add(gc); if (err) goto err_chipadd; platform_set_drvdata(pdev, tgpio); /* make sure to disable interrupts */ iowrite32(0x0, tgpio->membase + TGPIO_IER); if (irq < 0 || tgpio->irq_base <= 0) return 0; for (i = 0; i < pdata->nr_pins; i++) { irq_set_chip_and_handler_name(tgpio->irq_base + i, &timbgpio_irqchip, handle_simple_irq, "mux"); irq_set_chip_data(tgpio->irq_base + i, tgpio); #ifdef CONFIG_ARM set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE); #endif } irq_set_handler_data(irq, tgpio); irq_set_chained_handler(irq, timbgpio_irq); return 0; err_chipadd: iounmap(tgpio->membase); err_ioremap: release_mem_region(iomem->start, resource_size(iomem)); err_request: kfree(tgpio); err_mem: printk(KERN_ERR DRIVER_NAME": Failed to register GPIOs: %d\n", err); return err; } static int __devexit timbgpio_remove(struct platform_device *pdev) { int err; struct timbgpio_platform_data *pdata = pdev->dev.platform_data; struct timbgpio *tgpio = platform_get_drvdata(pdev); struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); int irq = platform_get_irq(pdev, 0); if (irq >= 0 && tgpio->irq_base > 0) { int i; for (i = 0; i < pdata->nr_pins; i++) { irq_set_chip(tgpio->irq_base + i, NULL); irq_set_chip_data(tgpio->irq_base + i, NULL); } irq_set_handler(irq, NULL); irq_set_handler_data(irq, NULL); } err = 
gpiochip_remove(&tgpio->gpio); if (err) printk(KERN_ERR DRIVER_NAME": failed to remove gpio_chip\n"); iounmap(tgpio->membase); release_mem_region(iomem->start, resource_size(iomem)); kfree(tgpio); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver timbgpio_platform_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, .probe = timbgpio_probe, .remove = timbgpio_remove, }; /*--------------------------------------------------------------------------*/ module_platform_driver(timbgpio_platform_driver); MODULE_DESCRIPTION("Timberdale GPIO driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Mocean Laboratories"); MODULE_ALIAS("platform:"DRIVER_NAME);
gpl-2.0
taozhijiang/linux
drivers/media/pci/cx88/cx88-input.c
1599
17330
/*
 *
 * Device driver for GPIO attached remote control interfaces
 * on Conexant 2388x based TV/DVB cards.
 *
 * Copyright (c) 2003 Pavel Machek
 * Copyright (c) 2004 Gerd Knorr
 * Copyright (c) 2004, 2005 Chris Pascoe
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "cx88.h"
#include <media/rc-core.h>

#define MODULE_NAME "cx88xx"

/* ---------------------------------------------------------------------- */

/*
 * Per-card IR state. A board uses exactly one of two input paths:
 * - "sampling" (raw IR): the chip samples GPIO16 and raises an IRQ, the
 *   32-bit sample words are decoded by rc-core's raw decoders; or
 * - "polling": a GPIO register is read periodically on an hrtimer and a
 *   scancode is extracted with the mask_* fields below.
 */
struct cx88_IR {
	struct cx88_core *core;
	struct rc_dev *dev;

	int users;		/* open count; hardware runs only while > 0 */

	char name[32];
	char phys[32];

	/* sample from gpio pin 16 */
	u32 sampling;

	/* poll external decoder */
	int polling;		/* poll interval in ms; 0 = not polled */
	struct hrtimer timer;
	u32 gpio_addr;		/* GPIO register holding the scancode bits */
	u32 last_gpio;		/* previous raw value, to suppress repeats */
	u32 mask_keycode;	/* bits of the GPIO word carrying the code */
	u32 mask_keydown;	/* bit set while a key is held (if any) */
	u32 mask_keyup;		/* bit cleared while a key is held (if any) */
};

static unsigned ir_samplerate = 4;
module_param(ir_samplerate, uint, 0444);
MODULE_PARM_DESC(ir_samplerate, "IR samplerate in kHz, 1 - 20, default 4");

static int ir_debug;
module_param(ir_debug, int, 0644);	/* debug level [IR] */
MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");

#define ir_dprintk(fmt, arg...)	if (ir_debug) \
	printk(KERN_DEBUG "%s IR: " fmt , ir->core->name , ##arg)

#define dprintk(fmt, arg...) if (ir_debug) \
	printk(KERN_DEBUG "cx88 IR: " fmt , ##arg)

/* ---------------------------------------------------------------------- */

/*
 * Read the board's IR GPIO register, extract a scancode and feed a
 * keydown/keyup event to rc-core. Called from the poll hrtimer.
 */
static void cx88_ir_handle_key(struct cx88_IR *ir)
{
	struct cx88_core *core = ir->core;
	u32 gpio, data, auxgpio;

	/* read gpio value */
	gpio = cx_read(ir->gpio_addr);
	switch (core->boardnr) {
	case CX88_BOARD_NPGTECH_REALTV_TOP10FM:
		/* This board apparently uses a combination of 2 GPIO
		   to represent the keys. Additionally, the second GPIO
		   can be used for parity.

		   Example:

		   for key "5"
			gpio = 0x758, auxgpio = 0xe5 or 0xf5
		   for key "Power"
			gpio = 0x758, auxgpio = 0xed or 0xfd
		 */
		auxgpio = cx_read(MO_GP1_IO);
		/* Take out the parity part */
		gpio = (gpio & 0x7fd) + (auxgpio & 0xef);
		break;
	case CX88_BOARD_WINFAST_DTV1000:
	case CX88_BOARD_WINFAST_DTV1800H:
	case CX88_BOARD_WINFAST_DTV1800H_XC4000:
	case CX88_BOARD_WINFAST_DTV2000H_PLUS:
	case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
	case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36:
	case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43:
		/* scancode spans both GPIO banks on these boards */
		gpio = (gpio & 0x6ff) | ((cx_read(MO_GP1_IO) << 8) & 0x900);
		auxgpio = gpio;
		break;
	default:
		auxgpio = gpio;
	}
	if (ir->polling) {
		/* drop the event if the raw value did not change */
		if (ir->last_gpio == auxgpio)
			return;
		ir->last_gpio = auxgpio;
	}

	/* extract data */
	data = ir_extract_bits(gpio, ir->mask_keycode);
	ir_dprintk("irq gpio=0x%x code=%d | %s%s%s\n",
		   gpio, data,
		   ir->polling ? "poll" : "irq",
		   (gpio & ir->mask_keydown) ? " down" : "",
		   (gpio & ir->mask_keyup) ? " up" : "");

	if (ir->core->boardnr == CX88_BOARD_NORWOOD_MICRO) {
		/* low nibble of the scancode lives in a second register */
		u32 gpio_key = cx_read(MO_GP0_IO);

		data = (data << 4) | ((gpio_key & 0xf0) >> 4);

		rc_keydown(ir->dev, RC_TYPE_UNKNOWN, data, 0);

	} else if (ir->core->boardnr == CX88_BOARD_PROLINK_PLAYTVPVR ||
		   ir->core->boardnr == CX88_BOARD_PIXELVIEW_PLAYTV_ULTRA_PRO) {
		/* bit cleared on keydown, NEC scancode, 0xAAAACC, A = 0x866b */
		u16 addr;
		u8 cmd;
		u32 scancode;

		addr = (data >> 8) & 0xffff;
		cmd = (data >> 0) & 0x00ff;
		scancode = RC_SCANCODE_NECX(addr, cmd);

		if (0 == (gpio & ir->mask_keyup))
			rc_keydown_notimeout(ir->dev, RC_TYPE_NEC, scancode, 0);
		else
			rc_keyup(ir->dev);

	} else if (ir->mask_keydown) {
		/* bit set on keydown */
		if (gpio & ir->mask_keydown)
			rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0);
		else
			rc_keyup(ir->dev);

	} else if (ir->mask_keyup) {
		/* bit cleared on keydown */
		if (0 == (gpio & ir->mask_keyup))
			rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0);
		else
			rc_keyup(ir->dev);

	} else {
		/* can't distinguish keydown/up :-/ */
		rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0);
		rc_keyup(ir->dev);
	}
}

/*
 * hrtimer callback for polled boards: sample the GPIO once and re-arm
 * the timer for the next poll interval (ir->polling ms).
 */
static enum hrtimer_restart cx88_ir_work(struct hrtimer *timer)
{
	unsigned long missed;
	struct cx88_IR *ir = container_of(timer, struct cx88_IR, timer);

	cx88_ir_handle_key(ir);
	missed = hrtimer_forward_now(&ir->timer,
				     ktime_set(0, ir->polling * 1000000));
	if (missed > 1)
		ir_dprintk("Missed ticks %ld\n", missed - 1);
	return HRTIMER_RESTART;
}

/*
 * Start IR reception: arm the poll timer and/or enable the hardware
 * sampler (DDS) depending on which mode the board uses.
 * Returns 0 or -EINVAL if no IR state is attached to the core.
 */
static int __cx88_ir_start(void *priv)
{
	struct cx88_core *core = priv;
	struct cx88_IR *ir;

	if (!core || !core->ir)
		return -EINVAL;
	ir = core->ir;
	if (ir->polling) {
		hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		ir->timer.function = cx88_ir_work;
		hrtimer_start(&ir->timer,
			      ktime_set(0, ir->polling * 1000000),
			      HRTIMER_MODE_REL);
	}
	if (ir->sampling) {
		core->pci_irqmask |= PCI_INT_IR_SMPINT;
		cx_write(MO_DDS_IO, 0x33F286 * ir_samplerate); /* samplerate */
		cx_write(MO_DDSCFG_IO, 0x5); /* enable */
	}
	return 0;
}

/* Stop IR reception: disable the sampler and/or cancel the poll timer. */
static void __cx88_ir_stop(void *priv)
{
	struct cx88_core *core = priv;
	struct cx88_IR *ir;

	if (!core || !core->ir)
		return;
	ir = core->ir;
	if (ir->sampling) {
		cx_write(MO_DDSCFG_IO, 0x0);
		core->pci_irqmask &= ~PCI_INT_IR_SMPINT;
	}

	if (ir->polling)
		hrtimer_cancel(&ir->timer);
}

/* (Re)start IR only if someone has the rc device open. */
int cx88_ir_start(struct cx88_core *core)
{
	if (core->ir->users)
		return __cx88_ir_start(core);

	return 0;
}

/* Stop IR only if it was running (i.e. the rc device is open). */
void cx88_ir_stop(struct cx88_core *core)
{
	if (core->ir->users)
		__cx88_ir_stop(core);
}

/* rc-core open hook: first open starts the hardware. */
static int cx88_ir_open(struct rc_dev *rc)
{
	struct cx88_core *core = rc->priv;

	core->ir->users++;
	return __cx88_ir_start(core);
}

/* rc-core close hook: last close stops the hardware. */
static void cx88_ir_close(struct rc_dev *rc)
{
	struct cx88_core *core = rc->priv;

	core->ir->users--;
	if (!core->ir->users)
		__cx88_ir_stop(core);
}

/* ---------------------------------------------------------------------- */

/*
 * Probe-time setup: select keymap, GPIO register, masks and poll/sample
 * mode for the given board, then allocate and register the rc device.
 * Returns 0 on success, -ENODEV for boards without IR, -ENOMEM/other
 * negative errno on allocation or registration failure.
 */
int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
{
	struct cx88_IR *ir;
	struct rc_dev *dev;
	char *ir_codes = NULL;
	u64 rc_type = RC_BIT_OTHER;
	int err = -ENOMEM;
	u32 hardware_mask = 0;	/* For devices with a hardware mask, when
				 * used with a full-code IR table */

	ir = kzalloc(sizeof(*ir), GFP_KERNEL);
	dev = rc_allocate_device();
	if (!ir || !dev)
		goto err_out_free;

	ir->dev = dev;

	/* detect & configure */
	switch (core->boardnr) {
	case CX88_BOARD_DNTV_LIVE_DVB_T:
	case CX88_BOARD_KWORLD_DVB_T:
	case CX88_BOARD_KWORLD_DVB_T_CX22702:
		ir_codes = RC_MAP_DNTV_LIVE_DVB_T;
		ir->gpio_addr = MO_GP1_IO;
		ir->mask_keycode = 0x1f;
		ir->mask_keyup = 0x60;
		ir->polling = 50; /* ms */
		break;
	case CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1:
		ir_codes = RC_MAP_CINERGY_1400;
		ir->sampling = 0xeb04; /* address */
		break;
	case CX88_BOARD_HAUPPAUGE:
	case CX88_BOARD_HAUPPAUGE_DVB_T1:
	case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
	case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
	case CX88_BOARD_HAUPPAUGE_HVR1100:
	case CX88_BOARD_HAUPPAUGE_HVR3000:
	case CX88_BOARD_HAUPPAUGE_HVR4000:
	case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
	case CX88_BOARD_PCHDTV_HD3000:
	case CX88_BOARD_PCHDTV_HD5500:
	case CX88_BOARD_HAUPPAUGE_IRONLY:
		ir_codes = RC_MAP_HAUPPAUGE;
		ir->sampling = 1;
		break;
	case CX88_BOARD_WINFAST_DTV2000H:
	case CX88_BOARD_WINFAST_DTV2000H_J:
	case CX88_BOARD_WINFAST_DTV1800H:
	case CX88_BOARD_WINFAST_DTV1800H_XC4000:
	case CX88_BOARD_WINFAST_DTV2000H_PLUS:
		ir_codes = RC_MAP_WINFAST;
		ir->gpio_addr = MO_GP0_IO;
		ir->mask_keycode = 0x8f8;
		ir->mask_keyup = 0x100;
		ir->polling = 50; /* ms */
		break;
	case CX88_BOARD_WINFAST2000XP_EXPERT:
	case CX88_BOARD_WINFAST_DTV1000:
	case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
	case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36:
	case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43:
		ir_codes = RC_MAP_WINFAST;
		ir->gpio_addr = MO_GP0_IO;
		ir->mask_keycode = 0x8f8;
		ir->mask_keyup = 0x100;
		ir->polling = 1; /* ms */
		break;
	case CX88_BOARD_IODATA_GVBCTV7E:
		ir_codes = RC_MAP_IODATA_BCTV7E;
		ir->gpio_addr = MO_GP0_IO;
		ir->mask_keycode = 0xfd;
		ir->mask_keydown = 0x02;
		ir->polling = 5; /* ms */
		break;
	case CX88_BOARD_PROLINK_PLAYTVPVR:
	case CX88_BOARD_PIXELVIEW_PLAYTV_ULTRA_PRO:
		/*
		 * It seems that this hardware is paired with NEC extended
		 * address 0x866b. So, unfortunately, its usage with other
		 * IR's with different address won't work. Still, there are
		 * other IR's from the same manufacturer that works, like the
		 * 002-T mini RC, provided with newer PV hardware
		 */
		ir_codes = RC_MAP_PIXELVIEW_MK12;
		rc_type = RC_BIT_NEC;
		ir->gpio_addr = MO_GP1_IO;
		ir->mask_keyup = 0x80;
		ir->polling = 10; /* ms */
		hardware_mask = 0x3f;	/* Hardware returns only 6 bits from command part */
		break;
	case CX88_BOARD_PROLINK_PV_8000GT:
	case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
		ir_codes = RC_MAP_PIXELVIEW_NEW;
		ir->gpio_addr = MO_GP1_IO;
		ir->mask_keycode = 0x3f;
		ir->mask_keyup = 0x80;
		ir->polling = 1; /* ms */
		break;
	case CX88_BOARD_KWORLD_LTV883:
		ir_codes = RC_MAP_PIXELVIEW;
		ir->gpio_addr = MO_GP1_IO;
		ir->mask_keycode = 0x1f;
		ir->mask_keyup = 0x60;
		ir->polling = 1; /* ms */
		break;
	case CX88_BOARD_ADSTECH_DVB_T_PCI:
		ir_codes = RC_MAP_ADSTECH_DVB_T_PCI;
		ir->gpio_addr = MO_GP1_IO;
		ir->mask_keycode = 0xbf;
		ir->mask_keyup = 0x40;
		ir->polling = 50; /* ms */
		break;
	case CX88_BOARD_MSI_TVANYWHERE_MASTER:
		ir_codes = RC_MAP_MSI_TVANYWHERE;
		ir->gpio_addr = MO_GP1_IO;
		ir->mask_keycode = 0x1f;
		ir->mask_keyup = 0x40;
		ir->polling = 1; /* ms */
		break;
	case CX88_BOARD_AVERTV_303:
	case CX88_BOARD_AVERTV_STUDIO_303:
		ir_codes = RC_MAP_AVERTV_303;
		ir->gpio_addr = MO_GP2_IO;
		ir->mask_keycode = 0xfb;
		ir->mask_keydown = 0x02;
		ir->polling = 50; /* ms */
		break;
	case CX88_BOARD_OMICOM_SS4_PCI:
	case CX88_BOARD_SATTRADE_ST4200:
	case CX88_BOARD_TBS_8920:
	case CX88_BOARD_TBS_8910:
	case CX88_BOARD_PROF_7300:
	case CX88_BOARD_PROF_7301:
	case CX88_BOARD_PROF_6200:
		ir_codes = RC_MAP_TBS_NEC;
		ir->sampling = 0xff00; /* address */
		break;
	case CX88_BOARD_TEVII_S464:
	case CX88_BOARD_TEVII_S460:
	case CX88_BOARD_TEVII_S420:
		ir_codes = RC_MAP_TEVII_NEC;
		ir->sampling = 0xff00; /* address */
		break;
	case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
		ir_codes = RC_MAP_DNTV_LIVE_DVBT_PRO;
		ir->sampling = 0xff00; /* address */
		break;
	case CX88_BOARD_NORWOOD_MICRO:
		ir_codes = RC_MAP_NORWOOD;
		ir->gpio_addr = MO_GP1_IO;
		ir->mask_keycode = 0x0e;
		ir->mask_keyup = 0x80;
		ir->polling = 50; /* ms */
		break;
	case CX88_BOARD_NPGTECH_REALTV_TOP10FM:
		ir_codes = RC_MAP_NPGTECH;
		ir->gpio_addr = MO_GP0_IO;
		ir->mask_keycode = 0xfa;
		ir->polling = 50; /* ms */
		break;
	case CX88_BOARD_PINNACLE_PCTV_HD_800i:
		ir_codes = RC_MAP_PINNACLE_PCTV_HD;
		ir->sampling = 1;
		break;
	case CX88_BOARD_POWERCOLOR_REAL_ANGEL:
		ir_codes = RC_MAP_POWERCOLOR_REAL_ANGEL;
		ir->gpio_addr = MO_GP2_IO;
		ir->mask_keycode = 0x7e;
		ir->polling = 100; /* ms */
		break;
	case CX88_BOARD_TWINHAN_VP1027_DVBS:
		ir_codes = RC_MAP_TWINHAN_VP1027_DVBS;
		ir->sampling = 0xff00; /* address */
		break;
	}

	if (!ir_codes) {
		err = -ENODEV;
		goto err_out_free;
	}

	/*
	 * The usage of mask_keycode were very convenient, due to several
	 * reasons. Among others, the scancode tables were using the scancode
	 * as the index elements. So, the less bits it was used, the smaller
	 * the table were stored. After the input changes, the better is to use
	 * the full scancodes, since it allows replacing the IR remote by
	 * another one. Unfortunately, there are still some hardware, like
	 * Pixelview Ultra Pro, where only part of the scancode is sent via
	 * GPIO. So, there's no way to get the full scancode. Due to that,
	 * hardware_mask were introduced here: it represents those hardware
	 * that has such limits.
	 */
	if (hardware_mask && !ir->mask_keycode)
		ir->mask_keycode = hardware_mask;

	/* init input device */
	snprintf(ir->name, sizeof(ir->name), "cx88 IR (%s)", core->board.name);
	snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(pci));

	dev->input_name = ir->name;
	dev->input_phys = ir->phys;
	dev->input_id.bustype = BUS_PCI;
	dev->input_id.version = 1;
	if (pci->subsystem_vendor) {
		dev->input_id.vendor = pci->subsystem_vendor;
		dev->input_id.product = pci->subsystem_device;
	} else {
		dev->input_id.vendor = pci->vendor;
		dev->input_id.product = pci->device;
	}
	dev->dev.parent = &pci->dev;
	dev->map_name = ir_codes;
	dev->driver_name = MODULE_NAME;
	dev->priv = core;
	dev->open = cx88_ir_open;
	dev->close = cx88_ir_close;
	dev->scancode_mask = hardware_mask;

	if (ir->sampling) {
		dev->driver_type = RC_DRIVER_IR_RAW;
		dev->timeout = 10 * 1000 * 1000; /* 10 ms */
	} else {
		dev->driver_type = RC_DRIVER_SCANCODE;
		dev->allowed_protocols = rc_type;
	}

	ir->core = core;
	core->ir = ir;

	/* all done */
	err = rc_register_device(dev);
	if (err)
		goto err_out_free;

	return 0;

err_out_free:
	rc_free_device(dev);
	core->ir = NULL;
	kfree(ir);
	return err;
}

/*
 * Teardown counterpart of cx88_ir_init(); safe to call on boards where
 * no IR was attached. Always returns 0.
 */
int cx88_ir_fini(struct cx88_core *core)
{
	struct cx88_IR *ir = core->ir;

	/* skip detach on non attached boards */
	if (NULL == ir)
		return 0;

	cx88_ir_stop(core);
	rc_unregister_device(ir->dev);
	kfree(ir);

	/* done */
	core->ir = NULL;
	return 0;
}

/* ---------------------------------------------------------------------- */

/*
 * IRQ path for sampling boards: read the 32-bit sample word and convert
 * runs of equal bits into pulse/space events for rc-core's raw decoders.
 */
void cx88_ir_irq(struct cx88_core *core)
{
	struct cx88_IR *ir = core->ir;
	u32 samples;
	unsigned todo, bits;
	struct ir_raw_event ev;

	if (!ir || !ir->sampling)
		return;

	/*
	 * Samples are stored in a 32 bit register, oldest sample in
	 * the msb. A set bit represents space and an unset bit
	 * represents a pulse.
	 */
	samples = cx_read(MO_SAMPLE_IO);

	/* all-space word while the decoder is idle: nothing to report */
	if (samples == 0xff && ir->dev->idle)
		return;

	init_ir_raw_event(&ev);
	for (todo = 32; todo > 0; todo -= bits) {
		ev.pulse = samples & 0x80000000 ? false : true;
		/* length of the current run of identical bits */
		bits = min(todo, 32U - fls(ev.pulse ? samples : ~samples));
		/* duration in ns: bits sampled at ir_samplerate kHz */
		ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate;
		ir_raw_event_store_with_filter(ir->dev, &ev);
		samples <<= bits;
	}
	ir_raw_event_handle(ir->dev);
}

/*
 * i2c get_key callback for the Leadtek PVR 2000 remote: poll the IR chip
 * over SMBus and report the scancode. Returns 1 when a key was read,
 * 0 when no key is pressed or on read error.
 */
static int get_key_pvr2000(struct IR_i2c *ir, enum rc_type *protocol,
			   u32 *scancode, u8 *toggle)
{
	int flags, code;

	/* poll IR chip */
	flags = i2c_smbus_read_byte_data(ir->c, 0x10);
	if (flags < 0) {
		dprintk("read error\n");
		return 0;
	}
	/* key pressed ? */
	if (0 == (flags & 0x80))
		return 0;

	/* read actual key code */
	code = i2c_smbus_read_byte_data(ir->c, 0x00);
	if (code < 0) {
		dprintk("read error\n");
		return 0;
	}

	dprintk("IR Key/Flags: (0x%02x/0x%02x)\n",
		code & 0xff, flags & 0xff);

	*protocol = RC_TYPE_UNKNOWN;
	*scancode = code & 0xff;
	*toggle = 0;
	return 1;
}

/*
 * Probe the i2c bus for an attached IR receiver and instantiate an
 * "ir_video" device at the first responding address.
 *
 * NOTE(review): the CX88_BOARD_LEADTEK_PVR2000 case fills core->init_data
 * before the probe loop, but the loop memsets core->init_data on every
 * iteration and only repopulates it for address 0x71 — the PVR2000 setup
 * appears to be wiped before it can be used. TODO: confirm against the
 * i2c IR attach path whether this is intentional.
 */
void cx88_i2c_init_ir(struct cx88_core *core)
{
	struct i2c_board_info info;
	const unsigned short default_addr_list[] = {
		0x18, 0x6b, 0x71,
		I2C_CLIENT_END
	};
	const unsigned short pvr2000_addr_list[] = {
		0x18, 0x1a,
		I2C_CLIENT_END
	};
	const unsigned short *addr_list = default_addr_list;
	const unsigned short *addrp;
	/* Instantiate the IR receiver device, if present */
	if (0 != core->i2c_rc)
		return;

	memset(&info, 0, sizeof(struct i2c_board_info));
	strlcpy(info.type, "ir_video", I2C_NAME_SIZE);

	switch (core->boardnr) {
	case CX88_BOARD_LEADTEK_PVR2000:
		addr_list = pvr2000_addr_list;
		core->init_data.name = "cx88 Leadtek PVR 2000 remote";
		core->init_data.type = RC_BIT_UNKNOWN;
		core->init_data.get_key = get_key_pvr2000;
		core->init_data.ir_codes = RC_MAP_EMPTY;
		break;
	}

	/*
	 * We can't call i2c_new_probed_device() because it uses
	 * quick writes for probing and at least some RC receiver
	 * devices only reply to reads.
	 * Also, Hauppauge XVR needs to be specified, as address 0x71
	 * conflicts with another remote type used with saa7134
	 */
	for (addrp = addr_list; *addrp != I2C_CLIENT_END; addrp++) {
		info.platform_data = NULL;
		memset(&core->init_data, 0, sizeof(core->init_data));

		if (*addrp == 0x71) {
			/* Hauppauge XVR */
			core->init_data.name = "cx88 Hauppauge XVR remote";
			core->init_data.ir_codes = RC_MAP_HAUPPAUGE;
			core->init_data.type = RC_BIT_RC5;
			core->init_data.internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;

			info.platform_data = &core->init_data;
		}
		if (i2c_smbus_xfer(&core->i2c_adap, *addrp, 0,
				   I2C_SMBUS_READ, 0,
				   I2C_SMBUS_QUICK, NULL) >= 0) {
			info.addr = *addrp;
			i2c_new_device(&core->i2c_adap, &info);
			break;
		}
	}
}

/* ---------------------------------------------------------------------- */

MODULE_AUTHOR("Gerd Knorr, Pavel Machek, Chris Pascoe");
MODULE_DESCRIPTION("input driver for cx88 GPIO-based IR remote controls");
MODULE_LICENSE("GPL");
gpl-2.0
dirkbehme/linux-renesas-rcar-gen3
net/netfilter/nf_conntrack_timestamp.c
2111
2692
/*
 * (C) 2010 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation (or any later at your option).
 */

#include <linux/netfilter.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_timestamp.h>

/* module-wide default; copied into each netns at pernet init */
static bool nf_ct_tstamp __read_mostly;
module_param_named(tstamp, nf_ct_tstamp, bool, 0644);
MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping.");

#ifdef CONFIG_SYSCTL
/* template table, duplicated per netns so .data can point at that netns */
static struct ctl_table tstamp_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_timestamp",
		.data		= &init_net.ct.sysctl_tstamp,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{}
};
#endif /* CONFIG_SYSCTL */

/* conntrack extension descriptor for the per-flow timestamp area */
static struct nf_ct_ext_type tstamp_extend __read_mostly = {
	.len	= sizeof(struct nf_conn_tstamp),
	.align	= __alignof__(struct nf_conn_tstamp),
	.id	= NF_CT_EXT_TSTAMP,
};

#ifdef CONFIG_SYSCTL
/*
 * Register the per-netns net.netfilter.nf_conntrack_timestamp sysctl.
 * Returns 0 on success, -ENOMEM on allocation or registration failure.
 */
static int nf_conntrack_tstamp_init_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
			GFP_KERNEL);
	if (!table)
		goto out;

	/* point the copied entry at this netns's flag */
	table[0].data = &net->ct.sysctl_tstamp;

	/* Don't export sysctls to unprivileged users */
	if (net->user_ns != &init_user_ns)
		table[0].procname = NULL;

	net->ct.tstamp_sysctl_header = register_net_sysctl(net,	"net/netfilter",
							   table);
	if (!net->ct.tstamp_sysctl_header) {
		printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n");
		goto out_register;
	}
	return 0;

out_register:
	kfree(table);
out:
	return -ENOMEM;
}

/* Unregister the per-netns sysctl and free the duplicated table. */
static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = net->ct.tstamp_sysctl_header->ctl_table_arg;
	unregister_net_sysctl_table(net->ct.tstamp_sysctl_header);
	kfree(table);
}
#else
/* sysctl-less builds: no-op stubs with the same signatures */
static int nf_conntrack_tstamp_init_sysctl(struct net *net)
{
	return 0;
}

static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
{
}
#endif

/* Per-netns init: seed the netns flag from the module param, add sysctl. */
int nf_conntrack_tstamp_pernet_init(struct net *net)
{
	net->ct.sysctl_tstamp = nf_ct_tstamp;
	return nf_conntrack_tstamp_init_sysctl(net);
}

/* Per-netns teardown counterpart of the pernet init. */
void nf_conntrack_tstamp_pernet_fini(struct net *net)
{
	nf_conntrack_tstamp_fini_sysctl(net);
}

/* Global init: register the timestamp conntrack extension. */
int nf_conntrack_tstamp_init(void)
{
	int ret;
	ret = nf_ct_extend_register(&tstamp_extend);
	if (ret < 0)
		pr_err("nf_ct_tstamp: Unable to register extension\n");
	return ret;
}

/* Global teardown: unregister the timestamp conntrack extension. */
void nf_conntrack_tstamp_fini(void)
{
	nf_ct_extend_unregister(&tstamp_extend);
}
gpl-2.0
mythos234/SimplKernel-LL-N910G
arch/arm/mach-omap2/board-omap3stalker.c
2111
10792
/* * linux/arch/arm/mach-omap2/board-omap3evm.c * * Copyright (C) 2008 Guangzhou EMA-Tech * * Modified from mach-omap2/board-omap3evm.c * * Initial code: Syed Mohammed Khasim * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/leds.h> #include <linux/gpio.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/i2c/twl.h> #include <linux/mmc/host.h> #include <linux/input/matrix_keypad.h> #include <linux/spi/spi.h> #include <linux/interrupt.h> #include <linux/smsc911x.h> #include <linux/i2c/at24.h> #include <linux/usb/phy.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/flash.h> #include "common.h" #include "gpmc.h" #include <linux/platform_data/mtd-nand-omap2.h> #include <video/omapdss.h> #include <video/omap-panel-data.h> #include <linux/platform_data/spi-omap2-mcspi.h> #include "sdram-micron-mt46h32m32lf-6.h" #include "mux.h" #include "hsmmc.h" #include "common-board-devices.h" #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) #include "gpmc-smsc911x.h" #define OMAP3STALKER_ETHR_START 0x2c000000 #define OMAP3STALKER_ETHR_SIZE 1024 #define OMAP3STALKER_ETHR_GPIO_IRQ 19 #define OMAP3STALKER_SMC911X_CS 5 static struct omap_smsc911x_platform_data smsc911x_cfg = { .cs = OMAP3STALKER_SMC911X_CS, .gpio_irq = OMAP3STALKER_ETHR_GPIO_IRQ, .gpio_reset = -EINVAL, .flags = (SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS), }; static inline void __init omap3stalker_init_eth(void) { omap_mux_init_gpio(19, OMAP_PIN_INPUT_PULLUP); gpmc_smsc911x_init(&smsc911x_cfg); } #else static inline void __init 
omap3stalker_init_eth(void) { return; } #endif /* * OMAP3 DSS control signals */ #define DSS_ENABLE_GPIO 199 #define LCD_PANEL_BKLIGHT_GPIO 210 #define ENABLE_VPLL2_DEV_GRP 0xE0 static void __init omap3_stalker_display_init(void) { return; } static struct omap_dss_device omap3_stalker_tv_device = { .name = "tv", .driver_name = "venc", .type = OMAP_DISPLAY_TYPE_VENC, #if defined(CONFIG_OMAP2_VENC_OUT_TYPE_SVIDEO) .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO, #elif defined(CONFIG_OMAP2_VENC_OUT_TYPE_COMPOSITE) .u.venc.type = OMAP_DSS_VENC_TYPE_COMPOSITE, #endif }; static struct tfp410_platform_data dvi_panel = { .power_down_gpio = DSS_ENABLE_GPIO, .i2c_bus_num = -1, }; static struct omap_dss_device omap3_stalker_dvi_device = { .name = "dvi", .type = OMAP_DISPLAY_TYPE_DPI, .driver_name = "tfp410", .data = &dvi_panel, .phy.dpi.data_lines = 24, }; static struct omap_dss_device *omap3_stalker_dss_devices[] = { &omap3_stalker_tv_device, &omap3_stalker_dvi_device, }; static struct omap_dss_board_info omap3_stalker_dss_data = { .num_devices = ARRAY_SIZE(omap3_stalker_dss_devices), .devices = omap3_stalker_dss_devices, .default_device = &omap3_stalker_dvi_device, }; static struct regulator_consumer_supply omap3stalker_vmmc1_supply[] = { REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"), }; static struct regulator_consumer_supply omap3stalker_vsim_supply[] = { REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"), }; /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */ static struct regulator_init_data omap3stalker_vmmc1 = { .constraints = { .min_uV = 1850000, .max_uV = 3150000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(omap3stalker_vmmc1_supply), .consumer_supplies = omap3stalker_vmmc1_supply, }; /* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */ static struct regulator_init_data 
omap3stalker_vsim = { .constraints = { .min_uV = 1800000, .max_uV = 3000000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(omap3stalker_vsim_supply), .consumer_supplies = omap3stalker_vsim_supply, }; static struct omap2_hsmmc_info mmc[] = { { .mmc = 1, .caps = MMC_CAP_4_BIT_DATA, .gpio_cd = -EINVAL, .gpio_wp = 23, .deferred = true, }, {} /* Terminator */ }; static struct gpio_keys_button gpio_buttons[] = { { .code = BTN_EXTRA, .gpio = 18, .desc = "user", .wakeup = 1, }, }; static struct gpio_keys_platform_data gpio_key_info = { .buttons = gpio_buttons, .nbuttons = ARRAY_SIZE(gpio_buttons), }; static struct platform_device keys_gpio = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &gpio_key_info, }, }; static struct gpio_led gpio_leds[] = { { .name = "stalker:D8:usr0", .default_trigger = "default-on", .gpio = 126, }, { .name = "stalker:D9:usr1", .default_trigger = "default-on", .gpio = 127, }, { .name = "stalker:D3:mmc0", .gpio = -EINVAL, /* gets replaced */ .active_low = true, .default_trigger = "mmc0", }, { .name = "stalker:D4:heartbeat", .gpio = -EINVAL, /* gets replaced */ .active_low = true, .default_trigger = "heartbeat", }, }; static struct gpio_led_platform_data gpio_led_info = { .leds = gpio_leds, .num_leds = ARRAY_SIZE(gpio_leds), }; static struct platform_device leds_gpio = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &gpio_led_info, }, }; static int omap3stalker_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { /* gpio + 0 is "mmc0_cd" (input/IRQ) */ mmc[0].gpio_cd = gpio + 0; omap_hsmmc_late_init(mmc); /* * Most GPIOs are for USB OTG. Some are mostly sent to * the P2 connector; notably LEDA for the LCD backlight. 
*/ /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */ gpio_request_one(gpio + TWL4030_GPIO_MAX, GPIOF_OUT_INIT_LOW, "EN_LCD_BKL"); /* gpio + 7 == DVI Enable */ gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "EN_DVI"); /* TWL4030_GPIO_MAX + 1 == ledB (out, mmc0) */ gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; /* GPIO + 13 == ledsync (out, heartbeat) */ gpio_leds[3].gpio = gpio + 13; platform_device_register(&leds_gpio); return 0; } static struct twl4030_gpio_platform_data omap3stalker_gpio_data = { .use_leds = true, .setup = omap3stalker_twl_gpio_setup, }; static uint32_t board_keymap[] = { KEY(0, 0, KEY_LEFT), KEY(0, 1, KEY_DOWN), KEY(0, 2, KEY_ENTER), KEY(0, 3, KEY_M), KEY(1, 0, KEY_RIGHT), KEY(1, 1, KEY_UP), KEY(1, 2, KEY_I), KEY(1, 3, KEY_N), KEY(2, 0, KEY_A), KEY(2, 1, KEY_E), KEY(2, 2, KEY_J), KEY(2, 3, KEY_O), KEY(3, 0, KEY_B), KEY(3, 1, KEY_F), KEY(3, 2, KEY_K), KEY(3, 3, KEY_P) }; static struct matrix_keymap_data board_map_data = { .keymap = board_keymap, .keymap_size = ARRAY_SIZE(board_keymap), }; static struct twl4030_keypad_data omap3stalker_kp_data = { .keymap_data = &board_map_data, .rows = 4, .cols = 4, .rep = 1, }; static struct twl4030_platform_data omap3stalker_twldata = { /* platform_data for children goes here */ .keypad = &omap3stalker_kp_data, .gpio = &omap3stalker_gpio_data, .vmmc1 = &omap3stalker_vmmc1, .vsim = &omap3stalker_vsim, }; static struct at24_platform_data fram_info = { .byte_len = (64 * 1024) / 8, .page_size = 8192, .flags = AT24_FLAG_ADDR16 | AT24_FLAG_IRUGO, }; static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo3[] = { { I2C_BOARD_INFO("24c64", 0x50), .flags = I2C_CLIENT_WAKE, .platform_data = &fram_info, }, }; static int __init omap3_stalker_i2c_init(void) { omap3_pmic_get_config(&omap3stalker_twldata, TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC | TWL_COMMON_PDATA_AUDIO, TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2); omap3stalker_twldata.vdac->constraints.apply_uV = true; 
omap3stalker_twldata.vpll2->constraints.apply_uV = true; omap3stalker_twldata.vpll2->constraints.name = "VDVI"; omap3_pmic_init("twl4030", &omap3stalker_twldata); omap_register_i2c_bus(2, 400, NULL, 0); omap_register_i2c_bus(3, 400, omap3stalker_i2c_boardinfo3, ARRAY_SIZE(omap3stalker_i2c_boardinfo3)); return 0; } #define OMAP3_STALKER_TS_GPIO 175 static struct usbhs_phy_data phy_data[] __initdata = { { .port = 2, .reset_gpio = 21, .vcc_gpio = -EINVAL, }, }; static struct platform_device *omap3_stalker_devices[] __initdata = { &keys_gpio, }; static struct usbhs_omap_platform_data usbhs_bdata __initdata = { .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, }; #ifdef CONFIG_OMAP_MUX static struct omap_board_mux board_mux[] __initdata = { OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE), OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE), {.reg_offset = OMAP_MUX_TERMINATOR}, }; #endif static struct regulator_consumer_supply dummy_supplies[] = { REGULATOR_SUPPLY("vddvario", "smsc911x.0"), REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), }; static void __init omap3_stalker_init(void) { regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); omap3_mux_init(board_mux, OMAP_PACKAGE_CUS); omap_mux_init_gpio(23, OMAP_PIN_INPUT); omap_hsmmc_init(mmc); omap3_stalker_i2c_init(); platform_add_devices(omap3_stalker_devices, ARRAY_SIZE(omap3_stalker_devices)); omap_display_init(&omap3_stalker_dss_data); omap_serial_init(); omap_sdrc_init(mt46h32m32lf6_sdrc_params, NULL); usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb"); usb_musb_init(NULL); usbhs_init_phys(phy_data, ARRAY_SIZE(phy_data)); usbhs_init(&usbhs_bdata); omap_ads7846_init(1, OMAP3_STALKER_TS_GPIO, 310, NULL); omap_mux_init_gpio(21, OMAP_PIN_OUTPUT); omap_mux_init_gpio(18, OMAP_PIN_INPUT_PULLUP); omap3stalker_init_eth(); omap3_stalker_display_init(); /* Ensure SDRC pins are mux'd for 
self-refresh */ omap_mux_init_signal("sdr_cke0", OMAP_PIN_OUTPUT); omap_mux_init_signal("sdr_cke1", OMAP_PIN_OUTPUT); } MACHINE_START(SBC3530, "OMAP3 STALKER") /* Maintainer: Jason Lam -lzg@ema-tech.com */ .atag_offset = 0x100, .map_io = omap3_map_io, .init_early = omap35xx_init_early, .init_irq = omap3_init_irq, .handle_irq = omap3_intc_handle_irq, .init_machine = omap3_stalker_init, .init_late = omap35xx_init_late, .init_time = omap3_secure_sync32k_timer_init, .restart = omap3xxx_restart, MACHINE_END
gpl-2.0
hephaex/a10c
drivers/power/reset/qnap-poweroff.c
2623
3007
/* * QNAP Turbo NAS Board power off * * Copyright (C) 2012 Andrew Lunn <andrew@lunn.ch> * * Based on the code from: * * Copyright (C) 2009 Martin Michlmayr <tbm@cyrius.com> * Copyright (C) 2008 Byron Bradley <byron.bbradley@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/serial_reg.h> #include <linux/kallsyms.h> #include <linux/of.h> #include <linux/io.h> #include <linux/clk.h> #define UART1_REG(x) (base + ((UART_##x) << 2)) static void __iomem *base; static unsigned long tclk; static void qnap_power_off(void) { /* 19200 baud divisor */ const unsigned divisor = ((tclk + (8 * 19200)) / (16 * 19200)); pr_err("%s: triggering power-off...\n", __func__); /* hijack UART1 and reset into sane state (19200,8n1) */ writel(0x83, UART1_REG(LCR)); writel(divisor & 0xff, UART1_REG(DLL)); writel((divisor >> 8) & 0xff, UART1_REG(DLM)); writel(0x03, UART1_REG(LCR)); writel(0x00, UART1_REG(IER)); writel(0x00, UART1_REG(FCR)); writel(0x00, UART1_REG(MCR)); /* send the power-off command 'A' to PIC */ writel('A', UART1_REG(TX)); } static int qnap_power_off_probe(struct platform_device *pdev) { struct resource *res; struct clk *clk; char symname[KSYM_NAME_LEN]; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Missing resource"); return -EINVAL; } base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!base) { dev_err(&pdev->dev, "Unable to map resource"); return -EINVAL; } /* We need to know tclk in order to calculate the UART divisor */ clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { dev_err(&pdev->dev, "Clk missing"); return PTR_ERR(clk); } tclk = clk_get_rate(clk); /* Check that nothing else has already setup a 
handler */ if (pm_power_off) { lookup_symbol_name((ulong)pm_power_off, symname); dev_err(&pdev->dev, "pm_power_off already claimed %p %s", pm_power_off, symname); return -EBUSY; } pm_power_off = qnap_power_off; return 0; } static int qnap_power_off_remove(struct platform_device *pdev) { pm_power_off = NULL; return 0; } static const struct of_device_id qnap_power_off_of_match_table[] = { { .compatible = "qnap,power-off", }, {} }; MODULE_DEVICE_TABLE(of, qnap_power_off_of_match_table); static struct platform_driver qnap_power_off_driver = { .probe = qnap_power_off_probe, .remove = qnap_power_off_remove, .driver = { .owner = THIS_MODULE, .name = "qnap_power_off", .of_match_table = of_match_ptr(qnap_power_off_of_match_table), }, }; module_platform_driver(qnap_power_off_driver); MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>"); MODULE_DESCRIPTION("QNAP Power off driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
NX511J-dev/kernel_nubia_nx511j
fs/dlm/recover.c
2623
23364
/****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. ** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions ** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ #include "dlm_internal.h" #include "lockspace.h" #include "dir.h" #include "config.h" #include "ast.h" #include "memory.h" #include "rcom.h" #include "lock.h" #include "lowcomms.h" #include "member.h" #include "recover.h" /* * Recovery waiting routines: these functions wait for a particular reply from * a remote node, or for the remote node to report a certain status. They need * to abort if the lockspace is stopped indicating a node has failed (perhaps * the one being waited for). */ /* * Wait until given function returns non-zero or lockspace is stopped * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes). When another * function thinks it could have completed the waited-on task, they should wake * up ls_wait_general to get an immediate response rather than waiting for the * timeout. This uses a timeout so it can check periodically if the wait * should abort due to node failure (which doesn't cause a wake_up). * This should only be called by the dlm_recoverd thread. 
*/ int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls)) { int error = 0; int rv; while (1) { rv = wait_event_timeout(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls), dlm_config.ci_recover_timer * HZ); if (rv) break; } if (dlm_recovery_stopped(ls)) { log_debug(ls, "dlm_wait_function aborted"); error = -EINTR; } return error; } /* * An efficient way for all nodes to wait for all others to have a certain * status. The node with the lowest nodeid polls all the others for their * status (wait_status_all) and all the others poll the node with the low id * for its accumulated result (wait_status_low). When all nodes have set * status flag X, then status flag X_ALL will be set on the low nodeid. */ uint32_t dlm_recover_status(struct dlm_ls *ls) { uint32_t status; spin_lock(&ls->ls_recover_lock); status = ls->ls_recover_status; spin_unlock(&ls->ls_recover_lock); return status; } static void _set_recover_status(struct dlm_ls *ls, uint32_t status) { ls->ls_recover_status |= status; } void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status) { spin_lock(&ls->ls_recover_lock); _set_recover_status(ls, status); spin_unlock(&ls->ls_recover_lock); } static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status, int save_slots) { struct dlm_rcom *rc = ls->ls_recover_buf; struct dlm_member *memb; int error = 0, delay; list_for_each_entry(memb, &ls->ls_nodes, list) { delay = 0; for (;;) { if (dlm_recovery_stopped(ls)) { error = -EINTR; goto out; } error = dlm_rcom_status(ls, memb->nodeid, 0); if (error) goto out; if (save_slots) dlm_slot_save(ls, rc, memb); if (rc->rc_result & wait_status) break; if (delay < 1000) delay += 20; msleep(delay); } } out: return error; } static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status, uint32_t status_flags) { struct dlm_rcom *rc = ls->ls_recover_buf; int error = 0, delay = 0, nodeid = ls->ls_low_nodeid; for (;;) { if (dlm_recovery_stopped(ls)) { error = -EINTR; goto out; } error = 
dlm_rcom_status(ls, nodeid, status_flags); if (error) break; if (rc->rc_result & wait_status) break; if (delay < 1000) delay += 20; msleep(delay); } out: return error; } static int wait_status(struct dlm_ls *ls, uint32_t status) { uint32_t status_all = status << 1; int error; if (ls->ls_low_nodeid == dlm_our_nodeid()) { error = wait_status_all(ls, status, 0); if (!error) dlm_set_recover_status(ls, status_all); } else error = wait_status_low(ls, status_all, 0); return error; } int dlm_recover_members_wait(struct dlm_ls *ls) { struct dlm_member *memb; struct dlm_slot *slots; int num_slots, slots_size; int error, rv; uint32_t gen; list_for_each_entry(memb, &ls->ls_nodes, list) { memb->slot = -1; memb->generation = 0; } if (ls->ls_low_nodeid == dlm_our_nodeid()) { error = wait_status_all(ls, DLM_RS_NODES, 1); if (error) goto out; /* slots array is sparse, slots_size may be > num_slots */ rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen); if (!rv) { spin_lock(&ls->ls_recover_lock); _set_recover_status(ls, DLM_RS_NODES_ALL); ls->ls_num_slots = num_slots; ls->ls_slots_size = slots_size; ls->ls_slots = slots; ls->ls_generation = gen; spin_unlock(&ls->ls_recover_lock); } else { dlm_set_recover_status(ls, DLM_RS_NODES_ALL); } } else { error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS); if (error) goto out; dlm_slots_copy_in(ls); } out: return error; } int dlm_recover_directory_wait(struct dlm_ls *ls) { return wait_status(ls, DLM_RS_DIR); } int dlm_recover_locks_wait(struct dlm_ls *ls) { return wait_status(ls, DLM_RS_LOCKS); } int dlm_recover_done_wait(struct dlm_ls *ls) { return wait_status(ls, DLM_RS_DONE); } /* * The recover_list contains all the rsb's for which we've requested the new * master nodeid. As replies are returned from the resource directories the * rsb's are removed from the list. When the list is empty we're done. 
* * The recover_list is later similarly used for all rsb's for which we've sent * new lkb's and need to receive new corresponding lkid's. * * We use the address of the rsb struct as a simple local identifier for the * rsb so we can match an rcom reply with the rsb it was sent for. */ static int recover_list_empty(struct dlm_ls *ls) { int empty; spin_lock(&ls->ls_recover_list_lock); empty = list_empty(&ls->ls_recover_list); spin_unlock(&ls->ls_recover_list_lock); return empty; } static void recover_list_add(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; spin_lock(&ls->ls_recover_list_lock); if (list_empty(&r->res_recover_list)) { list_add_tail(&r->res_recover_list, &ls->ls_recover_list); ls->ls_recover_list_count++; dlm_hold_rsb(r); } spin_unlock(&ls->ls_recover_list_lock); } static void recover_list_del(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; spin_lock(&ls->ls_recover_list_lock); list_del_init(&r->res_recover_list); ls->ls_recover_list_count--; spin_unlock(&ls->ls_recover_list_lock); dlm_put_rsb(r); } static void recover_list_clear(struct dlm_ls *ls) { struct dlm_rsb *r, *s; spin_lock(&ls->ls_recover_list_lock); list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) { list_del_init(&r->res_recover_list); r->res_recover_locks_count = 0; dlm_put_rsb(r); ls->ls_recover_list_count--; } if (ls->ls_recover_list_count != 0) { log_error(ls, "warning: recover_list_count %d", ls->ls_recover_list_count); ls->ls_recover_list_count = 0; } spin_unlock(&ls->ls_recover_list_lock); } static int recover_idr_empty(struct dlm_ls *ls) { int empty = 1; spin_lock(&ls->ls_recover_idr_lock); if (ls->ls_recover_list_count) empty = 0; spin_unlock(&ls->ls_recover_idr_lock); return empty; } static int recover_idr_add(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; int rv; idr_preload(GFP_NOFS); spin_lock(&ls->ls_recover_idr_lock); if (r->res_id) { rv = -1; goto out_unlock; } rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT); if (rv < 0) goto 
out_unlock; r->res_id = rv; ls->ls_recover_list_count++; dlm_hold_rsb(r); rv = 0; out_unlock: spin_unlock(&ls->ls_recover_idr_lock); idr_preload_end(); return rv; } static void recover_idr_del(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; spin_lock(&ls->ls_recover_idr_lock); idr_remove(&ls->ls_recover_idr, r->res_id); r->res_id = 0; ls->ls_recover_list_count--; spin_unlock(&ls->ls_recover_idr_lock); dlm_put_rsb(r); } static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id) { struct dlm_rsb *r; spin_lock(&ls->ls_recover_idr_lock); r = idr_find(&ls->ls_recover_idr, (int)id); spin_unlock(&ls->ls_recover_idr_lock); return r; } static void recover_idr_clear(struct dlm_ls *ls) { struct dlm_rsb *r; int id; spin_lock(&ls->ls_recover_idr_lock); idr_for_each_entry(&ls->ls_recover_idr, r, id) { idr_remove(&ls->ls_recover_idr, id); r->res_id = 0; r->res_recover_locks_count = 0; ls->ls_recover_list_count--; dlm_put_rsb(r); } if (ls->ls_recover_list_count != 0) { log_error(ls, "warning: recover_list_count %d", ls->ls_recover_list_count); ls->ls_recover_list_count = 0; } spin_unlock(&ls->ls_recover_idr_lock); } /* Master recovery: find new master node for rsb's that were mastered on nodes that have been removed. dlm_recover_masters recover_master dlm_send_rcom_lookup -> receive_rcom_lookup dlm_dir_lookup receive_rcom_lookup_reply <- dlm_recover_master_reply set_new_master set_master_lkbs set_lock_master */ /* * Set the lock master for all LKBs in a lock queue * If we are the new master of the rsb, we may have received new * MSTCPY locks from other nodes already which we need to ignore * when setting the new nodeid. 
*/ static void set_lock_master(struct list_head *queue, int nodeid) { struct dlm_lkb *lkb; list_for_each_entry(lkb, queue, lkb_statequeue) { if (!(lkb->lkb_flags & DLM_IFL_MSTCPY)) { lkb->lkb_nodeid = nodeid; lkb->lkb_remid = 0; } } } static void set_master_lkbs(struct dlm_rsb *r) { set_lock_master(&r->res_grantqueue, r->res_nodeid); set_lock_master(&r->res_convertqueue, r->res_nodeid); set_lock_master(&r->res_waitqueue, r->res_nodeid); } /* * Propagate the new master nodeid to locks * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider. * The NEW_MASTER2 flag tells recover_lvb() and recover_grant() which * rsb's to consider. */ static void set_new_master(struct dlm_rsb *r) { set_master_lkbs(r); rsb_set_flag(r, RSB_NEW_MASTER); rsb_set_flag(r, RSB_NEW_MASTER2); } /* * We do async lookups on rsb's that need new masters. The rsb's * waiting for a lookup reply are kept on the recover_list. * * Another node recovering the master may have sent us a rcom lookup, * and our dlm_master_lookup() set it as the new master, along with * NEW_MASTER so that we'll recover it here (this implies dir_nodeid * equals our_nodeid below). */ static int recover_master(struct dlm_rsb *r, unsigned int *count) { struct dlm_ls *ls = r->res_ls; int our_nodeid, dir_nodeid; int is_removed = 0; int error; if (is_master(r)) return 0; is_removed = dlm_is_removed(ls, r->res_nodeid); if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER)) return 0; our_nodeid = dlm_our_nodeid(); dir_nodeid = dlm_dir_nodeid(r); if (dir_nodeid == our_nodeid) { if (is_removed) { r->res_master_nodeid = our_nodeid; r->res_nodeid = 0; } /* set master of lkbs to ourself when is_removed, or to another new master which we set along with NEW_MASTER in dlm_master_lookup */ set_new_master(r); error = 0; } else { recover_idr_add(r); error = dlm_send_rcom_lookup(r, dir_nodeid); } (*count)++; return error; } /* * All MSTCPY locks are purged and rebuilt, even if the master stayed the same. 
* This is necessary because recovery can be started, aborted and restarted, * causing the master nodeid to briefly change during the aborted recovery, and * change back to the original value in the second recovery. The MSTCPY locks * may or may not have been purged during the aborted recovery. Another node * with an outstanding request in waiters list and a request reply saved in the * requestqueue, cannot know whether it should ignore the reply and resend the * request, or accept the reply and complete the request. It must do the * former if the remote node purged MSTCPY locks, and it must do the later if * the remote node did not. This is solved by always purging MSTCPY locks, in * which case, the request reply would always be ignored and the request * resent. */ static int recover_master_static(struct dlm_rsb *r, unsigned int *count) { int dir_nodeid = dlm_dir_nodeid(r); int new_master = dir_nodeid; if (dir_nodeid == dlm_our_nodeid()) new_master = 0; dlm_purge_mstcpy_locks(r); r->res_master_nodeid = dir_nodeid; r->res_nodeid = new_master; set_new_master(r); (*count)++; return 0; } /* * Go through local root resources and for each rsb which has a master which * has departed, get the new master nodeid from the directory. The dir will * assign mastery to the first node to look up the new master. That means * we'll discover in this lookup if we're the new master of any rsb's. * * We fire off all the dir lookup requests individually and asynchronously to * the correct dir node. 
*/ int dlm_recover_masters(struct dlm_ls *ls) { struct dlm_rsb *r; unsigned int total = 0; unsigned int count = 0; int nodir = dlm_no_directory(ls); int error; log_debug(ls, "dlm_recover_masters"); down_read(&ls->ls_root_sem); list_for_each_entry(r, &ls->ls_root_list, res_root_list) { if (dlm_recovery_stopped(ls)) { up_read(&ls->ls_root_sem); error = -EINTR; goto out; } lock_rsb(r); if (nodir) error = recover_master_static(r, &count); else error = recover_master(r, &count); unlock_rsb(r); cond_resched(); total++; if (error) { up_read(&ls->ls_root_sem); goto out; } } up_read(&ls->ls_root_sem); log_debug(ls, "dlm_recover_masters %u of %u", count, total); error = dlm_wait_function(ls, &recover_idr_empty); out: if (error) recover_idr_clear(ls); return error; } int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc) { struct dlm_rsb *r; int ret_nodeid, new_master; r = recover_idr_find(ls, rc->rc_id); if (!r) { log_error(ls, "dlm_recover_master_reply no id %llx", (unsigned long long)rc->rc_id); goto out; } ret_nodeid = rc->rc_result; if (ret_nodeid == dlm_our_nodeid()) new_master = 0; else new_master = ret_nodeid; lock_rsb(r); r->res_master_nodeid = ret_nodeid; r->res_nodeid = new_master; set_new_master(r); unlock_rsb(r); recover_idr_del(r); if (recover_idr_empty(ls)) wake_up(&ls->ls_wait_general); out: return 0; } /* Lock recovery: rebuild the process-copy locks we hold on a remastered rsb on the new rsb master. 
dlm_recover_locks recover_locks recover_locks_queue dlm_send_rcom_lock -> receive_rcom_lock dlm_recover_master_copy receive_rcom_lock_reply <- dlm_recover_process_copy */ /* * keep a count of the number of lkb's we send to the new master; when we get * an equal number of replies then recovery for the rsb is done */ static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head) { struct dlm_lkb *lkb; int error = 0; list_for_each_entry(lkb, head, lkb_statequeue) { error = dlm_send_rcom_lock(r, lkb); if (error) break; r->res_recover_locks_count++; } return error; } static int recover_locks(struct dlm_rsb *r) { int error = 0; lock_rsb(r); DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r);); error = recover_locks_queue(r, &r->res_grantqueue); if (error) goto out; error = recover_locks_queue(r, &r->res_convertqueue); if (error) goto out; error = recover_locks_queue(r, &r->res_waitqueue); if (error) goto out; if (r->res_recover_locks_count) recover_list_add(r); else rsb_clear_flag(r, RSB_NEW_MASTER); out: unlock_rsb(r); return error; } int dlm_recover_locks(struct dlm_ls *ls) { struct dlm_rsb *r; int error, count = 0; down_read(&ls->ls_root_sem); list_for_each_entry(r, &ls->ls_root_list, res_root_list) { if (is_master(r)) { rsb_clear_flag(r, RSB_NEW_MASTER); continue; } if (!rsb_flag(r, RSB_NEW_MASTER)) continue; if (dlm_recovery_stopped(ls)) { error = -EINTR; up_read(&ls->ls_root_sem); goto out; } error = recover_locks(r); if (error) { up_read(&ls->ls_root_sem); goto out; } count += r->res_recover_locks_count; } up_read(&ls->ls_root_sem); log_debug(ls, "dlm_recover_locks %d out", count); error = dlm_wait_function(ls, &recover_list_empty); out: if (error) recover_list_clear(ls); return error; } void dlm_recovered_lock(struct dlm_rsb *r) { DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r);); r->res_recover_locks_count--; if (!r->res_recover_locks_count) { rsb_clear_flag(r, RSB_NEW_MASTER); recover_list_del(r); } if (recover_list_empty(r->res_ls)) 
wake_up(&r->res_ls->ls_wait_general); } /* * The lvb needs to be recovered on all master rsb's. This includes setting * the VALNOTVALID flag if necessary, and determining the correct lvb contents * based on the lvb's of the locks held on the rsb. * * RSB_VALNOTVALID is set in two cases: * * 1. we are master, but not new, and we purged an EX/PW lock held by a * failed node (in dlm_recover_purge which set RSB_RECOVER_LVB_INVAL) * * 2. we are a new master, and there are only NL/CR locks left. * (We could probably improve this by only invaliding in this way when * the previous master left uncleanly. VMS docs mention that.) * * The LVB contents are only considered for changing when this is a new master * of the rsb (NEW_MASTER2). Then, the rsb's lvb is taken from any lkb with * mode > CR. If no lkb's exist with mode above CR, the lvb contents are taken * from the lkb with the largest lvb sequence number. */ static void recover_lvb(struct dlm_rsb *r) { struct dlm_lkb *lkb, *high_lkb = NULL; uint32_t high_seq = 0; int lock_lvb_exists = 0; int big_lock_exists = 0; int lvblen = r->res_ls->ls_lvblen; if (!rsb_flag(r, RSB_NEW_MASTER2) && rsb_flag(r, RSB_RECOVER_LVB_INVAL)) { /* case 1 above */ rsb_set_flag(r, RSB_VALNOTVALID); return; } if (!rsb_flag(r, RSB_NEW_MASTER2)) return; /* we are the new master, so figure out if VALNOTVALID should be set, and set the rsb lvb from the best lkb available. 
*/ list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) continue; lock_lvb_exists = 1; if (lkb->lkb_grmode > DLM_LOCK_CR) { big_lock_exists = 1; goto setflag; } if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) { high_lkb = lkb; high_seq = lkb->lkb_lvbseq; } } list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) continue; lock_lvb_exists = 1; if (lkb->lkb_grmode > DLM_LOCK_CR) { big_lock_exists = 1; goto setflag; } if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) { high_lkb = lkb; high_seq = lkb->lkb_lvbseq; } } setflag: if (!lock_lvb_exists) goto out; /* lvb is invalidated if only NL/CR locks remain */ if (!big_lock_exists) rsb_set_flag(r, RSB_VALNOTVALID); if (!r->res_lvbptr) { r->res_lvbptr = dlm_allocate_lvb(r->res_ls); if (!r->res_lvbptr) goto out; } if (big_lock_exists) { r->res_lvbseq = lkb->lkb_lvbseq; memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen); } else if (high_lkb) { r->res_lvbseq = high_lkb->lkb_lvbseq; memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen); } else { r->res_lvbseq = 0; memset(r->res_lvbptr, 0, lvblen); } out: return; } /* All master rsb's flagged RECOVER_CONVERT need to be looked at. The locks converting PR->CW or CW->PR need to have their lkb_grmode set. 
*/ static void recover_conversion(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; struct dlm_lkb *lkb; int grmode = -1; list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { if (lkb->lkb_grmode == DLM_LOCK_PR || lkb->lkb_grmode == DLM_LOCK_CW) { grmode = lkb->lkb_grmode; break; } } list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { if (lkb->lkb_grmode != DLM_LOCK_IV) continue; if (grmode == -1) { log_debug(ls, "recover_conversion %x set gr to rq %d", lkb->lkb_id, lkb->lkb_rqmode); lkb->lkb_grmode = lkb->lkb_rqmode; } else { log_debug(ls, "recover_conversion %x set gr %d", lkb->lkb_id, grmode); lkb->lkb_grmode = grmode; } } } /* We've become the new master for this rsb and waiting/converting locks may need to be granted in dlm_recover_grant() due to locks that may have existed from a removed node. */ static void recover_grant(struct dlm_rsb *r) { if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue)) rsb_set_flag(r, RSB_RECOVER_GRANT); } void dlm_recover_rsbs(struct dlm_ls *ls) { struct dlm_rsb *r; unsigned int count = 0; down_read(&ls->ls_root_sem); list_for_each_entry(r, &ls->ls_root_list, res_root_list) { lock_rsb(r); if (is_master(r)) { if (rsb_flag(r, RSB_RECOVER_CONVERT)) recover_conversion(r); /* recover lvb before granting locks so the updated lvb/VALNOTVALID is presented in the completion */ recover_lvb(r); if (rsb_flag(r, RSB_NEW_MASTER2)) recover_grant(r); count++; } else { rsb_clear_flag(r, RSB_VALNOTVALID); } rsb_clear_flag(r, RSB_RECOVER_CONVERT); rsb_clear_flag(r, RSB_RECOVER_LVB_INVAL); rsb_clear_flag(r, RSB_NEW_MASTER2); unlock_rsb(r); } up_read(&ls->ls_root_sem); if (count) log_debug(ls, "dlm_recover_rsbs %d done", count); } /* Create a single list of all root rsb's to be used during recovery */ int dlm_create_root_list(struct dlm_ls *ls) { struct rb_node *n; struct dlm_rsb *r; int i, error = 0; down_write(&ls->ls_root_sem); if (!list_empty(&ls->ls_root_list)) { log_error(ls, "root list not empty"); 
error = -EINVAL; goto out; } for (i = 0; i < ls->ls_rsbtbl_size; i++) { spin_lock(&ls->ls_rsbtbl[i].lock); for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); list_add(&r->res_root_list, &ls->ls_root_list); dlm_hold_rsb(r); } if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss)) log_error(ls, "dlm_create_root_list toss not empty"); spin_unlock(&ls->ls_rsbtbl[i].lock); } out: up_write(&ls->ls_root_sem); return error; } void dlm_release_root_list(struct dlm_ls *ls) { struct dlm_rsb *r, *safe; down_write(&ls->ls_root_sem); list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) { list_del_init(&r->res_root_list); dlm_put_rsb(r); } up_write(&ls->ls_root_sem); } void dlm_clear_toss(struct dlm_ls *ls) { struct rb_node *n, *next; struct dlm_rsb *r; unsigned int count = 0; int i; for (i = 0; i < ls->ls_rsbtbl_size; i++) { spin_lock(&ls->ls_rsbtbl[i].lock); for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) { next = rb_next(n); r = rb_entry(n, struct dlm_rsb, res_hashnode); rb_erase(n, &ls->ls_rsbtbl[i].toss); dlm_free_rsb(r); count++; } spin_unlock(&ls->ls_rsbtbl[i].lock); } if (count) log_debug(ls, "dlm_clear_toss %u done", count); }
gpl-2.0
AndroPlus-org/android_kernel_sony_msm8994
drivers/acpi/acpica/rsinfo.c
2623
11353
/******************************************************************************* * * Module Name: rsinfo - Dispatch and Info tables * ******************************************************************************/ /* * Copyright (C) 2000 - 2013, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acresrc.h" #define _COMPONENT ACPI_RESOURCES ACPI_MODULE_NAME("rsinfo") /* * Resource dispatch and information tables. Any new resource types (either * Large or Small) must be reflected in each of these tables, so they are here * in one place. * * The tables for Large descriptors are indexed by bits 6:0 of the AML * descriptor type byte. The tables for Small descriptors are indexed by * bits 6:3 of the descriptor byte. The tables for internal resource * descriptors are indexed by the acpi_resource_type field. 
*/ /* Dispatch table for resource-to-AML (Set Resource) conversion functions */ struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = { acpi_rs_set_irq, /* 0x00, ACPI_RESOURCE_TYPE_IRQ */ acpi_rs_convert_dma, /* 0x01, ACPI_RESOURCE_TYPE_DMA */ acpi_rs_set_start_dpf, /* 0x02, ACPI_RESOURCE_TYPE_START_DEPENDENT */ acpi_rs_convert_end_dpf, /* 0x03, ACPI_RESOURCE_TYPE_END_DEPENDENT */ acpi_rs_convert_io, /* 0x04, ACPI_RESOURCE_TYPE_IO */ acpi_rs_convert_fixed_io, /* 0x05, ACPI_RESOURCE_TYPE_FIXED_IO */ acpi_rs_set_vendor, /* 0x06, ACPI_RESOURCE_TYPE_VENDOR */ acpi_rs_convert_end_tag, /* 0x07, ACPI_RESOURCE_TYPE_END_TAG */ acpi_rs_convert_memory24, /* 0x08, ACPI_RESOURCE_TYPE_MEMORY24 */ acpi_rs_convert_memory32, /* 0x09, ACPI_RESOURCE_TYPE_MEMORY32 */ acpi_rs_convert_fixed_memory32, /* 0x0A, ACPI_RESOURCE_TYPE_FIXED_MEMORY32 */ acpi_rs_convert_address16, /* 0x0B, ACPI_RESOURCE_TYPE_ADDRESS16 */ acpi_rs_convert_address32, /* 0x0C, ACPI_RESOURCE_TYPE_ADDRESS32 */ acpi_rs_convert_address64, /* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */ acpi_rs_convert_ext_address64, /* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */ acpi_rs_convert_ext_irq, /* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */ acpi_rs_convert_generic_reg, /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */ acpi_rs_convert_gpio, /* 0x11, ACPI_RESOURCE_TYPE_GPIO */ acpi_rs_convert_fixed_dma, /* 0x12, ACPI_RESOURCE_TYPE_FIXED_DMA */ NULL, /* 0x13, ACPI_RESOURCE_TYPE_SERIAL_BUS - Use subtype table below */ }; /* Dispatch tables for AML-to-resource (Get Resource) conversion functions */ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = { /* Small descriptors */ NULL, /* 0x00, Reserved */ NULL, /* 0x01, Reserved */ NULL, /* 0x02, Reserved */ NULL, /* 0x03, Reserved */ acpi_rs_get_irq, /* 0x04, ACPI_RESOURCE_NAME_IRQ */ acpi_rs_convert_dma, /* 0x05, ACPI_RESOURCE_NAME_DMA */ acpi_rs_get_start_dpf, /* 0x06, ACPI_RESOURCE_NAME_START_DEPENDENT */ acpi_rs_convert_end_dpf, /* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT 
*/ acpi_rs_convert_io, /* 0x08, ACPI_RESOURCE_NAME_IO */ acpi_rs_convert_fixed_io, /* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */ acpi_rs_convert_fixed_dma, /* 0x0A, ACPI_RESOURCE_NAME_FIXED_DMA */ NULL, /* 0x0B, Reserved */ NULL, /* 0x0C, Reserved */ NULL, /* 0x0D, Reserved */ acpi_rs_get_vendor_small, /* 0x0E, ACPI_RESOURCE_NAME_VENDOR_SMALL */ acpi_rs_convert_end_tag, /* 0x0F, ACPI_RESOURCE_NAME_END_TAG */ /* Large descriptors */ NULL, /* 0x00, Reserved */ acpi_rs_convert_memory24, /* 0x01, ACPI_RESOURCE_NAME_MEMORY24 */ acpi_rs_convert_generic_reg, /* 0x02, ACPI_RESOURCE_NAME_GENERIC_REGISTER */ NULL, /* 0x03, Reserved */ acpi_rs_get_vendor_large, /* 0x04, ACPI_RESOURCE_NAME_VENDOR_LARGE */ acpi_rs_convert_memory32, /* 0x05, ACPI_RESOURCE_NAME_MEMORY32 */ acpi_rs_convert_fixed_memory32, /* 0x06, ACPI_RESOURCE_NAME_FIXED_MEMORY32 */ acpi_rs_convert_address32, /* 0x07, ACPI_RESOURCE_NAME_ADDRESS32 */ acpi_rs_convert_address16, /* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */ acpi_rs_convert_ext_irq, /* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */ acpi_rs_convert_address64, /* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */ acpi_rs_convert_ext_address64, /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */ acpi_rs_convert_gpio, /* 0x0C, ACPI_RESOURCE_NAME_GPIO */ NULL, /* 0x0D, Reserved */ NULL, /* 0x0E, ACPI_RESOURCE_NAME_SERIAL_BUS - Use subtype table below */ }; /* Subtype table for serial_bus -- I2C, SPI, and UART */ struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = { NULL, acpi_rs_convert_i2c_serial_bus, acpi_rs_convert_spi_serial_bus, acpi_rs_convert_uart_serial_bus, }; #ifdef ACPI_FUTURE_USAGE #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) /* Dispatch table for resource dump functions */ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = { acpi_rs_dump_irq, /* ACPI_RESOURCE_TYPE_IRQ */ acpi_rs_dump_dma, /* ACPI_RESOURCE_TYPE_DMA */ acpi_rs_dump_start_dpf, /* ACPI_RESOURCE_TYPE_START_DEPENDENT */ acpi_rs_dump_end_dpf, /* 
ACPI_RESOURCE_TYPE_END_DEPENDENT */ acpi_rs_dump_io, /* ACPI_RESOURCE_TYPE_IO */ acpi_rs_dump_fixed_io, /* ACPI_RESOURCE_TYPE_FIXED_IO */ acpi_rs_dump_vendor, /* ACPI_RESOURCE_TYPE_VENDOR */ acpi_rs_dump_end_tag, /* ACPI_RESOURCE_TYPE_END_TAG */ acpi_rs_dump_memory24, /* ACPI_RESOURCE_TYPE_MEMORY24 */ acpi_rs_dump_memory32, /* ACPI_RESOURCE_TYPE_MEMORY32 */ acpi_rs_dump_fixed_memory32, /* ACPI_RESOURCE_TYPE_FIXED_MEMORY32 */ acpi_rs_dump_address16, /* ACPI_RESOURCE_TYPE_ADDRESS16 */ acpi_rs_dump_address32, /* ACPI_RESOURCE_TYPE_ADDRESS32 */ acpi_rs_dump_address64, /* ACPI_RESOURCE_TYPE_ADDRESS64 */ acpi_rs_dump_ext_address64, /* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */ acpi_rs_dump_ext_irq, /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */ acpi_rs_dump_generic_reg, /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */ acpi_rs_dump_gpio, /* ACPI_RESOURCE_TYPE_GPIO */ acpi_rs_dump_fixed_dma, /* ACPI_RESOURCE_TYPE_FIXED_DMA */ NULL, /* ACPI_RESOURCE_TYPE_SERIAL_BUS */ }; struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = { NULL, acpi_rs_dump_i2c_serial_bus, /* AML_RESOURCE_I2C_BUS_TYPE */ acpi_rs_dump_spi_serial_bus, /* AML_RESOURCE_SPI_BUS_TYPE */ acpi_rs_dump_uart_serial_bus, /* AML_RESOURCE_UART_BUS_TYPE */ }; #endif #endif /* ACPI_FUTURE_USAGE */ /* * Base sizes for external AML resource descriptors, indexed by internal type. 
* Includes size of the descriptor header (1 byte for small descriptors, * 3 bytes for large descriptors) */ const u8 acpi_gbl_aml_resource_sizes[] = { sizeof(struct aml_resource_irq), /* ACPI_RESOURCE_TYPE_IRQ (optional Byte 3 always created) */ sizeof(struct aml_resource_dma), /* ACPI_RESOURCE_TYPE_DMA */ sizeof(struct aml_resource_start_dependent), /* ACPI_RESOURCE_TYPE_START_DEPENDENT (optional Byte 1 always created) */ sizeof(struct aml_resource_end_dependent), /* ACPI_RESOURCE_TYPE_END_DEPENDENT */ sizeof(struct aml_resource_io), /* ACPI_RESOURCE_TYPE_IO */ sizeof(struct aml_resource_fixed_io), /* ACPI_RESOURCE_TYPE_FIXED_IO */ sizeof(struct aml_resource_vendor_small), /* ACPI_RESOURCE_TYPE_VENDOR */ sizeof(struct aml_resource_end_tag), /* ACPI_RESOURCE_TYPE_END_TAG */ sizeof(struct aml_resource_memory24), /* ACPI_RESOURCE_TYPE_MEMORY24 */ sizeof(struct aml_resource_memory32), /* ACPI_RESOURCE_TYPE_MEMORY32 */ sizeof(struct aml_resource_fixed_memory32), /* ACPI_RESOURCE_TYPE_FIXED_MEMORY32 */ sizeof(struct aml_resource_address16), /* ACPI_RESOURCE_TYPE_ADDRESS16 */ sizeof(struct aml_resource_address32), /* ACPI_RESOURCE_TYPE_ADDRESS32 */ sizeof(struct aml_resource_address64), /* ACPI_RESOURCE_TYPE_ADDRESS64 */ sizeof(struct aml_resource_extended_address64), /*ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */ sizeof(struct aml_resource_extended_irq), /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */ sizeof(struct aml_resource_generic_register), /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */ sizeof(struct aml_resource_gpio), /* ACPI_RESOURCE_TYPE_GPIO */ sizeof(struct aml_resource_fixed_dma), /* ACPI_RESOURCE_TYPE_FIXED_DMA */ sizeof(struct aml_resource_common_serialbus), /* ACPI_RESOURCE_TYPE_SERIAL_BUS */ }; const u8 acpi_gbl_resource_struct_sizes[] = { /* Small descriptors */ 0, 0, 0, 0, ACPI_RS_SIZE(struct acpi_resource_irq), ACPI_RS_SIZE(struct acpi_resource_dma), ACPI_RS_SIZE(struct acpi_resource_start_dependent), ACPI_RS_SIZE_MIN, ACPI_RS_SIZE(struct acpi_resource_io), 
ACPI_RS_SIZE(struct acpi_resource_fixed_io), ACPI_RS_SIZE(struct acpi_resource_fixed_dma), 0, 0, 0, ACPI_RS_SIZE(struct acpi_resource_vendor), ACPI_RS_SIZE_MIN, /* Large descriptors */ 0, ACPI_RS_SIZE(struct acpi_resource_memory24), ACPI_RS_SIZE(struct acpi_resource_generic_register), 0, ACPI_RS_SIZE(struct acpi_resource_vendor), ACPI_RS_SIZE(struct acpi_resource_memory32), ACPI_RS_SIZE(struct acpi_resource_fixed_memory32), ACPI_RS_SIZE(struct acpi_resource_address32), ACPI_RS_SIZE(struct acpi_resource_address16), ACPI_RS_SIZE(struct acpi_resource_extended_irq), ACPI_RS_SIZE(struct acpi_resource_address64), ACPI_RS_SIZE(struct acpi_resource_extended_address64), ACPI_RS_SIZE(struct acpi_resource_gpio), ACPI_RS_SIZE(struct acpi_resource_common_serialbus) }; const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = { 0, sizeof(struct aml_resource_i2c_serialbus), sizeof(struct aml_resource_spi_serialbus), sizeof(struct aml_resource_uart_serialbus), }; const u8 acpi_gbl_resource_struct_serial_bus_sizes[] = { 0, ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus), ACPI_RS_SIZE(struct acpi_resource_spi_serialbus), ACPI_RS_SIZE(struct acpi_resource_uart_serialbus), };
gpl-2.0
brieuwers/N8000Kernel
drivers/net/stmmac/dwmac1000_dma.c
2623
4773
/******************************************************************************* This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for developing this code. This contains the functions to handle the dma. Copyright (C) 2007-2009 STMicroelectronics Ltd This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". 
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ #include "dwmac1000.h" #include "dwmac_dma.h" static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx) { u32 value = readl(ioaddr + DMA_BUS_MODE); int limit; /* DMA SW reset */ value |= DMA_BUS_MODE_SFT_RESET; writel(value, ioaddr + DMA_BUS_MODE); limit = 15000; while (limit--) { if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) break; } if (limit < 0) return -EBUSY; value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) | (pbl << DMA_BUS_MODE_RPBL_SHIFT)); #ifdef CONFIG_STMMAC_DA value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */ #endif writel(value, ioaddr + DMA_BUS_MODE); /* Mask interrupts by writing to CSR7 */ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); /* The base address of the RX/TX descriptor lists must be written into * DMA CSR3 and CSR4, respectively. */ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); return 0; } static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, int rxmode) { u32 csr6 = readl(ioaddr + DMA_CONTROL); if (txmode == SF_DMA_MODE) { CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n"); /* Transmit COE type 2 cannot be done in cut-through mode. 
*/ csr6 |= DMA_CONTROL_TSF; /* Operating on second frame increase the performance * especially when transmit store-and-forward is used.*/ csr6 |= DMA_CONTROL_OSF; } else { CHIP_DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode" " (threshold = %d)\n", txmode); csr6 &= ~DMA_CONTROL_TSF; csr6 &= DMA_CONTROL_TC_TX_MASK; /* Set the transmit threshold */ if (txmode <= 32) csr6 |= DMA_CONTROL_TTC_32; else if (txmode <= 64) csr6 |= DMA_CONTROL_TTC_64; else if (txmode <= 128) csr6 |= DMA_CONTROL_TTC_128; else if (txmode <= 192) csr6 |= DMA_CONTROL_TTC_192; else csr6 |= DMA_CONTROL_TTC_256; } if (rxmode == SF_DMA_MODE) { CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n"); csr6 |= DMA_CONTROL_RSF; } else { CHIP_DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode" " (threshold = %d)\n", rxmode); csr6 &= ~DMA_CONTROL_RSF; csr6 &= DMA_CONTROL_TC_RX_MASK; if (rxmode <= 32) csr6 |= DMA_CONTROL_RTC_32; else if (rxmode <= 64) csr6 |= DMA_CONTROL_RTC_64; else if (rxmode <= 96) csr6 |= DMA_CONTROL_RTC_96; else csr6 |= DMA_CONTROL_RTC_128; } writel(csr6, ioaddr + DMA_CONTROL); } /* Not yet implemented --- no RMON module */ static void dwmac1000_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, void __iomem *ioaddr) { return; } static void dwmac1000_dump_dma_regs(void __iomem *ioaddr) { int i; pr_info(" DMA registers\n"); for (i = 0; i < 22; i++) { if ((i < 9) || (i > 17)) { int offset = i * 4; pr_err("\t Reg No. 
%d (offset 0x%x): 0x%08x\n", i, (DMA_BUS_MODE + offset), readl(ioaddr + DMA_BUS_MODE + offset)); } } } const struct stmmac_dma_ops dwmac1000_dma_ops = { .init = dwmac1000_dma_init, .dump_regs = dwmac1000_dump_dma_regs, .dma_mode = dwmac1000_dma_operation_mode, .dma_diagnostic_fr = dwmac1000_dma_diagnostic_fr, .enable_dma_transmission = dwmac_enable_dma_transmission, .enable_dma_irq = dwmac_enable_dma_irq, .disable_dma_irq = dwmac_disable_dma_irq, .start_tx = dwmac_dma_start_tx, .stop_tx = dwmac_dma_stop_tx, .start_rx = dwmac_dma_start_rx, .stop_rx = dwmac_dma_stop_rx, .dma_interrupt = dwmac_dma_interrupt, };
gpl-2.0
longman88/kernel-qspinlock-v10
arch/metag/kernel/process.c
3135
10642
/* * Copyright (C) 2005,2006,2007,2008,2009,2010,2011 Imagination Technologies * * This file contains the architecture-dependent parts of process handling. * */ #include <linux/errno.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/reboot.h> #include <linux/elfcore.h> #include <linux/fs.h> #include <linux/tick.h> #include <linux/slab.h> #include <linux/mman.h> #include <linux/pm.h> #include <linux/syscalls.h> #include <linux/uaccess.h> #include <linux/smp.h> #include <asm/core_reg.h> #include <asm/user_gateway.h> #include <asm/tcm.h> #include <asm/traps.h> #include <asm/switch_to.h> /* * Wait for the next interrupt and enable local interrupts */ void arch_cpu_idle(void) { int tmp; /* * Quickly jump straight into the interrupt entry point without actually * triggering an interrupt. When TXSTATI gets read the processor will * block until an interrupt is triggered. */ asm volatile (/* Switch into ISTAT mode */ "RTH\n\t" /* Enable local interrupts */ "MOV TXMASKI, %1\n\t" /* * We can't directly "SWAP PC, PCX", so we swap via a * temporary. 
Essentially we do: * PCX_new = 1f (the place to continue execution) * PC = PCX_old */ "ADD %0, CPC0, #(1f-.)\n\t" "SWAP PCX, %0\n\t" "MOV PC, %0\n" /* Continue execution here with interrupts enabled */ "1:" : "=a" (tmp) : "r" (get_trigger_mask())); } #ifdef CONFIG_HOTPLUG_CPU void arch_cpu_idle_dead(void) { cpu_die(); } #endif void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); void (*soc_restart)(char *cmd); void (*soc_halt)(void); void machine_restart(char *cmd) { if (soc_restart) soc_restart(cmd); hard_processor_halt(HALT_OK); } void machine_halt(void) { if (soc_halt) soc_halt(); smp_send_stop(); hard_processor_halt(HALT_OK); } void machine_power_off(void) { if (pm_power_off) pm_power_off(); smp_send_stop(); hard_processor_halt(HALT_OK); } #define FLAG_Z 0x8 #define FLAG_N 0x4 #define FLAG_O 0x2 #define FLAG_C 0x1 void show_regs(struct pt_regs *regs) { int i; const char *AX0_names[] = {"A0StP", "A0FrP"}; const char *AX1_names[] = {"A1GbP", "A1LbP"}; const char *DX0_names[] = { "D0Re0", "D0Ar6", "D0Ar4", "D0Ar2", "D0FrT", "D0.5 ", "D0.6 ", "D0.7 " }; const char *DX1_names[] = { "D1Re0", "D1Ar5", "D1Ar3", "D1Ar1", "D1RtP", "D1.5 ", "D1.6 ", "D1.7 " }; show_regs_print_info(KERN_INFO); pr_info(" pt_regs @ %p\n", regs); pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask); pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags, regs->ctx.Flags & FLAG_Z ? 'Z' : 'z', regs->ctx.Flags & FLAG_N ? 'N' : 'n', regs->ctx.Flags & FLAG_O ? 'O' : 'o', regs->ctx.Flags & FLAG_C ? 
'C' : 'c'); pr_info(" TXRPT = 0x%08x\n", regs->ctx.CurrRPT); pr_info(" PC = 0x%08x\n", regs->ctx.CurrPC); /* AX regs */ for (i = 0; i < 2; i++) { pr_info(" %s = 0x%08x ", AX0_names[i], regs->ctx.AX[i].U0); printk(" %s = 0x%08x\n", AX1_names[i], regs->ctx.AX[i].U1); } if (regs->ctx.SaveMask & TBICTX_XEXT_BIT) pr_warn(" Extended state present - AX2.[01] will be WRONG\n"); /* Special place with AXx.2 */ pr_info(" A0.2 = 0x%08x ", regs->ctx.Ext.AX2.U0); printk(" A1.2 = 0x%08x\n", regs->ctx.Ext.AX2.U1); /* 'extended' AX regs (nominally, just AXx.3) */ for (i = 0; i < (TBICTX_AX_REGS - 3); i++) { pr_info(" A0.%d = 0x%08x ", i + 3, regs->ctx.AX3[i].U0); printk(" A1.%d = 0x%08x\n", i + 3, regs->ctx.AX3[i].U1); } for (i = 0; i < 8; i++) { pr_info(" %s = 0x%08x ", DX0_names[i], regs->ctx.DX[i].U0); printk(" %s = 0x%08x\n", DX1_names[i], regs->ctx.DX[i].U1); } show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs); } int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct task_struct *tsk) { struct pt_regs *childregs = task_pt_regs(tsk); void *kernel_context = ((void *) childregs + sizeof(struct pt_regs)); unsigned long global_base; BUG_ON(((unsigned long)childregs) & 0x7); BUG_ON(((unsigned long)kernel_context) & 0x7); memset(&tsk->thread.kernel_context, 0, sizeof(tsk->thread.kernel_context)); tsk->thread.kernel_context = __TBISwitchInit(kernel_context, ret_from_fork, 0, 0); if (unlikely(tsk->flags & PF_KTHREAD)) { /* * Make sure we don't leak any kernel data to child's regs * if kernel thread becomes a userspace thread in the future */ memset(childregs, 0 , sizeof(struct pt_regs)); global_base = __core_reg_get(A1GbP); childregs->ctx.AX[0].U1 = (unsigned long) global_base; childregs->ctx.AX[0].U0 = (unsigned long) kernel_context; /* Set D1Ar1=arg and D1RtP=usp (fn) */ childregs->ctx.DX[4].U1 = usp; childregs->ctx.DX[3].U1 = arg; tsk->thread.int_depth = 2; return 0; } /* * Get a pointer to where the new child's register block should have * 
been pushed. * The Meta's stack grows upwards, and the context is the the first * thing to be pushed by TBX (phew) */ *childregs = *current_pt_regs(); /* Set the correct stack for the clone mode */ if (usp) childregs->ctx.AX[0].U0 = ALIGN(usp, 8); tsk->thread.int_depth = 1; /* set return value for child process */ childregs->ctx.DX[0].U0 = 0; /* The TLS pointer is passed as an argument to sys_clone. */ if (clone_flags & CLONE_SETTLS) tsk->thread.tls_ptr = (__force void __user *)childregs->ctx.DX[1].U1; #ifdef CONFIG_METAG_FPU if (tsk->thread.fpu_context) { struct meta_fpu_context *ctx; ctx = kmemdup(tsk->thread.fpu_context, sizeof(struct meta_fpu_context), GFP_ATOMIC); tsk->thread.fpu_context = ctx; } #endif #ifdef CONFIG_METAG_DSP if (tsk->thread.dsp_context) { struct meta_ext_context *ctx; int i; ctx = kmemdup(tsk->thread.dsp_context, sizeof(struct meta_ext_context), GFP_ATOMIC); for (i = 0; i < 2; i++) ctx->ram[i] = kmemdup(ctx->ram[i], ctx->ram_sz[i], GFP_ATOMIC); tsk->thread.dsp_context = ctx; } #endif return 0; } #ifdef CONFIG_METAG_FPU static void alloc_fpu_context(struct thread_struct *thread) { thread->fpu_context = kzalloc(sizeof(struct meta_fpu_context), GFP_ATOMIC); } static void clear_fpu(struct thread_struct *thread) { thread->user_flags &= ~TBICTX_FPAC_BIT; kfree(thread->fpu_context); thread->fpu_context = NULL; } #else static void clear_fpu(struct thread_struct *thread) { } #endif #ifdef CONFIG_METAG_DSP static void clear_dsp(struct thread_struct *thread) { if (thread->dsp_context) { kfree(thread->dsp_context->ram[0]); kfree(thread->dsp_context->ram[1]); kfree(thread->dsp_context); thread->dsp_context = NULL; } __core_reg_set(D0.8, 0); } #else static void clear_dsp(struct thread_struct *thread) { } #endif struct task_struct *__sched __switch_to(struct task_struct *prev, struct task_struct *next) { TBIRES to, from; to.Switch.pCtx = next->thread.kernel_context; to.Switch.pPara = prev; #ifdef CONFIG_METAG_FPU if (prev->thread.user_flags & 
TBICTX_FPAC_BIT) { struct pt_regs *regs = task_pt_regs(prev); TBIRES state; state.Sig.SaveMask = prev->thread.user_flags; state.Sig.pCtx = &regs->ctx; if (!prev->thread.fpu_context) alloc_fpu_context(&prev->thread); if (prev->thread.fpu_context) __TBICtxFPUSave(state, prev->thread.fpu_context); } /* * Force a restore of the FPU context next time this process is * scheduled. */ if (prev->thread.fpu_context) prev->thread.fpu_context->needs_restore = true; #endif from = __TBISwitch(to, &prev->thread.kernel_context); /* Restore TLS pointer for this process. */ set_gateway_tls(current->thread.tls_ptr); return (struct task_struct *) from.Switch.pPara; } void flush_thread(void) { clear_fpu(&current->thread); clear_dsp(&current->thread); } /* * Free current thread data structures etc. */ void exit_thread(void) { clear_fpu(&current->thread); clear_dsp(&current->thread); } /* TODO: figure out how to unwind the kernel stack here to figure out * where we went to sleep. */ unsigned long get_wchan(struct task_struct *p) { return 0; } int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) { /* Returning 0 indicates that the FPU state was not stored (as it was * not in use) */ return 0; } #ifdef CONFIG_METAG_USER_TCM #define ELF_MIN_ALIGN PAGE_SIZE #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1)) #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1)) #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1)) #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE) unsigned long __metag_elf_map(struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type, unsigned long total_size) { unsigned long map_addr, size; unsigned long page_off = ELF_PAGEOFFSET(eppnt->p_vaddr); unsigned long raw_size = eppnt->p_filesz + page_off; unsigned long off = eppnt->p_offset - page_off; unsigned int tcm_tag; addr = ELF_PAGESTART(addr); size = ELF_PAGEALIGN(raw_size); /* mmap() will return -EINVAL if given a zero size, but a * segment with zero 
filesize is perfectly valid */ if (!size) return addr; tcm_tag = tcm_lookup_tag(addr); if (tcm_tag != TCM_INVALID_TAG) type &= ~MAP_FIXED; /* * total_size is the size of the ELF (interpreter) image. * The _first_ mmap needs to know the full size, otherwise * randomization might put this image into an overlapping * position with the ELF binary image. (since size < total_size) * So we first map the 'big' image - and unmap the remainder at * the end. (which unmap is needed for ELF images with holes.) */ if (total_size) { total_size = ELF_PAGEALIGN(total_size); map_addr = vm_mmap(filep, addr, total_size, prot, type, off); if (!BAD_ADDR(map_addr)) vm_munmap(map_addr+size, total_size-size); } else map_addr = vm_mmap(filep, addr, size, prot, type, off); if (!BAD_ADDR(map_addr) && tcm_tag != TCM_INVALID_TAG) { struct tcm_allocation *tcm; unsigned long tcm_addr; tcm = kmalloc(sizeof(*tcm), GFP_KERNEL); if (!tcm) return -ENOMEM; tcm_addr = tcm_alloc(tcm_tag, raw_size); if (tcm_addr != addr) { kfree(tcm); return -ENOMEM; } tcm->tag = tcm_tag; tcm->addr = tcm_addr; tcm->size = raw_size; list_add(&tcm->list, &current->mm->context.tcm); eppnt->p_vaddr = map_addr; if (copy_from_user((void *) addr, (void __user *) map_addr, raw_size)) return -EFAULT; } return map_addr; } #endif
gpl-2.0
ryukiri/DracoKernel
drivers/i2c/busses/i2c-via.c
4159
4497
/* i2c Support for Via Technologies 82C586B South Bridge Copyright (c) 1998, 1999 Kyösti Mälkki <kmalkki@cc.hut.fi> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/io.h> /* Power management registers */ #define PM_CFG_REVID 0x08 /* silicon revision code */ #define PM_CFG_IOBASE0 0x20 #define PM_CFG_IOBASE1 0x48 #define I2C_DIR (pm_io_base+0x40) #define I2C_OUT (pm_io_base+0x42) #define I2C_IN (pm_io_base+0x44) #define I2C_SCL 0x02 /* clock bit in DIR/OUT/IN register */ #define I2C_SDA 0x04 /* io-region reservation */ #define IOSPACE 0x06 static struct pci_driver vt586b_driver; static u16 pm_io_base; /* It does not appear from the datasheet that the GPIO pins are open drain. So a we set a low value by setting the direction to output and a high value by setting the direction to input and relying on the required I2C pullup. The data value is initialized to 0 in via_init() and never changed. */ static void bit_via_setscl(void *data, int state) { outb(state ? inb(I2C_DIR) & ~I2C_SCL : inb(I2C_DIR) | I2C_SCL, I2C_DIR); } static void bit_via_setsda(void *data, int state) { outb(state ? 
inb(I2C_DIR) & ~I2C_SDA : inb(I2C_DIR) | I2C_SDA, I2C_DIR); } static int bit_via_getscl(void *data) { return (0 != (inb(I2C_IN) & I2C_SCL)); } static int bit_via_getsda(void *data) { return (0 != (inb(I2C_IN) & I2C_SDA)); } static struct i2c_algo_bit_data bit_data = { .setsda = bit_via_setsda, .setscl = bit_via_setscl, .getsda = bit_via_getsda, .getscl = bit_via_getscl, .udelay = 5, .timeout = HZ }; static struct i2c_adapter vt586b_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .name = "VIA i2c", .algo_data = &bit_data, }; static const struct pci_device_id vt586b_ids[] __devinitconst = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) }, { 0, } }; MODULE_DEVICE_TABLE (pci, vt586b_ids); static int __devinit vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id) { u16 base; u8 rev; int res; if (pm_io_base) { dev_err(&dev->dev, "i2c-via: Will only support one host\n"); return -ENODEV; } pci_read_config_byte(dev, PM_CFG_REVID, &rev); switch (rev) { case 0x00: base = PM_CFG_IOBASE0; break; case 0x01: case 0x10: base = PM_CFG_IOBASE1; break; default: base = PM_CFG_IOBASE1; /* later revision */ } pci_read_config_word(dev, base, &pm_io_base); pm_io_base &= (0xff << 8); if (!request_region(I2C_DIR, IOSPACE, vt586b_driver.name)) { dev_err(&dev->dev, "IO 0x%x-0x%x already in use\n", I2C_DIR, I2C_DIR + IOSPACE); return -ENODEV; } outb(inb(I2C_DIR) & ~(I2C_SDA | I2C_SCL), I2C_DIR); outb(inb(I2C_OUT) & ~(I2C_SDA | I2C_SCL), I2C_OUT); /* set up the sysfs linkage to our parent device */ vt586b_adapter.dev.parent = &dev->dev; res = i2c_bit_add_bus(&vt586b_adapter); if ( res < 0 ) { release_region(I2C_DIR, IOSPACE); pm_io_base = 0; return res; } return 0; } static void __devexit vt586b_remove(struct pci_dev *dev) { i2c_del_adapter(&vt586b_adapter); release_region(I2C_DIR, IOSPACE); pm_io_base = 0; } static struct pci_driver vt586b_driver = { .name = "vt586b_smbus", .id_table = vt586b_ids, .probe = vt586b_probe, .remove = 
__devexit_p(vt586b_remove), }; static int __init i2c_vt586b_init(void) { return pci_register_driver(&vt586b_driver); } static void __exit i2c_vt586b_exit(void) { pci_unregister_driver(&vt586b_driver); } MODULE_AUTHOR("Kyösti Mälkki <kmalkki@cc.hut.fi>"); MODULE_DESCRIPTION("i2c for Via vt82c586b southbridge"); MODULE_LICENSE("GPL"); module_init(i2c_vt586b_init); module_exit(i2c_vt586b_exit);
gpl-2.0
trevd/android_kernel_ti_archos
drivers/i2c/busses/i2c-via.c
4159
4497
/* i2c Support for Via Technologies 82C586B South Bridge Copyright (c) 1998, 1999 Kyösti Mälkki <kmalkki@cc.hut.fi> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/io.h> /* Power management registers */ #define PM_CFG_REVID 0x08 /* silicon revision code */ #define PM_CFG_IOBASE0 0x20 #define PM_CFG_IOBASE1 0x48 #define I2C_DIR (pm_io_base+0x40) #define I2C_OUT (pm_io_base+0x42) #define I2C_IN (pm_io_base+0x44) #define I2C_SCL 0x02 /* clock bit in DIR/OUT/IN register */ #define I2C_SDA 0x04 /* io-region reservation */ #define IOSPACE 0x06 static struct pci_driver vt586b_driver; static u16 pm_io_base; /* It does not appear from the datasheet that the GPIO pins are open drain. So a we set a low value by setting the direction to output and a high value by setting the direction to input and relying on the required I2C pullup. The data value is initialized to 0 in via_init() and never changed. */ static void bit_via_setscl(void *data, int state) { outb(state ? inb(I2C_DIR) & ~I2C_SCL : inb(I2C_DIR) | I2C_SCL, I2C_DIR); } static void bit_via_setsda(void *data, int state) { outb(state ? 
inb(I2C_DIR) & ~I2C_SDA : inb(I2C_DIR) | I2C_SDA, I2C_DIR); } static int bit_via_getscl(void *data) { return (0 != (inb(I2C_IN) & I2C_SCL)); } static int bit_via_getsda(void *data) { return (0 != (inb(I2C_IN) & I2C_SDA)); } static struct i2c_algo_bit_data bit_data = { .setsda = bit_via_setsda, .setscl = bit_via_setscl, .getsda = bit_via_getsda, .getscl = bit_via_getscl, .udelay = 5, .timeout = HZ }; static struct i2c_adapter vt586b_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .name = "VIA i2c", .algo_data = &bit_data, }; static const struct pci_device_id vt586b_ids[] __devinitconst = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) }, { 0, } }; MODULE_DEVICE_TABLE (pci, vt586b_ids); static int __devinit vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id) { u16 base; u8 rev; int res; if (pm_io_base) { dev_err(&dev->dev, "i2c-via: Will only support one host\n"); return -ENODEV; } pci_read_config_byte(dev, PM_CFG_REVID, &rev); switch (rev) { case 0x00: base = PM_CFG_IOBASE0; break; case 0x01: case 0x10: base = PM_CFG_IOBASE1; break; default: base = PM_CFG_IOBASE1; /* later revision */ } pci_read_config_word(dev, base, &pm_io_base); pm_io_base &= (0xff << 8); if (!request_region(I2C_DIR, IOSPACE, vt586b_driver.name)) { dev_err(&dev->dev, "IO 0x%x-0x%x already in use\n", I2C_DIR, I2C_DIR + IOSPACE); return -ENODEV; } outb(inb(I2C_DIR) & ~(I2C_SDA | I2C_SCL), I2C_DIR); outb(inb(I2C_OUT) & ~(I2C_SDA | I2C_SCL), I2C_OUT); /* set up the sysfs linkage to our parent device */ vt586b_adapter.dev.parent = &dev->dev; res = i2c_bit_add_bus(&vt586b_adapter); if ( res < 0 ) { release_region(I2C_DIR, IOSPACE); pm_io_base = 0; return res; } return 0; } static void __devexit vt586b_remove(struct pci_dev *dev) { i2c_del_adapter(&vt586b_adapter); release_region(I2C_DIR, IOSPACE); pm_io_base = 0; } static struct pci_driver vt586b_driver = { .name = "vt586b_smbus", .id_table = vt586b_ids, .probe = vt586b_probe, .remove = 
__devexit_p(vt586b_remove), }; static int __init i2c_vt586b_init(void) { return pci_register_driver(&vt586b_driver); } static void __exit i2c_vt586b_exit(void) { pci_unregister_driver(&vt586b_driver); } MODULE_AUTHOR("Kyösti Mälkki <kmalkki@cc.hut.fi>"); MODULE_DESCRIPTION("i2c for Via vt82c586b southbridge"); MODULE_LICENSE("GPL"); module_init(i2c_vt586b_init); module_exit(i2c_vt586b_exit);
gpl-2.0
tadeas482/kernel-old
net/ipv4/netfilter/ipt_NETMAP.c
4671
2855
/* NETMAP - static NAT mapping of IP network addresses (1:1). * The mapping can be applied to source (POSTROUTING), * destination (PREROUTING), or both (with separate rules). */ /* (C) 2000-2001 Svenning Soerensen <svenning@post5.tele.dk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/ip.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_nat_rule.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Svenning Soerensen <svenning@post5.tele.dk>"); MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of IPv4 subnets"); static int netmap_tg_check(const struct xt_tgchk_param *par) { const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) { pr_debug("bad MAP_IPS.\n"); return -EINVAL; } if (mr->rangesize != 1) { pr_debug("bad rangesize %u.\n", mr->rangesize); return -EINVAL; } return 0; } static unsigned int netmap_tg(struct sk_buff *skb, const struct xt_action_param *par) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; __be32 new_ip, netmask; const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; struct nf_nat_ipv4_range newrange; NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING || par->hooknum == NF_INET_POST_ROUTING || par->hooknum == NF_INET_LOCAL_OUT || par->hooknum == NF_INET_LOCAL_IN); ct = nf_ct_get(skb, &ctinfo); netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip); if (par->hooknum == NF_INET_PRE_ROUTING || par->hooknum == NF_INET_LOCAL_OUT) new_ip = ip_hdr(skb)->daddr & ~netmask; else new_ip = ip_hdr(skb)->saddr & ~netmask; new_ip |= mr->range[0].min_ip & netmask; newrange = ((struct nf_nat_ipv4_range) { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS, new_ip, new_ip, 
mr->range[0].min, mr->range[0].max }); /* Hand modified range to generic setup. */ return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum)); } static struct xt_target netmap_tg_reg __read_mostly = { .name = "NETMAP", .family = NFPROTO_IPV4, .target = netmap_tg, .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), .table = "nat", .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_LOCAL_IN), .checkentry = netmap_tg_check, .me = THIS_MODULE }; static int __init netmap_tg_init(void) { return xt_register_target(&netmap_tg_reg); } static void __exit netmap_tg_exit(void) { xt_unregister_target(&netmap_tg_reg); } module_init(netmap_tg_init); module_exit(netmap_tg_exit);
gpl-2.0
upndwn4par/kernel_hammerhead_lollipop
drivers/input/sparse-keymap.c
5183
8782
/* * Generic support for sparse keymaps * * Copyright (c) 2009 Dmitry Torokhov * * Derived from wistron button driver: * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org> * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <linux/module.h> #include <linux/slab.h> MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>"); MODULE_DESCRIPTION("Generic support for sparse keymaps"); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.1"); static unsigned int sparse_keymap_get_key_index(struct input_dev *dev, const struct key_entry *k) { struct key_entry *key; unsigned int idx = 0; for (key = dev->keycode; key->type != KE_END; key++) { if (key->type == KE_KEY) { if (key == k) break; idx++; } } return idx; } static struct key_entry *sparse_keymap_entry_by_index(struct input_dev *dev, unsigned int index) { struct key_entry *key; unsigned int key_cnt = 0; for (key = dev->keycode; key->type != KE_END; key++) if (key->type == KE_KEY) if (key_cnt++ == index) return key; return NULL; } /** * sparse_keymap_entry_from_scancode - perform sparse keymap lookup * @dev: Input device using sparse keymap * @code: Scan code * * This function is used to perform &struct key_entry lookup in an * input device using sparse keymap. 
*/ struct key_entry *sparse_keymap_entry_from_scancode(struct input_dev *dev, unsigned int code) { struct key_entry *key; for (key = dev->keycode; key->type != KE_END; key++) if (code == key->code) return key; return NULL; } EXPORT_SYMBOL(sparse_keymap_entry_from_scancode); /** * sparse_keymap_entry_from_keycode - perform sparse keymap lookup * @dev: Input device using sparse keymap * @keycode: Key code * * This function is used to perform &struct key_entry lookup in an * input device using sparse keymap. */ struct key_entry *sparse_keymap_entry_from_keycode(struct input_dev *dev, unsigned int keycode) { struct key_entry *key; for (key = dev->keycode; key->type != KE_END; key++) if (key->type == KE_KEY && keycode == key->keycode) return key; return NULL; } EXPORT_SYMBOL(sparse_keymap_entry_from_keycode); static struct key_entry *sparse_keymap_locate(struct input_dev *dev, const struct input_keymap_entry *ke) { struct key_entry *key; unsigned int scancode; if (ke->flags & INPUT_KEYMAP_BY_INDEX) key = sparse_keymap_entry_by_index(dev, ke->index); else if (input_scancode_to_scalar(ke, &scancode) == 0) key = sparse_keymap_entry_from_scancode(dev, scancode); else key = NULL; return key; } static int sparse_keymap_getkeycode(struct input_dev *dev, struct input_keymap_entry *ke) { const struct key_entry *key; if (dev->keycode) { key = sparse_keymap_locate(dev, ke); if (key && key->type == KE_KEY) { ke->keycode = key->keycode; if (!(ke->flags & INPUT_KEYMAP_BY_INDEX)) ke->index = sparse_keymap_get_key_index(dev, key); ke->len = sizeof(key->code); memcpy(ke->scancode, &key->code, sizeof(key->code)); return 0; } } return -EINVAL; } static int sparse_keymap_setkeycode(struct input_dev *dev, const struct input_keymap_entry *ke, unsigned int *old_keycode) { struct key_entry *key; if (dev->keycode) { key = sparse_keymap_locate(dev, ke); if (key && key->type == KE_KEY) { *old_keycode = key->keycode; key->keycode = ke->keycode; set_bit(ke->keycode, dev->keybit); if 
(!sparse_keymap_entry_from_keycode(dev, *old_keycode)) clear_bit(*old_keycode, dev->keybit); return 0; } } return -EINVAL; } /** * sparse_keymap_setup - set up sparse keymap for an input device * @dev: Input device * @keymap: Keymap in form of array of &key_entry structures ending * with %KE_END type entry * @setup: Function that can be used to adjust keymap entries * depending on device's needs, may be %NULL * * The function calculates size and allocates copy of the original * keymap after which sets up input device event bits appropriately. * Before destroying input device allocated keymap should be freed * with a call to sparse_keymap_free(). */ int sparse_keymap_setup(struct input_dev *dev, const struct key_entry *keymap, int (*setup)(struct input_dev *, struct key_entry *)) { size_t map_size = 1; /* to account for the last KE_END entry */ const struct key_entry *e; struct key_entry *map, *entry; int i; int error; for (e = keymap; e->type != KE_END; e++) map_size++; map = kcalloc(map_size, sizeof (struct key_entry), GFP_KERNEL); if (!map) return -ENOMEM; memcpy(map, keymap, map_size * sizeof (struct key_entry)); for (i = 0; i < map_size; i++) { entry = &map[i]; if (setup) { error = setup(dev, entry); if (error) goto err_out; } switch (entry->type) { case KE_KEY: __set_bit(EV_KEY, dev->evbit); __set_bit(entry->keycode, dev->keybit); break; case KE_SW: case KE_VSW: __set_bit(EV_SW, dev->evbit); __set_bit(entry->sw.code, dev->swbit); break; } } if (test_bit(EV_KEY, dev->evbit)) { __set_bit(KEY_UNKNOWN, dev->keybit); __set_bit(EV_MSC, dev->evbit); __set_bit(MSC_SCAN, dev->mscbit); } dev->keycode = map; dev->keycodemax = map_size; dev->getkeycode = sparse_keymap_getkeycode; dev->setkeycode = sparse_keymap_setkeycode; return 0; err_out: kfree(map); return error; } EXPORT_SYMBOL(sparse_keymap_setup); /** * sparse_keymap_free - free memory allocated for sparse keymap * @dev: Input device using sparse keymap * * This function is used to free memory allocated by sparse 
keymap * in an input device that was set up by sparse_keymap_setup(). * NOTE: It is safe to call this function while input device is * still registered (however the drivers should care not to try to * use freed keymap and thus have to shut off interrupts/polling * before freeing the keymap). */ void sparse_keymap_free(struct input_dev *dev) { unsigned long flags; /* * Take event lock to prevent racing with input_get_keycode() * and input_set_keycode() if we are called while input device * is still registered. */ spin_lock_irqsave(&dev->event_lock, flags); kfree(dev->keycode); dev->keycode = NULL; dev->keycodemax = 0; spin_unlock_irqrestore(&dev->event_lock, flags); } EXPORT_SYMBOL(sparse_keymap_free); /** * sparse_keymap_report_entry - report event corresponding to given key entry * @dev: Input device for which event should be reported * @ke: key entry describing event * @value: Value that should be reported (ignored by %KE_SW entries) * @autorelease: Signals whether release event should be emitted for %KE_KEY * entries right after reporting press event, ignored by all other * entries * * This function is used to report input event described by given * &struct key_entry. 
*/ void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *ke, unsigned int value, bool autorelease) { switch (ke->type) { case KE_KEY: input_event(dev, EV_MSC, MSC_SCAN, ke->code); input_report_key(dev, ke->keycode, value); input_sync(dev); if (value && autorelease) { input_report_key(dev, ke->keycode, 0); input_sync(dev); } break; case KE_SW: value = ke->sw.value; /* fall through */ case KE_VSW: input_report_switch(dev, ke->sw.code, value); break; } } EXPORT_SYMBOL(sparse_keymap_report_entry); /** * sparse_keymap_report_event - report event corresponding to given scancode * @dev: Input device using sparse keymap * @code: Scan code * @value: Value that should be reported (ignored by %KE_SW entries) * @autorelease: Signals whether release event should be emitted for %KE_KEY * entries right after reporting press event, ignored by all other * entries * * This function is used to perform lookup in an input device using sparse * keymap and report corresponding event. Returns %true if lookup was * successful and %false otherwise. */ bool sparse_keymap_report_event(struct input_dev *dev, unsigned int code, unsigned int value, bool autorelease) { const struct key_entry *ke = sparse_keymap_entry_from_scancode(dev, code); struct key_entry unknown_ke; if (ke) { sparse_keymap_report_entry(dev, ke, value, autorelease); return true; } /* Report an unknown key event as a debugging aid */ unknown_ke.type = KE_KEY; unknown_ke.code = code; unknown_ke.keycode = KEY_UNKNOWN; sparse_keymap_report_entry(dev, &unknown_ke, value, true); return false; } EXPORT_SYMBOL(sparse_keymap_report_event);
gpl-2.0
TeamEOS/kernel_samsung_manta
kernel/notifier.c
6975
16426
#include <linux/kdebug.h> #include <linux/kprobes.h> #include <linux/export.h> #include <linux/notifier.h> #include <linux/rcupdate.h> #include <linux/vmalloc.h> #include <linux/reboot.h> /* * Notifier list for kernel code which wants to be called * at shutdown. This is used to stop any idling DMA operations * and the like. */ BLOCKING_NOTIFIER_HEAD(reboot_notifier_list); /* * Notifier chain core routines. The exported routines below * are layered on top of these, with appropriate locking added. */ static int notifier_chain_register(struct notifier_block **nl, struct notifier_block *n) { while ((*nl) != NULL) { if (n->priority > (*nl)->priority) break; nl = &((*nl)->next); } n->next = *nl; rcu_assign_pointer(*nl, n); return 0; } static int notifier_chain_cond_register(struct notifier_block **nl, struct notifier_block *n) { while ((*nl) != NULL) { if ((*nl) == n) return 0; if (n->priority > (*nl)->priority) break; nl = &((*nl)->next); } n->next = *nl; rcu_assign_pointer(*nl, n); return 0; } static int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n) { while ((*nl) != NULL) { if ((*nl) == n) { rcu_assign_pointer(*nl, n->next); return 0; } nl = &((*nl)->next); } return -ENOENT; } /** * notifier_call_chain - Informs the registered notifiers about an event. * @nl: Pointer to head of the blocking notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * @nr_to_call: Number of notifier functions to be called. Don't care * value of this parameter is -1. * @nr_calls: Records the number of notifications sent. Don't care * value of this field is NULL. * @returns: notifier_call_chain returns the value returned by the * last notifier function called. 
*/ static int __kprobes notifier_call_chain(struct notifier_block **nl, unsigned long val, void *v, int nr_to_call, int *nr_calls) { int ret = NOTIFY_DONE; struct notifier_block *nb, *next_nb; nb = rcu_dereference_raw(*nl); while (nb && nr_to_call) { next_nb = rcu_dereference_raw(nb->next); #ifdef CONFIG_DEBUG_NOTIFIERS if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) { WARN(1, "Invalid notifier called!"); nb = next_nb; continue; } #endif ret = nb->notifier_call(nb, val, v); if (nr_calls) (*nr_calls)++; if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) break; nb = next_nb; nr_to_call--; } return ret; } /* * Atomic notifier chain routines. Registration and unregistration * use a spinlock, and call_chain is synchronized by RCU (no locks). */ /** * atomic_notifier_chain_register - Add notifier to an atomic notifier chain * @nh: Pointer to head of the atomic notifier chain * @n: New entry in notifier chain * * Adds a notifier to an atomic notifier chain. * * Currently always returns zero. */ int atomic_notifier_chain_register(struct atomic_notifier_head *nh, struct notifier_block *n) { unsigned long flags; int ret; spin_lock_irqsave(&nh->lock, flags); ret = notifier_chain_register(&nh->head, n); spin_unlock_irqrestore(&nh->lock, flags); return ret; } EXPORT_SYMBOL_GPL(atomic_notifier_chain_register); /** * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain * @nh: Pointer to head of the atomic notifier chain * @n: Entry to remove from notifier chain * * Removes a notifier from an atomic notifier chain. * * Returns zero on success or %-ENOENT on failure. 
*/ int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, struct notifier_block *n) { unsigned long flags; int ret; spin_lock_irqsave(&nh->lock, flags); ret = notifier_chain_unregister(&nh->head, n); spin_unlock_irqrestore(&nh->lock, flags); synchronize_rcu(); return ret; } EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); /** * __atomic_notifier_call_chain - Call functions in an atomic notifier chain * @nh: Pointer to head of the atomic notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * @nr_to_call: See the comment for notifier_call_chain. * @nr_calls: See the comment for notifier_call_chain. * * Calls each function in a notifier chain in turn. The functions * run in an atomic context, so they must not block. * This routine uses RCU to synchronize with changes to the chain. * * If the return value of the notifier can be and'ed * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain() * will return immediately, with the return value of * the notifier function which halted execution. * Otherwise the return value is the return value * of the last notifier function called. */ int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls) { int ret; rcu_read_lock(); ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain); int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh, unsigned long val, void *v) { return __atomic_notifier_call_chain(nh, val, v, -1, NULL); } EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); /* * Blocking notifier chain routines. All access to the chain is * synchronized by an rwsem. 
*/ /** * blocking_notifier_chain_register - Add notifier to a blocking notifier chain * @nh: Pointer to head of the blocking notifier chain * @n: New entry in notifier chain * * Adds a notifier to a blocking notifier chain. * Must be called in process context. * * Currently always returns zero. */ int blocking_notifier_chain_register(struct blocking_notifier_head *nh, struct notifier_block *n) { int ret; /* * This code gets used during boot-up, when task switching is * not yet working and interrupts must remain disabled. At * such times we must not call down_write(). */ if (unlikely(system_state == SYSTEM_BOOTING)) return notifier_chain_register(&nh->head, n); down_write(&nh->rwsem); ret = notifier_chain_register(&nh->head, n); up_write(&nh->rwsem); return ret; } EXPORT_SYMBOL_GPL(blocking_notifier_chain_register); /** * blocking_notifier_chain_cond_register - Cond add notifier to a blocking notifier chain * @nh: Pointer to head of the blocking notifier chain * @n: New entry in notifier chain * * Adds a notifier to a blocking notifier chain, only if not already * present in the chain. * Must be called in process context. * * Currently always returns zero. */ int blocking_notifier_chain_cond_register(struct blocking_notifier_head *nh, struct notifier_block *n) { int ret; down_write(&nh->rwsem); ret = notifier_chain_cond_register(&nh->head, n); up_write(&nh->rwsem); return ret; } EXPORT_SYMBOL_GPL(blocking_notifier_chain_cond_register); /** * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain * @nh: Pointer to head of the blocking notifier chain * @n: Entry to remove from notifier chain * * Removes a notifier from a blocking notifier chain. * Must be called from process context. * * Returns zero on success or %-ENOENT on failure. 
*/ int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, struct notifier_block *n) { int ret; /* * This code gets used during boot-up, when task switching is * not yet working and interrupts must remain disabled. At * such times we must not call down_write(). */ if (unlikely(system_state == SYSTEM_BOOTING)) return notifier_chain_unregister(&nh->head, n); down_write(&nh->rwsem); ret = notifier_chain_unregister(&nh->head, n); up_write(&nh->rwsem); return ret; } EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); /** * __blocking_notifier_call_chain - Call functions in a blocking notifier chain * @nh: Pointer to head of the blocking notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * @nr_to_call: See comment for notifier_call_chain. * @nr_calls: See comment for notifier_call_chain. * * Calls each function in a notifier chain in turn. The functions * run in a process context, so they are allowed to block. * * If the return value of the notifier can be and'ed * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain() * will return immediately, with the return value of * the notifier function which halted execution. * Otherwise the return value is the return value * of the last notifier function called. 
*/ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls) { int ret = NOTIFY_DONE; /* * We check the head outside the lock, but if this access is * racy then it does not matter what the result of the test * is, we re-check the list after having taken the lock anyway: */ if (rcu_dereference_raw(nh->head)) { down_read(&nh->rwsem); ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); up_read(&nh->rwsem); } return ret; } EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain); int blocking_notifier_call_chain(struct blocking_notifier_head *nh, unsigned long val, void *v) { return __blocking_notifier_call_chain(nh, val, v, -1, NULL); } EXPORT_SYMBOL_GPL(blocking_notifier_call_chain); /* * Raw notifier chain routines. There is no protection; * the caller must provide it. Use at your own risk! */ /** * raw_notifier_chain_register - Add notifier to a raw notifier chain * @nh: Pointer to head of the raw notifier chain * @n: New entry in notifier chain * * Adds a notifier to a raw notifier chain. * All locking must be provided by the caller. * * Currently always returns zero. */ int raw_notifier_chain_register(struct raw_notifier_head *nh, struct notifier_block *n) { return notifier_chain_register(&nh->head, n); } EXPORT_SYMBOL_GPL(raw_notifier_chain_register); /** * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain * @nh: Pointer to head of the raw notifier chain * @n: Entry to remove from notifier chain * * Removes a notifier from a raw notifier chain. * All locking must be provided by the caller. * * Returns zero on success or %-ENOENT on failure. 
*/ int raw_notifier_chain_unregister(struct raw_notifier_head *nh, struct notifier_block *n) { return notifier_chain_unregister(&nh->head, n); } EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); /** * __raw_notifier_call_chain - Call functions in a raw notifier chain * @nh: Pointer to head of the raw notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * @nr_to_call: See comment for notifier_call_chain. * @nr_calls: See comment for notifier_call_chain * * Calls each function in a notifier chain in turn. The functions * run in an undefined context. * All locking must be provided by the caller. * * If the return value of the notifier can be and'ed * with %NOTIFY_STOP_MASK then raw_notifier_call_chain() * will return immediately, with the return value of * the notifier function which halted execution. * Otherwise the return value is the return value * of the last notifier function called. */ int __raw_notifier_call_chain(struct raw_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls) { return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); } EXPORT_SYMBOL_GPL(__raw_notifier_call_chain); int raw_notifier_call_chain(struct raw_notifier_head *nh, unsigned long val, void *v) { return __raw_notifier_call_chain(nh, val, v, -1, NULL); } EXPORT_SYMBOL_GPL(raw_notifier_call_chain); /* * SRCU notifier chain routines. Registration and unregistration * use a mutex, and call_chain is synchronized by SRCU (no locks). */ /** * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain * @nh: Pointer to head of the SRCU notifier chain * @n: New entry in notifier chain * * Adds a notifier to an SRCU notifier chain. * Must be called in process context. * * Currently always returns zero. 
*/ int srcu_notifier_chain_register(struct srcu_notifier_head *nh, struct notifier_block *n) { int ret; /* * This code gets used during boot-up, when task switching is * not yet working and interrupts must remain disabled. At * such times we must not call mutex_lock(). */ if (unlikely(system_state == SYSTEM_BOOTING)) return notifier_chain_register(&nh->head, n); mutex_lock(&nh->mutex); ret = notifier_chain_register(&nh->head, n); mutex_unlock(&nh->mutex); return ret; } EXPORT_SYMBOL_GPL(srcu_notifier_chain_register); /** * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain * @nh: Pointer to head of the SRCU notifier chain * @n: Entry to remove from notifier chain * * Removes a notifier from an SRCU notifier chain. * Must be called from process context. * * Returns zero on success or %-ENOENT on failure. */ int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, struct notifier_block *n) { int ret; /* * This code gets used during boot-up, when task switching is * not yet working and interrupts must remain disabled. At * such times we must not call mutex_lock(). */ if (unlikely(system_state == SYSTEM_BOOTING)) return notifier_chain_unregister(&nh->head, n); mutex_lock(&nh->mutex); ret = notifier_chain_unregister(&nh->head, n); mutex_unlock(&nh->mutex); synchronize_srcu(&nh->srcu); return ret; } EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister); /** * __srcu_notifier_call_chain - Call functions in an SRCU notifier chain * @nh: Pointer to head of the SRCU notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * @nr_to_call: See comment for notifier_call_chain. * @nr_calls: See comment for notifier_call_chain * * Calls each function in a notifier chain in turn. The functions * run in a process context, so they are allowed to block. 
* * If the return value of the notifier can be and'ed * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain() * will return immediately, with the return value of * the notifier function which halted execution. * Otherwise the return value is the return value * of the last notifier function called. */ int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls) { int ret; int idx; idx = srcu_read_lock(&nh->srcu); ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); srcu_read_unlock(&nh->srcu, idx); return ret; } EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain); int srcu_notifier_call_chain(struct srcu_notifier_head *nh, unsigned long val, void *v) { return __srcu_notifier_call_chain(nh, val, v, -1, NULL); } EXPORT_SYMBOL_GPL(srcu_notifier_call_chain); /** * srcu_init_notifier_head - Initialize an SRCU notifier head * @nh: Pointer to head of the srcu notifier chain * * Unlike other sorts of notifier heads, SRCU notifier heads require * dynamic initialization. Be sure to call this routine before * calling any of the other SRCU notifier routines for this head. * * If an SRCU notifier head is deallocated, it must first be cleaned * up by calling srcu_cleanup_notifier_head(). Otherwise the head's * per-cpu data (used by the SRCU mechanism) will leak. 
*/ void srcu_init_notifier_head(struct srcu_notifier_head *nh) { mutex_init(&nh->mutex); if (init_srcu_struct(&nh->srcu) < 0) BUG(); nh->head = NULL; } EXPORT_SYMBOL_GPL(srcu_init_notifier_head); static ATOMIC_NOTIFIER_HEAD(die_chain); int notrace __kprobes notify_die(enum die_val val, const char *str, struct pt_regs *regs, long err, int trap, int sig) { struct die_args args = { .regs = regs, .str = str, .err = err, .trapnr = trap, .signr = sig, }; return atomic_notifier_call_chain(&die_chain, val, &args); } int register_die_notifier(struct notifier_block *nb) { vmalloc_sync_all(); return atomic_notifier_chain_register(&die_chain, nb); } EXPORT_SYMBOL_GPL(register_die_notifier); int unregister_die_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&die_chain, nb); } EXPORT_SYMBOL_GPL(unregister_die_notifier);
gpl-2.0
SVMP/kernel
kernel/notifier.c
6975
16426
#include <linux/kdebug.h> #include <linux/kprobes.h> #include <linux/export.h> #include <linux/notifier.h> #include <linux/rcupdate.h> #include <linux/vmalloc.h> #include <linux/reboot.h> /* * Notifier list for kernel code which wants to be called * at shutdown. This is used to stop any idling DMA operations * and the like. */ BLOCKING_NOTIFIER_HEAD(reboot_notifier_list); /* * Notifier chain core routines. The exported routines below * are layered on top of these, with appropriate locking added. */ static int notifier_chain_register(struct notifier_block **nl, struct notifier_block *n) { while ((*nl) != NULL) { if (n->priority > (*nl)->priority) break; nl = &((*nl)->next); } n->next = *nl; rcu_assign_pointer(*nl, n); return 0; } static int notifier_chain_cond_register(struct notifier_block **nl, struct notifier_block *n) { while ((*nl) != NULL) { if ((*nl) == n) return 0; if (n->priority > (*nl)->priority) break; nl = &((*nl)->next); } n->next = *nl; rcu_assign_pointer(*nl, n); return 0; } static int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n) { while ((*nl) != NULL) { if ((*nl) == n) { rcu_assign_pointer(*nl, n->next); return 0; } nl = &((*nl)->next); } return -ENOENT; } /** * notifier_call_chain - Informs the registered notifiers about an event. * @nl: Pointer to head of the blocking notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * @nr_to_call: Number of notifier functions to be called. Don't care * value of this parameter is -1. * @nr_calls: Records the number of notifications sent. Don't care * value of this field is NULL. * @returns: notifier_call_chain returns the value returned by the * last notifier function called. 
*/ static int __kprobes notifier_call_chain(struct notifier_block **nl, unsigned long val, void *v, int nr_to_call, int *nr_calls) { int ret = NOTIFY_DONE; struct notifier_block *nb, *next_nb; nb = rcu_dereference_raw(*nl); while (nb && nr_to_call) { next_nb = rcu_dereference_raw(nb->next); #ifdef CONFIG_DEBUG_NOTIFIERS if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) { WARN(1, "Invalid notifier called!"); nb = next_nb; continue; } #endif ret = nb->notifier_call(nb, val, v); if (nr_calls) (*nr_calls)++; if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) break; nb = next_nb; nr_to_call--; } return ret; } /* * Atomic notifier chain routines. Registration and unregistration * use a spinlock, and call_chain is synchronized by RCU (no locks). */ /** * atomic_notifier_chain_register - Add notifier to an atomic notifier chain * @nh: Pointer to head of the atomic notifier chain * @n: New entry in notifier chain * * Adds a notifier to an atomic notifier chain. * * Currently always returns zero. */ int atomic_notifier_chain_register(struct atomic_notifier_head *nh, struct notifier_block *n) { unsigned long flags; int ret; spin_lock_irqsave(&nh->lock, flags); ret = notifier_chain_register(&nh->head, n); spin_unlock_irqrestore(&nh->lock, flags); return ret; } EXPORT_SYMBOL_GPL(atomic_notifier_chain_register); /** * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain * @nh: Pointer to head of the atomic notifier chain * @n: Entry to remove from notifier chain * * Removes a notifier from an atomic notifier chain. * * Returns zero on success or %-ENOENT on failure. 
*/ int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, struct notifier_block *n) { unsigned long flags; int ret; spin_lock_irqsave(&nh->lock, flags); ret = notifier_chain_unregister(&nh->head, n); spin_unlock_irqrestore(&nh->lock, flags); synchronize_rcu(); return ret; } EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); /** * __atomic_notifier_call_chain - Call functions in an atomic notifier chain * @nh: Pointer to head of the atomic notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * @nr_to_call: See the comment for notifier_call_chain. * @nr_calls: See the comment for notifier_call_chain. * * Calls each function in a notifier chain in turn. The functions * run in an atomic context, so they must not block. * This routine uses RCU to synchronize with changes to the chain. * * If the return value of the notifier can be and'ed * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain() * will return immediately, with the return value of * the notifier function which halted execution. * Otherwise the return value is the return value * of the last notifier function called. */ int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls) { int ret; rcu_read_lock(); ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain); int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh, unsigned long val, void *v) { return __atomic_notifier_call_chain(nh, val, v, -1, NULL); } EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); /* * Blocking notifier chain routines. All access to the chain is * synchronized by an rwsem. 
*/ /** * blocking_notifier_chain_register - Add notifier to a blocking notifier chain * @nh: Pointer to head of the blocking notifier chain * @n: New entry in notifier chain * * Adds a notifier to a blocking notifier chain. * Must be called in process context. * * Currently always returns zero. */ int blocking_notifier_chain_register(struct blocking_notifier_head *nh, struct notifier_block *n) { int ret; /* * This code gets used during boot-up, when task switching is * not yet working and interrupts must remain disabled. At * such times we must not call down_write(). */ if (unlikely(system_state == SYSTEM_BOOTING)) return notifier_chain_register(&nh->head, n); down_write(&nh->rwsem); ret = notifier_chain_register(&nh->head, n); up_write(&nh->rwsem); return ret; } EXPORT_SYMBOL_GPL(blocking_notifier_chain_register); /** * blocking_notifier_chain_cond_register - Cond add notifier to a blocking notifier chain * @nh: Pointer to head of the blocking notifier chain * @n: New entry in notifier chain * * Adds a notifier to a blocking notifier chain, only if not already * present in the chain. * Must be called in process context. * * Currently always returns zero. */ int blocking_notifier_chain_cond_register(struct blocking_notifier_head *nh, struct notifier_block *n) { int ret; down_write(&nh->rwsem); ret = notifier_chain_cond_register(&nh->head, n); up_write(&nh->rwsem); return ret; } EXPORT_SYMBOL_GPL(blocking_notifier_chain_cond_register); /** * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain * @nh: Pointer to head of the blocking notifier chain * @n: Entry to remove from notifier chain * * Removes a notifier from a blocking notifier chain. * Must be called from process context. * * Returns zero on success or %-ENOENT on failure. 
*/ int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, struct notifier_block *n) { int ret; /* * This code gets used during boot-up, when task switching is * not yet working and interrupts must remain disabled. At * such times we must not call down_write(). */ if (unlikely(system_state == SYSTEM_BOOTING)) return notifier_chain_unregister(&nh->head, n); down_write(&nh->rwsem); ret = notifier_chain_unregister(&nh->head, n); up_write(&nh->rwsem); return ret; } EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); /** * __blocking_notifier_call_chain - Call functions in a blocking notifier chain * @nh: Pointer to head of the blocking notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * @nr_to_call: See comment for notifier_call_chain. * @nr_calls: See comment for notifier_call_chain. * * Calls each function in a notifier chain in turn. The functions * run in a process context, so they are allowed to block. * * If the return value of the notifier can be and'ed * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain() * will return immediately, with the return value of * the notifier function which halted execution. * Otherwise the return value is the return value * of the last notifier function called. 
 */
int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
				   unsigned long val, void *v,
				   int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_DONE;

	/*
	 * We check the head outside the lock, but if this access is
	 * racy then it does not matter what the result of the test
	 * is, we re-check the list after having taken the lock anyway:
	 */
	if (rcu_dereference_raw(nh->head)) {
		down_read(&nh->rwsem);
		ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
					nr_calls);
		up_read(&nh->rwsem);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);

/* Convenience wrapper: call every notifier on the chain (nr_to_call = -1)
 * and do not report how many were actually invoked. */
int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v)
{
	return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);

/*
 *	Raw notifier chain routines.  There is no protection;
 *	the caller must provide it.  Use at your own risk!
 */

/**
 *	raw_notifier_chain_register - Add notifier to a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to a raw notifier chain.
 *	All locking must be provided by the caller.
 *
 *	Currently always returns zero.
 */
int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_register(&nh->head, n);
}
EXPORT_SYMBOL_GPL(raw_notifier_chain_register);

/**
 *	raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from a raw notifier chain.
 *	All locking must be provided by the caller.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_unregister(&nh->head, n);
}
EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);

/**
 *	__raw_notifier_call_chain - Call functions in a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in an undefined context.
 *	All locking must be provided by the caller.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */
int __raw_notifier_call_chain(struct raw_notifier_head *nh,
			      unsigned long val, void *v,
			      int nr_to_call, int *nr_calls)
{
	return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
}
EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);

/* Convenience wrapper: call every notifier on the raw chain. */
int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v)
{
	return __raw_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(raw_notifier_call_chain);

/*
 *	SRCU notifier chain routines.    Registration and unregistration
 *	use a mutex, and call_chain is synchronized by SRCU (no locks).
 */

/**
 *	srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to an SRCU notifier chain.
 *	Must be called in process context.
 *
 *	Currently always returns zero.
 */
int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_register(&nh->head, n);
	mutex_unlock(&nh->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);

/**
 *	srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from an SRCU notifier chain.
 *	Must be called from process context.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_unregister(&nh->head, n);
	mutex_unlock(&nh->mutex);
	/* Wait for all in-flight SRCU readers of the chain to drain before
	 * the caller may free the removed notifier_block. */
	synchronize_srcu(&nh->srcu);
	return ret;
}
EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);

/**
 *	__srcu_notifier_call_chain - Call functions in an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in a process context, so they are allowed to block.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */
int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
			       unsigned long val, void *v,
			       int nr_to_call, int *nr_calls)
{
	int ret;
	int idx;

	idx = srcu_read_lock(&nh->srcu);
	ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
	srcu_read_unlock(&nh->srcu, idx);
	return ret;
}
EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);

/* Convenience wrapper: call every notifier on the SRCU chain. */
int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
		unsigned long val, void *v)
{
	return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);

/**
 *	srcu_init_notifier_head - Initialize an SRCU notifier head
 *	@nh: Pointer to head of the srcu notifier chain
 *
 *	Unlike other sorts of notifier heads, SRCU notifier heads require
 *	dynamic initialization.  Be sure to call this routine before
 *	calling any of the other SRCU notifier routines for this head.
 *
 *	If an SRCU notifier head is deallocated, it must first be cleaned
 *	up by calling srcu_cleanup_notifier_head().  Otherwise the head's
 *	per-cpu data (used by the SRCU mechanism) will leak.
 */
void srcu_init_notifier_head(struct srcu_notifier_head *nh)
{
	mutex_init(&nh->mutex);
	if (init_srcu_struct(&nh->srcu) < 0)
		BUG();
	nh->head = NULL;
}
EXPORT_SYMBOL_GPL(srcu_init_notifier_head);

/* Chain of handlers notified on fatal traps/exceptions (die events). */
static ATOMIC_NOTIFIER_HEAD(die_chain);

/* Invoke the die-notifier chain.  Atomic chain, so this is safe to call
 * from trap/exception context; notrace/__kprobes keep the notifier path
 * itself out of tracing and kprobes recursion. */
int notrace __kprobes notify_die(enum die_val val, const char *str,
	       struct pt_regs *regs, long err, int trap, int sig)
{
	struct die_args args = {
		.regs	= regs,
		.str	= str,
		.err	= err,
		.trapnr	= trap,
		.signr	= sig,

	};
	return atomic_notifier_call_chain(&die_chain, val, &args);
}

int register_die_notifier(struct notifier_block *nb)
{
	/* NOTE(review): vmalloc_sync_all() is called before registration —
	 * presumably so the notifier can run safely in fault context without
	 * triggering a vmalloc-area page fault itself; confirm against arch
	 * fault-handling docs. */
	vmalloc_sync_all();
	return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL_GPL(register_die_notifier);

int unregister_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&die_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_die_notifier);
gpl-2.0
Loller79/Solid_Kernel-GEEHRC
drivers/net/wireless/hostap/hostap_80211_tx.c
7743
16387
#include <linux/slab.h>
#include <linux/export.h>
#include "hostap_80211.h"
#include "hostap_common.h"
#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"

/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
static unsigned char rfc1042_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
static unsigned char bridge_tunnel_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
/* No encapsulation header if EtherType < 0x600 (=length) */

/* Debug helper: dump the 802.11 header of an outgoing frame to the kernel
 * log.  Tolerates short skbs by printing progressively less detail. */
void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	u16 fc;

	hdr = (struct ieee80211_hdr *) skb->data;

	printk(KERN_DEBUG "%s: TX len=%d jiffies=%ld\n",
	       name, skb->len, jiffies);

	if (skb->len < 2)
		return;

	fc = le16_to_cpu(hdr->frame_control);
	printk(KERN_DEBUG " FC=0x%04x (type=%d:%d)%s%s",
	       fc, (fc & IEEE80211_FCTL_FTYPE) >> 2,
	       (fc & IEEE80211_FCTL_STYPE) >> 4,
	       fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
	       fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");

	if (skb->len < IEEE80211_DATA_HDR3_LEN) {
		printk("\n");
		return;
	}

	printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id),
	       le16_to_cpu(hdr->seq_ctrl));

	printk(KERN_DEBUG " A1=%pM", hdr->addr1);
	printk(" A2=%pM", hdr->addr2);
	printk(" A3=%pM", hdr->addr3);
	if (skb->len >= 30)
		printk(" A4=%pM", hdr->addr4);
	printk("\n");
}

/* hard_start_xmit function for data interfaces (wlan#, wlan#wds#, wlan#sta)
 * Convert Ethernet header into a suitable IEEE 802.11 header depending on
 * device configuration. */
netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct hostap_interface *iface;
	local_info_t *local;
	int need_headroom, need_tailroom = 0;
	struct ieee80211_hdr hdr;
	u16 fc, ethertype = 0;
	enum {
		WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME
	} use_wds = WDS_NO;
	u8 *encaps_data;
	int hdr_len, encaps_len, skip_header_bytes;
	int to_assoc_ap = 0;
	struct hostap_skb_tx_data *meta;

	iface = netdev_priv(dev);
	local = iface->local;

	/* Need at least a full Ethernet header to re-encapsulate. */
	if (skb->len < ETH_HLEN) {
		printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb "
		       "(len=%d)\n", dev->name, skb->len);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Decide on WDS framing based on which virtual device this is. */
	if (local->ddev != dev) {
		use_wds = (local->iw_mode == IW_MODE_MASTER &&
			   !(local->wds_type & HOSTAP_WDS_STANDARD_FRAME)) ?
			WDS_OWN_FRAME : WDS_COMPLIANT_FRAME;
		if (dev == local->stadev) {
			to_assoc_ap = 1;
			use_wds = WDS_NO;
		} else if (dev == local->apdev) {
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "AP device with Ethernet net dev\n", dev->name);
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	} else {
		if (local->iw_mode == IW_MODE_REPEAT) {
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "non-WDS link in Repeater mode\n", dev->name);
			kfree_skb(skb);
			return NETDEV_TX_OK;
		} else if (local->iw_mode == IW_MODE_INFRA &&
			   (local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
			   memcmp(skb->data + ETH_ALEN, dev->dev_addr,
				  ETH_ALEN) != 0) {
			/* AP client mode: send frames with foreign src addr
			 * using 4-addr WDS frames */
			use_wds = WDS_COMPLIANT_FRAME;
		}
	}

	/* Incoming skb->data: dst_addr[6], src_addr[6], proto[2], payload
	 * ==>
	 * Prism2 TX frame with 802.11 header:
	 * txdesc (address order depending on used mode; includes dst_addr and
	 * src_addr), possible encapsulation (RFC1042/Bridge-Tunnel;
	 * proto[2], payload {, possible addr4[6]} */

	ethertype = (skb->data[12] << 8) | skb->data[13];

	memset(&hdr, 0, sizeof(hdr));

	/* Length of data after IEEE 802.11 header */
	encaps_data = NULL;
	encaps_len = 0;
	skip_header_bytes = ETH_HLEN;
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
		/* Bridge-Tunnel encapsulation per IEEE 802.1H */
		encaps_data = bridge_tunnel_header;
		encaps_len = sizeof(bridge_tunnel_header);
		skip_header_bytes -= 2;
	} else if (ethertype >= 0x600) {
		/* RFC 1042 SNAP encapsulation for ordinary EtherTypes */
		encaps_data = rfc1042_header;
		encaps_len = sizeof(rfc1042_header);
		skip_header_bytes -= 2;
	}

	fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
	hdr_len = IEEE80211_DATA_HDR3_LEN;

	if (use_wds != WDS_NO) {
		/* Note! Prism2 station firmware has problems with sending real
		 * 802.11 frames with four addresses; until these problems can
		 * be fixed or worked around, 4-addr frames needed for WDS are
		 * using incompatible format: FromDS flag is not set and the
		 * fourth address is added after the frame payload; it is
		 * assumed, that the receiving station knows how to handle this
		 * frame format */

		if (use_wds == WDS_COMPLIANT_FRAME) {
			fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
			/* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA,
			 * Addr4 = SA */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			hdr_len += ETH_ALEN;
		} else {
			/* bogus 4-addr format to workaround Prism2 station
			 * f/w bug */
			fc |= IEEE80211_FCTL_TODS;
			/* From DS: Addr1 = DA (used as RA),
			 * Addr2 = BSSID (used as TA), Addr3 = SA (used as DA),
			 */

			/* SA from skb->data + ETH_ALEN will be added after
			 * frame payload; use hdr.addr4 as a temporary buffer
			 */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			need_tailroom += ETH_ALEN;
		}

		/* send broadcast and multicast frames to broadcast RA, if
		 * configured; otherwise, use unicast RA of the WDS link */
		if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) &&
		    skb->data[0] & 0x01)
			memset(&hdr.addr1, 0xff, ETH_ALEN);
		else if (iface->type == HOSTAP_INTERFACE_WDS)
			memcpy(&hdr.addr1, iface->u.wds.remote_addr,
			       ETH_ALEN);
		else
			memcpy(&hdr.addr1, local->bssid, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) {
		fc |= IEEE80211_FCTL_FROMDS;
		/* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3,
						 ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) {
		fc |= IEEE80211_FCTL_TODS;
		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
		memcpy(&hdr.addr1, to_assoc_ap ?
		       local->assoc_ap_addr : local->bssid, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_ADHOC) {
		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		memcpy(&hdr.addr3, local->bssid, ETH_ALEN);
	}

	hdr.frame_control = cpu_to_le16(fc);

	/* Drop the Ethernet header (partially, when SNAP keeps the proto
	 * field) and make sure the skb has room for the new 802.11 header
	 * in front and the possible WDS addr4 at the tail. */
	skb_pull(skb, skip_header_bytes);
	need_headroom = local->func->need_tx_headroom + hdr_len + encaps_len;
	if (skb_tailroom(skb) < need_tailroom) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		if (pskb_expand_head(skb, need_headroom, need_tailroom,
				     GFP_ATOMIC)) {
			kfree_skb(skb);
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	} else if (skb_headroom(skb) < need_headroom) {
		struct sk_buff *tmp = skb;
		skb = skb_realloc_headroom(skb, need_headroom);
		kfree_skb(tmp);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	}

	if (encaps_data)
		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
	memcpy(skb_push(skb, hdr_len), &hdr, hdr_len);
	if (use_wds == WDS_OWN_FRAME) {
		/* Append the stashed SA after the payload (Prism2 f/w
		 * workaround format, see comment above). */
		memcpy(skb_put(skb, ETH_ALEN), &hdr.addr4, ETH_ALEN);
	}

	iface->stats.tx_packets++;
	iface->stats.tx_bytes += skb->len;

	skb_reset_mac_header(skb);
	meta = (struct hostap_skb_tx_data *) skb->cb;
	memset(meta, 0, sizeof(*meta));
	meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
	if (use_wds)
		meta->flags |= HOSTAP_TX_FLAGS_WDS;
	meta->ethertype = ethertype;
	meta->iface = iface;

	/* Send IEEE 802.11 encapsulated frame using the master radio device */
	skb->dev = local->dev;
	dev_queue_xmit(skb);
	return NETDEV_TX_OK;
}

/* hard_start_xmit function for hostapd wlan#ap interfaces */
netdev_tx_t hostap_mgmt_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct hostap_interface *iface;
	local_info_t *local;
	struct hostap_skb_tx_data *meta;
	struct ieee80211_hdr *hdr;
	u16 fc;

	iface = netdev_priv(dev);
	local = iface->local;

	if (skb->len < 10) {
		printk(KERN_DEBUG "%s: hostap_mgmt_start_xmit: short skb "
		       "(len=%d)\n", dev->name, skb->len);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	iface->stats.tx_packets++;
	iface->stats.tx_bytes += skb->len;

	meta = (struct hostap_skb_tx_data *) skb->cb;
	memset(meta, 0, sizeof(*meta));
	meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
	meta->iface = iface;

	/* For data frames, record the EtherType from the SNAP header so the
	 * master device can apply per-protocol handling (e.g. EAPOL). */
	if (skb->len >= IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header) + 2) {
		hdr = (struct ieee80211_hdr *) skb->data;
		fc = le16_to_cpu(hdr->frame_control);
		if (ieee80211_is_data(hdr->frame_control) &&
		    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DATA) {
			u8 *pos = &skb->data[IEEE80211_DATA_HDR3_LEN +
					     sizeof(rfc1042_header)];
			meta->ethertype = (pos[0] << 8) | pos[1];
		}
	}

	/* Send IEEE 802.11 encapsulated frame using the master radio device */
	skb->dev = local->dev;
	dev_queue_xmit(skb);
	return NETDEV_TX_OK;
}

/* Called only from software IRQ */
/* Encrypt an outgoing 802.11 frame in place.  Consumes the skb on error
 * (returns NULL); on success returns the (possibly reallocated) skb. */
static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
					  struct lib80211_crypt_data *crypt)
{
	struct hostap_interface *iface;
	local_info_t *local;
	struct ieee80211_hdr *hdr;
	int prefix_len, postfix_len, hdr_len, res;

	iface = netdev_priv(skb->dev);
	local = iface->local;

	if (skb->len < IEEE80211_DATA_HDR3_LEN) {
		kfree_skb(skb);
		return NULL;
	}

	/* During TKIP countermeasures all TKIP TX traffic is dropped. */
	if (local->tkip_countermeasures &&
	    strcmp(crypt->ops->name, "TKIP") == 0) {
		hdr = (struct ieee80211_hdr *) skb->data;
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
			       "TX packet to %pM\n",
			       local->dev->name, hdr->addr1);
		}
		kfree_skb(skb);
		return NULL;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Make room for the cipher's per-frame prefix (IV etc.) and
	 * postfix (ICV/MIC etc.). */
	prefix_len = crypt->ops->extra_mpdu_prefix_len +
		crypt->ops->extra_msdu_prefix_len;
	postfix_len = crypt->ops->extra_mpdu_postfix_len +
		crypt->ops->extra_msdu_postfix_len;
	if ((skb_headroom(skb) < prefix_len ||
	     skb_tailroom(skb) < postfix_len) &&
	    pskb_expand_head(skb, prefix_len, postfix_len, GFP_ATOMIC)) {
		kfree_skb(skb);
		return NULL;
	}

	hdr = (struct ieee80211_hdr *) skb->data;
	hdr_len = hostap_80211_get_hdrlen(hdr->frame_control);

	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here. */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(skb, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(skb, hdr_len, crypt->priv);
	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}

/* hard_start_xmit function for master radio interface wifi#.
 * AP processing (TX rate control, power save buffering, etc.).
 * Use hardware TX function to send the frame. */
netdev_tx_t hostap_master_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct hostap_interface *iface;
	local_info_t *local;
	netdev_tx_t ret = NETDEV_TX_BUSY;
	u16 fc;
	struct hostap_tx_data tx;
	ap_tx_ret tx_ret;
	struct hostap_skb_tx_data *meta;
	int no_encrypt = 0;
	struct ieee80211_hdr *hdr;

	iface = netdev_priv(dev);
	local = iface->local;

	tx.skb = skb;
	tx.sta_ptr = NULL;

	/* Only frames that went through the data/mgmt xmit paths (which
	 * stamp skb->cb with this magic) are accepted here. */
	meta = (struct hostap_skb_tx_data *) skb->cb;
	if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) {
		printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, "
		       "expected 0x%08x)\n",
		       dev->name, meta->magic, HOSTAP_SKB_TX_DATA_MAGIC);
		ret = NETDEV_TX_OK;
		iface->stats.tx_dropped++;
		goto fail;
	}

	if (local->host_encrypt) {
		/* Set crypt to default algorithm and key; will be replaced in
		 * AP code if STA has own alg/key */
		tx.crypt = local->crypt_info.crypt[local->crypt_info.tx_keyidx];
		tx.host_encrypt = 1;
	} else {
		tx.crypt = NULL;
		tx.host_encrypt = 0;
	}

	if (skb->len < 24) {
		printk(KERN_DEBUG "%s: hostap_master_start_xmit: short skb "
		       "(len=%d)\n", dev->name, skb->len);
		ret = NETDEV_TX_OK;
		iface->stats.tx_dropped++;
		goto fail;
	}

	/* FIX (?):
	 * Wi-Fi 802.11b test plan suggests that AP should ignore power save
	 * bit in authentication and (re)association frames and assume that
	 * STA remains awake for the response. */
	tx_ret = hostap_handle_sta_tx(local, &tx);
	/* hostap_handle_sta_tx() may have replaced the skb */
	skb = tx.skb;
	meta = (struct hostap_skb_tx_data *) skb->cb;
	hdr = (struct ieee80211_hdr *) skb->data;
	fc = le16_to_cpu(hdr->frame_control);
	switch (tx_ret) {
	case AP_TX_CONTINUE:
		break;
	case AP_TX_CONTINUE_NOT_AUTHORIZED:
		if (local->ieee_802_1x &&
		    ieee80211_is_data(hdr->frame_control) &&
		    meta->ethertype != ETH_P_PAE &&
		    !(meta->flags & HOSTAP_TX_FLAGS_WDS)) {
			printk(KERN_DEBUG "%s: dropped frame to unauthorized "
			       "port (IEEE 802.1X): ethertype=0x%04x\n",
			       dev->name, meta->ethertype);
			hostap_dump_tx_80211(dev->name, skb);

			ret = NETDEV_TX_OK; /* drop packet */
			iface->stats.tx_dropped++;
			goto fail;
		}
		break;
	case AP_TX_DROP:
		ret = NETDEV_TX_OK; /* drop packet */
		iface->stats.tx_dropped++;
		goto fail;
	case AP_TX_RETRY:
		goto fail;
	case AP_TX_BUFFERED:
		/* do not free skb here, it will be freed when the
		 * buffered frame is sent/timed out */
		ret = NETDEV_TX_OK;
		goto tx_exit;
	}

	/* Request TX callback if protocol version is 2 in 802.11 header;
	 * this version 2 is a special case used between hostapd and kernel
	 * driver */
	if (((fc & IEEE80211_FCTL_VERS) == BIT(1)) && local->ap &&
	    local->ap->tx_callback_idx && meta->tx_cb_idx == 0) {
		meta->tx_cb_idx = local->ap->tx_callback_idx;

		/* remove special version from the frame header */
		fc &= ~IEEE80211_FCTL_VERS;
		hdr->frame_control = cpu_to_le16(fc);
	}

	if (!ieee80211_is_data(hdr->frame_control)) {
		no_encrypt = 1;
		tx.crypt = NULL;
	}

	if (local->ieee_802_1x && meta->ethertype == ETH_P_PAE && tx.crypt &&
	    !(fc & IEEE80211_FCTL_PROTECTED)) {
		no_encrypt = 1;
		PDEBUG(DEBUG_EXTRA2, "%s: TX: IEEE 802.1X - passing "
		       "unencrypted EAPOL frame\n", dev->name);
		tx.crypt = NULL; /* no encryption for IEEE 802.1X frames */
	}

	if (tx.crypt && (!tx.crypt->ops || !tx.crypt->ops->encrypt_mpdu))
		tx.crypt = NULL;
	else if ((tx.crypt ||
		 local->crypt_info.crypt[local->crypt_info.tx_keyidx]) &&
		 !no_encrypt) {
		/* Add ISWEP flag both for firmware and host based encryption
		 */
		fc |= IEEE80211_FCTL_PROTECTED;
		hdr->frame_control = cpu_to_le16(fc);
	} else if (local->drop_unencrypted &&
		   ieee80211_is_data(hdr->frame_control) &&
		   meta->ethertype != ETH_P_PAE) {
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s: dropped unencrypted TX data "
			       "frame (drop_unencrypted=1)\n", dev->name);
		}
		iface->stats.tx_dropped++;
		ret = NETDEV_TX_OK;
		goto fail;
	}

	if (tx.crypt) {
		/* hostap_tx_encrypt() consumes the skb on failure and may
		 * reallocate it on success, so re-read meta afterwards. */
		skb = hostap_tx_encrypt(skb, tx.crypt);
		if (skb == NULL) {
			printk(KERN_DEBUG "%s: TX - encryption failed\n",
			       dev->name);
			ret = NETDEV_TX_OK;
			goto fail;
		}
		meta = (struct hostap_skb_tx_data *) skb->cb;
		if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) {
			printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, "
			       "expected 0x%08x) after hostap_tx_encrypt\n",
			       dev->name, meta->magic,
			       HOSTAP_SKB_TX_DATA_MAGIC);
			ret = NETDEV_TX_OK;
			iface->stats.tx_dropped++;
			goto fail;
		}
	}

	if (local->func->tx == NULL || local->func->tx(skb, dev)) {
		ret = NETDEV_TX_OK;
		iface->stats.tx_dropped++;
	} else {
		ret = NETDEV_TX_OK;
		iface->stats.tx_packets++;
		iface->stats.tx_bytes += skb->len;
	}

 fail:
	if (ret == NETDEV_TX_OK && skb)
		dev_kfree_skb(skb);
 tx_exit:
	if (tx.sta_ptr)
		hostap_handle_sta_release(tx.sta_ptr);
	return ret;
}
EXPORT_SYMBOL(hostap_master_start_xmit);
gpl-2.0
MetalPhoenix45/SmoothGKernel
net/ipv4/tcp_hybla.c
7999
5004
/* * TCP HYBLA * * TCP-HYBLA Congestion control algorithm, based on: * C.Caini, R.Firrincieli, "TCP-Hybla: A TCP Enhancement * for Heterogeneous Networks", * International Journal on satellite Communications, * September 2004 * Daniele Lacamera * root at danielinux.net */ #include <linux/module.h> #include <net/tcp.h> /* Tcp Hybla structure. */ struct hybla { u8 hybla_en; u32 snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */ u32 rho; /* Rho parameter, integer part */ u32 rho2; /* Rho * Rho, integer part */ u32 rho_3ls; /* Rho parameter, <<3 */ u32 rho2_7ls; /* Rho^2, <<7 */ u32 minrtt; /* Minimum smoothed round trip time value seen */ }; /* Hybla reference round trip time (default= 1/40 sec = 25 ms), expressed in jiffies */ static int rtt0 = 25; module_param(rtt0, int, 0644); MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)"); /* This is called to refresh values for hybla parameters */ static inline void hybla_recalc_param (struct sock *sk) { struct hybla *ca = inet_csk_ca(sk); ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8); ca->rho = ca->rho_3ls >> 3; ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1; ca->rho2 = ca->rho2_7ls >>7; } static void hybla_init(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct hybla *ca = inet_csk_ca(sk); ca->rho = 0; ca->rho2 = 0; ca->rho_3ls = 0; ca->rho2_7ls = 0; ca->snd_cwnd_cents = 0; ca->hybla_en = 1; tp->snd_cwnd = 2; tp->snd_cwnd_clamp = 65535; /* 1st Rho measurement based on initial srtt */ hybla_recalc_param(sk); /* set minimum rtt as this is the 1st ever seen */ ca->minrtt = tp->srtt; tp->snd_cwnd = ca->rho; } static void hybla_state(struct sock *sk, u8 ca_state) { struct hybla *ca = inet_csk_ca(sk); ca->hybla_en = (ca_state == TCP_CA_Open); } static inline u32 hybla_fraction(u32 odds) { static const u32 fractions[] = { 128, 139, 152, 165, 181, 197, 215, 234, }; return (odds < ARRAY_SIZE(fractions)) ? fractions[odds] : 128; } /* TCP Hybla main routine. 
* This is the algorithm behavior: * o Recalc Hybla parameters if min_rtt has changed * o Give cwnd a new value based on the model proposed * o remember increments <1 */ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) { struct tcp_sock *tp = tcp_sk(sk); struct hybla *ca = inet_csk_ca(sk); u32 increment, odd, rho_fractions; int is_slowstart = 0; /* Recalculate rho only if this srtt is the lowest */ if (tp->srtt < ca->minrtt){ hybla_recalc_param(sk); ca->minrtt = tp->srtt; } if (!tcp_is_cwnd_limited(sk, in_flight)) return; if (!ca->hybla_en) { tcp_reno_cong_avoid(sk, ack, in_flight); return; } if (ca->rho == 0) hybla_recalc_param(sk); rho_fractions = ca->rho_3ls - (ca->rho << 3); if (tp->snd_cwnd < tp->snd_ssthresh) { /* * slow start * INC = 2^RHO - 1 * This is done by splitting the rho parameter * into 2 parts: an integer part and a fraction part. * Inrement<<7 is estimated by doing: * [2^(int+fract)]<<7 * that is equal to: * (2^int) * [(2^fract) <<7] * 2^int is straightly computed as 1<<int, * while we will use hybla_slowstart_fraction_increment() to * calculate 2^fract in a <<7 value. */ is_slowstart = 1; increment = ((1 << min(ca->rho, 16U)) * hybla_fraction(rho_fractions)) - 128; } else { /* * congestion avoidance * INC = RHO^2 / W * as long as increment is estimated as (rho<<7)/window * it already is <<7 and we can easily count its fractions. */ increment = ca->rho2_7ls / tp->snd_cwnd; if (increment < 128) tp->snd_cwnd_cnt++; } odd = increment % 128; tp->snd_cwnd += increment >> 7; ca->snd_cwnd_cents += odd; /* check when fractions goes >=128 and increase cwnd by 1. */ while (ca->snd_cwnd_cents >= 128) { tp->snd_cwnd++; ca->snd_cwnd_cents -= 128; tp->snd_cwnd_cnt = 0; } /* check when cwnd has not been incremented for a while */ if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tp->snd_cwnd) { tp->snd_cwnd++; tp->snd_cwnd_cnt = 0; } /* clamp down slowstart cwnd to ssthresh value. 
*/ if (is_slowstart) tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp); } static struct tcp_congestion_ops tcp_hybla __read_mostly = { .init = hybla_init, .ssthresh = tcp_reno_ssthresh, .min_cwnd = tcp_reno_min_cwnd, .cong_avoid = hybla_cong_avoid, .set_state = hybla_state, .owner = THIS_MODULE, .name = "hybla" }; static int __init hybla_register(void) { BUILD_BUG_ON(sizeof(struct hybla) > ICSK_CA_PRIV_SIZE); return tcp_register_congestion_control(&tcp_hybla); } static void __exit hybla_unregister(void) { tcp_unregister_congestion_control(&tcp_hybla); } module_init(hybla_register); module_exit(hybla_unregister); MODULE_AUTHOR("Daniele Lacamera"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("TCP Hybla");
gpl-2.0
kelvinbui31/android_mediatek_muse72
drivers/misc/mediatek/conn_soc/drv_wlan/mt_wifi/wlan/mgmt/auth.c
64
45320
/* ** $Id: //Department/DaVinci/BRANCHES/MT6620_WIFI_DRIVER_V2_3/mgmt/auth.c#1 $ */ /*! \file "auth.c" \brief This file includes the authentication-related functions. This file includes the authentication-related functions. */ /* ** $Log: auth.c $ * * 02 13 2012 cp.wu * NULL * show error message only instead of raise assertion when * received authentication frame is carrying illegal parameters. * * 11 09 2011 yuche.tsai * NULL * Fix a network index & station record index issue when TX deauth frame. * * 10 12 2011 wh.su * [WCXRP00001036] [MT6620 Wi-Fi][Driver][FW] Adding the 802.11w code for MFP * adding the 802.11w related function and define . * * 06 22 2011 yuche.tsai * NULL * Fix coding error. * * 06 20 2011 yuche.tsai * [WCXRP00000796] [Volunteer Patch][MT6620][Driver] Add BC deauth frame TX feature. * BC deauth support. * * 04 21 2011 terry.wu * [WCXRP00000674] [MT6620 Wi-Fi][Driver] Refine AAA authSendAuthFrame * Add network type parameter to authSendAuthFrame. * * 04 15 2011 chinghwa.yu * [WCXRP00000065] Update BoW design and settings * Add BOW short range mode. * * 02 08 2011 yuche.tsai * [WCXRP00000245] 1. Invitation Request/Response. 2. Provision Discovery Request/Response * 1. Fix Service Disocvery Logical issue. * 2. Fix a NULL pointer access violation issue when sending deauthentication packet to a class error station. * * 01 24 2011 cp.wu * [WCXRP00000382] [MT6620 Wi-Fi][Driver] Track forwarding packet number with notifying tx thread for serving * 1. add an extra counter for tracking pending forward frames. * 2. notify TX service thread as well when there is pending forward frame * 3. correct build errors leaded by introduction of Wi-Fi direct separation module * * 01 21 2011 terry.wu * [WCXRP00000381] [MT6620 Wi-Fi][Driver] Kernel panic when replying unaccept Auth in AP mode * In AP mode, use STA_REC_INDEX_NOT_FOUND(0xFE) instead of StaRec index when replying an unaccept Auth frame. 
* * 10 18 2010 cp.wu * [WCXRP00000052] [MT6620 Wi-Fi][Driver] Eliminate Linux Compile Warning * use definition macro to replace hard-coded constant * * 09 03 2010 kevin.huang * NULL * Refine #include sequence and solve recursive/nested #include issue * * 08 30 2010 cp.wu * NULL * eliminate klockwork errors * * 08 16 2010 cp.wu * NULL * Replace CFG_SUPPORT_BOW by CFG_ENABLE_BT_OVER_WIFI. * There is no CFG_SUPPORT_BOW in driver domain source. * * 08 16 2010 kevin.huang * NULL * Refine AAA functions * * 08 03 2010 cp.wu * NULL * surpress compilation warning. * * 07 08 2010 cp.wu * * [WPD00003833] [MT6620 and MT5931] Driver migration - move to new repository. * * 06 28 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * send MMPDU in basic rate. * * 06 21 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * specify correct value for management frames. * * 06 18 2010 cm.chang * [WPD00003841][LITE Driver] Migrate RLM/CNM to host driver * Provide cnmMgtPktAlloc() and alloc/free function of msg/buf * * 06 14 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * add management dispatching function table. * * 06 11 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * auth.c is migrated. * * 05 28 2010 kevin.huang * [BORA00000794][WIFISYS][New Feature]Power Management Support * Update authSendDeauthFrame() for correct the value of eNetTypeIndex in MSDU_INFO_T * * 05 24 2010 kevin.huang * [BORA00000794][WIFISYS][New Feature]Power Management Support * Check Net is active before sending Deauth frame. * * 05 24 2010 kevin.huang * [BORA00000794][WIFISYS][New Feature]Power Management Support * Refine authSendAuthFrame() for NULL STA_RECORD_T case and minimum deauth interval. 
* * 04 24 2010 cm.chang * [BORA00000018]Integrate WIFI part into BORA for the 1st time * g_aprBssInfo[] depends on CFG_SUPPORT_P2P and CFG_SUPPORT_BOW * * 04 19 2010 kevin.huang * [BORA00000714][WIFISYS][New Feature]Beacon Timeout Support * Add Send Deauth for Class 3 Error and Leave Network Support * * 02 23 2010 kevin.huang * [BORA00000603][WIFISYS] [New Feature] AAA Module Support * Fix compile warning * * 02 05 2010 kevin.huang * [BORA00000603][WIFISYS] [New Feature] AAA Module Support * Add debug message for abnormal authentication frame from AP * * 02 04 2010 kevin.huang * [BORA00000603][WIFISYS] [New Feature] AAA Module Support * Add AAA Module Support, Revise Net Type to Net Type Index for array lookup * * 01 11 2010 kevin.huang * [BORA00000018]Integrate WIFI part into BORA for the 1st time * Add Deauth and Disassoc Handler * * 01 07 2010 kevin.huang * [BORA00000018]Integrate WIFI part into BORA for the 1st time * [BORA00000018] Integrate WIFI part into BORA for the 1st time * * Fix the Debug Label * * 12 18 2009 cm.chang * [BORA00000018]Integrate WIFI part into BORA for the 1st time * . 
*
 * Dec 7 2009 mtk01461
 * [BORA00000018] Integrate WIFI part into BORA for the 1st time
 * Update the authComposeAuthFrameHeader()
 *
 * Dec 7 2009 mtk01088
 * [BORA00000476] [Wi-Fi][firmware] Add the security module initialize code
 * adding the send deauth frame function
 *
 * Dec 3 2009 mtk01461
 * [BORA00000018] Integrate WIFI part into BORA for the 1st time
 * Integrate send Auth with TXM
 *
 * Nov 24 2009 mtk01461
 * [BORA00000018] Integrate WIFI part into BORA for the 1st time
 * Revise MGMT Handler with Retain Status
 *
 * Nov 23 2009 mtk01461
 * [BORA00000018] Integrate WIFI part into BORA for the 1st time
 *
*/

/*******************************************************************************
*                         C O M P I L E R   F L A G S
********************************************************************************
*/

/*******************************************************************************
*                    E X T E R N A L   R E F E R E N C E S
********************************************************************************
*/
#include "precomp.h"

/*******************************************************************************
*                              C O N S T A N T S
********************************************************************************
*/

/*******************************************************************************
*                             D A T A   T Y P E S
********************************************************************************
*/

/*******************************************************************************
*                            P U B L I C   D A T A
********************************************************************************
*/
/* IEs appended when composing an Authentication frame for TX
 * (currently only the Challenge Text IE for shared-key auth). */
APPEND_IE_ENTRY_T txAuthIETable[] = {
    { (ELEM_HDR_LEN + ELEM_MAX_LEN_CHALLENGE_TEXT), authAddIEChallengeText }
};

/* IE handlers dispatched when parsing a received Authentication frame. */
HANDLE_IE_ENTRY_T rxAuthIETable[] = {
    { ELEM_ID_CHALLENGE_TEXT, authHandleIEChallengeText }
};

/*******************************************************************************
*                           P R I V A T E   D A T A
********************************************************************************
*/

/*******************************************************************************
*                                 M A C R O S
********************************************************************************
*/

/*******************************************************************************
*                   F U N C T I O N   D E C L A R A T I O N S
********************************************************************************
*/

/*******************************************************************************
*                              F U N C T I O N S
********************************************************************************
*/
/*----------------------------------------------------------------------------*/
/*!
* @brief This function will compose the Authentication frame header and fixed fields.
*
* @param[in] pucBuffer              Pointer to the frame buffer.
* @param[in] aucPeerMACAddress      Given Peer MAC Address.
* @param[in] aucMACAddress          Given Our MAC Address.
* @param[in] u2AuthAlgNum           Authentication Algorithm Number
* @param[in] u2TransactionSeqNum    Transaction Sequence Number
* @param[in] u2StatusCode           Status Code
*
* \return (none)
*/
/*----------------------------------------------------------------------------*/
__KAL_INLINE__ VOID
authComposeAuthFrameHeaderAndFF (
    IN PUINT_8      pucBuffer,
    IN UINT_8       aucPeerMACAddress[],
    IN UINT_8       aucMACAddress[],
    IN UINT_16      u2AuthAlgNum,
    IN UINT_16      u2TransactionSeqNum,
    IN UINT_16      u2StatusCode
    )
{
    P_WLAN_AUTH_FRAME_T prAuthFrame;
    UINT_16 u2FrameCtrl;

    ASSERT(pucBuffer);
    ASSERT(aucPeerMACAddress);
    ASSERT(aucMACAddress);

    prAuthFrame = (P_WLAN_AUTH_FRAME_T)pucBuffer;

    //4 <1> Compose the frame header of the Authentication frame.
    /* Fill the Frame Control field. */
    u2FrameCtrl = MAC_FRAME_AUTH;

    /* If this frame is the third frame in the shared key authentication
     * sequence, it shall be encrypted.
     */
    if ((u2AuthAlgNum == AUTH_ALGORITHM_NUM_SHARED_KEY) &&
        (u2TransactionSeqNum == AUTH_TRANSACTION_SEQ_3)) {
        u2FrameCtrl |= MASK_FC_PROTECTED_FRAME; /* HW will also detect this bit for applying encryption */
    }

    //WLAN_SET_FIELD_16(&prAuthFrame->u2FrameCtrl, u2FrameCtrl);
    prAuthFrame->u2FrameCtrl = u2FrameCtrl; // NOTE(Kevin): Optimized for ARM

    /* Fill the DA field with Target BSSID. */
    COPY_MAC_ADDR(prAuthFrame->aucDestAddr, aucPeerMACAddress);

    /* Fill the SA field with our MAC Address. */
    COPY_MAC_ADDR(prAuthFrame->aucSrcAddr, aucMACAddress);

    /* BSSID depends on the direction of the exchange: odd sequence numbers
     * (1, 3) are sent by the requester toward the target BSS, even ones
     * (2, 4) are replies sent from our own BSS. */
    switch (u2TransactionSeqNum) {
    case AUTH_TRANSACTION_SEQ_1:
    case AUTH_TRANSACTION_SEQ_3:
        /* Fill the BSSID field with Target BSSID. */
        COPY_MAC_ADDR(prAuthFrame->aucBSSID, aucPeerMACAddress);
        break;

    case AUTH_TRANSACTION_SEQ_2:
    case AUTH_TRANSACTION_SEQ_4:
        /* Fill the BSSID field with Current BSSID. */
        COPY_MAC_ADDR(prAuthFrame->aucBSSID, aucMACAddress);
        break;

    default:
        ASSERT(0);
    }

    /* Clear the SEQ/FRAG_NO field. */
    prAuthFrame->u2SeqCtrl = 0;

    //4 <2> Compose the frame body's fixed field part of the Authentication frame.
    /* Fill the Authentication Algorithm Number field. */
    //WLAN_SET_FIELD_16(&prAuthFrame->u2AuthAlgNum, u2AuthAlgNum);
    prAuthFrame->u2AuthAlgNum = u2AuthAlgNum; // NOTE(Kevin): Optimized for ARM

    /* Fill the Authentication Transaction Sequence Number field. */
    //WLAN_SET_FIELD_16(&prAuthFrame->u2AuthTransSeqNo, u2TransactionSeqNum);
    prAuthFrame->u2AuthTransSeqNo = u2TransactionSeqNum; // NOTE(Kevin): Optimized for ARM

    /* Fill the Status Code field. */
    //WLAN_SET_FIELD_16(&prAuthFrame->u2StatusCode, u2StatusCode);
    prAuthFrame->u2StatusCode = u2StatusCode; // NOTE(Kevin): Optimized for ARM

    return;
} /* end of authComposeAuthFrameHeaderAndFF() */

/*----------------------------------------------------------------------------*/
/*!
* @brief This function will append Challenge Text IE to the Authentication frame
*
* @param[in] prMsduInfo     Pointer to the composed MSDU_INFO_T.
*
* @return (none)
*/
/*----------------------------------------------------------------------------*/
VOID
authAddIEChallengeText (
    IN P_ADAPTER_T prAdapter,
    IN OUT P_MSDU_INFO_T prMsduInfo
    )
{
    P_WLAN_AUTH_FRAME_T prAuthFrame;
    P_STA_RECORD_T prStaRec;
    UINT_16 u2TransactionSeqNum;

    ASSERT(prMsduInfo);

    prStaRec = cnmGetStaRecByIndex(prAdapter, prMsduInfo->ucStaRecIndex);
    if(!prStaRec) {
        return;
    }

    ASSERT(prStaRec);

    /* For Management, frame header and payload are in a continuous buffer */
    prAuthFrame = (P_WLAN_AUTH_FRAME_T)prMsduInfo->prPacket;

    /* NOTE(review): no trailing semicolon after this macro invocation —
     * legal only if WLAN_GET_FIELD_16 expands to a braced block; verify
     * against the macro definition. */
    WLAN_GET_FIELD_16(&prAuthFrame->u2AuthTransSeqNo, &u2TransactionSeqNum)

    /* Only consider SEQ_3 for Challenge Text */
    if ((u2TransactionSeqNum == AUTH_TRANSACTION_SEQ_3) &&
        (prStaRec->ucAuthAlgNum == AUTH_ALGORITHM_NUM_SHARED_KEY) &&
        (prStaRec->prChallengeText != NULL)) {

        /* NOTE(review): casting a pointer through UINT_32 truncates on
         * 64-bit targets; presumably safe on this 32-bit SoC, but should
         * use an unsigned long / uintptr_t equivalent. */
        COPY_IE(((UINT_32)(prMsduInfo->prPacket) + prMsduInfo->u2FrameLength),
                (prStaRec->prChallengeText));

        prMsduInfo->u2FrameLength += IE_SIZE(prStaRec->prChallengeText);
    }

    return;
} /* end of authAddIEChallengeText() */

#if !CFG_SUPPORT_AAA
/*----------------------------------------------------------------------------*/
/*!
* @brief This function will send the Authentication frame
*
* @param[in] prStaRec               Pointer to the STA_RECORD_T
* @param[in] u2TransactionSeqNum    Transaction Sequence Number
*
* @retval WLAN_STATUS_RESOURCES No available resource for frame composing.
* @retval WLAN_STATUS_SUCCESS Successfully send frame to TX Module */ /*----------------------------------------------------------------------------*/ WLAN_STATUS authSendAuthFrame ( IN P_ADAPTER_T prAdapter, IN P_STA_RECORD_T prStaRec, IN UINT_16 u2TransactionSeqNum ) { P_MSDU_INFO_T prMsduInfo; P_BSS_INFO_T prBssInfo; UINT_16 u2EstimatedFrameLen; UINT_16 u2EstimatedExtraIELen; UINT_16 u2PayloadLen; UINT_32 i; DBGLOG(SAA, LOUD, ("Send Auth Frame\n")); ASSERT(prStaRec); //4 <1> Allocate a PKT_INFO_T for Authentication Frame /* Init with MGMT Header Length + Length of Fixed Fields */ u2EstimatedFrameLen = (MAC_TX_RESERVED_FIELD + WLAN_MAC_MGMT_HEADER_LEN + AUTH_ALGORITHM_NUM_FIELD_LEN + AUTH_TRANSACTION_SEQENCE_NUM_FIELD_LEN + STATUS_CODE_FIELD_LEN); /* + Extra IE Length */ u2EstimatedExtraIELen = 0; for (i = 0; i < sizeof(txAuthIETable)/sizeof(APPEND_IE_ENTRY_T); i++) { u2EstimatedExtraIELen += txAuthIETable[i].u2EstimatedIELen; } u2EstimatedFrameLen += u2EstimatedExtraIELen; /* Allocate a MSDU_INFO_T */ if ( (prMsduInfo = cnmMgtPktAlloc(prAdapter, u2EstimatedFrameLen)) == NULL) { DBGLOG(SAA, WARN, ("No PKT_INFO_T for sending Auth Frame.\n")); return WLAN_STATUS_RESOURCES; } //4 <2> Compose Authentication Request frame header and fixed fields in MSDU_INfO_T. 
ASSERT(prStaRec->ucNetTypeIndex < NETWORK_TYPE_INDEX_NUM); prBssInfo = &(prAdapter->rWifiVar.arBssInfo[prStaRec->ucNetTypeIndex]); /* Compose Header and some Fixed Fields */ authComposeAuthFrameHeaderAndFF( (PUINT_8)((UINT_32)(prMsduInfo->prPacket) + MAC_TX_RESERVED_FIELD), prStaRec->aucMacAddr, prBssInfo->aucOwnMacAddr, prStaRec->ucAuthAlgNum, u2TransactionSeqNum, STATUS_CODE_RESERVED); u2PayloadLen = (AUTH_ALGORITHM_NUM_FIELD_LEN + AUTH_TRANSACTION_SEQENCE_NUM_FIELD_LEN + STATUS_CODE_FIELD_LEN); //4 <3> Update information of MSDU_INFO_T prMsduInfo->eSrc = TX_PACKET_MGMT; prMsduInfo->ucPacketType = HIF_TX_PACKET_TYPE_MGMT; prMsduInfo->ucStaRecIndex = prStaRec->ucIndex; prMsduInfo->ucNetworkType = prStaRec->ucNetTypeIndex; prMsduInfo->ucMacHeaderLength = WLAN_MAC_MGMT_HEADER_LEN; prMsduInfo->fgIs802_1x = FALSE; prMsduInfo->fgIs802_11 = TRUE; prMsduInfo->u2FrameLength = WLAN_MAC_MGMT_HEADER_LEN + u2PayloadLen; prMsduInfo->ucTxSeqNum = nicIncreaseTxSeqNum(prAdapter); prMsduInfo->pfTxDoneHandler = saaFsmRunEventTxDone; prMsduInfo->fgIsBasicRate = TRUE; //4 <4> Compose IEs in MSDU_INFO_T for (i = 0; i < sizeof(txAuthIETable)/sizeof(APPEND_IE_ENTRY_T); i++) { if (txAuthIETable[i].pfnAppendIE) { txAuthIETable[i].pfnAppendIE(prAdapter, prMsduInfo); } } /* TODO(Kevin): Also release the unused tail room of the composed MMPDU */ //4 <6> Inform TXM to send this Authentication frame. nicTxEnqueueMsdu(prAdapter, prMsduInfo); return WLAN_STATUS_SUCCESS; } /* end of authSendAuthFrame() */ #else /*----------------------------------------------------------------------------*/ /*! * @brief This function will send the Authenticiation frame * * @param[in] prStaRec Pointer to the STA_RECORD_T * @param[in] u2TransactionSeqNum Transaction Sequence Number * * @retval WLAN_STATUS_RESOURCES No available resource for frame composing. 
* @retval WLAN_STATUS_SUCCESS Successfully send frame to TX Module */ /*----------------------------------------------------------------------------*/ WLAN_STATUS authSendAuthFrame ( IN P_ADAPTER_T prAdapter, IN P_STA_RECORD_T prStaRec, IN ENUM_NETWORK_TYPE_INDEX_T eNetTypeIndex, IN P_SW_RFB_T prFalseAuthSwRfb, IN UINT_16 u2TransactionSeqNum, IN UINT_16 u2StatusCode ) { PUINT_8 pucReceiveAddr; PUINT_8 pucTransmitAddr; P_MSDU_INFO_T prMsduInfo; P_BSS_INFO_T prBssInfo; /*get from input parameter*/ //ENUM_NETWORK_TYPE_INDEX_T eNetTypeIndex = NETWORK_TYPE_AIS_INDEX; PFN_TX_DONE_HANDLER pfTxDoneHandler = (PFN_TX_DONE_HANDLER)NULL; UINT_16 u2EstimatedFrameLen; UINT_16 u2EstimatedExtraIELen; UINT_16 u2PayloadLen; UINT_16 ucAuthAlgNum; UINT_32 i; DBGLOG(SAA, LOUD, ("Send Auth Frame %d, Status Code = %d\n", u2TransactionSeqNum, u2StatusCode)); //4 <1> Allocate a PKT_INFO_T for Authentication Frame /* Init with MGMT Header Length + Length of Fixed Fields */ u2EstimatedFrameLen = (MAC_TX_RESERVED_FIELD + WLAN_MAC_MGMT_HEADER_LEN + AUTH_ALGORITHM_NUM_FIELD_LEN + AUTH_TRANSACTION_SEQENCE_NUM_FIELD_LEN + STATUS_CODE_FIELD_LEN); /* + Extra IE Length */ u2EstimatedExtraIELen = 0; for (i = 0; i < sizeof(txAuthIETable)/sizeof(APPEND_IE_ENTRY_T); i++) { u2EstimatedExtraIELen += txAuthIETable[i].u2EstimatedIELen; } u2EstimatedFrameLen += u2EstimatedExtraIELen; /* Allocate a MSDU_INFO_T */ if ( (prMsduInfo = cnmMgtPktAlloc(prAdapter, u2EstimatedFrameLen)) == NULL) { DBGLOG(SAA, WARN, ("No PKT_INFO_T for sending Auth Frame.\n")); return WLAN_STATUS_RESOURCES; } //4 <2> Compose Authentication Request frame header and fixed fields in MSDU_INfO_T. 
if (prStaRec) { ASSERT(prStaRec->ucNetTypeIndex < NETWORK_TYPE_INDEX_NUM); prBssInfo = &(prAdapter->rWifiVar.arBssInfo[prStaRec->ucNetTypeIndex]); pucTransmitAddr = prBssInfo->aucOwnMacAddr; pucReceiveAddr = prStaRec->aucMacAddr; ucAuthAlgNum = prStaRec->ucAuthAlgNum; switch (u2TransactionSeqNum) { case AUTH_TRANSACTION_SEQ_1: case AUTH_TRANSACTION_SEQ_3: pfTxDoneHandler = saaFsmRunEventTxDone; break; case AUTH_TRANSACTION_SEQ_2: case AUTH_TRANSACTION_SEQ_4: pfTxDoneHandler = aaaFsmRunEventTxDone; break; } } else { /* For Error Status Code */ P_WLAN_AUTH_FRAME_T prFalseAuthFrame; ASSERT(prFalseAuthSwRfb); prFalseAuthFrame = (P_WLAN_AUTH_FRAME_T)prFalseAuthSwRfb->pvHeader; ASSERT(u2StatusCode != STATUS_CODE_SUCCESSFUL); pucTransmitAddr = prFalseAuthFrame->aucDestAddr; pucReceiveAddr = prFalseAuthFrame->aucSrcAddr; ucAuthAlgNum = prFalseAuthFrame->u2AuthAlgNum; u2TransactionSeqNum = (prFalseAuthFrame->u2AuthTransSeqNo + 1); } /* Compose Header and some Fixed Fields */ authComposeAuthFrameHeaderAndFF((PUINT_8)((UINT_32)(prMsduInfo->prPacket) + MAC_TX_RESERVED_FIELD), pucReceiveAddr, pucTransmitAddr, ucAuthAlgNum, u2TransactionSeqNum, u2StatusCode); u2PayloadLen = (AUTH_ALGORITHM_NUM_FIELD_LEN + AUTH_TRANSACTION_SEQENCE_NUM_FIELD_LEN + STATUS_CODE_FIELD_LEN); //4 <3> Update information of MSDU_INFO_T prMsduInfo->eSrc = TX_PACKET_MGMT; prMsduInfo->ucPacketType = HIF_TX_PACKET_TYPE_MGMT; if(prStaRec) { prMsduInfo->ucStaRecIndex = prStaRec->ucIndex; } else { prMsduInfo->ucStaRecIndex = STA_REC_INDEX_NOT_FOUND; //false Auth frame } prMsduInfo->ucNetworkType = (UINT_8)eNetTypeIndex; prMsduInfo->ucMacHeaderLength = WLAN_MAC_MGMT_HEADER_LEN; prMsduInfo->fgIs802_1x = FALSE; prMsduInfo->fgIs802_11 = TRUE; prMsduInfo->u2FrameLength = WLAN_MAC_MGMT_HEADER_LEN + u2PayloadLen; prMsduInfo->ucTxSeqNum = nicIncreaseTxSeqNum(prAdapter); prMsduInfo->pfTxDoneHandler = pfTxDoneHandler; prMsduInfo->fgIsBasicRate = TRUE; //4 <4> Compose IEs in MSDU_INFO_T for (i = 0; i < 
sizeof(txAuthIETable)/sizeof(APPEND_IE_ENTRY_T); i++) { if (txAuthIETable[i].pfnAppendIE) { txAuthIETable[i].pfnAppendIE(prAdapter, prMsduInfo); } } /* TODO(Kevin): Also release the unused tail room of the composed MMPDU */ //4 <6> Inform TXM to send this Authentication frame. nicTxEnqueueMsdu(prAdapter, prMsduInfo); return WLAN_STATUS_SUCCESS; } /* end of authSendAuthFrame() */ #endif /* CFG_SUPPORT_AAA */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will strictly check the TX Authentication frame for SAA/AAA event * handling. * * @param[in] prMsduInfo Pointer of MSDU_INFO_T * @param[in] u2TransactionSeqNum Transaction Sequence Number * * @retval WLAN_STATUS_FAILURE This is not the frame we should handle at current state. * @retval WLAN_STATUS_SUCCESS This is the frame we should handle. */ /*----------------------------------------------------------------------------*/ WLAN_STATUS authCheckTxAuthFrame ( IN P_ADAPTER_T prAdapter, IN P_MSDU_INFO_T prMsduInfo, IN UINT_16 u2TransactionSeqNum ) { P_WLAN_AUTH_FRAME_T prAuthFrame; P_STA_RECORD_T prStaRec; UINT_16 u2TxFrameCtrl; UINT_16 u2TxAuthAlgNum; UINT_16 u2TxTransactionSeqNum; ASSERT(prMsduInfo); prAuthFrame = (P_WLAN_AUTH_FRAME_T)(prMsduInfo->prPacket); ASSERT(prAuthFrame); prStaRec = cnmGetStaRecByIndex(prAdapter, prMsduInfo->ucStaRecIndex); ASSERT(prStaRec); if(!prStaRec) { return WLAN_STATUS_INVALID_PACKET; } //WLAN_GET_FIELD_16(&prAuthFrame->u2FrameCtrl, &u2TxFrameCtrl) u2TxFrameCtrl = prAuthFrame->u2FrameCtrl; // NOTE(Kevin): Optimized for ARM u2TxFrameCtrl &= MASK_FRAME_TYPE; if (u2TxFrameCtrl != MAC_FRAME_AUTH) { return WLAN_STATUS_FAILURE; } //WLAN_GET_FIELD_16(&prAuthFrame->u2AuthAlgNum, &u2TxAuthAlgNum) u2TxAuthAlgNum = prAuthFrame->u2AuthAlgNum; // NOTE(Kevin): Optimized for ARM if (u2TxAuthAlgNum != (UINT_16)(prStaRec->ucAuthAlgNum)) { return WLAN_STATUS_FAILURE; } //WLAN_GET_FIELD_16(&prAuthFrame->u2AuthTransSeqNo, &u2TxTransactionSeqNum) 
u2TxTransactionSeqNum = prAuthFrame->u2AuthTransSeqNo; // NOTE(Kevin): Optimized for ARM if (u2TxTransactionSeqNum != u2TransactionSeqNum) { return WLAN_STATUS_FAILURE; } return WLAN_STATUS_SUCCESS; } /* end of authCheckTxAuthFrame() */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will check the incoming Auth Frame's Transaction Sequence * Number before delivering it to the corresponding SAA or AAA Module. * * @param[in] prSwRfb Pointer to the SW_RFB_T structure. * * @retval WLAN_STATUS_SUCCESS Always not retain authentication frames */ /*----------------------------------------------------------------------------*/ WLAN_STATUS authCheckRxAuthFrameTransSeq ( IN P_ADAPTER_T prAdapter, IN P_SW_RFB_T prSwRfb ) { P_WLAN_AUTH_FRAME_T prAuthFrame; UINT_16 u2RxTransactionSeqNum; ASSERT(prSwRfb); //4 <1> locate the Authentication Frame. prAuthFrame = (P_WLAN_AUTH_FRAME_T) prSwRfb->pvHeader; //4 <2> Parse the Header of Authentication Frame. if ((prSwRfb->u2PacketLen - prSwRfb->u2HeaderLen) < (AUTH_ALGORITHM_NUM_FIELD_LEN + AUTH_TRANSACTION_SEQENCE_NUM_FIELD_LEN + STATUS_CODE_FIELD_LEN)) { ASSERT(0); return WLAN_STATUS_SUCCESS; } //4 <3> Parse the Fixed Fields of Authentication Frame Body. 
//WLAN_GET_FIELD_16(&prAuthFrame->u2AuthTransSeqNo, &u2RxTransactionSeqNum); u2RxTransactionSeqNum = prAuthFrame->u2AuthTransSeqNo; // NOTE(Kevin): Optimized for ARM switch (u2RxTransactionSeqNum) { case AUTH_TRANSACTION_SEQ_2: case AUTH_TRANSACTION_SEQ_4: saaFsmRunEventRxAuth(prAdapter, prSwRfb); break; case AUTH_TRANSACTION_SEQ_1: case AUTH_TRANSACTION_SEQ_3: #if CFG_SUPPORT_AAA aaaFsmRunEventRxAuth(prAdapter, prSwRfb); #endif /* CFG_SUPPORT_AAA */ break; default: DBGLOG(SAA, WARN, ("Strange Authentication Packet: Auth Trans Seq No = %d, Error Status Code = %d\n", u2RxTransactionSeqNum, prAuthFrame->u2StatusCode)); break; } return WLAN_STATUS_SUCCESS; } /* end of authCheckRxAuthFrameTransSeq() */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will validate the incoming Authentication Frame and take * the status code out. * * @param[in] prSwRfb Pointer to SW RFB data structure. * @param[in] u2TransactionSeqNum Transaction Sequence Number * @param[out] pu2StatusCode Pointer to store the Status Code from Authentication. * * @retval WLAN_STATUS_FAILURE This is not the frame we should handle at current state. * @retval WLAN_STATUS_SUCCESS This is the frame we should handle. */ /*----------------------------------------------------------------------------*/ WLAN_STATUS authCheckRxAuthFrameStatus ( IN P_ADAPTER_T prAdapter, IN P_SW_RFB_T prSwRfb, IN UINT_16 u2TransactionSeqNum, OUT PUINT_16 pu2StatusCode ) { P_STA_RECORD_T prStaRec; P_WLAN_AUTH_FRAME_T prAuthFrame; UINT_16 u2RxAuthAlgNum; UINT_16 u2RxTransactionSeqNum; //UINT_16 u2RxStatusCode; // NOTE(Kevin): Optimized for ARM ASSERT(prSwRfb); ASSERT(pu2StatusCode); prStaRec = cnmGetStaRecByIndex(prAdapter, prSwRfb->ucStaRecIdx); ASSERT(prStaRec); if(!prStaRec) { return WLAN_STATUS_INVALID_PACKET; } //4 <1> locate the Authentication Frame. prAuthFrame = (P_WLAN_AUTH_FRAME_T) prSwRfb->pvHeader; //4 <2> Parse the Fixed Fields of Authentication Frame Body. 
//WLAN_GET_FIELD_16(&prAuthFrame->u2AuthAlgNum, &u2RxAuthAlgNum); u2RxAuthAlgNum = prAuthFrame->u2AuthAlgNum; // NOTE(Kevin): Optimized for ARM if (u2RxAuthAlgNum != (UINT_16)prStaRec->ucAuthAlgNum) { DBGLOG(SAA, LOUD, ("Discard Auth frame with auth type = %d, current = %d\n", u2RxAuthAlgNum, prStaRec->ucAuthAlgNum)); return WLAN_STATUS_FAILURE; } //WLAN_GET_FIELD_16(&prAuthFrame->u2AuthTransSeqNo, &u2RxTransactionSeqNum); u2RxTransactionSeqNum = prAuthFrame->u2AuthTransSeqNo; // NOTE(Kevin): Optimized for ARM if (u2RxTransactionSeqNum != u2TransactionSeqNum) { DBGLOG(SAA, LOUD, ("Discard Auth frame with Transaction Seq No = %d\n", u2RxTransactionSeqNum)); return WLAN_STATUS_FAILURE; } //4 <3> Get the Status code //WLAN_GET_FIELD_16(&prAuthFrame->u2StatusCode, &u2RxStatusCode); //*pu2StatusCode = u2RxStatusCode; *pu2StatusCode = prAuthFrame->u2StatusCode; // NOTE(Kevin): Optimized for ARM return WLAN_STATUS_SUCCESS; } /* end of authCheckRxAuthFrameStatus() */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will handle the Challenge Text IE from the Authentication frame * * @param[in] prSwRfb Pointer to SW RFB data structure. 
* @param[in] prIEHdr Pointer to start address of IE * * @return (none) */ /*----------------------------------------------------------------------------*/ VOID authHandleIEChallengeText ( P_ADAPTER_T prAdapter, P_SW_RFB_T prSwRfb, P_IE_HDR_T prIEHdr ) { P_WLAN_AUTH_FRAME_T prAuthFrame; P_STA_RECORD_T prStaRec; UINT_16 u2TransactionSeqNum; ASSERT(prSwRfb); ASSERT(prIEHdr); prStaRec = cnmGetStaRecByIndex(prAdapter, prSwRfb->ucStaRecIdx); ASSERT(prStaRec); if(!prStaRec) { return; } /* For Management, frame header and payload are in a continuous buffer */ prAuthFrame = (P_WLAN_AUTH_FRAME_T)prSwRfb->pvHeader; //WLAN_GET_FIELD_16(&prAuthFrame->u2AuthTransSeqNo, &u2TransactionSeqNum) u2TransactionSeqNum = prAuthFrame->u2AuthTransSeqNo; // NOTE(Kevin): Optimized for ARM /* Only consider SEQ_2 for Challenge Text */ if ((u2TransactionSeqNum == AUTH_TRANSACTION_SEQ_2) && (prStaRec->ucAuthAlgNum == AUTH_ALGORITHM_NUM_SHARED_KEY)) { /* Free previous allocated TCM memory */ if (prStaRec->prChallengeText) { ASSERT(0); cnmMemFree(prAdapter, prStaRec->prChallengeText); prStaRec->prChallengeText = (P_IE_CHALLENGE_TEXT_T)NULL; } if ( ( prStaRec->prChallengeText = cnmMemAlloc(prAdapter, RAM_TYPE_MSG, IE_SIZE(prIEHdr)) ) == NULL) { return; } /* Save the Challenge Text from Auth Seq 2 Frame, before sending Auth Seq 3 Frame */ COPY_IE(prStaRec->prChallengeText, prIEHdr); } return; } /* end of authAddIEChallengeText() */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will parse and process the incoming Authentication frame. * * @param[in] prSwRfb Pointer to SW RFB data structure. * * @retval WLAN_STATUS_SUCCESS This is the frame we should handle. 
*/ /*----------------------------------------------------------------------------*/ WLAN_STATUS authProcessRxAuth2_Auth4Frame ( IN P_ADAPTER_T prAdapter, IN P_SW_RFB_T prSwRfb ) { P_WLAN_AUTH_FRAME_T prAuthFrame; PUINT_8 pucIEsBuffer; UINT_16 u2IEsLen; UINT_16 u2Offset; UINT_8 ucIEID; UINT_32 i; ASSERT(prSwRfb); prAuthFrame = (P_WLAN_AUTH_FRAME_T) prSwRfb->pvHeader; pucIEsBuffer = &prAuthFrame->aucInfoElem[0]; u2IEsLen = (prSwRfb->u2PacketLen - prSwRfb->u2HeaderLen) - (AUTH_ALGORITHM_NUM_FIELD_LEN + AUTH_TRANSACTION_SEQENCE_NUM_FIELD_LEN + STATUS_CODE_FIELD_LEN); IE_FOR_EACH(pucIEsBuffer, u2IEsLen, u2Offset) { ucIEID = IE_ID(pucIEsBuffer); for (i = 0; i < (sizeof(rxAuthIETable) / sizeof(HANDLE_IE_ENTRY_T)); i++) { if (ucIEID == rxAuthIETable[i].ucElemID) { rxAuthIETable[i].pfnHandleIE(prAdapter, prSwRfb, (P_IE_HDR_T)pucIEsBuffer); } } } return WLAN_STATUS_SUCCESS; } /* end of authProcessRxAuth2_Auth4Frame() */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will compose the Deauthentication frame * * @param[in] pucBuffer Pointer to the frame buffer. * @param[in] aucPeerMACAddress Given Peer MAC Address. * @param[in] aucMACAddress Given Our MAC Address. * @param[in] u2StatusCode Status Code * * @return (none) */ /*----------------------------------------------------------------------------*/ __KAL_INLINE__ VOID authComposeDeauthFrameHeaderAndFF ( IN PUINT_8 pucBuffer, IN UINT_8 aucPeerMACAddress[], IN UINT_8 aucMACAddress[], IN UINT_8 aucBssid[], IN UINT_16 u2ReasonCode ) { P_WLAN_DEAUTH_FRAME_T prDeauthFrame; UINT_16 u2FrameCtrl; ASSERT(pucBuffer); ASSERT(aucPeerMACAddress); ASSERT(aucMACAddress); ASSERT(aucBssid); prDeauthFrame = (P_WLAN_DEAUTH_FRAME_T)pucBuffer; //4 <1> Compose the frame header of the Deauthentication frame. /* Fill the Frame Control field. 
*/ u2FrameCtrl = MAC_FRAME_DEAUTH; //WLAN_SET_FIELD_16(&prDeauthFrame->u2FrameCtrl, u2FrameCtrl); prDeauthFrame->u2FrameCtrl = u2FrameCtrl; // NOTE(Kevin): Optimized for ARM /* Fill the DA field with Target BSSID. */ COPY_MAC_ADDR(prDeauthFrame->aucDestAddr, aucPeerMACAddress); /* Fill the SA field with our MAC Address. */ COPY_MAC_ADDR(prDeauthFrame->aucSrcAddr, aucMACAddress); /* Fill the BSSID field with Target BSSID. */ COPY_MAC_ADDR(prDeauthFrame->aucBSSID, aucBssid); /* Clear the SEQ/FRAG_NO field(HW won't overide the FRAG_NO, so we need to clear it). */ prDeauthFrame->u2SeqCtrl = 0; //4 <2> Compose the frame body's fixed field part of the Authentication frame. /* Fill the Status Code field. */ //WLAN_SET_FIELD_16(&prDeauthFrame->u2ReasonCode, u2ReasonCode); prDeauthFrame->u2ReasonCode = u2ReasonCode; // NOTE(Kevin): Optimized for ARM return; } /* end of authComposeDeauthFrameHeaderAndFF() */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will send the Deauthenticiation frame * * @param[in] prStaRec Pointer to the STA_RECORD_T * @param[in] prClassErrSwRfb Pointer to the SW_RFB_T which is Class Error. * @param[in] u2ReasonCode A reason code to indicate why to leave BSS. * @param[in] pfTxDoneHandler TX Done call back function * * @retval WLAN_STATUS_RESOURCES No available resource for frame composing. * @retval WLAN_STATUS_SUCCESS Successfully send frame to TX Module * @retval WLAN_STATUS_FAILURE Didn't send Deauth frame for various reasons. 
*/ /*----------------------------------------------------------------------------*/ WLAN_STATUS authSendDeauthFrame ( IN P_ADAPTER_T prAdapter, IN P_STA_RECORD_T prStaRec, IN P_SW_RFB_T prClassErrSwRfb, IN UINT_16 u2ReasonCode, IN PFN_TX_DONE_HANDLER pfTxDoneHandler ) { P_WLAN_MAC_HEADER_A4_T prWlanMacHeader = NULL; PUINT_8 pucReceiveAddr; PUINT_8 pucTransmitAddr; PUINT_8 pucBssid = NULL; ENUM_NETWORK_TYPE_INDEX_T eNetTypeIndex = NETWORK_TYPE_AIS_INDEX; P_MSDU_INFO_T prMsduInfo; UINT_16 u2EstimatedFrameLen; UINT_16 u2RxFrameCtrl; P_BSS_INFO_T prBssInfo; P_DEAUTH_INFO_T prDeauthInfo; OS_SYSTIME rCurrentTime; INT_32 i4NewEntryIndex, i; UINT_8 ucStaRecIdx = STA_REC_INDEX_NOT_FOUND; #if CFG_ENABLE_WIFI_DIRECT UINT_8 aucBMC[] = BC_MAC_ADDR; #endif /* NOTE(Kevin): The best way to reply the Deauth is according to the incoming data * frame */ //4 <1> Find the Receiver Address first. if (prClassErrSwRfb) { BOOLEAN fgIsAbleToSendDeauth = FALSE; prWlanMacHeader = (P_WLAN_MAC_HEADER_A4_T) prClassErrSwRfb->pvHeader; //WLAN_GET_FIELD_16(&prWlanMacHeader->u2FrameCtrl, &u2RxFrameCtrl); u2RxFrameCtrl = prWlanMacHeader->u2FrameCtrl; // NOTE(Kevin): Optimized for ARM /* TODO(Kevin): Currently we won't send Deauth for IBSS node. How about DLS ? 
*/ if ((prWlanMacHeader->u2FrameCtrl & MASK_TO_DS_FROM_DS) == 0) { return WLAN_STATUS_FAILURE; } /* Check if corresponding BSS is able to send Deauth */ for (i = NETWORK_TYPE_AIS_INDEX; i < NETWORK_TYPE_INDEX_NUM; i++) { prBssInfo = &(prAdapter->rWifiVar.arBssInfo[i]); if (IS_NET_ACTIVE(prAdapter, i) && (EQUAL_MAC_ADDR(prWlanMacHeader->aucAddr1, prBssInfo->aucOwnMacAddr))) { { fgIsAbleToSendDeauth = TRUE; eNetTypeIndex = (ENUM_NETWORK_TYPE_INDEX_T)i; break; } } } if (!fgIsAbleToSendDeauth) { return WLAN_STATUS_FAILURE; } pucReceiveAddr = prWlanMacHeader->aucAddr2; } else if (prStaRec) { pucReceiveAddr = prStaRec->aucMacAddr; } else { #if CFG_ENABLE_WIFI_DIRECT pucReceiveAddr = aucBMC; #else return WLAN_STATUS_FAILURE; #endif } //4 <2> Check if already send a Deauth frame in MIN_DEAUTH_INTERVAL_MSEC GET_CURRENT_SYSTIME(&rCurrentTime); i4NewEntryIndex = -1; for (i = 0; i < MAX_DEAUTH_INFO_COUNT; i++) { prDeauthInfo = &(prAdapter->rWifiVar.arDeauthInfo[i]); /* For continuously sending Deauth frame, the minimum interval is * MIN_DEAUTH_INTERVAL_MSEC. */ if (CHECK_FOR_TIMEOUT(rCurrentTime, prDeauthInfo->rLastSendTime, MSEC_TO_SYSTIME(MIN_DEAUTH_INTERVAL_MSEC))) { i4NewEntryIndex = i; } else if (EQUAL_MAC_ADDR(pucReceiveAddr, prDeauthInfo->aucRxAddr) && (!pfTxDoneHandler)) { return WLAN_STATUS_FAILURE; } } //4 <3> Update information. if (i4NewEntryIndex > 0) { prDeauthInfo = &(prAdapter->rWifiVar.arDeauthInfo[i4NewEntryIndex]); COPY_MAC_ADDR(prDeauthInfo->aucRxAddr, pucReceiveAddr); prDeauthInfo->rLastSendTime = rCurrentTime; } else { /* NOTE(Kevin): for the case of AP mode, we may encounter this case * if deauth all the associated clients. 
*/ DBGLOG(SAA, WARN, ("No unused DEAUTH_INFO_T !\n")); } //4 <4> Allocate a PKT_INFO_T for Deauthentication Frame /* Init with MGMT Header Length + Length of Fixed Fields + IE Length */ u2EstimatedFrameLen = (MAC_TX_RESERVED_FIELD + WLAN_MAC_MGMT_HEADER_LEN + REASON_CODE_FIELD_LEN); /* Allocate a MSDU_INFO_T */ if ( (prMsduInfo = cnmMgtPktAlloc(prAdapter, u2EstimatedFrameLen)) == NULL) { DBGLOG(SAA, WARN, ("No PKT_INFO_T for sending Deauth Request.\n")); return WLAN_STATUS_RESOURCES; } //4 <5> Find the Transmitter Address and BSSID. if (prClassErrSwRfb) { /* The TA of Deauth is the A1 of RX frame */ pucTransmitAddr = prWlanMacHeader->aucAddr1; switch (prWlanMacHeader->u2FrameCtrl & MASK_TO_DS_FROM_DS) { case MASK_FC_FROM_DS: /* The BSSID of Deauth is the A2 of RX frame */ pucBssid = prWlanMacHeader->aucAddr2; break; case MASK_FC_TO_DS: /* The BSSID of Deauth is the A1 of RX frame */ pucBssid = prWlanMacHeader->aucAddr1; break; case MASK_TO_DS_FROM_DS: /* TODO(Kevin): Consider BOW, now we set the BSSID of Deauth * to the A2 of RX frame for temporary solution. 
*/ pucBssid = prWlanMacHeader->aucAddr2; break; /* No Default */ } } else if (prStaRec) { eNetTypeIndex = prStaRec->ucNetTypeIndex; prBssInfo = &(prAdapter->rWifiVar.arBssInfo[eNetTypeIndex]); pucTransmitAddr = prBssInfo->aucOwnMacAddr; pucBssid = prBssInfo->aucBSSID; } #if CFG_ENABLE_WIFI_DIRECT else { if (prAdapter->fgIsP2PRegistered) { prBssInfo = &(prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_P2P_INDEX]); ucStaRecIdx = STA_REC_INDEX_BMCAST; pucTransmitAddr = prBssInfo->aucOwnMacAddr; pucBssid = prBssInfo->aucBSSID; eNetTypeIndex = NETWORK_TYPE_P2P_INDEX; } else { /* 20130122: free packet by samplin */ cnmMgtPktFree(prAdapter, prMsduInfo); return WLAN_STATUS_FAILURE; } } #endif //4 <6> compose Deauthentication frame header and some fixed fields */ authComposeDeauthFrameHeaderAndFF( (PUINT_8)((UINT_32)(prMsduInfo->prPacket) + MAC_TX_RESERVED_FIELD), pucReceiveAddr, pucTransmitAddr, pucBssid, u2ReasonCode); #if CFG_SUPPORT_802_11W if (rsnCheckBipKeyInstalled(prAdapter, prStaRec)) { P_WLAN_DEAUTH_FRAME_T prDeauthFrame; prDeauthFrame = (P_WLAN_DEAUTH_FRAME_T)(PUINT_8)((UINT_32)(prMsduInfo->prPacket) + MAC_TX_RESERVED_FIELD); prDeauthFrame->u2FrameCtrl |= MASK_FC_PROTECTED_FRAME; DBGLOG(TX, WARN, ("authSendDeauthFrame with protection\n")); } #endif //4 <7> Update information of MSDU_INFO_T prMsduInfo->eSrc = TX_PACKET_MGMT; prMsduInfo->ucPacketType = HIF_TX_PACKET_TYPE_MGMT; prMsduInfo->ucStaRecIndex = ((prStaRec == NULL)?ucStaRecIdx:prStaRec->ucIndex); prMsduInfo->ucNetworkType = (UINT_8)eNetTypeIndex; prMsduInfo->ucMacHeaderLength = WLAN_MAC_MGMT_HEADER_LEN; prMsduInfo->fgIs802_1x = FALSE; prMsduInfo->fgIs802_11 = TRUE; prMsduInfo->u2FrameLength = WLAN_MAC_MGMT_HEADER_LEN + REASON_CODE_FIELD_LEN; prMsduInfo->ucTxSeqNum = nicIncreaseTxSeqNum(prAdapter); prMsduInfo->pfTxDoneHandler = pfTxDoneHandler; prMsduInfo->fgIsBasicRate = TRUE; //4 <8> Inform TXM to send this Deauthentication frame. 
nicTxEnqueueMsdu(prAdapter, prMsduInfo); return WLAN_STATUS_SUCCESS; } /* end of authSendDeauthFrame() */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will parse and process the incoming Deauthentication frame * if the given BSSID is matched. * * @param[in] prSwRfb Pointer to SW RFB data structure. * @param[in] aucBSSID Given BSSID * @param[out] pu2ReasonCode Pointer to store the Reason Code from Deauthentication. * * @retval WLAN_STATUS_FAILURE This is not the frame we should handle at current state. * @retval WLAN_STATUS_SUCCESS This is the frame we should handle. */ /*----------------------------------------------------------------------------*/ WLAN_STATUS authProcessRxDeauthFrame ( IN P_SW_RFB_T prSwRfb, IN UINT_8 aucBSSID[], OUT PUINT_16 pu2ReasonCode ) { P_WLAN_DEAUTH_FRAME_T prDeauthFrame; UINT_16 u2RxReasonCode; ASSERT(prSwRfb); ASSERT(aucBSSID); ASSERT(pu2ReasonCode); //4 <1> locate the Deauthentication Frame. prDeauthFrame = (P_WLAN_DEAUTH_FRAME_T) prSwRfb->pvHeader; //4 <2> Parse the Header of Deauthentication Frame. #if 0 // Kevin: Seems redundant WLAN_GET_FIELD_16(&prDeauthFrame->u2FrameCtrl, &u2RxFrameCtrl) u2RxFrameCtrl &= MASK_FRAME_TYPE; if (u2RxFrameCtrl != MAC_FRAME_DEAUTH) { return WLAN_STATUS_FAILURE; } #endif if ((prSwRfb->u2PacketLen - prSwRfb->u2HeaderLen) < REASON_CODE_FIELD_LEN) { ASSERT(0); return WLAN_STATUS_FAILURE; } /* Check if this Deauth Frame is coming from Target BSSID */ if (UNEQUAL_MAC_ADDR(prDeauthFrame->aucBSSID, aucBSSID)) { DBGLOG(SAA, LOUD, ("Ignore Deauth Frame from other BSS ["MACSTR"]\n", MAC2STR(prDeauthFrame->aucSrcAddr))); return WLAN_STATUS_FAILURE; } //4 <3> Parse the Fixed Fields of Deauthentication Frame Body. 
WLAN_GET_FIELD_16(&prDeauthFrame->u2ReasonCode, &u2RxReasonCode); *pu2ReasonCode = u2RxReasonCode; return WLAN_STATUS_SUCCESS; } /* end of authProcessRxDeauthFrame() */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will parse and process the incoming Authentication frame. * * @param[in] prSwRfb Pointer to SW RFB data structure. * @param[in] aucExpectedBSSID Given Expected BSSID. * @param[in] u2ExpectedAuthAlgNum Given Expected Authentication Algorithm Number * @param[in] u2ExpectedTransSeqNum Given Expected Transaction Sequence Number. * @param[out] pu2ReturnStatusCode Return Status Code. * * @retval WLAN_STATUS_SUCCESS This is the frame we should handle. * @retval WLAN_STATUS_FAILURE The frame we will ignore. */ /*----------------------------------------------------------------------------*/ WLAN_STATUS authProcessRxAuth1Frame ( IN P_ADAPTER_T prAdapter, IN P_SW_RFB_T prSwRfb, IN UINT_8 aucExpectedBSSID[], IN UINT_16 u2ExpectedAuthAlgNum, IN UINT_16 u2ExpectedTransSeqNum, OUT PUINT_16 pu2ReturnStatusCode ) { P_WLAN_AUTH_FRAME_T prAuthFrame; UINT_16 u2ReturnStatusCode = STATUS_CODE_SUCCESSFUL; ASSERT(prSwRfb); ASSERT(aucExpectedBSSID); ASSERT(pu2ReturnStatusCode); //4 <1> locate the Authentication Frame. prAuthFrame = (P_WLAN_AUTH_FRAME_T) prSwRfb->pvHeader; //4 <2> Check the BSSID if (UNEQUAL_MAC_ADDR(prAuthFrame->aucBSSID, aucExpectedBSSID)) { return WLAN_STATUS_FAILURE; /* Just Ignore this MMPDU */ } //4 <3> Parse the Fixed Fields of Authentication Frame Body. if (prAuthFrame->u2AuthAlgNum != u2ExpectedAuthAlgNum) { u2ReturnStatusCode = STATUS_CODE_AUTH_ALGORITHM_NOT_SUPPORTED; } if (prAuthFrame->u2AuthTransSeqNo != u2ExpectedTransSeqNum) { u2ReturnStatusCode = STATUS_CODE_AUTH_OUT_OF_SEQ; } *pu2ReturnStatusCode = u2ReturnStatusCode; return WLAN_STATUS_SUCCESS; } /* end of authProcessRxAuth1Frame() */
gpl-2.0
ashikrobi/Crabbykernel
drivers/video/msm/samsung_cmc624-8960.c
64
51661
/* * Copyright (C) 2011, Samsung Electronics. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #define DEBUG /* uncomment if you want debugging output */ #include <linux/delay.h> #include <asm/gpio.h> #include <mach/gpio.h> #include "msm_fb.h" //#include <linux/regulator/gpio-regulator.h> #include <linux/regulator/consumer.h> #include <linux/mfd/pm8xxx/pm8921.h> #include "samsung_cmc624-8960.h" #define PM8921_GPIO_BASE NR_GPIO_IRQS #define PM8921_GPIO_PM_TO_SYS(pm_gpio) (pm_gpio - 1 + PM8921_GPIO_BASE) /* * V_IMA_1.8V VREG_L3A * DISPLAY_3.3V VREG_L8A * V_IMA_1.2V GPIO LCD_PWR_EN * LCD_VOUT GPIO LCD_PWR_EN */ /* static struct regulator *v_ima_1_8v; static struct regulator *display_3_3v; */ #define LCD_PWR_EN 70 struct cmc624_data { struct i2c_client *client; }; static struct cmc624_data *p_cmc624_data; static struct i2c_client *g_client; #define I2C_M_WR 0 /* for i2c */ #define I2c_M_RD 1 /* for i2c */ unsigned long last_cmc624_Bank = 0xffff; unsigned long last_cmc624_Algorithm = 0xffff; static struct cmc624RegisterSet cmc624_TuneSeq[CMC624_MAX_SETTINGS]; static int cmc624_TuneSeqLen; #define CMC624_BRIGHTNESS_MAX_LEVEL 1600 struct cmc624_state_type cmc624_state = { .cabc_mode = CABC_OFF_MODE, .brightness = 42, .suspended = 0, .scenario = mDNIe_UI_MODE, .browser_scenario = COLOR_TONE_1, .background = STANDARD_MODE, .temperature = 
TEMP_STANDARD, .outdoor = OUTDOOR_OFF_MODE, .sub_tune = NULL, .main_tune = NULL, .negative = NEGATIVE_OFF_MODE, }; u16 gIDs[3] = { 0, }; boolean video_mode; static DEFINE_MUTEX(tuning_mutex); /* * functions for I2C transactions */ unsigned char cmc624_Power_LUT[NUM_POWER_LUT][NUM_ITEM_POWER_LUT] = { {0x42, 0x47, 0x3E, 0x52, 0x42, 0x3F, 0x3A, 0x37, 0x3F}, {0x38, 0x3d, 0x34, 0x48, 0x38, 0x35, 0x30, 0x2d, 0x35}, }; static int is_cmc624_on; /* ############################################################### * # * # TUNE VALUE * # * ############################################################### */ unsigned char cmc624_default_plut[NUM_ITEM_POWER_LUT] = { 0x42, 0x47, 0x3E, 0x52, 0x42, 0x3F, 0x3A, 0x37, 0x3F }; unsigned char cmc624_video_plut[NUM_ITEM_POWER_LUT] = { 0x38, 0x3d, 0x34, 0x48, 0x38, 0x35, 0x30, 0x2d, 0x35 }; const struct str_sub_tuning sub_tune_value[MAX_TEMP_MODE][MAX_OUTDOOR_MODE] = { { { .value[CABC_OFF_MODE] = {.name = "STANDARD, OUTDOOR:OFF, CABC:OFF", .value = NULL, .size = 0}, .value[CABC_ON_MODE] = {.name = "STANDARD, OUTDOOR:OFF, CABC:ON", .value = NULL, .size = 0} }, { .value[CABC_OFF_MODE] = {.name = "STANDARD, OUTDOOR:ON, CABC:OFF", .value = ove_cabcoff, .size = ARRAY_SIZE(ove_cabcoff)}, .value[CABC_ON_MODE] = {.name = "STANDARD, OUTDOOR:ON, CABC:ON", .value = ove_cabcoff, .size = ARRAY_SIZE(ove_cabcoff)} } }, { { .value[CABC_OFF_MODE] = {.name = "WARM, OUTDOOR:OFF, CABC:OFF", .value = warm_cabcoff, .size = ARRAY_SIZE(warm_cabcoff)}, .value[CABC_ON_MODE] = {.name = "WARM, OUTDOOR:OFF, CABC:ON", .value = warm_cabcoff, .size = ARRAY_SIZE(warm_cabcoff)} }, { .value[CABC_OFF_MODE] = {.name = "WARM, OUTDOOR:ON, CABC:OFF", .value = warm_ove_cabcoff, .size = ARRAY_SIZE(warm_ove_cabcoff)}, .value[CABC_ON_MODE] = {.name = "WARM, OUTDOOR:ON, CABC:ON", .value = warm_ove_cabcoff, .size = ARRAY_SIZE(warm_ove_cabcoff)} } }, { { .value[CABC_OFF_MODE] = {.name = "COLD, OUTDOOR:OFF, CABC:OFF", .value = cold_cabcoff, .size = ARRAY_SIZE(cold_cabcoff)}, 
.value[CABC_ON_MODE] = {.name = "COLD, OUTDOOR:OFF, CABC:ON", .value = cold_cabcoff, .size = ARRAY_SIZE(cold_cabcoff)} }, { .value[CABC_OFF_MODE] = {.name = "COLD, OUTDOOR:ON, CABC:OFF", .value = cold_ove_cabcoff, .size = ARRAY_SIZE(cold_ove_cabcoff)}, .value[CABC_ON_MODE] = {.name = "COLD, OUTDOOR:ON, CABC:ON", .value = cold_ove_cabcoff, .size = ARRAY_SIZE(cold_ove_cabcoff)} } }, }; const struct str_main_tuning tune_value[MAX_BACKGROUND_MODE][MAX_mDNIe_MODE] = { {{.value[CABC_OFF_MODE] = { .name = "DYN_UI_OFF", .flag = 0, .tune = dynamic_ui_cabcoff, .plut = NULL, .size = ARRAY_SIZE(dynamic_ui_cabcoff)}, .value[CABC_ON_MODE] = {.name = "DYN_UI_ON", .flag = 0, .tune = dynamic_ui_cabcoff, .plut = NULL, .size = ARRAY_SIZE(dynamic_ui_cabcoff)} }, {.value[CABC_OFF_MODE] = { .name = "DYN_VIDEO_OFF", .flag = 0, .tune = dynamic_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(dynamic_video_cabcoff)}, .value[CABC_ON_MODE] = {.name = "DYN_VIDEO_ON", .flag = 0, .tune = dynamic_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(dynamic_video_cabcoff)} }, {.value[CABC_OFF_MODE] = { .name = "DYN_VIDEO_W_OFF", .flag = 0, .tune = dynamic_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(dynamic_video_cabcoff)}, .value[CABC_ON_MODE] = { .name = "DYN_VIDEO_W_ON", .flag = 0, .tune = dynamic_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(dynamic_video_cabcoff)} }, {.value[CABC_OFF_MODE] = { .name = "DYN_VIDEO_C_OFF", .flag = 0, .tune = dynamic_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(dynamic_video_cabcoff)}, .value[CABC_ON_MODE] = {.name = "DYN_VIDEO_C_ON", .flag = 0, .tune = dynamic_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(dynamic_video_cabcoff)} }, {.value[CABC_OFF_MODE] = { .name = "DYN_CAMERA_OFF", .flag = TUNE_FLAG_CABC_ALWAYS_OFF, .tune = camera_cabcoff, .plut = NULL, .size = ARRAY_SIZE(camera_cabcoff)}, .value[CABC_ON_MODE] = {.name = "DYN_CAMERA_ON", .flag = TUNE_FLAG_CABC_ALWAYS_OFF, .tune 
= camera_cabcoff, .plut = NULL, .size = ARRAY_SIZE(camera_cabcoff)} }, {.value[CABC_OFF_MODE] = { .name = "DYN_NAVI_OFF", .flag = 0, .tune = NULL, .plut = NULL, .size = 0}, .value[CABC_ON_MODE] = {.name = "DYN_NAVI_ON", .flag = 0, .tune = NULL, .plut = NULL, .size = 0} }, {.value[CABC_OFF_MODE] = { .name = "DYN_GALLERY_OFF", .flag = 0, .tune = dynamic_gallery_cabcoff, .plut = NULL, .size = ARRAY_SIZE(dynamic_gallery_cabcoff)}, .value[CABC_ON_MODE] = {.name = "DYN_GALLERY_ON", .flag = 0, .tune = dynamic_gallery_cabcoff, .plut = NULL, .size = ARRAY_SIZE(dynamic_gallery_cabcoff)} }, {.value[CABC_OFF_MODE] = { .name = "DYN_DMB_OFF", .flag = 0, .tune = dynamic_dmb_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(dynamic_dmb_cabcoff)}, .value[CABC_ON_MODE] = {.name = "DYN_DMB_ON", .flag = 0, .tune = dynamic_dmb_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(dynamic_dmb_cabcoff)} } }, {{.value[CABC_OFF_MODE] = { .name = "STD_UI_OFF", .flag = 0, .tune = standard_ui_cabcoff, .plut = NULL, .size = ARRAY_SIZE(standard_ui_cabcoff)}, .value[CABC_ON_MODE] = {.name = "STD_UI_ON", .flag = 0, .tune = standard_ui_cabcon, .plut = NULL, .size = ARRAY_SIZE(standard_ui_cabcon)} }, {.value[CABC_OFF_MODE] = { .name = "STD_VIDEO_OFF", .flag = 0, .tune = standard_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(standard_video_cabcoff)}, .value[CABC_ON_MODE] = {.name = "STD_VIDEO_ON", .flag = 0, .tune = standard_video_cabcon, .plut = cmc624_video_plut, .size = ARRAY_SIZE(standard_video_cabcon)} }, {.value[CABC_OFF_MODE] = { .name = "STD_VIDEO_W_OFF", .flag = 0, .tune = standard_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(standard_video_cabcoff)}, .value[CABC_ON_MODE] = {.name = "STD_VIDEO_W_ON", .flag = 0, .tune = standard_video_cabcon, .plut = cmc624_video_plut, .size = ARRAY_SIZE(standard_video_cabcon)} }, {.value[CABC_OFF_MODE] = { .name = "STD_VIDEO_C_OFF", .flag = 0, .tune = standard_video_cabcoff, .plut = cmc624_video_plut, .size = 
ARRAY_SIZE(standard_video_cabcoff)}, .value[CABC_ON_MODE] = {.name = "STD_VIDEO_C_ON", .flag = 0, .tune = standard_video_cabcon, .plut = cmc624_video_plut, .size = ARRAY_SIZE(standard_video_cabcon)} }, {.value[CABC_OFF_MODE] = { .name = "STD_CAMERA_OFF", .flag = TUNE_FLAG_CABC_ALWAYS_OFF, .tune = camera_cabcoff, .plut = NULL, .size = ARRAY_SIZE(camera_cabcoff)}, .value[CABC_ON_MODE] = {.name = "STD_CAMERA_ON", .flag = TUNE_FLAG_CABC_ALWAYS_OFF, .tune = camera_cabcon, .plut = NULL, .size = ARRAY_SIZE(camera_cabcon)} }, {.value[CABC_OFF_MODE] = { .name = "STD_NAVI_OFF", .flag = 0, .tune = NULL, .plut = NULL, .size = 0}, .value[CABC_ON_MODE] = {.name = "STD_NAVI_ON", .flag = 0, .tune = NULL, .plut = NULL, .size = 0} }, {.value[CABC_OFF_MODE] = { .name = "STD_GALLERY_OFF", .flag = 0, .tune = standard_gallery_cabcoff, .plut = NULL, .size = ARRAY_SIZE(standard_gallery_cabcoff)}, .value[CABC_ON_MODE] = {.name = "STD_GALLERY_ON", .flag = 0, .tune = standard_gallery_cabcoff, .plut = NULL, .size = ARRAY_SIZE(standard_gallery_cabcoff)} }, {.value[CABC_OFF_MODE] = { .name = "STD_DMB_OFF", .flag = 0, .tune = standard_dmb_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(standard_dmb_cabcoff)}, .value[CABC_ON_MODE] = {.name = "STD_DMB_ON", .flag = 0, .tune = standard_dmb_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(standard_dmb_cabcoff)} }, {.value[CABC_OFF_MODE] = { .name = "STD_VTCALL_OFF", .flag = 0, .tune = standard_vtcall_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(standard_vtcall_cabcoff)}, .value[CABC_ON_MODE] = {.name = "STD_VTCALL_ON", .flag = 0, .tune = standard_vtcall_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(standard_vtcall_cabcoff)} } }, {{.value[CABC_OFF_MODE] = { .name = "NAT_UI_OFF", .flag = 0, .tune = natural_ui_cabcoff, .plut = NULL , .size = ARRAY_SIZE(natural_ui_cabcoff) }, .value[CABC_ON_MODE] = { .name = "NAT_UI_ON", .flag = 0, .tune = natural_ui_cabcoff, .plut = NULL, .size = ARRAY_SIZE(natural_ui_cabcoff) } }, 
{.value[CABC_OFF_MODE] = { .name = "NAT_VIDEO_OFF", .flag = 0, .tune = natural_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(natural_video_cabcoff) }, .value[CABC_ON_MODE] = { .name = "NAT_VIDEO_ON", .flag = 0, .tune = natural_video_cabcoff, .plut = cmc624_video_plut , .size = ARRAY_SIZE(natural_video_cabcoff) } }, {.value[CABC_OFF_MODE] = { .name = "NAT_VIDEO_W_OFF", .flag = 0, .tune = natural_video_cabcoff, .plut = cmc624_video_plut , .size = ARRAY_SIZE(natural_video_cabcoff) }, .value[CABC_ON_MODE] = { .name = "NAT_VIDEO_W_ON", .flag = 0, .tune = natural_video_cabcoff, .plut = cmc624_video_plut , .size = ARRAY_SIZE(natural_video_cabcoff) } }, {.value[CABC_OFF_MODE] = { .name = "NAT_VIDEO_C_OFF", .flag = 0, .tune = natural_video_cabcoff, .plut = cmc624_video_plut , .size = ARRAY_SIZE(natural_video_cabcoff) }, .value[CABC_ON_MODE] = { .name = "NAT_VIDEO_C_ON", .flag = 0, .tune = natural_video_cabcoff, .plut = cmc624_video_plut , .size = ARRAY_SIZE(natural_video_cabcoff) } }, {.value[CABC_OFF_MODE] = { .name = "NAT_CAMERA_OFF", .flag = TUNE_FLAG_CABC_ALWAYS_OFF, .tune = camera_cabcoff, .plut = NULL , .size = ARRAY_SIZE(camera_cabcoff)}, .value[CABC_ON_MODE] = { .name = "NAT_CAMERA_ON", .flag = TUNE_FLAG_CABC_ALWAYS_OFF, .tune = camera_cabcoff, .plut = NULL , .size = ARRAY_SIZE(camera_cabcoff)} }, {.value[CABC_OFF_MODE] = { .name = "NAT_NAVI_OFF", .flag = 0, .tune = NULL, .plut = NULL , .size = 0}, .value[CABC_ON_MODE] = { .name = "NAT_NAVI_ON", .flag = 0, .tune = NULL, .plut = NULL , .size = 0} }, {.value[CABC_OFF_MODE] = { .name = "NAT_GALLERY_OFF", .flag = 0, .tune = natural_gallery_cabcoff, .plut = NULL , .size = ARRAY_SIZE(natural_gallery_cabcoff) }, .value[CABC_ON_MODE] = { .name = "NAT_GALLERY_ON", .flag = 0, .tune = natural_gallery_cabcoff, .plut = NULL , .size = ARRAY_SIZE(natural_gallery_cabcoff) } }, {.value[CABC_OFF_MODE] = { .name = "NAT_DMB_OFF", .flag = 0, .tune = natural_dmb_cabcoff, .plut = cmc624_video_plut , .size = 
ARRAY_SIZE(natural_dmb_cabcoff) }, .value[CABC_ON_MODE] = { .name = "NAT_DMB_ON", .flag = 0, .tune = natural_dmb_cabcoff, .plut = cmc624_video_plut , .size = ARRAY_SIZE(natural_dmb_cabcoff) } } }, {{.value[CABC_OFF_MODE] = { .name = "MOV_UI_OFF", .flag = 0, .tune = movie_ui_cabcoff, .plut = NULL, .size = ARRAY_SIZE(movie_ui_cabcoff)}, .value[CABC_ON_MODE] = {.name = "MOV_UI_ON", .flag = 0, .tune = movie_ui_cabcoff, .plut = NULL, .size = ARRAY_SIZE(movie_ui_cabcoff)} }, {.value[CABC_OFF_MODE] = { .name = "MOV_VIDEO_OFF", .flag = 0, .tune = movie_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(movie_video_cabcoff)}, .value[CABC_ON_MODE] = {.name = "MOV_VIDEO_ON", .flag = 0, .tune = movie_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(movie_video_cabcoff)} }, {.value[CABC_OFF_MODE] = { .name = "MOV_VIDEO_W_OFF", .flag = 0, .tune = movie_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(movie_video_cabcoff)}, .value[CABC_ON_MODE] = {.name = "MOV_VIDEO_W_ON", .flag = 0, .tune = movie_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(movie_video_cabcoff)} }, {.value[CABC_OFF_MODE] = { .name = "MOV_VIDEO_C_OFF", .flag = 0, .tune = movie_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(movie_video_cabcoff) }, .value[CABC_ON_MODE] = {.name = "MOV_VIDEO_C_ON", .flag = 0, .tune = movie_video_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(movie_video_cabcoff) } }, {.value[CABC_OFF_MODE] = { .name = "MOV_CAMERA_OFF", .flag = TUNE_FLAG_CABC_ALWAYS_OFF, .tune = camera_cabcoff, .plut = NULL, .size = ARRAY_SIZE(camera_cabcoff)}, .value[CABC_ON_MODE] = {.name = "MOV_CAMERA_ON", .flag = TUNE_FLAG_CABC_ALWAYS_OFF, .tune = camera_cabcoff, .plut = NULL, .size = ARRAY_SIZE(camera_cabcoff) } }, {.value[CABC_OFF_MODE] = { .name = "MOV_NAVI_OFF", .flag = 0, .tune = NULL, .plut = NULL, .size = 0}, .value[CABC_ON_MODE] = {.name = "MOV_NAVI_ON", .flag = 0, .tune = NULL, .plut = NULL, .size = 0} }, {.value[CABC_OFF_MODE] = { 
.name = "MOV_GALLERY_OFF", .flag = 0, .tune = movie_gallery_cabcoff, .plut = NULL, .size = ARRAY_SIZE(movie_gallery_cabcoff) }, .value[CABC_ON_MODE] = { .name = "MOV_GALLERY_ON", .flag = 0, .tune = movie_gallery_cabcoff, .plut = NULL, .size = ARRAY_SIZE(movie_gallery_cabcoff) } }, {.value[CABC_OFF_MODE] = { .name = "MOV_DMB_OFF", .flag = 0, .tune = movie_dmb_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(movie_dmb_cabcoff) }, .value[CABC_ON_MODE] = { .name = "MOV_DMB_ON", .flag = 0, .tune = movie_dmb_cabcoff, .plut = cmc624_video_plut, .size = ARRAY_SIZE(movie_dmb_cabcoff) } } } }; const struct str_sub_tuning browser_tune_value[COLOR_TONE_MAX] = { /* browser tone*/ { .value[CABC_OFF_MODE] = {.name = "BROWSER_TONE1,CABC:OFF", .value = browser_tone1_tune, .size = ARRAY_SIZE(browser_tone1_tune)}, .value[CABC_ON_MODE] = {.name = "BROWSER_TONE1,CABC:ON", .value = browser_tone1_tune, .size = ARRAY_SIZE(browser_tone1_tune)} }, { .value[CABC_OFF_MODE] = {.name = "BROWSER_TONE2,CABC:OFF", .value = browser_tone2_tune, .size = ARRAY_SIZE(browser_tone2_tune)}, .value[CABC_ON_MODE] = {.name = "BROWSER_TONE2,CABC:ON", .value = browser_tone2_tune, .size = ARRAY_SIZE(browser_tone2_tune)} }, { .value[CABC_OFF_MODE] = {.name = "BROWSER_TONE3,CABC:OFF", .value = browser_tone3_tune, .size = ARRAY_SIZE(browser_tone3_tune)}, .value[CABC_ON_MODE] = {.name = "BROWSER_TONE3,CABC:ON", .value = browser_tone3_tune, .size = ARRAY_SIZE(browser_tone3_tune)} }, }; const struct str_sub_tuning negative_tune_value = { .value[CABC_OFF_MODE] = {.name = "NEGATIVE_TONE : CABC:OFF", .value = cmc624_tune_tone_reversal, .size = ARRAY_SIZE(cmc624_tune_tone_reversal)}, .value[CABC_ON_MODE] = {.name = "NEGATIVE_TONE : CABC:ON", .value = cmc624_tune_tone_reversal, .size = ARRAY_SIZE(cmc624_tune_tone_reversal)} }; /* end of TUNE VALUE * ########################################################## */ bool cmc624_I2cWrite16(unsigned char Addr, unsigned long Data) { int err = -1000; struct i2c_msg msg[1]; 
	unsigned char data[3];

	/* Bail out early if the driver has not been probed yet. */
	if (!p_cmc624_data) {
		pr_info("p_cmc624_data is NULL\n");
		return -ENODEV;
	}

	/* Refuse writes while the chip is powered down. */
	if (!is_cmc624_on) {
		pr_info("cmc624 power down..\n");
		return -ENODEV;
	}

	g_client = p_cmc624_data->client;

	if ((g_client == NULL)) {
		pr_info("cmc624_I2cWrite16 g_client is NULL\n");
		return -ENODEV;
	}

	if (!g_client->adapter) {
		pr_info("cmc624_I2cWrite16 g_client->adapter is NULL\n");
		return -ENODEV;
	}

	/*
	 * NOTE(review): this function is declared bool but returns negative
	 * errno values; callers appear to treat any non-zero value as
	 * failure — confirm before changing the return type.
	 */
#if defined(CONFIG_TARGET_LOCALE_KOR_SKT)\
	|| defined(CONFIG_TARGET_LOCALE_KOR_LGU)
	/*
	 * Bank-select writes (reg 0x0000) are cached: skip the transfer when
	 * the requested bank is already selected.  Writes to reg 0x0001 while
	 * bank 0 is selected are mirrored into last_cmc624_Algorithm so that
	 * later reads of that register can be answered from the cache (see
	 * cmc624_I2cRead16 below).
	 */
	if (Addr == 0x0000) {
		if (Data == last_cmc624_Bank)
			return 0;
		last_cmc624_Bank = Data;
	} else if (Addr == 0x0001 && last_cmc624_Bank == 0) {
		last_cmc624_Algorithm = Data;
	}
#endif

	/* 8-bit register address followed by the 16-bit value, MSB first. */
	data[0] = Addr;
	data[1] = ((Data >> 8) & 0xFF);
	data[2] = (Data) & 0xFF;

	msg->addr = g_client->addr;
	msg->flags = I2C_M_WR;
	msg->len = 3;
	msg->buf = data;

	err = i2c_transfer(g_client->adapter, msg, 1);
	if (err >= 0) {
		/*pr_info("%s %d i2c transfer OK\n", __func__, __LINE__); */
		return 0;
	}

	pr_info("%s i2c transfer error:%d(a:%d)\n", __func__, err, Addr);
	return err;
}

/*
 * Read a 16-bit CMC624 register over I2C.
 *
 * @reg: 8-bit register address
 * @val: out parameter receiving the 16-bit register value
 *
 * Returns 0 on success, -ENODEV when the client is missing, or the negative
 * i2c_transfer() error code.
 *
 * NOTE(review): reads of reg 0x0001 are answered from the
 * last_cmc624_Algorithm cache without checking which bank is currently
 * selected — presumably callers only read 0x0001 while bank 0 is active;
 * confirm against callers.
 */
int cmc624_I2cRead16(u8 reg, u16 *val)
{
	int err;
	struct i2c_msg msg[2];
	u8 regaddr = reg;
	u8 data[2];

	if (!p_cmc624_data) {
		pr_err("%s p_cmc624_data is NULL\n", __func__);
		return -ENODEV;
	}

	g_client = p_cmc624_data->client;

	if ((g_client == NULL)) {
		pr_err("%s g_client is NULL\n", __func__);
		return -ENODEV;
	}

	if (!g_client->adapter) {
		pr_err("%s g_client->adapter is NULL\n", __func__);
		return -ENODEV;
	}

	/* Algorithm register is served from the write-side cache. */
	if (regaddr == 0x0001) {
		*val = last_cmc624_Algorithm;
		return 0;
	}

	/* Write the register address, then read back two data bytes. */
	msg[0].addr = g_client->addr;
	msg[0].flags = I2C_M_WR;
	msg[0].len = 1;
	msg[0].buf = &regaddr;

	msg[1].addr = g_client->addr;
	msg[1].flags = I2C_M_RD;
	msg[1].len = 2;
	msg[1].buf = &data[0];

	err = i2c_transfer(g_client->adapter, &msg[0], 2);
	if (err >= 0) {
		*val = (data[0] << 8) | data[1];	/* MSB first */
		return 0;
	}

	/* add by inter.park */
	pr_err("%s %d i2c transfer error: %d\n", __func__, __LINE__, err);
	return err;
}

/*#####################################################
 * CMC624 Backlight Control
function ##################################################### */ static int cmc624_set_tune_value(const struct cmc624RegisterSet *value, int array_size) { int ret = 0; unsigned int num; unsigned int i = 0; mutex_lock(&tuning_mutex); num = array_size; for (i = 0; i < num; i++) { ret = cmc624_I2cWrite16(value[i].RegAddr, value[i].Data); pr_info("\naddr =%x , data = %d",value[i].RegAddr,value[i].Data); if (ret != 0) { pr_debug ("[CMC624:ERROR]:cmc624_I2cWrite16 failed : %d\n", ret); goto set_error; } } set_error: mutex_unlock(&tuning_mutex); return ret; } #if defined(CONFIG_FB_MSM_MIPI_NOVATEK_VIDEO_WXGA_PT_PANEL) #define DUTY_DIM 13 #define DUTY_MIN 20 #define DUTY_25 120 #define DUTY_DEFAULT 900 #define DUTY_MAX 1450 #define PWM_DUTY_MAX 1 /* Backlight levels */ #define BRIGHTNESS_OFF 0 #define BRIGHTNESS_DIM 20 #define BRIGHTNESS_MIN 30 #define BRIGHTNESS_25 86 #define BRIGHTNESS_DEFAULT 140 #define BRIGHTNESS_MAX 255 /* * CMC624 PWM control */ static struct cmc624RegisterSet pwm_cabcoff[] = { {0x00, 0x0001}, /* BANK 1 */ {0xF8, 0x0011}, /* PWM HIGH ACTIVE, USE REGISTER VALUE */ {0xF9, }, /* PWM Counter */ {0x00, 0x0000}, /* BANK 0 */ {0xFD, 0xFFFF}, /* MODULE REG MASK RELEASE */ {0xFE, 0xFFFF}, /* MODULE REG MASK RELEASE */ {0xFF, 0x0000}, /* MASK RELEASE */ }; static int cmc624_scale_pwm_dutycycle(int level) { int scaled_level = 0; if (level == BRIGHTNESS_OFF) scaled_level = BRIGHTNESS_OFF; else if (level <= BRIGHTNESS_DIM) scaled_level = PWM_DUTY_MAX*DUTY_DIM; else if (level <= BRIGHTNESS_MIN) scaled_level = (level - BRIGHTNESS_DIM) * (PWM_DUTY_MAX * DUTY_MIN - PWM_DUTY_MAX * DUTY_DIM) / (BRIGHTNESS_MIN - BRIGHTNESS_DIM) + PWM_DUTY_MAX * DUTY_DIM; else if (level <= BRIGHTNESS_25) scaled_level = (level - BRIGHTNESS_MIN) * (PWM_DUTY_MAX * DUTY_25 - PWM_DUTY_MAX * DUTY_MIN) / (BRIGHTNESS_25 - BRIGHTNESS_MIN) + PWM_DUTY_MAX * DUTY_MIN; else if (level <= BRIGHTNESS_DEFAULT) scaled_level = (level - BRIGHTNESS_25) * (PWM_DUTY_MAX * DUTY_DEFAULT - PWM_DUTY_MAX * 
DUTY_25) / (BRIGHTNESS_DEFAULT - BRIGHTNESS_25) + PWM_DUTY_MAX * DUTY_25; else if (level <= BRIGHTNESS_MAX) scaled_level = (level - BRIGHTNESS_DEFAULT) * (PWM_DUTY_MAX * DUTY_MAX - PWM_DUTY_MAX * DUTY_DEFAULT) / (BRIGHTNESS_MAX - BRIGHTNESS_DEFAULT) + PWM_DUTY_MAX * DUTY_DEFAULT; pr_debug("%s: level: %d, scaled_level: %d, proc:%s, pid: %d, tgid:%d\n", __func__, level, scaled_level, current->comm, current->pid, current->tgid); return scaled_level; } int cmc624_set_pwm_backlight( int level) { int ret; /* set pwm counter value for cabc-off pwm */ pwm_cabcoff[2].Data = cmc624_scale_pwm_dutycycle(level); pr_debug("%s: cabc off: intensity=%d, pwm cnt=%d\n", __func__, level, pwm_cabcoff[2].Data); ret = cmc624_set_tune_value(pwm_cabcoff, ARRAY_SIZE(pwm_cabcoff)); return 0; } #endif /* value: 0 ~ 1600*/ int Islcdonoff(void) { /*return 0;*/ return cmc624_state.suspended; /* tmps solution*/ } /*value: 0 ~ 100*/ static void cmc624_manual_pwm_brightness(int value) { pr_debug("[CMC624:INFO]: %s value : %d\n", __func__, value); return; } /* value: 0 ~ 1600*/ void cmc624_pwm_brightness(int value) { pr_debug("[CMC624:Info] : %s : value : %d\n", __func__, value); return; } void cmc624_pwm_brightness_bypass(int value) { int data; pr_debug("%s : BL brightness level = %d\n", __func__, value); if (value < 0) data = 0; else if (value > 1600) data = 1600; else data = value; if (data < 16) data = 1; /* Range of data 0~1600, min value 0~15 is same as 0 */ else data = data >> 4; cmc624_state.brightness = data; cmc624_manual_pwm_brightness(data); } #define VALUE_FOR_1600 16 void set_backlight_pwm(int level) { mutex_lock(&tuning_mutex); if (!Islcdonoff()) { if (cmc624_state.main_tune == NULL) { /* * struct cmc624_state_type cmc624_state = { * .cabc_mode = CABC_ON_MODE, * .brightness = 42, * .suspended = 0, * .scenario = mDNIe_UI_MODE, * .background = STANDARD_MODE, * .temperature = TEMP_STANDARD, * .outdoor = OUTDOOR_OFF_MODE, * .sub_tune = NULL, * .main_tune = NULL, *}; */ pr_info 
("===================================\n"); pr_info ("[CMC624] cmc624_state.main_tune is NULL?...\n"); pr_info("[CMC624] DUMP :\n"); pr_info("[CMC624] cabc_mode : %d\n", cmc624_state.cabc_mode); pr_info("[CMC624] brightness : %d\n", cmc624_state.brightness); pr_info("[CMC624] suspended : %d\n", cmc624_state.suspended); pr_info("[CMC624] scenario : %d\n", cmc624_state.scenario); pr_info("[CMC624] background : %d\n", cmc624_state.background); pr_info("[CMC624] temperature : %d\n", cmc624_state.temperature); pr_info("[CMC624] outdoor : %d\n", cmc624_state.outdoor); pr_info ("===================================\n"); mutex_unlock(&tuning_mutex); return; } if (cmc624_state.cabc_mode == CABC_OFF_MODE || (cmc624_state.main_tune->flag & TUNE_FLAG_CABC_ALWAYS_OFF)) cmc624_manual_pwm_brightness(level); else cmc624_pwm_brightness(level * VALUE_FOR_1600); } mutex_unlock(&tuning_mutex); } /*##################################################### * CMC624 common function * * void bypass_onoff_ctrl(int value); * void cabc_onoff_ctrl(int value); * int set_mdnie_scenario_mode(unsigned int mode); * int load_tuning_data(char *filename); * int apply_main_tune_value(enum eLcd_mDNIe_UI ui, * enum eBackground_Mode bg, enum eCabc_Mode cabc, int force); * int apply_sub_tune_value(enum eCurrent_Temp temp, * enum eOutdoor_Mode ove, enum eCabc_Mode cabc, int force); ##################################################### */ void bypass_onoff_ctrl(int value) { pr_debug("[CMC624:Info] : %s : value : %d\n", __func__, value); return; } void cabc_onoff_ctrl(int value) { if (apply_main_tune_value (cmc624_state.scenario, cmc624_state.background, value, 0) != 0) { pr_debug("[CMC624:ERROR]:%s: apply main tune value faile\n", __func__); return; } } static struct cmc624RegisterSet cmc624_TuneSeq[CMC624_MAX_SETTINGS]; static int parse_text(char *src, int len) { int i, count, ret; int index = 0; char *str_line[CMC624_MAX_SETTINGS]; char *sstart; char *c; unsigned int data1, data2; c = src; count = 0; sstart = 
c; for (i = 0; i < len; i++, c++) { char a = *c; if (a == '\r' || a == '\n') { if (c > sstart) { str_line[count] = sstart; count++; } *c = '\0'; sstart = c + 1; } } if (c > sstart) { str_line[count] = sstart; count++; } for (i = 0; i < count; i++) { ret = sscanf(str_line[i], "0x%x,0x%x\n", &data1, &data2); pr_debug("Result => [0x%2x 0x%4x] %s\n", data1, data2, (ret == 2) ? "Ok" : "Not available"); if (ret == 2) { cmc624_TuneSeq[index].RegAddr = (unsigned char)data1; cmc624_TuneSeq[index++].Data = (unsigned long)data2; } } return index; } bool cmc624_tune(unsigned long num) { unsigned int i; pr_debug("[CMC624:INFO] Start tuning CMC624\n"); for (i = 0; i < num; i++) { pr_debug("[CMC624:Tuning][%2d] : reg : 0x%2x, data: 0x%4x\n", i + 1, cmc624_TuneSeq[i].RegAddr, cmc624_TuneSeq[i].Data); if (cmc624_I2cWrite16 (cmc624_TuneSeq[i].RegAddr, cmc624_TuneSeq[i].Data) != 0) { pr_err("[CMC624:ERROR] : I2CWrite failed\n"); return 0; } pr_debug("[CMC624:Tunig] : Write Done\n"); if (cmc624_TuneSeq[i].RegAddr == CMC624_REG_SWRESET && cmc624_TuneSeq[i].Data == 0xffff) { mdelay(3); } } pr_debug("[CMC624:INFO] End tuning CMC624\n"); return 1; } int load_tuning_data(char *filename) { struct file *filp; char *dp; long l; loff_t pos; int ret; mm_segment_t fs; pr_debug("[CMC624:INFO]:%s called loading file name : %s\n", __func__, filename); cmc624_TuneSeqLen = 0; fs = get_fs(); set_fs(get_ds()); filp = filp_open(filename, O_RDONLY, 0); if (IS_ERR(filp)) { pr_debug("[CMC624:ERROR]:File open failed\n"); return -1; } l = filp->f_path.dentry->d_inode->i_size; pr_debug("[CMC624:INFO]: Loading File Size : %ld(bytes)", l); dp = kmalloc(l + 10, GFP_KERNEL); if (dp == NULL) { pr_debug( "[CMC624:ERROR]:Can't not alloc memory for tuning file load\n"); filp_close(filp, current->files); return -1; } pos = 0; memset(dp, 0, l); pr_debug("[CMC624:INFO] : before vfs_read()\n"); ret = vfs_read(filp, (char __user *)dp, l, &pos); pr_debug("[CMC624:INFO] : after vfs_read()\n"); if (ret != l) { 
pr_debug("[CMC624:ERROR] : vfs_read() filed ret : %d\n", ret); kfree(dp); filp_close(filp, current->files); return -1; } filp_close(filp, current->files); set_fs(fs); cmc624_TuneSeqLen = parse_text(dp, l); if (!cmc624_TuneSeqLen) { pr_debug("[CMC624:ERROR]:Nothing to parse\n"); kfree(dp); return -1; } pr_debug("[CMC624:INFO] : Loading Tuning Value's Count : %d", cmc624_TuneSeqLen); cmc624_tune(cmc624_TuneSeqLen); kfree(dp); return cmc624_TuneSeqLen; } #define SUB_TUNE 0 #define MAIN_TUNE 1 #define BROW_TUNE 2 int init_tune_flag[3] = {0,}; int apply_sub_tune_value(enum eCurrent_Temp temp, enum eOutdoor_Mode outdoor, enum eCabc_Mode cabc, int force) { int register_size; if (cmc624_state.negative == 1) { cmc624_state.temperature = temp; cmc624_state.outdoor = outdoor; return 0; } /* if(tuning_enable){ pr_debug("[CMC624:INFO]:%s:Tuning mode Enabled\n", __func__); return 0; } */ if (force == 0) { if ((cmc624_state.temperature == temp) && (cmc624_state.outdoor == outdoor)) { pr_debug( "[CMC624:INFO]:%s:already setted temp : %d, over : %d\n" , __func__, temp, outdoor); return 1; } } pr_debug("=================================================\n"); pr_debug(" CMC624 Mode Change.sub tune\n"); pr_debug("==================================================\n"); pr_debug("[CMC624:INFO]:%s:curr sub tune : %s\n", __func__, sub_tune_value[cmc624_state.temperature][cmc624_state.outdoor]. value[cmc624_state.cabc_mode].name); pr_debug("[CMC624:INFO]:%s: sub tune : %s\n", __func__, sub_tune_value[temp][outdoor].value[cabc].name); if ((outdoor == OUTDOOR_OFF_MODE) || (temp == TEMP_STANDARD)) { pr_debug("[CMC624:INFO]:%s:set default main tune\n", __func__); register_size = tune_value[cmc624_state.background] [cmc624_state.scenario]. value[cmc624_state.cabc_mode].size; if (cmc624_set_tune_value(tune_value[cmc624_state.background] [cmc624_state.scenario]. 
value[cmc624_state.cabc_mode].tune, register_size) != 0) { pr_debug("[CMC624:ERROR]:%s: set tune value falied\n",\ __func__); return -1; } if ((outdoor != OUTDOOR_OFF_MODE) || (temp != TEMP_STANDARD)) goto set_sub_tune; } else { set_sub_tune: register_size = sub_tune_value[temp][outdoor].value[cabc].size; if (cmc624_set_tune_value (sub_tune_value[temp][outdoor].value[cabc].value, register_size)) { pr_debug("[CMC624:ERROR]:%s: set tune value falied\n",\ __func__); return -1; } } cmc624_state.temperature = temp; cmc624_state.outdoor = outdoor; return 0; } void cmc_timing_generator_reset(void) { mutex_lock(&tuning_mutex); /* Switch off cmc timing generator */ cmc624_I2cWrite16(0x00, 0x0003); cmc624_I2cWrite16(0x80, 0x0000); cmc624_I2cWrite16(0x00, 0x0002); cmc624_I2cWrite16(0x52, 0x0000); /* Switch on cmc timing generator */ cmc624_I2cWrite16(0x00, 0x0003); cmc624_I2cWrite16(0x80, 0x0001); cmc624_I2cWrite16(0x00, 0x0002); cmc624_I2cWrite16(0x52, 0x0001); mutex_unlock(&tuning_mutex); } int apply_main_tune_value(enum eLcd_mDNIe_UI ui, enum eBackground_Mode bg, enum eCabc_Mode cabc, int force) { enum eCurrent_Temp temp = 0; if (cmc624_state.negative == 1) { cmc624_state.scenario = ui; cmc624_state.background = bg; cmc624_state.cabc_mode = cabc; return 0; } pr_debug("==================================================\n"); pr_debug(" CMC624 Mode Change. Main tune\n"); pr_debug("==================================================\n"); if (force == 0) { if ((cmc624_state.scenario == ui) && (cmc624_state.background == bg) && (cmc624_state.cabc_mode == cabc)) { pr_debug( "[CMC624:INFO]:%s:already setted ui : %d, bg : %d\n", __func__, ui, bg); return 0; } } if (cmc624_state.scenario == mDNIe_BROWSER_ON_MODE) ui = mDNIe_UI_MODE; pr_debug("[CMC624:INFO]:%s:curr main tune : %s\n", __func__, tune_value[cmc624_state.background][cmc624_state.scenario]. 
value[cmc624_state.cabc_mode].name); pr_debug("[CMC624:INFO]:%s: main tune : %s\n", __func__, tune_value[bg][ui].value[cabc].name); if (ui == mDNIe_VIDEO_MODE || ui == mDNIe_VIDEO_WARM_MODE || ui == mDNIe_VIDEO_COLD_MODE) video_mode = true; else video_mode = false; if ((ui == mDNIe_VIDEO_MODE) || (ui == mDNIe_DMB_MODE)) { if (apply_sub_tune_value\ (temp, cmc624_state.outdoor, cabc, 0) != 1) goto rest_set; } pr_debug("[CMC624:INFO]:%s set, size : %d\n",\ tune_value[bg][ui].value[cabc].name,\ tune_value[bg][ui].value[cabc].size); if (cmc624_set_tune_value (tune_value[bg][ui].value[cabc].tune, tune_value[bg][ui].value[cabc].size) != 0) { pr_err("[CMC624:ERROR]:%s: set tune value falied\n", __func__); return -1; } if ((ui == mDNIe_VIDEO_WARM_MODE) || (ui == mDNIe_VIDEO_COLD_MODE)) { if (ui == mDNIe_VIDEO_WARM_MODE) temp = TEMP_WARM; else if (ui == mDNIe_VIDEO_COLD_MODE) temp = TEMP_COLD; if (apply_sub_tune_value(temp, cmc624_state.outdoor, cabc, 0) != 0) { pr_debug( "[CMC624:ERROR]:%s:apply_sub_tune_value() faield\n", __func__); } } rest_set: cmc624_state.scenario = ui; cmc624_state.background = bg; cmc624_state.cabc_mode = cabc; cmc624_state.main_tune = &tune_value[bg][ui].value[cabc]; if (ui == mDNIe_UI_MODE) { cmc624_state.temperature = TEMP_STANDARD; cmc624_state.outdoor = OUTDOOR_OFF_MODE; } if (ui == mDNIe_VIDEO_WARM_MODE) cmc624_state.temperature = TEMP_WARM; else if (ui == mDNIe_VIDEO_COLD_MODE) cmc624_state.temperature = TEMP_COLD; set_backlight_pwm(cmc624_state.brightness); return 0; } int apply_browser_tune_value(enum SCENARIO_COLOR_TONE browser_mode, int force) { browser_mode -= COLOR_TONE_1; if (cmc624_state.negative == 1) { cmc624_state.browser_scenario = browser_mode; cmc624_state.scenario = mDNIe_BROWSER_ON_MODE; return 0; } pr_debug("==================================================\n"); pr_debug(" CMC624 Mode Change. 
brower tune\n");
	pr_debug("==================================================\n");

	/*
	 * NOTE(review): only the first assignment below is guarded by the
	 * if — init_tune_flag[SUB_TUNE] is cleared unconditionally.  The
	 * stray line-continuation in the original source suggests all three
	 * assignments may have been meant to be grouped under the guard;
	 * confirm the intended behaviour before adding braces.
	 */
	if (!init_tune_flag[BROW_TUNE])
		init_tune_flag[MAIN_TUNE] = 0;
	init_tune_flag[SUB_TUNE] = 0;
	init_tune_flag[BROW_TUNE] = 1;

	/*
	 * NOTE(review): force is overridden here, which makes the
	 * (force == 0) early-out just below dead code.
	 */
	force = 1;

	if ((force == 0) && (cmc624_state.browser_scenario == browser_mode)) {
		pr_debug("[CMC624:INFO]:%s:already setted browser tone : %d\n",
			 __func__, browser_mode);
		return 0;
	}

	pr_debug("[CMC624:INFO]:%s curr browser tune : %s\n", __func__,
		 browser_tune_value[cmc624_state.browser_scenario].
		 value[cmc624_state.cabc_mode].name);
	pr_debug("[CMC624:INFO]:%s browser tune : %s\n", __func__,
		 browser_tune_value[browser_mode].
		 value[cmc624_state.cabc_mode].name);
	pr_debug("[CMC624:INFO]:%s set, size : %d\n",
		 browser_tune_value[browser_mode].
		 value[cmc624_state.cabc_mode].name,
		 browser_tune_value[browser_mode].
		 value[cmc624_state.cabc_mode].size);

	if (cmc624_set_tune_value(browser_tune_value[browser_mode].
				  value[cmc624_state.cabc_mode].value,
				  browser_tune_value[browser_mode].
				  value[cmc624_state.cabc_mode].size) != 0) {
		pr_err("[CMC624:ERROR]:%s: set tune value falied\n", __func__);
		return -1;
	}

	cmc624_state.browser_scenario = browser_mode;
	cmc624_state.scenario = mDNIe_BROWSER_ON_MODE;
	return 0;
}

/*
 * Enable or disable the negative (colour-inversion) tune.
 *
 * @negative_mode: 0 restores the current main tune; non-zero applies the
 *                 inversion table
 * @cabc:          CABC mode used to pick the table variant
 *
 * Returns 0 on success, -1 on I2C error.
 */
int apply_negative_tune_value(enum eNegative_Mode negative_mode,
			      enum eCabc_Mode cabc)
{
	int register_size;

	pr_debug("==================================================\n");
	pr_debug(" CMC624 Negative Change.\n");
	pr_debug("==================================================\n");

	if (negative_mode == 0) {
		/* Leaving negative mode: re-apply the current main tune. */
		cmc624_state.negative = negative_mode;
		apply_main_tune_value(cmc624_state.scenario,
				      cmc624_state.background,
				      cmc624_state.cabc_mode, 1);
		pr_info("[CMC624:INFO]:%s:negative setted disalbe : %d\n",
			__func__, negative_mode);
		return 0;
	}

	register_size = negative_tune_value.value[cabc].size;

	if (cmc624_set_tune_value(negative_tune_value.value[cabc].value,
				  register_size) != 0) {
		pr_err("[CMC624:ERROR]:%s:set tune value falied\n", __func__);
		return -1;
	}
cmc624_state.negative = negative_mode; return 0; } void samsung_test_tune_dmb(void) { int i = 0; pr_info("[%s]\n", __func__); for (i = 0; i < ARRAY_SIZE(cmc624_tune_dmb_test); i++) { if (cmc624_I2cWrite16 (cmc624_tune_dmb_test[i].RegAddr, cmc624_tune_dmb_test[i].Data)) { pr_err("why return false??!!!\n"); } } } #define LDI_MTP_ADDR 0xd3 #define LDI_ID_ADDR 0xd1 u8 mtp_read_data[24]; u8 id_buffer[8]; unsigned char LCD_ID; unsigned char LCD_ELVSS_ID3; static void read_ldi_reg(unsigned long src_add, unsigned char *read_data, unsigned char read_length) { int i; u16 data = 0; /* LP Operation2 (Read IDs) */ /* cmc624_I2cWrite16( 0x00, 0x0003 ); */ cmc624_I2cWrite16(0xaa, read_length); cmc624_I2cWrite16(0x02, 0x1100); /* GENERIC WRITE 1 PARA */ cmc624_I2cWrite16(0x04, 0x00b0); /* WRITE B0 */ cmc624_I2cWrite16(0x02, 0x1501); /* GENERIC READ 1 PARA */ cmc624_I2cWrite16(0x04, src_add); /* WRITE d1/ d3 (Address of MTP) */ if (src_add == LDI_ID_ADDR) pr_info("Read IDs from D1h\n"); else pr_info("READ Bright Gamma Offset from MTP(D3h)\n"); for (i = 0; i < read_length; i++) { cmc624_I2cRead16(0x08, &data); /* Read Data Valid Register */ if ((data & 0x1000) != 0x1000) pr_info("[%d] invalid data %x!!!\n", i, data); /* Data Valid Check 0x1000? */ else pr_info("[%d] 0x08 data %x!!!\n", i, data); /* Data Valid Check 0x1000? */ cmc624_I2cRead16(0x06, &data); /* Read Nst Parameter */ read_data[i] = data; /*Store Nst Parameter */ pr_info("[%d] IDs = %x\n", i, read_data[i]); } if (read_length == 8) { LCD_ID = read_data[1]; LCD_ELVSS_ID3 = read_data[2]; } cmc624_I2cRead16(0x08, &data); /* Read Data Valid Register */ if ((data & 0x1000) != 0x1000) pr_info("End of read IDs %x!!!\n", data); /* Data Valid Check 0x1000? */ else pr_info("0x08 data %x!!!\n", data); /* Data Valid Check 0x1000? 
*/ } unsigned char LCD_Get_Value(void){ return LCD_ID; } bool Is_4_8LCD_cmc(void){ if ((LCD_ID == 0x20) || (LCD_ID == 0x40) || (LCD_ID == 0x60)) return true; else return false; } bool Is_4_65LCD_cmc(void){ if (LCD_ID == 0xAE) return true; else return false; } unsigned char LCD_ID3(void){ return LCD_ELVSS_ID3; } /* ################# END of SYSFS Function ############################# */ static bool CMC624_SetUp(void) { u16 data = 0; int i = 0; static int bInit, bRead; mutex_lock(&tuning_mutex); if (!bInit) { for (i = 0; i < ARRAY_SIZE(cmc624_init); i++) { if (cmc624_I2cWrite16 (cmc624_init[i].RegAddr, cmc624_init[i].Data)) { pr_err("why return false??!!!\n"); mutex_unlock(&tuning_mutex); return FALSE; } if (cmc624_init[i].RegAddr == 0x3E && cmc624_init[i].Data == 0x2223) { mdelay(1); } } mdelay(5); if (!bRead) { bRead = 1; pr_info("CMC Init Sequence 2 Start....\n"); read_ldi_reg(LDI_ID_ADDR, id_buffer, 8); read_ldi_reg(LDI_MTP_ADDR, mtp_read_data, 24); } cmc624_I2cRead16(0x00, &data); pr_info("0x00 data %x!!!\n", data); cmc624_I2cRead16(0x82, &data); pr_info("0x82 data %x!!!\n", data); cmc624_I2cRead16(0x83, &data); pr_info("0x83 data %x!!!\n", data); cmc624_I2cRead16(0xC2, &data); pr_info("0xC2 data %x!!!\n", data); cmc624_I2cRead16(0xC3, &data); pr_info("0xC3 data %x!!!\n", data); /*CONV */ cmc624_I2cWrite16(0x00, 0x0003); /* BANK 3 */ cmc624_I2cWrite16(0x01, 0x0001); /* MIPI TO MIPI */ cmc624_I2cWrite16(0x00, 0x0002); /* BANK 2 */ cmc624_I2cWrite16(0x52, 0x0001); /* RGB IF ENABLE */ cmc624_I2cWrite16(0x00, 0x0003); /* BANK 3 goooooood */ } else { pr_info("cmc624_init3!!!\n"); for (i = 0; i < ARRAY_SIZE(cmc624_wakeup); i++) { if (cmc624_I2cWrite16 (cmc624_wakeup[i].RegAddr, cmc624_wakeup[i].Data)) { mutex_unlock(&tuning_mutex); pr_err("why return false??!!!\n"); return FALSE; } } } /* samsung_test_tune_dmb(); */ mutex_unlock(&tuning_mutex); return TRUE; } void samsung_get_id(unsigned char *buf) { buf[0] = (unsigned char)gIDs[0] & 0xff; buf[1] = (unsigned 
char)gIDs[1] & 0xff; buf[2] = (unsigned char)gIDs[2] & 0xff; pr_info("[%s] IDs = %x%x%x\n", __func__, buf[0], buf[1], buf[2]); } void Check_Prog(void) { u16 bank = 0; u16 data = 0; pr_info("++++++%s++++\n", __func__); mutex_lock(&tuning_mutex); cmc624_I2cWrite16(0x00, 0x0003); /* BANK 3 */ cmc624_I2cRead16(0x00, &data); /* Read Nst Parameter */ pr_info("0x00 data %x!!!\n", data); cmc624_I2cRead16(0x82, &data); /* Read Nst Parameter */ pr_info("0x82 data %x!!!\n", data); cmc624_I2cRead16(0x83, &data); /* Read Nst Parameter */ pr_info("0x83 data %x!!!\n", data); cmc624_I2cRead16(0xC2, &data); /* Read Nst Parameter */ pr_info("0xC2 data %x!!!\n", data); cmc624_I2cRead16(0xC3, &data); /* Read Nst Parameter */ pr_info("0xC3 data %x!!!\n", data); cmc624_I2cRead16(0x01, &data); cmc624_I2cRead16(0x00, &bank); pr_info("[bank %d] 0x01 data %x!!!\n", bank, data); cmc624_I2cWrite16(0x00, 0x0002); /* BANK 2 */ cmc624_I2cRead16(0x52, &data); cmc624_I2cRead16(0x00, &bank); pr_info("[bank %d] 0x52 data %x!!!\n", bank, data); cmc624_I2cWrite16(0x00, 0x0005); /* BANK 5 */ cmc624_I2cRead16(0x0B, &data); cmc624_I2cRead16(0x00, &bank); pr_info("[bank %d] 0x0B data %x!!!\n", bank, data); mutex_unlock(&tuning_mutex); pr_info("-----%s-----\n", __func__); } void change_mon_clk(void) { pr_info("++++++%s++++\n", __func__); cmc624_I2cWrite16(0x00, 0x0002); /* BANK 2 */ cmc624_I2cWrite16(0x3F, 0x0107); pr_info("-----%s-----\n", __func__); } int samsung_cmc624_on(int enable) { int ret = 0; pr_info("[LCD] %s:enable:%d\n", __func__, enable); if (enable) { pr_info("CMC Init Sequence!\n"); is_cmc624_on = 1; CMC624_SetUp(); cmc624_state.suspended = 0; if (cmc624_state.negative == 0) { apply_main_tune_value(cmc624_state.scenario,\ cmc624_state.background,\ cmc624_state.cabc_mode, 1); } else apply_negative_tune_value(cmc624_state.negative,\ cmc624_state.cabc_mode); msleep(10); #ifdef TIMER_DEBUG timer_init(); #endif } else { is_cmc624_on = 0; cmc624_state.suspended = 1; } return ret; } int 
samsung_cmc624_bypass_mode(void) { int ret; gpio_set_value(IMA_SLEEP, 1); ret = gpio_get_value(IMA_SLEEP); pr_debug("%s, IMA_SLEEP : %d\n", __func__, ret); udelay(20); gpio_set_value(IMA_nRST, 0); ret = gpio_get_value(IMA_nRST); pr_debug("%s, IMA_nRST : %d\n", __func__, ret); msleep(4); gpio_set_value(IMA_nRST, 1); ret = gpio_get_value(IMA_nRST); pr_debug("%s, IMA_nRST : %d\n", __func__, ret); return ret; } int samsung_cmc624_normal_mode(void) { int ret; gpio_set_value(IMA_SLEEP, 1); ret = gpio_get_value(IMA_SLEEP); pr_debug("%s, IMA_SLEEP : %d\n", __func__, ret); udelay(20); gpio_set_value(IMA_nRST, 0); ret = gpio_get_value(IMA_nRST); pr_debug("%s, IMA_nRST : %d\n", __func__, ret); msleep(4); gpio_set_value(IMA_nRST, 1); ret = gpio_get_value(IMA_nRST); pr_debug("%s, IMA_nRST : %d\n", __func__, ret); return ret; } /* Platform Range : 0 ~ 255 * CMC624 Range : 0 ~ 100 * User Platform Range : * - MIN : 30 * - MAX : 255 * if under 30, CMC624 val : 2% * if 30, CMC624 val : 3% * if default, CMC624 val : 49% * if 255, CMC624 val : 100% */ #define DIMMING_BRIGHTNESS_VALUE 20 #define MINIMUM_BRIGHTNESS_VALUE 30 #define MAXIMUM_BRIGHTNESS_VALUE 255 #define DEFAULT_BRIGHTNESS_VALUE 144 #define QUATER_BRIGHTNESS_VALUE 87 #define DIMMING_CMC624_VALUE 1 #define MINIMUM_CMC624_VALUE 1 /* 0% progress bar */ #define MAXIMUM_CMC624_VALUE 100 /* 100% progress bar */ #define DEFAULT_CMC624_VALUE 49 /* 50% progress bar */ #define QUATER_CMC624_VALUE 9 /* 25% progress bar */ #define QUATER_DEFAUT_MAGIC_VALUE 3 #define UNDER_DEFAULT_MAGIC_VALUE 52 #define OVER_DEFAULT_MAGIC_VALUE 17 #if defined(CONFIG_MACH_P4_LTE) && defined(CONFIG_TARGET_LOCALE_JPN_NTT) void cmc624_manual_brightness(int bl_lvl) { /* Platform Range : 0 ~ 255 * CMC624 Range : 0 ~ 100 * User Platform Range : * - MIN : 30 * - MAX : 255 * * if 30, CMC624 val : 5 * if 255, CMC624 val : 100 * */ int value; if (bl_lvl < 30) { value = bl_lvl / 6; } else if (bl_lvl <= 255) { value = ((95 * bl_lvl) / 225) - 7; } else { 
pr_debug("[CMC624] Wrong Backlight scope!!!!!! [%d]\n", bl_lvl); value = 50; } if (value < 0 || value > 100) pr_debug("[CMC624] Wrong value scope [%d]\n", value); pr_debug("[CMC624] BL : %d Value : %d\n", bl_lvl, value); set_backlight_pwm(value); cmc624_state.brightness = value; } #else void cmc624_manual_brightness(int bl_lvl) { int value; if (bl_lvl < MINIMUM_BRIGHTNESS_VALUE) { if (bl_lvl == 0) value = 0; else value = DIMMING_CMC624_VALUE; } else if (bl_lvl <= QUATER_BRIGHTNESS_VALUE) { value = ((QUATER_CMC624_VALUE - MINIMUM_CMC624_VALUE) * bl_lvl / (QUATER_BRIGHTNESS_VALUE - MINIMUM_BRIGHTNESS_VALUE)) - QUATER_DEFAUT_MAGIC_VALUE; } else if (bl_lvl <= DEFAULT_BRIGHTNESS_VALUE) { value = ((DEFAULT_CMC624_VALUE - QUATER_CMC624_VALUE) * bl_lvl / (DEFAULT_BRIGHTNESS_VALUE - QUATER_BRIGHTNESS_VALUE)) - UNDER_DEFAULT_MAGIC_VALUE; } else if (bl_lvl <= MAXIMUM_BRIGHTNESS_VALUE) { value = ((MAXIMUM_CMC624_VALUE - DEFAULT_CMC624_VALUE) * bl_lvl / (MAXIMUM_BRIGHTNESS_VALUE - DEFAULT_BRIGHTNESS_VALUE)) - OVER_DEFAULT_MAGIC_VALUE; } else { pr_debug("[CMC624] Wrong Backlight scope!!!!!! 
[%d]\n", bl_lvl); value = 50; } if (value < 0 || value > 100) pr_debug("[CMC624] Wrong value scope [%d]\n", value); value = 50; pr_debug("[CMC624] BL : %d Value : %d\n", bl_lvl, value); set_backlight_pwm(value); cmc624_state.brightness = value; } #endif #if defined(CONFIG_TARGET_LOCALE_KOR_SKT)\ || defined(CONFIG_TARGET_LOCALE_KOR_LGU) void cmc624_Set_Region_Ext(int enable, int hStart, int hEnd, int vStart, int vEnd) { u16 data = 0; mutex_lock(&tuning_mutex); cmc624_I2cWrite16(0x0000, 0x0000); cmc624_I2cRead16(0x0001, &data); data &= 0x00ff; if (enable) { cmc624_I2cWrite16(0x00, 0x0000); /*BANK 0*/ cmc624_I2cWrite16(0x0c, 0x5555); /*ROI on */ cmc624_I2cWrite16(0x0d, 0x1555); /*ROI on */ cmc624_I2cWrite16(0x0e, hStart); /*ROI x start 0 */ cmc624_I2cWrite16(0x0f, hEnd); /*ROI x end 1279 */ cmc624_I2cWrite16(0x10, vStart + 1); /*ROI y start 1 */ cmc624_I2cWrite16(0x11, vEnd + 1); /*ROI y end 800 */ cmc624_I2cWrite16(0xff, 0x0000); /*Mask Release */ } else { cmc624_I2cWrite16(0x00, 0x0000); /*BANK 0 */ cmc624_I2cWrite16(0x0c, 0x0000); /*ROI off */ cmc624_I2cWrite16(0x0d, 0x0000); /*ROI off */ cmc624_I2cWrite16(0xff, 0x0000); /*Mask Release */ } mutex_unlock(&tuning_mutex); cmc624_current_region_enable = enable; } EXPORT_SYMBOL(cmc624_Set_Region_Ext); #endif int samsung_gpio_init(void) { int status; struct pm_gpio lvds_cfg = { .direction = PM_GPIO_DIR_OUT, .pull = PM_GPIO_PULL_NO, .vin_sel = 2, .function = PM_GPIO_FUNC_NORMAL, .inv_int_pol = 0, }; struct pm_gpio backlight_cfg = { .direction = PM_GPIO_DIR_OUT, .pull = PM_GPIO_PULL_NO, .vin_sel = 2, .function = PM_GPIO_FUNC_NORMAL, .inv_int_pol = 0, }; int gpio_lvds = PM8921_GPIO_PM_TO_SYS(PMIC_GPIO_LVDS_nSHDN); int gpio_backlight = PM8921_GPIO_PM_TO_SYS(PMIC_GPIO_BACKLIGHT_RST); if (pm8xxx_gpio_config(gpio_lvds, &lvds_cfg)) pr_err("%s PMIC_GPIO_LVDS_nSHDN config failed\n", __func__); status = gpio_request(PM8921_GPIO_PM_TO_SYS(PMIC_GPIO_LVDS_nSHDN), "LVDS_nSHDN"); if (status) pr_debug(KERN_ERR "%s: LVS_nSHDN gpio" " %d 
request failed\n", __func__, PMIC_GPIO_LVDS_nSHDN); if (pm8xxx_gpio_config(gpio_backlight, &backlight_cfg)) pr_err("%s PMIC_GPIO_BACKLIGHT_RST config failed\n", __func__); status = gpio_request(PM8921_GPIO_PM_TO_SYS(PMIC_GPIO_BACKLIGHT_RST), "BACKLIGHT_EN"); if (status) pr_debug(KERN_ERR "%s: BACKLIGHT_EN gpio" " %d request failed\n", __func__, PMIC_GPIO_BACKLIGHT_RST); return status; } int samsung_lvds_on(int enable) { int status; if (enable) status = gpio_direction_output(PM8921_GPIO_PM_TO_SYS (PMIC_GPIO_LVDS_nSHDN), PMIC_GPIO_LCD_LEVEL_HIGH); else status = gpio_direction_output(PM8921_GPIO_PM_TO_SYS (PMIC_GPIO_LVDS_nSHDN), PMIC_GPIO_LCD_LEVEL_LOW); status = gpio_get_value_cansleep(PM8921_GPIO_PM_TO_SYS (PMIC_GPIO_LVDS_nSHDN)); pr_debug("gpio_get_value result. LVDS_nSHDN : %d\n", status); return (unsigned int)status; } int samsung_backlight_en(int enable) { int status; if (enable) status = gpio_direction_output(PM8921_GPIO_PM_TO_SYS (PMIC_GPIO_BACKLIGHT_RST), PMIC_GPIO_LCD_LEVEL_HIGH); else status = gpio_direction_output(PM8921_GPIO_PM_TO_SYS (PMIC_GPIO_BACKLIGHT_RST), PMIC_GPIO_LCD_LEVEL_LOW); status = gpio_get_value_cansleep(PM8921_GPIO_PM_TO_SYS (PMIC_GPIO_BACKLIGHT_RST)); pr_debug("gpio_get_value. 
BACKLIGHT_EN : %d\n", status); return (unsigned int)status; } static int cmc624_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct cmc624_data *data; pr_info("%s +\n", __func__); pr_debug("==============================\n"); pr_debug("cmc624 attach START!!!\n"); pr_debug("==============================\n"); data = kzalloc(sizeof(struct cmc624_data), GFP_KERNEL); if (!data) { dev_err(&client->dev, "Failed to allocate memory\n"); return -ENOMEM; } data->client = client; i2c_set_clientdata(client, data); dev_info(&client->dev, "cmc624 i2c probe success!!!\n"); p_cmc624_data = data; pr_debug("==============================\n"); pr_debug("CMC624 SYSFS INIT!!!\n"); pr_debug("==============================\n"); pr_info("%s -\n", __func__); return 0; } static int __devexit cmc624_i2c_remove(struct i2c_client *client) { struct cmc624_data *data = i2c_get_clientdata(client); i2c_set_clientdata(client, NULL); kfree(data); dev_info(&client->dev, "cmc624 i2c remove success!!!\n"); return 0; } void cmc624_shutdown(struct i2c_client *client) { pr_debug("-0- %s called -0-\n", __func__); } static const struct i2c_device_id cmc624[] = { {"cmc624", 0}, }; MODULE_DEVICE_TABLE(i2c, cmc624); struct i2c_driver cmc624_i2c_driver = { .driver = { .name = "cmc624", .owner = THIS_MODULE, }, .probe = cmc624_i2c_probe, .remove = __devexit_p(cmc624_i2c_remove), .id_table = cmc624, .shutdown = cmc624_shutdown, }; int samsung_cmc624_init(void) { int ret; /* register I2C driver for CMC624 */ pr_debug("<cmc624_i2c_driver Add START>\n"); ret = i2c_add_driver(&cmc624_i2c_driver); pr_debug("cmc624_init Return value (%d)\n", ret); pr_debug("<cmc624_i2c_driver Add END>\n"); return 0; } bool samsung_has_cmc624(void) { #ifdef CONFIG_MACH_M2_ATT if ((system_rev >= 11) && (system_rev != 13)) return false; else return true; #elif defined(CONFIG_MACH_M2_SPR) if (system_rev >= 11) return false; else return true; #elif defined(CONFIG_MACH_M2_VZW) if ((system_rev == 4) || (system_rev >= 
14)) return false; else return true; #elif defined(CONFIG_MACH_M2_SKT) if (system_rev >= 10) return false; else return true; #elif defined(CONFIG_MACH_M2_DCM) || defined(CONFIG_MACH_K2_KDI) if (system_rev >= 5) return false; else return true; #elif defined(CONFIG_MACH_KONA) if(system_rev >= 3) return true; else return false; #else return false; #endif } EXPORT_SYMBOL(samsung_has_cmc624); /* Module information */ MODULE_AUTHOR("Samsung Electronics"); MODULE_DESCRIPTION("samsung CMC624 image converter"); MODULE_LICENSE("GPL");
gpl-2.0
sakuraba001/android_kernel_samsung_d2kdi
drivers/input/misc/pmic8921-hall.c
64
7898
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/mfd/pm8xxx/pm8921.h> #include <mach/irqs.h> #include <linux/log2.h> #include <linux/spinlock.h> #include <linux/hrtimer.h> #include <linux/pm.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <mach/gpio.h> #include <linux/interrupt.h> #include <linux/switch.h> #include <linux/wakelock.h> #define FLIP_DET_PIN 23 #define FLIP_NOTINIT -1 #define FLIP_OPEN 1 #define FLIP_CLOSE 0 #define FLIP_SCAN_INTERVAL (50) #define FLIP_STABLE_COUNT (1) #define dbg_printk(fmt, ...) 
\ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) struct sec_flip_pdata { void (*pmic_gpio_config) (void); int wakeup; int gpio; }; struct sec_flip { struct input_dev *input; struct sec_flip_pdata *pdata; struct wake_lock wlock; struct timer_list flip_timer; int flip_status; int gpio; int irq; struct mutex gpio_get_lock; }; struct switch_dev switch_flip = { .name = "flip", }; static void sec_report_flip_key(struct sec_flip *flip) { switch_set_state(&switch_flip, !flip->flip_status); if (flip->flip_status) { input_report_key(flip->input, KEY_FOLDER_OPEN, 1); input_report_key(flip->input, KEY_FOLDER_OPEN, 0); input_sync(flip->input); } else { input_report_key(flip->input, KEY_FOLDER_CLOSE, 1); input_report_key(flip->input, KEY_FOLDER_CLOSE, 0); input_sync(flip->input); } } static void set_flip_status(struct sec_flip *flip) { int val = 0; val = gpio_get_value_cansleep(flip->gpio); flip->flip_status = val ? 1 : 0; } static irqreturn_t sec_flip_irq_handler(int irq, void *_flip) { struct sec_flip *flip = _flip; unsigned long flags; mutex_lock(&flip->gpio_get_lock); wake_lock_timeout(&flip->wlock, 1 * HZ); set_flip_status(flip); mutex_unlock(&flip->gpio_get_lock); sec_report_flip_key(flip); return IRQ_HANDLED; } static void sec_flip_timer(unsigned long data) { int val = 0; struct sec_flip* flip = (struct sec_flip *)data; static int wait_flip_count; static int wait_flip_status; val = gpio_get_value_cansleep(flip->gpio); if (val != wait_flip_status) { wait_flip_count = 0; wait_flip_status = val; } else if (wait_flip_count < FLIP_STABLE_COUNT) { wait_flip_count++; } if (wait_flip_count >= FLIP_STABLE_COUNT) { if (val) flip->flip_status = 1; else flip->flip_status = 0; sec_report_flip_key(flip); } else { mod_timer(&flip->flip_timer, jiffies + msecs_to_jiffies(FLIP_SCAN_INTERVAL)); } } static int sec_flip_suspend(struct device *dev) { struct sec_flip *flip = dev_get_drvdata(dev); if (device_may_wakeup(dev)) enable_irq_wake(flip->irq); return 0; } static int 
sec_flip_resume(struct device *dev) { struct sec_flip *flip = dev_get_drvdata(dev); if (device_may_wakeup(dev)) disable_irq_wake(flip->irq); return 0; } static const struct dev_pm_ops pm8921_flip_pm_ops = { .suspend = sec_flip_suspend, .resume = sec_flip_resume, }; static ssize_t status_check(struct device *dev, struct device_attribute *attr, char *buf) { struct sec_flip *flip = dev_get_drvdata(dev); return sprintf(buf, "%d\n", flip->flip_status); } static DEVICE_ATTR(flipStatus, S_IRUGO | S_IWUSR | S_IWGRP , status_check, NULL); static int __devinit sec_flip_probe(struct platform_device *pdev) { struct input_dev *input; int err; struct sec_flip *flip; struct sec_flip_pdata *pdata = pdev->dev.platform_data; dev_info(&pdev->dev, "probe\n"); if (!pdata) { dev_err(&pdev->dev, "power key platform data not supplied\n"); return -EINVAL; } flip = kzalloc(sizeof(*flip), GFP_KERNEL); if (!flip) return -ENOMEM; flip->pdata = pdata; /* Enable runtime PM ops, start in ACTIVE mode */ err = pm_runtime_set_active(&pdev->dev); if (err < 0) { dev_err(&pdev->dev, "unable to set runtime pm state\n"); goto free_flip; } pm_runtime_enable(&pdev->dev); /* INPUT DEVICE */ input = input_allocate_device(); if (!input) { dev_err(&pdev->dev, "Can't allocate power button\n"); err = -ENOMEM; goto free_pm; } set_bit(EV_KEY, input->evbit); set_bit(KEY_FOLDER_OPEN & KEY_MAX, input->keybit); set_bit(KEY_FOLDER_CLOSE & KEY_MAX, input->keybit); input->name = "sec_hall_key"; input->phys = "sec_hall/input0"; input->dev.parent = &pdev->dev; err = input_register_device(input); if (err) { dev_err(&pdev->dev, "Can't register power key: %d\n", err); goto free_input_dev; } flip->input = input; input_set_capability(flip->input, EV_ABS, ABS_MISC); input_set_abs_params(flip->input, ABS_MISC, 0, 1, 0, 0); platform_set_drvdata(pdev, flip); err = switch_dev_register(&switch_flip); if (err < 0) { printk(KERN_ERR "FLIP: Failed to register switch device\n"); goto free_input_dev; } /* INTERRUPT */ flip->gpio = 
pdata->gpio; if (pdata->pmic_gpio_config) pdata->pmic_gpio_config(); flip->irq = gpio_to_irq(flip->gpio); mutex_init(&flip->gpio_get_lock); err = request_threaded_irq(flip->irq, NULL, sec_flip_irq_handler, (IRQF_TRIGGER_RISING|IRQF_TRIGGER_FALLING), "flip_det_irq", flip); if (err < 0) { dev_err(&pdev->dev, "Can't get %d IRQ for flip: %d\n", flip->irq, err); goto unreg_input_dev; } device_init_wakeup(&pdev->dev, pdata->wakeup); wake_lock_init(&flip->wlock, WAKE_LOCK_SUSPEND, "sec_flip"); err = device_create_file(&pdev->dev, &dev_attr_flipStatus); if (err < 0) { printk(KERN_ERR "flip status check cannot create file : %d\n", flip->flip_status); goto unreg_input_dev; } init_timer(&flip->flip_timer); flip->flip_timer.function = sec_flip_timer; flip->flip_timer.data = (unsigned long)flip; mod_timer(&flip->flip_timer, jiffies + msecs_to_jiffies(5000)); return 0; err_flip: del_timer_sync(&flip->flip_timer); switch_dev_unregister(&switch_flip); unreg_input_dev: input_unregister_device(input); free_input_dev: input_free_device(input); free_pm: pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(&pdev->dev); free_flip: kfree(flip); return err; } static int __devexit sec_flip_remove(struct platform_device *pdev) { struct sec_flip *flip = platform_get_drvdata(pdev); printk(KERN_DEBUG "%s:\n", __func__); if (flip != NULL) del_timer_sync(&flip->flip_timer); switch_dev_unregister(&switch_flip); pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(&pdev->dev); device_init_wakeup(&pdev->dev, 0); if (flip != NULL) { free_irq(flip->irq, NULL); input_unregister_device(flip->input); kfree(flip); } return 0; } static struct platform_driver sec_flip_driver = { .probe = sec_flip_probe, .remove = __devexit_p(sec_flip_remove), .driver = { .name = "hall_sw", .owner = THIS_MODULE, .pm = &pm8921_flip_pm_ops, }, }; static int __init sec_flip_init(void) { return platform_driver_register(&sec_flip_driver); } static void __exit sec_flip_exit(void) { 
platform_driver_unregister(&sec_flip_driver); } late_initcall(sec_flip_init); module_exit(sec_flip_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PMIC8921 Hall Switch"); MODULE_VERSION("1.0"); MODULE_ALIAS("platform:hall_sw");
gpl-2.0
RoneyThomas/linux-kernel-surnia
drivers/video/msm/mdss/mdp3_ppp_hwio.c
320
33948
/* Copyright (c) 2007, 2012-2013 The Linux Foundation. All rights reserved. * Copyright (C) 2007 Google Incorporated * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/file.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/uaccess.h> #include "linux/proc_fs.h" #include "mdss_fb.h" #include "mdp3_ppp.h" #include "mdp3_hwio.h" /* SHIM Q Factor */ #define PHI_Q_FACTOR 29 #define PQF_PLUS_5 (PHI_Q_FACTOR + 5) /* due to 32 phases */ #define PQF_PLUS_4 (PHI_Q_FACTOR + 4) #define PQF_PLUS_2 (PHI_Q_FACTOR + 2) /* to get 4.0 */ #define PQF_MINUS_2 (PHI_Q_FACTOR - 2) /* to get 0.25 */ #define PQF_PLUS_5_PLUS_2 (PQF_PLUS_5 + 2) #define PQF_PLUS_5_MINUS_2 (PQF_PLUS_5 - 2) static long long mdp_do_div(long long num, long long den) { do_div(num, den); return num; } static int mdp_calc_scale_params(uint32_t org, uint32_t dim_in, uint32_t dim_out, bool is_W, int32_t *phase_init_ptr, uint32_t *phase_step_ptr) { bool rpa_on = false; int init_phase = 0; uint64_t numer = 0; uint64_t denom = 0; int64_t point5 = 1; int64_t one = 1; int64_t k1, k2, k3, k4; /* linear equation coefficients */ uint64_t int_mask; uint64_t fract_mask; uint64_t Os; int64_t Osprime; int64_t Od; int64_t Odprime; int64_t Oreq; int64_t init_phase_temp; int64_t delta; uint32_t mult; /* * The phase accumulator should really be rational for all cases in a * general purpose polyphase scaler for a tiled architecture with * non-zero * origin capability because there is no way to represent * certain scale factors in fixed 
point regardless of precision. * The error incurred in attempting to use fixed point is most * eggregious for SF where 1/SF is an integral multiple of 1/3. * * Set the RPA flag for this dimension. * * In order for 1/SF (dim_in/dim_out) to be an integral multiple of * 1/3, dim_out must be an integral multiple of 3. */ if (!(dim_out % 3)) { mult = dim_out / 3; rpa_on = (!(dim_in % mult)); } numer = dim_out; denom = dim_in; /* * convert to U30.34 before division * * The K vectors carry 4 extra bits of precision * and are rounded. * * We initially go 5 bits over then round by adding * 1 and right shifting by 1 * so final result is U31.33 */ numer <<= PQF_PLUS_5; /* now calculate the scale factor (aka k3) */ k3 = ((mdp_do_div(numer, denom) + 1) >> 1); /* check scale factor for legal range [0.25 - 4.0] */ if (((k3 >> 4) < (1LL << PQF_MINUS_2)) || ((k3 >> 4) > (1LL << PQF_PLUS_2))) { return -EINVAL; } /* calculate inverse scale factor (aka k1) for phase init */ numer = dim_in; denom = dim_out; numer <<= PQF_PLUS_5; k1 = ((mdp_do_div(numer, denom) + 1) >> 1); /* * calculate initial phase and ROI overfetch */ /* convert point5 & one to S39.24 (will always be positive) */ point5 <<= (PQF_PLUS_4 - 1); one <<= PQF_PLUS_4; k2 = ((k1 - one) >> 1); init_phase = (int)(k2 >> 4); k4 = ((k3 - one) >> 1); if (k3 != one) { /* calculate the masks */ fract_mask = one - 1; int_mask = ~fract_mask; if (!rpa_on) { /* * FIXED POINT IMPLEMENTATION */ if (org) { /* * The complicated case; ROI origin != 0 * init_phase needs to be adjusted * OF is also position dependent */ /* map (org - .5) into destination space */ Os = ((uint64_t) org << 1) - 1; Od = ((k3 * Os) >> 1) + k4; /* take the ceiling */ Odprime = (Od & int_mask); if (Odprime != Od) Odprime += one; /* now map that back to source space */ Osprime = (k1 * (Odprime >> PQF_PLUS_4)) + k2; /* then floor & decrement to calc the required starting coordinate */ Oreq = (Osprime & int_mask) - one; /* calculate initial phase */ init_phase_temp = 
Osprime - Oreq; delta = ((int64_t) (org) << PQF_PLUS_4) - Oreq; init_phase_temp -= delta; /* limit to valid range before the left shift */ delta = (init_phase_temp & (1LL << 63)) ? 4 : -4; delta <<= PQF_PLUS_4; while (abs((int)(init_phase_temp >> PQF_PLUS_4)) > 4) init_phase_temp += delta; /* * right shift to account for extra bits of * precision */ init_phase = (int)(init_phase_temp >> 4); } } else { /* * RPA IMPLEMENTATION * * init_phase needs to be calculated in all RPA_on cases * because it's a numerator, not a fixed point value. */ /* map (org - .5) into destination space */ Os = ((uint64_t) org << PQF_PLUS_4) - point5; Od = mdp_do_div((dim_out * (Os + point5)), dim_in); Od -= point5; /* take the ceiling */ Odprime = (Od & int_mask); if (Odprime != Od) Odprime += one; /* now map that back to source space */ Osprime = mdp_do_div((dim_in * (Odprime + point5)), dim_out); Osprime -= point5; /* then floor & decrement to calculate the required starting coordinate */ Oreq = (Osprime & int_mask) - one; /* calculate initial phase */ init_phase_temp = Osprime - Oreq; delta = ((int64_t) (org) << PQF_PLUS_4) - Oreq; init_phase_temp -= delta; /* limit to valid range before the left shift */ delta = (init_phase_temp & (1LL << 63)) ? 
4 : -4; delta <<= PQF_PLUS_4; while (abs((int)(init_phase_temp >> PQF_PLUS_4)) > 4) init_phase_temp += delta; /* right shift to account for extra bits of precision */ init_phase = (int)(init_phase_temp >> 4); } } /* return the scale parameters */ *phase_init_ptr = init_phase; *phase_step_ptr = (uint32_t) (k1 >> 4); return 0; } static int scale_idx(int factor) { int idx; if (factor > 80) idx = PPP_DOWNSCALE_PT8TOPT1; else if (factor > 60) idx = PPP_DOWNSCALE_PT6TOPT8; else if (factor > 40) idx = PPP_DOWNSCALE_PT4TOPT6; else idx = PPP_DOWNSCALE_PT2TOPT4; return idx; } inline int32_t comp_conv_rgb2yuv(int32_t comp, int32_t y_high, int32_t y_low, int32_t c_high, int32_t c_low) { if (comp < 0) comp = 0; if (comp > 255) comp = 255; /* clamp */ if (comp < y_low) comp = y_low; if (comp > y_high) comp = y_high; return comp; } static uint32_t conv_rgb2yuv(uint32_t input_pixel, uint16_t *matrix_vector, uint16_t *bv, uint16_t *clamp_vector) { uint8_t input_C2, input_C0, input_C1; uint32_t output; int32_t comp_C2, comp_C1, comp_C0, temp; int32_t temp1, temp2, temp3; int32_t matrix[9]; int32_t bias_vector[3]; int32_t Y_low_limit, Y_high_limit, C_low_limit, C_high_limit; int32_t i; input_C2 = (input_pixel >> 16) & 0xFF; input_C1 = (input_pixel >> 8) & 0xFF; input_C0 = (input_pixel >> 0) & 0xFF; comp_C0 = input_C0; comp_C1 = input_C1; comp_C2 = input_C2; for (i = 0; i < MDP_CSC_SIZE; i++) matrix[i] = ((int32_t) (((int32_t) matrix_vector[i]) << 20)) >> 20; bias_vector[0] = (int32_t) (bv[0] & 0xFF); bias_vector[1] = (int32_t) (bv[1] & 0xFF); bias_vector[2] = (int32_t) (bv[2] & 0xFF); Y_low_limit = (int32_t) clamp_vector[0]; Y_high_limit = (int32_t) clamp_vector[1]; C_low_limit = (int32_t) clamp_vector[2]; C_high_limit = (int32_t) clamp_vector[3]; /* * Color Conversion * reorder input colors */ temp = comp_C2; comp_C2 = comp_C1; comp_C1 = comp_C0; comp_C0 = temp; /* matrix multiplication */ temp1 = comp_C0 * matrix[0] + comp_C1 * matrix[1] + comp_C2 * matrix[2]; temp2 = comp_C0 * 
matrix[3] + comp_C1 * matrix[4] + comp_C2 * matrix[5]; temp3 = comp_C0 * matrix[6] + comp_C1 * matrix[7] + comp_C2 * matrix[8]; comp_C0 = temp1 + 0x100; comp_C1 = temp2 + 0x100; comp_C2 = temp3 + 0x100; /* take interger part */ comp_C0 >>= 9; comp_C1 >>= 9; comp_C2 >>= 9; /* post bias (+) */ comp_C0 += bias_vector[0]; comp_C1 += bias_vector[1]; comp_C2 += bias_vector[2]; /* limit pixel to 8-bit */ comp_C0 = comp_conv_rgb2yuv(comp_C0, Y_high_limit, Y_low_limit, C_high_limit, C_low_limit); comp_C1 = comp_conv_rgb2yuv(comp_C1, Y_high_limit, Y_low_limit, C_high_limit, C_low_limit); comp_C2 = comp_conv_rgb2yuv(comp_C2, Y_high_limit, Y_low_limit, C_high_limit, C_low_limit); output = (comp_C2 << 16) | (comp_C1 << 8) | comp_C0; return output; } inline void y_h_even_num(struct ppp_img_desc *img) { img->roi.y = (img->roi.y / 2) * 2; img->roi.height = (img->roi.height / 2) * 2; } inline void x_w_even_num(struct ppp_img_desc *img) { img->roi.x = (img->roi.x / 2) * 2; img->roi.width = (img->roi.width / 2) * 2; } bool check_if_rgb(int color) { bool rgb = false; switch (color) { case MDP_RGB_565: case MDP_BGR_565: case MDP_RGB_888: case MDP_BGR_888: case MDP_BGRA_8888: case MDP_RGBA_8888: case MDP_ARGB_8888: case MDP_XRGB_8888: case MDP_RGBX_8888: case MDP_BGRX_8888: rgb = true; default: break; } return rgb; } uint8_t *mdp_dst_adjust_rot_addr(struct ppp_blit_op *iBuf, uint8_t *addr, uint32_t bpp, uint32_t uv) { uint32_t dest_ystride = iBuf->dst.prop.width * bpp; uint32_t h_slice = 1; if (uv && ((iBuf->dst.color_fmt == MDP_Y_CBCR_H2V2) || (iBuf->dst.color_fmt == MDP_Y_CRCB_H2V2))) h_slice = 2; if (((iBuf->mdp_op & MDPOP_ROT90) == MDPOP_ROT90) ^ ((iBuf->mdp_op & MDPOP_LR) == MDPOP_LR)) { addr += (iBuf->dst.roi.width - MIN(16, iBuf->dst.roi.width)) * bpp; } if ((iBuf->mdp_op & MDPOP_UD) == MDPOP_UD) { addr += ((iBuf->dst.roi.height - MIN(16, iBuf->dst.roi.height))/h_slice) * dest_ystride; } return addr; } void mdp_adjust_start_addr(struct ppp_blit_op *blit_op, struct ppp_img_desc 
/*
 * (Tail of a start-address adjustment helper whose header lies above this
 * chunk.)  Advances *img's plane-0 (luma/interleaved) and plane-1 (chroma)
 * addresses to the first pixel of the ROI, honouring per-format stride
 * alignment, and applies the rotation address fixup for non-source layers.
 */
*img, int v_slice, int h_slice, int layer)
{
	uint32_t bpp = ppp_bpp(img->color_fmt);
	int x = img->roi.x;
	int y = img->roi.y;
	uint32_t width = img->prop.width;

	/*
	 * Plane 0: step to the ROI origin.  ADRENO output pads the luma
	 * stride to 32 pixels, VENUS to 128; everything else uses the raw
	 * image width.
	 */
	if (img->color_fmt == MDP_Y_CBCR_H2V2_ADRENO && layer == 0)
		img->p0 += (x + y * ALIGN(width, 32)) * bpp;
	else if (img->color_fmt == MDP_Y_CBCR_H2V2_VENUS && layer == 0)
		img->p0 += (x + y * ALIGN(width, 128)) * bpp;
	else
		img->p0 += (x + y * width) * bpp;
	if (layer != 0)
		img->p0 = mdp_dst_adjust_rot_addr(blit_op, img->p0, bpp, 0);

	if (img->p1) {
		/*
		 * MDP_Y_CBCR_H2V2/MDP_Y_CRCB_H2V2 cosite for now
		 * we need to shift x direction same as y dir for offsite
		 */
		if ((img->color_fmt == MDP_Y_CBCR_H2V2_ADRENO ||
				img->color_fmt == MDP_Y_CBCR_H2V2_VENUS) &&
				layer == 0)
			img->p1 += ((x / h_slice) * h_slice +
					((y == 0) ? 0 :
					(((y + 1) / v_slice - 1) *
					(ALIGN(width/2, 32) * 2)))) * bpp;
		else
			img->p1 += ((x / h_slice) * h_slice +
					((y == 0) ? 0 :
					((y + 1) / v_slice - 1) * width)) * bpp;

		if (layer != 0)
			/*
			 * NOTE(review): p0 is rot-adjusted a second time here
			 * inside the p1 branch; img->p1 (with the chroma flag)
			 * looks like the intended target — confirm against
			 * upstream mdp3 sources.
			 */
			img->p0 = mdp_dst_adjust_rot_addr(blit_op,
					img->p0, bpp, 0);
	}
}

/*
 * Program one of the PPP lookup tables (tableType non-zero selects the
 * post-LUT bank, zero the pre-LUT bank) with PPP_LUT_MAX entries from lut[].
 */
int load_ppp_lut(int tableType, uint32_t *lut)
{
	int i;
	uint32_t base_addr;

	base_addr = tableType ? MDP3_PPP_POST_LUT : MDP3_PPP_PRE_LUT;
	for (i = 0; i < PPP_LUT_MAX; i++)
		PPP_WRITEL(lut[i], base_addr + MDP3_PPP_LUTn(i));

	return 0;
}

/* Configure Primary CSC Matrix */
int load_primary_matrix(struct ppp_csc_table *csc)
{
	int i;

	/* forward matrix, reverse matrix, bias vector, limit vector */
	for (i = 0; i < MDP_CSC_SIZE; i++)
		PPP_WRITEL(csc->fwd_matrix[i], MDP3_PPP_CSC_PFMVn(i));
	for (i = 0; i < MDP_CSC_SIZE; i++)
		PPP_WRITEL(csc->rev_matrix[i], MDP3_PPP_CSC_PRMVn(i));
	for (i = 0; i < MDP_BV_SIZE; i++)
		PPP_WRITEL(csc->bv[i], MDP3_PPP_CSC_PBVn(i));
	for (i = 0; i < MDP_LV_SIZE; i++)
		PPP_WRITEL(csc->lv[i], MDP3_PPP_CSC_PLVn(i));

	return 0;
}

/* Load Secondary CSC Matrix */
int load_secondary_matrix(struct ppp_csc_table *csc)
{
	int i;

	/* same layout as the primary bank, S-register variants */
	for (i = 0; i < MDP_CSC_SIZE; i++)
		PPP_WRITEL(csc->fwd_matrix[i], MDP3_PPP_CSC_SFMVn(i));
	for (i = 0; i < MDP_CSC_SIZE; i++)
		PPP_WRITEL(csc->rev_matrix[i], MDP3_PPP_CSC_SRMVn(i));
	for (i = 0; i < MDP_BV_SIZE; i++)
		PPP_WRITEL(csc->bv[i], MDP3_PPP_CSC_SBVn(i));
	for (i = 0; i < MDP_LV_SIZE; i++)
		PPP_WRITEL(csc->lv[i], MDP3_PPP_CSC_SLVn(i));

	return 0;
}

/* Dispatch a CSC table load to the primary or secondary register bank. */
int load_csc_matrix(int matrix_type, struct ppp_csc_table *csc)
{
	if (matrix_type == CSC_PRIMARY_MATRIX)
		return load_primary_matrix(csc);
	return load_secondary_matrix(csc);
}

/*
 * Program source-image registers: ROI size, plane addresses, strides,
 * pixel format and unpack pattern.  yuv2rgb selects the pack pattern used
 * when the operation converts YCbCr to RGB.
 */
int config_ppp_src(struct ppp_img_desc *src, uint32_t yuv2rgb)
{
	uint32_t val;

	val = ((src->roi.height & MDP3_PPP_XY_MASK) << MDP3_PPP_XY_OFFSET) |
		(src->roi.width & MDP3_PPP_XY_MASK);
	PPP_WRITEL(val, MDP3_PPP_SRC_SIZE);
	PPP_WRITEL(src->p0, MDP3_PPP_SRCP0_ADDR);
	PPP_WRITEL(src->p1, MDP3_PPP_SRCP1_ADDR);
	PPP_WRITEL(src->p3, MDP3_PPP_SRCP3_ADDR);
	val = (src->stride0 & MDP3_PPP_STRIDE_MASK) |
		((src->stride1 & MDP3_PPP_STRIDE_MASK) <<
		MDP3_PPP_STRIDE1_OFFSET);
	PPP_WRITEL(val, MDP3_PPP_SRC_YSTRIDE1_ADDR);
	val = ((src->stride2 & MDP3_PPP_STRIDE_MASK) <<
		MDP3_PPP_STRIDE1_OFFSET);
	PPP_WRITEL(val, MDP3_PPP_SRC_YSTRIDE2_ADDR);
	val = ppp_src_config(src->color_fmt);
	/* odd ROI origin coordinates need the ODD_X/ODD_Y phase flags */
	val |= (src->roi.x % 2) ? PPP_SRC_BPP_ROI_ODD_X : 0;
	val |= (src->roi.y % 2) ? PPP_SRC_BPP_ROI_ODD_Y : 0;
	PPP_WRITEL(val, MDP3_PPP_SRC_FORMAT);
	PPP_WRITEL(ppp_pack_pattern(src->color_fmt, yuv2rgb),
		MDP3_PPP_SRC_UNPACK_PATTERN1);
	return 0;
}

/*
 * Program output-image registers: format (with the pseudo-planar flag for
 * two-plane YCbCr formats), pack pattern, ROI size, plane addresses and
 * strides.
 */
int config_ppp_out(struct ppp_img_desc *dst, uint32_t yuv2rgb)
{
	uint32_t val;
	bool pseudoplanr_output = false;

	switch (dst->color_fmt) {
	case MDP_Y_CBCR_H2V2:
	case MDP_Y_CRCB_H2V2:
	case MDP_Y_CBCR_H2V1:
	case MDP_Y_CRCB_H2V1:
		pseudoplanr_output = true;
		break;
	default:
		break;
	}
	val = ppp_out_config(dst->color_fmt);
	if (pseudoplanr_output)
		val |= PPP_DST_PLANE_PSEUDOPLN;
	PPP_WRITEL(val, MDP3_PPP_OUT_FORMAT);
	PPP_WRITEL(ppp_pack_pattern(dst->color_fmt, yuv2rgb),
		MDP3_PPP_OUT_PACK_PATTERN1);
	val = ((dst->roi.height & MDP3_PPP_XY_MASK) << MDP3_PPP_XY_OFFSET) |
		(dst->roi.width & MDP3_PPP_XY_MASK);
	PPP_WRITEL(val, MDP3_PPP_OUT_SIZE);
	PPP_WRITEL(dst->p0, MDP3_PPP_OUTP0_ADDR);
	PPP_WRITEL(dst->p1, MDP3_PPP_OUTP1_ADDR);
	PPP_WRITEL(dst->p3, MDP3_PPP_OUTP3_ADDR);
	val = (dst->stride0 & MDP3_PPP_STRIDE_MASK) |
		((dst->stride1 & MDP3_PPP_STRIDE_MASK) <<
		MDP3_PPP_STRIDE1_OFFSET);
	PPP_WRITEL(val, MDP3_PPP_OUT_YSTRIDE1_ADDR);
	val = ((dst->stride2 & MDP3_PPP_STRIDE_MASK) <<
		MDP3_PPP_STRIDE1_OFFSET);
	PPP_WRITEL(val, MDP3_PPP_OUT_YSTRIDE2_ADDR);
	return 0;
}

/*
 * Program background-layer registers (used when blending): plane
 * addresses, strides, format and unpack pattern.
 */
int config_ppp_background(struct ppp_img_desc *bg)
{
	uint32_t val;

	PPP_WRITEL(bg->p0, MDP3_PPP_BGP0_ADDR);
	PPP_WRITEL(bg->p1, MDP3_PPP_BGP1_ADDR);
	PPP_WRITEL(bg->p3, MDP3_PPP_BGP3_ADDR);
	val = (bg->stride0 & MDP3_PPP_STRIDE_MASK) |
		((bg->stride1 & MDP3_PPP_STRIDE_MASK) <<
		MDP3_PPP_STRIDE1_OFFSET);
	PPP_WRITEL(val, MDP3_PPP_BG_YSTRIDE1_ADDR);
	val = ((bg->stride2 & MDP3_PPP_STRIDE_MASK) <<
		MDP3_PPP_STRIDE1_OFFSET);
	PPP_WRITEL(val, MDP3_PPP_BG_YSTRIDE2_ADDR);
	PPP_WRITEL(ppp_src_config(bg->color_fmt),
		MDP3_PPP_BG_FORMAT);
	PPP_WRITEL(ppp_pack_pattern(bg->color_fmt, 0),
		MDP3_PPP_BG_UNPACK_PATTERN1);
	return 0;
}

/*
 * Work out, for the luma plane, which pixels outside the ROI the scaler's
 * interpolation needs and how many edge pixels Tile Fetch must replicate,
 * based on the scale factor in each direction.
 */
void ppp_edge_rep_luma_pixel(struct ppp_blit_op *blit_op,
	struct ppp_edge_rep *er)
{
	if (blit_op->mdp_op & MDPOP_ASCALE) {

		er->is_scale_enabled = 1;

		/* 90-degree rotation swaps the effective dst dimensions */
		if (blit_op->mdp_op & MDPOP_ROT90) {
			er->dst_roi_width = blit_op->dst.roi.height;
			er->dst_roi_height = blit_op->dst.roi.width;
		} else {
			er->dst_roi_width = blit_op->dst.roi.width;
			er->dst_roi_height = blit_op->dst.roi.height;
		}

		/*
		 * Find out the luma pixels needed for scaling in the
		 * x direction (LEFT and RIGHT).  Locations of pixels are
		 * relative to the ROI. Upper-left corner of ROI corresponds
		 * to coordinates (0,0). Also set the number of luma pixel
		 * to repeat.
		 */
		if (blit_op->src.roi.width > 3 * er->dst_roi_width) {
			/* scale factor < 1/3 */
			er->luma_interp_point_right =
				(blit_op->src.roi.width - 1);
		} else if (blit_op->src.roi.width == 3 * er->dst_roi_width) {
			/* scale factor == 1/3 */
			er->luma_interp_point_right =
				(blit_op->src.roi.width - 1) + 1;
			er->luma_repeat_right = 1;
		} else if ((blit_op->src.roi.width > er->dst_roi_width) &&
			   (blit_op->src.roi.width < 3 * er->dst_roi_width)) {
			/* 1/3 < scale factor < 1 */
			er->luma_interp_point_left = -1;
			er->luma_interp_point_right =
				(blit_op->src.roi.width - 1) + 1;
			er->luma_repeat_left = 1;
			er->luma_repeat_right = 1;
		} else if (blit_op->src.roi.width == er->dst_roi_width) {
			/* scale factor == 1 */
			er->luma_interp_point_left = -1;
			er->luma_interp_point_right =
				(blit_op->src.roi.width - 1) + 2;
			er->luma_repeat_left = 1;
			er->luma_repeat_right = 2;
		} else {
			/* scale factor > 1 */
			er->luma_interp_point_left = -2;
			er->luma_interp_point_right =
				(blit_op->src.roi.width - 1) + 2;
			er->luma_repeat_left = 2;
			er->luma_repeat_right = 2;
		}

		/*
		 * Find out the number of pixels needed for scaling in the
		 * y direction (TOP and BOTTOM).  Locations of pixels are
		 * relative to the ROI. Upper-left corner of ROI corresponds
		 * to coordinates (0,0). Also set the number of luma pixel
		 * to repeat.
		 */
		if (blit_op->src.roi.height > 3 * er->dst_roi_height) {
			er->luma_interp_point_bottom =
				(blit_op->src.roi.height - 1);
		} else if (blit_op->src.roi.height == 3 * er->dst_roi_height) {
			er->luma_interp_point_bottom =
				(blit_op->src.roi.height - 1) + 1;
			er->luma_repeat_bottom = 1;
		} else if ((blit_op->src.roi.height > er->dst_roi_height) &&
			   (blit_op->src.roi.height < 3 * er->dst_roi_height)) {
			er->luma_interp_point_top = -1;
			er->luma_interp_point_bottom =
				(blit_op->src.roi.height - 1) + 1;
			er->luma_repeat_top = 1;
			er->luma_repeat_bottom = 1;
		} else if (blit_op->src.roi.height == er->dst_roi_height) {
			er->luma_interp_point_top = -1;
			er->luma_interp_point_bottom =
				(blit_op->src.roi.height - 1) + 2;
			er->luma_repeat_top = 1;
			er->luma_repeat_bottom = 2;
		} else {
			er->luma_interp_point_top = -2;
			er->luma_interp_point_bottom =
				(blit_op->src.roi.height - 1) + 2;
			er->luma_repeat_top = 2;
			er->luma_repeat_bottom = 2;
		}
	} else {
		/*
		 * Since no scaling needed, Tile Fetch does not require any
		 * more luma pixel than what the ROI contains.
		 */
		er->luma_interp_point_right =
			(int32_t) (blit_op->src.roi.width - 1);
		er->luma_interp_point_bottom =
			(int32_t) (blit_op->src.roi.height - 1);
	}
	/*
	 * After adding the ROI offsets, we have locations of
	 * luma_interp_points relative to the image.
	 */
	er->luma_interp_point_left += (int32_t) (blit_op->src.roi.x);
	er->luma_interp_point_right += (int32_t) (blit_op->src.roi.x);
	er->luma_interp_point_top += (int32_t) (blit_op->src.roi.y);
	er->luma_interp_point_bottom += (int32_t) (blit_op->src.roi.y);
}

/*
 * Derive the chroma interpolation points and Tile Fetch edge-repeat
 * counts from the luma points computed above, according to the source
 * chroma subsampling (H2V1, H2V2 or none).
 */
void ppp_edge_rep_chroma_pixel(struct ppp_blit_op *blit_op,
	struct ppp_edge_rep *er)
{
	bool chroma_edge_enable = true;
	uint32_t is_yuv_offsite_vertical = 0;

	/* find out which chroma pixels are needed for chroma upsampling. */
	switch (blit_op->src.color_fmt) {
	case MDP_Y_CBCR_H2V1:
	case MDP_Y_CRCB_H2V1:
	case MDP_YCRYCB_H2V1:
		/* horizontal 2:1 subsampling only */
		er->chroma_interp_point_left =
			er->luma_interp_point_left >> 1;
		er->chroma_interp_point_right =
			(er->luma_interp_point_right + 1) >> 1;
		er->chroma_interp_point_top = er->luma_interp_point_top;
		er->chroma_interp_point_bottom =
			er->luma_interp_point_bottom;
		break;

	case MDP_Y_CBCR_H2V2:
	case MDP_Y_CBCR_H2V2_ADRENO:
	case MDP_Y_CBCR_H2V2_VENUS:
	case MDP_Y_CRCB_H2V2:
		/* 2:1 in both directions, chroma offsite vertically */
		er->chroma_interp_point_left =
			er->luma_interp_point_left >> 1;
		er->chroma_interp_point_right =
			(er->luma_interp_point_right + 1) >> 1;
		er->chroma_interp_point_top =
			(er->luma_interp_point_top - 1) >> 1;
		er->chroma_interp_point_bottom =
			(er->luma_interp_point_bottom + 1) >> 1;
		is_yuv_offsite_vertical = 1;
		break;

	default:
		/* fully sampled: chroma tracks luma exactly */
		chroma_edge_enable = false;
		er->chroma_interp_point_left = er->luma_interp_point_left;
		er->chroma_interp_point_right = er->luma_interp_point_right;
		er->chroma_interp_point_top = er->luma_interp_point_top;
		er->chroma_interp_point_bottom =
			er->luma_interp_point_bottom;
		break;
	}

	if (chroma_edge_enable) {
		/* Defines which chroma pixels belongs to the roi */
		switch (blit_op->src.color_fmt) {
		case MDP_Y_CBCR_H2V1:
		case MDP_Y_CRCB_H2V1:
		case MDP_YCRYCB_H2V1:
			er->chroma_bound_left = blit_op->src.roi.x / 2;
			/* there are half as many chroma pixel as luma pixels */
			er->chroma_bound_right =
				(blit_op->src.roi.width +
				blit_op->src.roi.x - 1) / 2;
			er->chroma_bound_top = blit_op->src.roi.y;
			er->chroma_bound_bottom =
				(blit_op->src.roi.height +
				blit_op->src.roi.y - 1);
			break;
		case MDP_Y_CBCR_H2V2:
		case MDP_Y_CBCR_H2V2_ADRENO:
		case MDP_Y_CBCR_H2V2_VENUS:
		case MDP_Y_CRCB_H2V2:
			/*
			 * cosite in horizontal dir, and offsite in vertical dir
			 * width of chroma ROI is 1/2 of size of luma ROI
			 * height of chroma ROI is 1/2 of size of luma ROI
			 */
			er->chroma_bound_left = blit_op->src.roi.x / 2;
			er->chroma_bound_right =
				(blit_op->src.roi.width +
				blit_op->src.roi.x - 1) / 2;
			er->chroma_bound_top = blit_op->src.roi.y / 2;
			er->chroma_bound_bottom =
				(blit_op->src.roi.height +
				blit_op->src.roi.y - 1) / 2;
			break;
		default:
			/*
			 * If no valid chroma sub-sampling format specified,
			 * assume 4:4:4 ( i.e. fully sampled).
			 */
			er->chroma_bound_left = blit_op->src.roi.x;
			er->chroma_bound_right = blit_op->src.roi.width +
				blit_op->src.roi.x - 1;
			er->chroma_bound_top = blit_op->src.roi.y;
			er->chroma_bound_bottom =
				(blit_op->src.roi.height +
				blit_op->src.roi.y - 1);
			break;
		}

		/*
		 * Knowing which chroma pixels are needed, and which chroma
		 * pixels belong to the ROI (i.e. available for fetching ),
		 * calculate how many chroma pixels Tile Fetch needs to
		 * duplicate.  If any required chroma pixels falls outside
		 * of the ROI, Tile Fetch must obtain them by replicating
		 * pixels.
		 */
		if (er->chroma_bound_left > er->chroma_interp_point_left)
			er->chroma_repeat_left =
				er->chroma_bound_left -
				er->chroma_interp_point_left;
		else
			er->chroma_repeat_left = 0;

		if (er->chroma_interp_point_right > er->chroma_bound_right)
			er->chroma_repeat_right =
				er->chroma_interp_point_right -
				er->chroma_bound_right;
		else
			er->chroma_repeat_right = 0;

		if (er->chroma_bound_top > er->chroma_interp_point_top)
			er->chroma_repeat_top =
				er->chroma_bound_top -
				er->chroma_interp_point_top;
		else
			er->chroma_repeat_top = 0;

		if (er->chroma_interp_point_bottom > er->chroma_bound_bottom)
			er->chroma_repeat_bottom =
				er->chroma_interp_point_bottom -
				er->chroma_bound_bottom;
		else
			er->chroma_repeat_bottom = 0;

		/* single-line offsite source needs a fixed bottom repeat */
		if (er->is_scale_enabled && (blit_op->src.roi.height == 1)
			&& is_yuv_offsite_vertical) {
			er->chroma_repeat_bottom = 3;
			er->chroma_repeat_top = 0;
		}
	}
}

/*
 * Compute luma and chroma edge-repeat counts for the source and program
 * the SRC_EDGE_REP register.  Returns -EINVAL if any repeat count falls
 * outside the hardware's 0..3 range.
 */
int config_ppp_edge_rep(struct ppp_blit_op *blit_op)
{
	uint32_t reg = 0;
	struct ppp_edge_rep er;

	memset(&er, 0, sizeof(er));

	ppp_edge_rep_luma_pixel(blit_op, &er);

	/*
	 * After adding the ROI offsets, we have locations of
	 * chroma_interp_points relative to the image.
	 */
	er.chroma_interp_point_left = er.luma_interp_point_left;
	er.chroma_interp_point_right = er.luma_interp_point_right;
	er.chroma_interp_point_top = er.luma_interp_point_top;
	er.chroma_interp_point_bottom = er.luma_interp_point_bottom;

	ppp_edge_rep_chroma_pixel(blit_op, &er);

	/* ensure repeats are >=0 and no larger than 3 pixels */
	if ((er.chroma_repeat_left < 0) || (er.chroma_repeat_right < 0) ||
	    (er.chroma_repeat_top < 0) || (er.chroma_repeat_bottom < 0))
		return -EINVAL;
	if ((er.chroma_repeat_left > 3) || (er.chroma_repeat_right > 3) ||
	    (er.chroma_repeat_top > 3) || (er.chroma_repeat_bottom > 3))
		return -EINVAL;
	if ((er.luma_repeat_left < 0) || (er.luma_repeat_right < 0) ||
	    (er.luma_repeat_top < 0) || (er.luma_repeat_bottom < 0))
		return -EINVAL;
	if ((er.luma_repeat_left > 3) || (er.luma_repeat_right > 3) ||
	    (er.luma_repeat_top > 3) || (er.luma_repeat_bottom > 3))
		return -EINVAL;

	reg |= (er.chroma_repeat_left & 3) << MDP_LEFT_CHROMA;
	reg |= (er.chroma_repeat_right & 3) << MDP_RIGHT_CHROMA;
	reg |= (er.chroma_repeat_top & 3) << MDP_TOP_CHROMA;
	reg |= (er.chroma_repeat_bottom & 3) << MDP_BOTTOM_CHROMA;
	reg |= (er.luma_repeat_left & 3) << MDP_LEFT_LUMA;
	reg |= (er.luma_repeat_right & 3) << MDP_RIGHT_LUMA;
	reg |= (er.luma_repeat_top & 3) << MDP_TOP_LUMA;
	reg |= (er.luma_repeat_bottom & 3) << MDP_BOTTOM_LUMA;
	PPP_WRITEL(reg, MDP3_PPP_SRC_EDGE_REP);
	return 0;
}

/*
 * Program background (destination) chroma edge replication for subsampled
 * output formats: repeat chroma at ROI edges that touch the image border.
 */
int config_ppp_bg_edge_rep(struct ppp_blit_op *blit_op)
{
	uint32_t reg = 0;

	switch (blit_op->dst.color_fmt) {
	case MDP_Y_CBCR_H2V2:
	case MDP_Y_CRCB_H2V2:
		if (blit_op->dst.roi.y == 0)
			reg |= BIT(MDP_TOP_CHROMA);

		if ((blit_op->dst.roi.y + blit_op->dst.roi.height) ==
		    blit_op->dst.prop.height) {
			reg |= BIT(MDP_BOTTOM_CHROMA);
		}

		if (((blit_op->dst.roi.x + blit_op->dst.roi.width) ==
			blit_op->dst.prop.width) &&
			((blit_op->dst.roi.width % 2) == 0))
			reg |= BIT(MDP_RIGHT_CHROMA);
		break;
	case MDP_Y_CBCR_H2V1:
	case MDP_Y_CRCB_H2V1:
	case MDP_YCRYCB_H2V1:
		if (((blit_op->dst.roi.x + blit_op->dst.roi.width) ==
			blit_op->dst.prop.width) &&
			((blit_op->dst.roi.width % 2) == 0))
			reg |= BIT(MDP_RIGHT_CHROMA);
		break;
	default:
		break;
	}
	PPP_WRITEL(reg, MDP3_PPP_BG_EDGE_REP);
	return 0;
}

/* Set the per-component LUT enable bits in the caller's OP-mode word. */
int config_ppp_lut(uint32_t *pppop_reg_ptr, int lut_c0_en,
	int lut_c1_en, int lut_c2_en)
{
	if (lut_c0_en)
		*pppop_reg_ptr |= MDP_LUT_C0_EN;
	if (lut_c1_en)
		*pppop_reg_ptr |= MDP_LUT_C1_EN;
	if (lut_c2_en)
		*pppop_reg_ptr |= MDP_LUT_C2_EN;
	return 0;
}

/*
 * Configure the scaler when MDPOP_ASCALE is requested: compute phase
 * init/step values, load the appropriate up/down-scale (or Gaussian blur)
 * filter tables, and program edge replication.  If no actual resize or
 * blur is needed, the ASCALE flag is cleared again.
 */
int config_ppp_scale(struct ppp_blit_op *blit_op, uint32_t *pppop_reg_ptr)
{
	struct ppp_img_desc *src = &blit_op->src;
	struct ppp_img_desc *dst = &blit_op->dst;
	uint32_t dstW, dstH;
	uint32_t x_fac, y_fac;
	uint32_t mdp_blur = 0;
	uint32_t phase_init_x, phase_init_y, phase_step_x, phase_step_y;
	int x_idx, y_idx;

	if (blit_op->mdp_op & MDPOP_ASCALE) {
		/* ROT90 swaps the effective destination dimensions */
		if (blit_op->mdp_op & MDPOP_ROT90) {
			dstW = dst->roi.height;
			dstH = dst->roi.width;
		} else {
			dstW = dst->roi.width;
			dstH = dst->roi.height;
		}
		*pppop_reg_ptr |=
			(PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON);

		mdp_blur = blit_op->mdp_op & MDPOP_BLUR;

		if ((dstW != src->roi.width) ||
		    (dstH != src->roi.height) || mdp_blur) {

			mdp_calc_scale_params(blit_op->src.roi.x,
				blit_op->src.roi.width,
				dstW, 1, &phase_init_x,
				&phase_step_x);
			mdp_calc_scale_params(blit_op->src.roi.y,
				blit_op->src.roi.height,
				dstH, 0, &phase_init_y,
				&phase_step_y);

			PPP_WRITEL(phase_init_x, MDP3_PPP_SCALE_PHASEX_INIT);
			PPP_WRITEL(phase_init_y, MDP3_PPP_SCALE_PHASEY_INIT);
			PPP_WRITEL(phase_step_x, MDP3_PPP_SCALE_PHASEX_STEP);
			PPP_WRITEL(phase_step_y, MDP3_PPP_SCALE_PHASEY_STEP);

			/*
			 * NOTE(review): second operand compares dstW against
			 * the source HEIGHT; "dstH > src->roi.height" looks
			 * intended — confirm against upstream mdp3 sources.
			 */
			if (dstW > src->roi.width ||
			    dstW > src->roi.height)
				ppp_load_up_lut();

			if (mdp_blur)
				ppp_load_gaussian_lut();

			/* downscale: pick filter table by percentage factor */
			if (dstW <= src->roi.width) {
				x_fac = (dstW * 100) / src->roi.width;
				x_idx = scale_idx(x_fac);
				ppp_load_x_scale_table(x_idx);
			}
			if (dstH <= src->roi.height) {
				y_fac = (dstH * 100) / src->roi.height;
				y_idx = scale_idx(y_fac);
				ppp_load_y_scale_table(y_idx);
			}

		} else {
			blit_op->mdp_op &= ~(MDPOP_ASCALE);
		}
	}
	config_ppp_edge_rep(blit_op);
	config_ppp_bg_edge_rep(blit_op);
	return 0;
}

/*
 * Set the colour-space-conversion OP bits: YCbCr->RGB when the source is
 * YUV and the destination RGB, or conversion-on (RGB->YCbCr direction)
 * when the source is RGB and the destination YUV.
 */
int config_ppp_csc(int src_color, int dst_color, uint32_t *pppop_reg_ptr)
{
	bool inputRGB, outputRGB;

	inputRGB = check_if_rgb(src_color);
	outputRGB = check_if_rgb(dst_color);

	if ((!inputRGB) && (outputRGB))
		*pppop_reg_ptr |= PPP_OP_CONVERT_YCBCR2RGB |
			PPP_OP_CONVERT_ON;
	if ((inputRGB) && (!outputRGB))
		*pppop_reg_ptr |= PPP_OP_CONVERT_ON;
	return 0;
}

/*
 * Configure alpha blending and transparency: select constant vs per-pixel
 * alpha (honouring premultiplied-alpha and layer-is-foreground flags),
 * program the background alpha selector, and write the constant-alpha /
 * transparent-colour parameter register.
 */
int config_ppp_blend(struct ppp_blit_op *blit_op, uint32_t *pppop_reg_ptr)
{
	struct ppp_csc_table *csc;
	uint32_t alpha, trans_color;
	uint32_t val = 0;
	int c_fmt = blit_op->src.color_fmt;
	int bg_alpha;

	csc = ppp_csc_rgb2yuv();
	alpha = blit_op->blend.const_alpha;
	trans_color = blit_op->blend.trans_color;

	if (blit_op->mdp_op & MDPOP_FG_PM_ALPHA) {
		if (ppp_per_p_alpha(c_fmt)) {
			*pppop_reg_ptr |= PPP_OP_ROT_ON |
					  PPP_OP_BLEND_ON |
					  PPP_OP_BLEND_CONSTANT_ALPHA;
		} else {
			/* fully opaque constant alpha means no blend needed */
			if ((blit_op->mdp_op & MDPOP_ALPHAB) &&
				(blit_op->blend.const_alpha == 0xff)) {
				blit_op->mdp_op &= ~(MDPOP_ALPHAB);
			}

			if ((blit_op->mdp_op & MDPOP_ALPHAB) ||
				(blit_op->mdp_op & MDPOP_TRANSP)) {

				*pppop_reg_ptr |= PPP_OP_ROT_ON |
					PPP_OP_BLEND_ON |
					PPP_OP_BLEND_CONSTANT_ALPHA |
					PPP_OP_BLEND_ALPHA_BLEND_NORMAL;
			}
		}

		bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL |
			PPP_BLEND_BG_ALPHA_REVERSE;

		if ((ppp_per_p_alpha(c_fmt)) && !(blit_op->mdp_op &
			MDPOP_LAYER_IS_FG)) {
			bg_alpha |= PPP_BLEND_BG_SRCPIXEL_ALPHA;
		} else {
			bg_alpha |= PPP_BLEND_BG_CONSTANT_ALPHA;
			bg_alpha |= blit_op->blend.const_alpha << 24;
		}
		PPP_WRITEL(bg_alpha, MDP3_PPP_BLEND_BG_ALPHA_SEL);

		if (blit_op->mdp_op & MDPOP_TRANSP)
			*pppop_reg_ptr |= PPP_BLEND_CALPHA_TRNASP;
	} else if (ppp_per_p_alpha(c_fmt)) {
		if (blit_op->mdp_op & MDPOP_LAYER_IS_FG)
			*pppop_reg_ptr |= PPP_OP_ROT_ON |
				  PPP_OP_BLEND_ON |
				  PPP_OP_BLEND_CONSTANT_ALPHA;
		else
			*pppop_reg_ptr |= PPP_OP_ROT_ON |
				  PPP_OP_BLEND_ON |
				  PPP_OP_BLEND_SRCPIXEL_ALPHA;
		PPP_WRITEL(0, MDP3_PPP_BLEND_BG_ALPHA_SEL);
	} else {
		/* fully opaque constant alpha means no blend needed */
		if ((blit_op->mdp_op & MDPOP_ALPHAB) &&
			(blit_op->blend.const_alpha == 0xff)) {
			blit_op->mdp_op &= ~(MDPOP_ALPHAB);
		}

		if ((blit_op->mdp_op & MDPOP_ALPHAB) ||
			(blit_op->mdp_op & MDPOP_TRANSP)) {
			*pppop_reg_ptr |= PPP_OP_ROT_ON |
				PPP_OP_BLEND_ON |
				PPP_OP_BLEND_CONSTANT_ALPHA |
				PPP_OP_BLEND_ALPHA_BLEND_NORMAL;
		}

		if (blit_op->mdp_op & MDPOP_TRANSP)
			*pppop_reg_ptr |=
				PPP_BLEND_CALPHA_TRNASP;
		PPP_WRITEL(0, MDP3_PPP_BLEND_BG_ALPHA_SEL);
	}

	if (*pppop_reg_ptr & PPP_OP_BLEND_ON) {
		config_ppp_background(&blit_op->bg);

		if (blit_op->dst.color_fmt == MDP_YCRYCB_H2V1) {
			*pppop_reg_ptr |= PPP_OP_BG_CHROMA_H2V1;

			/* transparent colour must match the dst colourspace */
			if (blit_op->mdp_op & MDPOP_TRANSP) {
				trans_color = conv_rgb2yuv(trans_color,
					&csc->fwd_matrix[0],
					&csc->bv[0],
					&csc->lv[0]);
			}
		}
	}

	val = (alpha << MDP_BLEND_CONST_ALPHA);
	val |= (trans_color & MDP_BLEND_TRASP_COL_MASK);
	PPP_WRITEL(val, MDP3_PPP_BLEND_PARAM);
	return 0;
}

/* Translate MDPOP_* rotation/flip flags into OP-mode rotation bits. */
int config_ppp_rotation(uint32_t mdp_op, uint32_t *pppop_reg_ptr)
{
	*pppop_reg_ptr |= PPP_OP_ROT_ON;

	if (mdp_op & MDPOP_ROT90)
		*pppop_reg_ptr |= PPP_OP_ROT_90;
	if (mdp_op & MDPOP_LR)
		*pppop_reg_ptr |= PPP_OP_FLIP_LR;
	if (mdp_op & MDPOP_UD)
		*pppop_reg_ptr |= PPP_OP_FLIP_UD;

	return 0;
}

/*
 * Top-level configuration for one blit: derive slice factors from the
 * source/destination subsampling, set up CSC, dither, rotation, strides,
 * plane addresses, scaling, blending and finally write the OP-mode
 * register.
 */
int config_ppp_op_mode(struct ppp_blit_op *blit_op)
{
	uint32_t yuv2rgb;
	uint32_t ppp_operation_reg = 0;
	int sv_slice, sh_slice;
	int dv_slice, dh_slice;

	sv_slice = sh_slice = dv_slice = dh_slice = 1;

	ppp_operation_reg |= ppp_dst_op_reg(blit_op->dst.color_fmt);
	switch (blit_op->dst.color_fmt) {
	case MDP_Y_CBCR_H2V2:
	case MDP_Y_CRCB_H2V2:
		y_h_even_num(&blit_op->dst);
		y_h_even_num(&blit_op->src);
		dv_slice = 2;
		/* fallthrough: H2V2 is also horizontally subsampled */
	case MDP_Y_CBCR_H2V1:
	case MDP_Y_CRCB_H2V1:
	case MDP_YCRYCB_H2V1:
		x_w_even_num(&blit_op->dst);
		x_w_even_num(&blit_op->src);
		dh_slice = 2;
		break;
	default:
		break;
	}

	ppp_operation_reg |= ppp_src_op_reg(blit_op->src.color_fmt);
	switch (blit_op->src.color_fmt) {
	case MDP_Y_CBCR_H2V2:
	case MDP_Y_CBCR_H2V2_ADRENO:
	case MDP_Y_CBCR_H2V2_VENUS:
	case MDP_Y_CRCB_H2V2:
		sh_slice = sv_slice = 2;
		break;
	case MDP_YCRYCB_H2V1:
		x_w_even_num(&blit_op->dst);
		x_w_even_num(&blit_op->src);
		/* fallthrough: all H2V1 formats get sh_slice = 2 */
	case MDP_Y_CBCR_H2V1:
	case MDP_Y_CRCB_H2V1:
		sh_slice = 2;
		break;
	default:
		break;
	}

	config_ppp_csc(blit_op->src.color_fmt,
		       blit_op->dst.color_fmt, &ppp_operation_reg);
	yuv2rgb = ppp_operation_reg & PPP_OP_CONVERT_YCBCR2RGB;

	if (blit_op->mdp_op & MDPOP_DITHER)
		ppp_operation_reg |= PPP_OP_DITHER_EN;

	if (blit_op->mdp_op & MDPOP_ROTATION)
		config_ppp_rotation(blit_op->mdp_op, &ppp_operation_reg);

	/* source strides depend on the format's padding requirements */
	if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_ADRENO) {
		blit_op->src.stride0 = ALIGN(blit_op->src.prop.width, 32) *
			ppp_bpp(blit_op->src.color_fmt);
		blit_op->src.stride1 = 2 * ALIGN(blit_op->src.prop.width/2, 32);
	} else if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_VENUS) {
		blit_op->src.stride0 = ALIGN(blit_op->src.prop.width, 128)  *
			ppp_bpp(blit_op->src.color_fmt);
		blit_op->src.stride1 = blit_op->src.stride0;
	} else {
		blit_op->src.stride0 = blit_op->src.prop.width *
			ppp_bpp(blit_op->src.color_fmt);
		blit_op->src.stride1 = blit_op->src.stride0;
	}

	blit_op->dst.stride0 = blit_op->dst.prop.width *
		ppp_bpp(blit_op->dst.color_fmt);

	if (ppp_multi_plane(blit_op->dst.color_fmt)) {
		blit_op->dst.p1 = blit_op->dst.p0;
		blit_op->dst.p1 += blit_op->dst.prop.width *
			blit_op->dst.prop.height *
			ppp_bpp(blit_op->dst.color_fmt);
	} else {
		blit_op->dst.p1 = NULL;
	}

	blit_op->bg = blit_op->dst;
	/* Jumping from Y-Plane to Chroma Plane */
	/* first pixel addr calculation */
	mdp_adjust_start_addr(blit_op, &blit_op->src, sv_slice, sh_slice, 0);
	mdp_adjust_start_addr(blit_op, &blit_op->bg, dv_slice, dh_slice, 1);
	mdp_adjust_start_addr(blit_op, &blit_op->dst, dv_slice, dh_slice, 2);

	config_ppp_scale(blit_op, &ppp_operation_reg);

	config_ppp_blend(blit_op, &ppp_operation_reg);

	config_ppp_src(&blit_op->src, yuv2rgb);
	config_ppp_out(&blit_op->dst, yuv2rgb);
	PPP_WRITEL(ppp_operation_reg, MDP3_PPP_OP_MODE);
	mb();
	return 0;
}

/* Kick the PPP engine (magic start value into its go register). */
void ppp_enable(void)
{
	PPP_WRITEL(0x1000, 0x30);
	mb();
}

/* One-time hardware init: load default LUTs and both CSC matrix banks. */
int mdp3_ppp_init(void)
{
	load_ppp_lut(LUT_PRE_TABLE, ppp_default_pre_lut());
	load_ppp_lut(LUT_POST_TABLE, ppp_default_post_lut());
	load_csc_matrix(CSC_PRIMARY_MATRIX, ppp_csc_rgb2yuv());
	load_csc_matrix(CSC_SECONDARY_MATRIX, ppp_csc_table2());
	return 0;
}
gpl-2.0
shuiziliuBUPT/linuxkernel
drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
1088
3427
/*
 * Copyright 2012 Maarten Lankhorst
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Maarten Lankhorst
 */

#include <engine/falcon.h>
#include <engine/bsp.h>

/* Per-device state for the PBSP (video bitstream) falcon on nvc0/Fermi. */
struct nvc0_bsp_priv {
	struct nouveau_falcon base;
};

/*******************************************************************************
 * BSP object classes
 ******************************************************************************/

static struct nouveau_oclass
nvc0_bsp_sclass[] = {
	/* 0x90b1: the sole user-visible BSP object class on this chipset */
	{ 0x90b1, &nouveau_object_ofuncs },
	{},
};

/*******************************************************************************
 * PBSP context
 ******************************************************************************/

/* Channel context class: all ops delegate to the generic falcon helpers. */
static struct nouveau_oclass
nvc0_bsp_cclass = {
	.handle = NV_ENGCTX(BSP, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = _nouveau_falcon_context_ctor,
		.dtor = _nouveau_falcon_context_dtor,
		.init = _nouveau_falcon_context_init,
		.fini = _nouveau_falcon_context_fini,
		.rd32 = _nouveau_falcon_context_rd32,
		.wr32 = _nouveau_falcon_context_wr32,
	},
};

/*******************************************************************************
 * PBSP engine/subdev functions
 ******************************************************************************/

/*
 * Engine init: run the common falcon bring-up, then poke two PBSP
 * registers (0x084010/0x08401c) with the value used by the original
 * bring-up sequence.
 */
static int
nvc0_bsp_init(struct nouveau_object *object)
{
	struct nvc0_bsp_priv *priv = (void *)object;
	int ret;

	ret = nouveau_falcon_init(&priv->base);
	if (ret)
		return ret;

	nv_wr32(priv, 0x084010, 0x0000fff2);
	nv_wr32(priv, 0x08401c, 0x0000fff2);
	return 0;
}

/*
 * Engine constructor: create the falcon at MMIO base 0x084000 and wire up
 * the interrupt unit mask, interrupt handler and object classes.
 */
static int
nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nvc0_bsp_priv *priv;
	int ret;

	ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
				    "PBSP", "bsp", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00008000;
	nv_subdev(priv)->intr = nouveau_falcon_intr;
	nv_engine(priv)->cclass = &nvc0_bsp_cclass;
	nv_engine(priv)->sclass = nvc0_bsp_sclass;
	return 0;
}

/* Engine class descriptor registered with the nouveau core. */
struct nouveau_oclass
nvc0_bsp_oclass = {
	.handle = NV_ENGINE(BSP, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_bsp_ctor,
		.dtor = _nouveau_falcon_dtor,
		.init = nvc0_bsp_init,
		.fini = _nouveau_falcon_fini,
		.rd32 = _nouveau_falcon_rd32,
		.wr32 = _nouveau_falcon_wr32,
	},
};
gpl-2.0
kevin0100/android_kernel_qcom_msm8916
drivers/iio/light/adjd_s311.c
2112
9009
/*
 * adjd_s311.c - Support for ADJD-S311-CR999 digital color sensor
 *
 * Copyright (C) 2012 Peter Meerwald <pmeerw@pmeerw.net>
 *
 * This file is subject to the terms and conditions of version 2 of
 * the GNU General Public License.  See the file COPYING in the main
 * directory of this archive for more details.
 *
 * driver for ADJD-S311-CR999 digital color sensor (10-bit channels for
 * red, green, blue, clear); 7-bit I2C slave address 0x74
 *
 * limitations: no calibration, no offset mode, no sleep mode
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/irq.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/triggered_buffer.h>

#define ADJD_S311_DRV_NAME "adjd_s311"

/* control/config registers */
#define ADJD_S311_CTRL		0x00
#define ADJD_S311_CONFIG	0x01
/* per-channel sensor gain (capacitor setting) registers */
#define ADJD_S311_CAP_RED	0x06
#define ADJD_S311_CAP_GREEN	0x07
#define ADJD_S311_CAP_BLUE	0x08
#define ADJD_S311_CAP_CLEAR	0x09
/* per-channel integration time registers (12-bit, lo/hi byte pairs) */
#define ADJD_S311_INT_RED_LO	0x0a
#define ADJD_S311_INT_RED_HI	0x0b
#define ADJD_S311_INT_GREEN_LO	0x0c
#define ADJD_S311_INT_GREEN_HI	0x0d
#define ADJD_S311_INT_BLUE_LO	0x0e
#define ADJD_S311_INT_BLUE_HI	0x0f
#define ADJD_S311_INT_CLEAR_LO	0x10
#define ADJD_S311_INT_CLEAR_HI	0x11
/* per-channel sample data registers (10-bit, lo/hi byte pairs) */
#define ADJD_S311_DATA_RED_LO	0x40
#define ADJD_S311_DATA_RED_HI	0x41
#define ADJD_S311_DATA_GREEN_LO	0x42
#define ADJD_S311_DATA_GREEN_HI	0x43
#define ADJD_S311_DATA_BLUE_LO	0x44
#define ADJD_S311_DATA_BLUE_HI	0x45
#define ADJD_S311_DATA_CLEAR_LO	0x46
#define ADJD_S311_DATA_CLEAR_HI	0x47
/* offset registers (unused by this driver; no offset mode support) */
#define ADJD_S311_OFFSET_RED	0x48
#define ADJD_S311_OFFSET_GREEN	0x49
#define ADJD_S311_OFFSET_BLUE	0x4a
#define ADJD_S311_OFFSET_CLEAR	0x4b

/* CTRL register bits: GOFS = gather offset, GSSR = start sensing */
#define ADJD_S311_CTRL_GOFS	0x02
#define ADJD_S311_CTRL_GSSR	0x01

/* field widths: 4-bit gain, 12-bit integration time, 10-bit sample */
#define ADJD_S311_CAP_MASK	0x0f
#define ADJD_S311_INT_MASK	0x0fff
#define ADJD_S311_DATA_MASK	0x03ff

/* Per-device state. */
struct adjd_s311_data {
	struct i2c_client *client;
	/* scan buffer sized to indio_dev->scan_bytes; (re)allocated in
	 * update_scan_mode, freed on remove */
	u16 *buffer;
};

enum adjd_s311_channel_idx {
	IDX_RED, IDX_GREEN, IDX_BLUE, IDX_CLEAR
};

/* map a channel index onto its data/integration/gain register address */
#define ADJD_S311_DATA_REG(chan) (ADJD_S311_DATA_RED_LO + (chan) * 2)
#define ADJD_S311_INT_REG(chan) (ADJD_S311_INT_RED_LO + (chan) * 2)
#define ADJD_S311_CAP_REG(chan) (ADJD_S311_CAP_RED + (chan))

/*
 * Trigger a measurement (set GSSR) and poll until the sensor clears the
 * bit, i.e. the sample is ready.  Returns 0 on success, negative errno
 * on I2C failure or -EIO after 10 x 20ms without completion.
 */
static int adjd_s311_req_data(struct iio_dev *indio_dev)
{
	struct adjd_s311_data *data = iio_priv(indio_dev);
	int tries = 10;

	int ret = i2c_smbus_write_byte_data(data->client, ADJD_S311_CTRL,
		ADJD_S311_CTRL_GSSR);
	if (ret < 0)
		return ret;

	while (tries--) {
		ret = i2c_smbus_read_byte_data(data->client, ADJD_S311_CTRL);
		if (ret < 0)
			return ret;
		if (!(ret & ADJD_S311_CTRL_GSSR))
			break;
		msleep(20);
	}

	if (tries < 0) {
		dev_err(&data->client->dev,
			"adjd_s311_req_data() failed, data not ready\n");
		return -EIO;
	}

	return 0;
}

/*
 * Start a measurement and read one 10-bit sample from register reg into
 * *val.  Returns 0 on success, negative errno otherwise.
 */
static int adjd_s311_read_data(struct iio_dev *indio_dev, u8 reg, int *val)
{
	struct adjd_s311_data *data = iio_priv(indio_dev);

	int ret = adjd_s311_req_data(indio_dev);
	if (ret < 0)
		return ret;

	ret = i2c_smbus_read_word_data(data->client, reg);
	if (ret < 0)
		return ret;

	*val = ret & ADJD_S311_DATA_MASK;

	return 0;
}

/* sysfs ext_info read hook: print the channel's 12-bit integration time. */
static ssize_t adjd_s311_read_int_time(struct iio_dev *indio_dev,
	uintptr_t private, const struct iio_chan_spec *chan, char *buf)
{
	struct adjd_s311_data *data = iio_priv(indio_dev);
	s32 ret;

	ret = i2c_smbus_read_word_data(data->client,
		ADJD_S311_INT_REG(chan->address));
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", ret & ADJD_S311_INT_MASK);
}

/* sysfs ext_info write hook: set the channel's integration time (0..4095). */
static ssize_t adjd_s311_write_int_time(struct iio_dev *indio_dev,
	 uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
	 size_t len)
{
	struct adjd_s311_data *data = iio_priv(indio_dev);
	unsigned long int_time;
	int ret;

	ret = kstrtoul(buf, 10, &int_time);
	if (ret)
		return ret;

	if (int_time > ADJD_S311_INT_MASK)
		return -EINVAL;

	ret = i2c_smbus_write_word_data(data->client,
		ADJD_S311_INT_REG(chan->address), int_time);
	if (ret < 0)
		return ret;

	return len;
}

/*
 * Triggered-buffer handler: request a sample, read each enabled channel
 * into the scan buffer, append the timestamp if enabled, and push the
 * scan to the IIO buffers.
 */
static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct adjd_s311_data *data = iio_priv(indio_dev);
	s64 time_ns = iio_get_time_ns();
	int len = 0;
	int i, j = 0;

	int ret = adjd_s311_req_data(indio_dev);
	if (ret < 0)
		goto done;

	for_each_set_bit(i, indio_dev->active_scan_mask,
		indio_dev->masklength) {
		ret = i2c_smbus_read_word_data(data->client,
			ADJD_S311_DATA_REG(i));
		if (ret < 0)
			goto done;

		data->buffer[j++] = ret & ADJD_S311_DATA_MASK;
		len += 2;
	}

	if (indio_dev->scan_timestamp)
		/* timestamp lives at the next 8-byte boundary after data */
		*(s64 *)((u8 *)data->buffer + ALIGN(len, sizeof(s64)))
			= time_ns;
	iio_push_to_buffers(indio_dev, (u8 *)data->buffer);

done:
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}

/* Extra per-channel sysfs attribute: integration_time (raw register units). */
static const struct iio_chan_spec_ext_info adjd_s311_ext_info[] = {
	{
		.name = "integration_time",
		.read = adjd_s311_read_int_time,
		.write = adjd_s311_write_int_time,
	},
	{ }
};

/* one 10-bit intensity channel per color, stored in 16 bits */
#define ADJD_S311_CHANNEL(_color, _scan_idx) { \
	.type = IIO_INTENSITY, \
	.modified = 1, \
	.address = (IDX_##_color), \
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
		BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
	.channel2 = (IIO_MOD_LIGHT_##_color), \
	.scan_index = (_scan_idx), \
	.scan_type = IIO_ST('u', 10, 16, 0), \
	.ext_info = adjd_s311_ext_info, \
}

static const struct iio_chan_spec adjd_s311_channels[] = {
	ADJD_S311_CHANNEL(RED, 0),
	ADJD_S311_CHANNEL(GREEN, 1),
	ADJD_S311_CHANNEL(BLUE, 2),
	ADJD_S311_CHANNEL(CLEAR, 3),
	IIO_CHAN_SOFT_TIMESTAMP(4),
};

/*
 * read_raw callback: RAW triggers a fresh measurement of the channel;
 * HARDWAREGAIN reads the 4-bit capacitor/gain setting.
 */
static int adjd_s311_read_raw(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   int *val, int *val2, long mask)
{
	struct adjd_s311_data *data = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		ret = adjd_s311_read_data(indio_dev, chan->address, val);
		if (ret < 0)
			return ret;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_HARDWAREGAIN:
		ret = i2c_smbus_read_byte_data(data->client,
			ADJD_S311_CAP_REG(chan->address));
		if (ret < 0)
			return ret;
		*val = ret & ADJD_S311_CAP_MASK;
		return IIO_VAL_INT;
	}
	return -EINVAL;
}

/* write_raw callback: only HARDWAREGAIN (0..15) is writable. */
static int adjd_s311_write_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int val, int val2, long mask)
{
	struct adjd_s311_data *data = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_HARDWAREGAIN:
		if (val < 0 || val > ADJD_S311_CAP_MASK)
			return -EINVAL;

		ret = i2c_smbus_write_byte_data(data->client,
			ADJD_S311_CAP_REG(chan->address), val);
		return ret;
	}
	return -EINVAL;
}

/* Resize the scan buffer whenever the enabled-channel mask changes. */
static int adjd_s311_update_scan_mode(struct iio_dev *indio_dev,
	const unsigned long *scan_mask)
{
	struct adjd_s311_data *data = iio_priv(indio_dev);

	kfree(data->buffer);
	data->buffer = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
	if (data->buffer == NULL)
		return -ENOMEM;

	return 0;
}

static const struct iio_info adjd_s311_info = {
	.read_raw = adjd_s311_read_raw,
	.write_raw = adjd_s311_write_raw,
	.update_scan_mode = adjd_s311_update_scan_mode,
	.driver_module = THIS_MODULE,
};

/*
 * Probe: allocate the IIO device, fill in channels/info, set up the
 * triggered buffer, and register with the IIO core.  Error paths unwind
 * in reverse order.
 */
static int adjd_s311_probe(struct i2c_client *client,
			   const struct i2c_device_id *id)
{
	struct adjd_s311_data *data;
	struct iio_dev *indio_dev;
	int err;

	indio_dev = iio_device_alloc(sizeof(*data));
	if (indio_dev == NULL) {
		err = -ENOMEM;
		goto exit;
	}
	data = iio_priv(indio_dev);
	i2c_set_clientdata(client, indio_dev);
	data->client = client;

	indio_dev->dev.parent = &client->dev;
	indio_dev->info = &adjd_s311_info;
	indio_dev->name = ADJD_S311_DRV_NAME;
	indio_dev->channels = adjd_s311_channels;
	indio_dev->num_channels = ARRAY_SIZE(adjd_s311_channels);
	indio_dev->modes = INDIO_DIRECT_MODE;

	err = iio_triggered_buffer_setup(indio_dev, NULL,
		adjd_s311_trigger_handler, NULL);
	if (err < 0)
		goto exit_free_device;

	err = iio_device_register(indio_dev);
	if (err)
		goto exit_unreg_buffer;

	dev_info(&client->dev, "ADJD-S311 color sensor registered\n");

	return 0;

exit_unreg_buffer:
	iio_triggered_buffer_cleanup(indio_dev);
exit_free_device:
	iio_device_free(indio_dev);
exit:
	return err;
}

/* Remove: tear down in reverse order of probe and free the scan buffer. */
static int adjd_s311_remove(struct i2c_client *client)
{
	struct iio_dev *indio_dev = i2c_get_clientdata(client);
	struct adjd_s311_data *data = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);
	iio_triggered_buffer_cleanup(indio_dev);
	kfree(data->buffer);
	iio_device_free(indio_dev);

	return 0;
}

static const struct i2c_device_id adjd_s311_id[] = {
	{ "adjd_s311", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, adjd_s311_id);

static struct i2c_driver adjd_s311_driver = {
	.driver = {
		.name	= ADJD_S311_DRV_NAME,
	},
	.probe		= adjd_s311_probe,
	.remove		= adjd_s311_remove,
	.id_table	= adjd_s311_id,
};
module_i2c_driver(adjd_s311_driver);

MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
MODULE_DESCRIPTION("ADJD-S311 color sensor");
MODULE_LICENSE("GPL");
gpl-2.0
pasomnica/doubleslash-kernel-protou
sound/pci/intel8x0m.c
2368
38620
/* * ALSA modem driver for Intel ICH (i8x0) chipsets * * Copyright (c) 2000 Jaroslav Kysela <perex@perex.cz> * * This is modified (by Sasha Khapyorsky <sashak@alsa-project.org>) version * of ALSA ICH sound driver intel8x0.c . * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/ac97_codec.h> #include <sound/info.h> #include <sound/initval.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; " "SiS 7013; NVidia MCP/2/2S/3 modems"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Intel,82801AA-ICH}," "{Intel,82901AB-ICH0}," "{Intel,82801BA-ICH2}," "{Intel,82801CA-ICH3}," "{Intel,82801DB-ICH4}," "{Intel,ICH5}," "{Intel,ICH6}," "{Intel,ICH7}," "{Intel,MX440}," "{SiS,7013}," "{NVidia,NForce Modem}," "{NVidia,NForce2 Modem}," "{NVidia,NForce2s Modem}," "{NVidia,NForce3 Modem}," "{AMD,AMD768}}"); static int index = -2; /* Exclude the first card */ static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */ static int ac97_clock; module_param(index, int, 0444); MODULE_PARM_DESC(index, "Index value for Intel i8x0 
modemcard."); module_param(id, charp, 0444); MODULE_PARM_DESC(id, "ID string for Intel i8x0 modemcard."); module_param(ac97_clock, int, 0444); MODULE_PARM_DESC(ac97_clock, "AC'97 codec clock (0 = auto-detect)."); /* just for backward compatibility */ static int enable; module_param(enable, bool, 0444); /* * Direct registers */ enum { DEVICE_INTEL, DEVICE_SIS, DEVICE_ALI, DEVICE_NFORCE }; #define ICHREG(x) ICH_REG_##x #define DEFINE_REGSET(name,base) \ enum { \ ICH_REG_##name##_BDBAR = base + 0x0, /* dword - buffer descriptor list base address */ \ ICH_REG_##name##_CIV = base + 0x04, /* byte - current index value */ \ ICH_REG_##name##_LVI = base + 0x05, /* byte - last valid index */ \ ICH_REG_##name##_SR = base + 0x06, /* byte - status register */ \ ICH_REG_##name##_PICB = base + 0x08, /* word - position in current buffer */ \ ICH_REG_##name##_PIV = base + 0x0a, /* byte - prefetched index value */ \ ICH_REG_##name##_CR = base + 0x0b, /* byte - control register */ \ }; /* busmaster blocks */ DEFINE_REGSET(OFF, 0); /* offset */ /* values for each busmaster block */ /* LVI */ #define ICH_REG_LVI_MASK 0x1f /* SR */ #define ICH_FIFOE 0x10 /* FIFO error */ #define ICH_BCIS 0x08 /* buffer completion interrupt status */ #define ICH_LVBCI 0x04 /* last valid buffer completion interrupt */ #define ICH_CELV 0x02 /* current equals last valid */ #define ICH_DCH 0x01 /* DMA controller halted */ /* PIV */ #define ICH_REG_PIV_MASK 0x1f /* mask */ /* CR */ #define ICH_IOCE 0x10 /* interrupt on completion enable */ #define ICH_FEIE 0x08 /* fifo error interrupt enable */ #define ICH_LVBIE 0x04 /* last valid buffer interrupt enable */ #define ICH_RESETREGS 0x02 /* reset busmaster registers */ #define ICH_STARTBM 0x01 /* start busmaster operation */ /* global block */ #define ICH_REG_GLOB_CNT 0x3c /* dword - global control */ #define ICH_TRIE 0x00000040 /* tertiary resume interrupt enable */ #define ICH_SRIE 0x00000020 /* secondary resume interrupt enable */ #define ICH_PRIE 0x00000010 
/* primary resume interrupt enable */ #define ICH_ACLINK 0x00000008 /* AClink shut off */ #define ICH_AC97WARM 0x00000004 /* AC'97 warm reset */ #define ICH_AC97COLD 0x00000002 /* AC'97 cold reset */ #define ICH_GIE 0x00000001 /* GPI interrupt enable */ #define ICH_REG_GLOB_STA 0x40 /* dword - global status */ #define ICH_TRI 0x20000000 /* ICH4: tertiary (AC_SDIN2) resume interrupt */ #define ICH_TCR 0x10000000 /* ICH4: tertiary (AC_SDIN2) codec ready */ #define ICH_BCS 0x08000000 /* ICH4: bit clock stopped */ #define ICH_SPINT 0x04000000 /* ICH4: S/PDIF interrupt */ #define ICH_P2INT 0x02000000 /* ICH4: PCM2-In interrupt */ #define ICH_M2INT 0x01000000 /* ICH4: Mic2-In interrupt */ #define ICH_SAMPLE_CAP 0x00c00000 /* ICH4: sample capability bits (RO) */ #define ICH_MULTICHAN_CAP 0x00300000 /* ICH4: multi-channel capability bits (RO) */ #define ICH_MD3 0x00020000 /* modem power down semaphore */ #define ICH_AD3 0x00010000 /* audio power down semaphore */ #define ICH_RCS 0x00008000 /* read completion status */ #define ICH_BIT3 0x00004000 /* bit 3 slot 12 */ #define ICH_BIT2 0x00002000 /* bit 2 slot 12 */ #define ICH_BIT1 0x00001000 /* bit 1 slot 12 */ #define ICH_SRI 0x00000800 /* secondary (AC_SDIN1) resume interrupt */ #define ICH_PRI 0x00000400 /* primary (AC_SDIN0) resume interrupt */ #define ICH_SCR 0x00000200 /* secondary (AC_SDIN1) codec ready */ #define ICH_PCR 0x00000100 /* primary (AC_SDIN0) codec ready */ #define ICH_MCINT 0x00000080 /* MIC capture interrupt */ #define ICH_POINT 0x00000040 /* playback interrupt */ #define ICH_PIINT 0x00000020 /* capture interrupt */ #define ICH_NVSPINT 0x00000010 /* nforce spdif interrupt */ #define ICH_MOINT 0x00000004 /* modem playback interrupt */ #define ICH_MIINT 0x00000002 /* modem capture interrupt */ #define ICH_GSCI 0x00000001 /* GPI status change interrupt */ #define ICH_REG_ACC_SEMA 0x44 /* byte - codec write semaphore */ #define ICH_CAS 0x01 /* codec access semaphore */ #define ICH_MAX_FRAGS 32 /* max hw 
frags */ /* * */ enum { ICHD_MDMIN, ICHD_MDMOUT, ICHD_MDMLAST = ICHD_MDMOUT }; enum { ALID_MDMIN, ALID_MDMOUT, ALID_MDMLAST = ALID_MDMOUT }; #define get_ichdev(substream) (substream->runtime->private_data) struct ichdev { unsigned int ichd; /* ich device number */ unsigned long reg_offset; /* offset to bmaddr */ u32 *bdbar; /* CPU address (32bit) */ unsigned int bdbar_addr; /* PCI bus address (32bit) */ struct snd_pcm_substream *substream; unsigned int physbuf; /* physical address (32bit) */ unsigned int size; unsigned int fragsize; unsigned int fragsize1; unsigned int position; int frags; int lvi; int lvi_frag; int civ; int ack; int ack_reload; unsigned int ack_bit; unsigned int roff_sr; unsigned int roff_picb; unsigned int int_sta_mask; /* interrupt status mask */ unsigned int ali_slot; /* ALI DMA slot */ struct snd_ac97 *ac97; }; struct intel8x0m { unsigned int device_type; int irq; void __iomem *addr; void __iomem *bmaddr; struct pci_dev *pci; struct snd_card *card; int pcm_devs; struct snd_pcm *pcm[2]; struct ichdev ichd[2]; unsigned int in_ac97_init: 1; struct snd_ac97_bus *ac97_bus; struct snd_ac97 *ac97; spinlock_t reg_lock; struct snd_dma_buffer bdbars; u32 bdbars_count; u32 int_sta_reg; /* interrupt status register */ u32 int_sta_mask; /* interrupt status mask */ unsigned int pcm_pos_shift; }; static DEFINE_PCI_DEVICE_TABLE(snd_intel8x0m_ids) = { { PCI_VDEVICE(INTEL, 0x2416), DEVICE_INTEL }, /* 82801AA */ { PCI_VDEVICE(INTEL, 0x2426), DEVICE_INTEL }, /* 82901AB */ { PCI_VDEVICE(INTEL, 0x2446), DEVICE_INTEL }, /* 82801BA */ { PCI_VDEVICE(INTEL, 0x2486), DEVICE_INTEL }, /* ICH3 */ { PCI_VDEVICE(INTEL, 0x24c6), DEVICE_INTEL }, /* ICH4 */ { PCI_VDEVICE(INTEL, 0x24d6), DEVICE_INTEL }, /* ICH5 */ { PCI_VDEVICE(INTEL, 0x266d), DEVICE_INTEL }, /* ICH6 */ { PCI_VDEVICE(INTEL, 0x27dd), DEVICE_INTEL }, /* ICH7 */ { PCI_VDEVICE(INTEL, 0x7196), DEVICE_INTEL }, /* 440MX */ { PCI_VDEVICE(AMD, 0x7446), DEVICE_INTEL }, /* AMD768 */ { PCI_VDEVICE(SI, 0x7013), DEVICE_SIS }, 
/* SI7013 */ { PCI_VDEVICE(NVIDIA, 0x01c1), DEVICE_NFORCE }, /* NFORCE */ { PCI_VDEVICE(NVIDIA, 0x0069), DEVICE_NFORCE }, /* NFORCE2 */ { PCI_VDEVICE(NVIDIA, 0x0089), DEVICE_NFORCE }, /* NFORCE2s */ { PCI_VDEVICE(NVIDIA, 0x00d9), DEVICE_NFORCE }, /* NFORCE3 */ { PCI_VDEVICE(AMD, 0x746e), DEVICE_INTEL }, /* AMD8111 */ #if 0 { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */ #endif { 0, } }; MODULE_DEVICE_TABLE(pci, snd_intel8x0m_ids); /* * Lowlevel I/O - busmaster */ static inline u8 igetbyte(struct intel8x0m *chip, u32 offset) { return ioread8(chip->bmaddr + offset); } static inline u16 igetword(struct intel8x0m *chip, u32 offset) { return ioread16(chip->bmaddr + offset); } static inline u32 igetdword(struct intel8x0m *chip, u32 offset) { return ioread32(chip->bmaddr + offset); } static inline void iputbyte(struct intel8x0m *chip, u32 offset, u8 val) { iowrite8(val, chip->bmaddr + offset); } static inline void iputword(struct intel8x0m *chip, u32 offset, u16 val) { iowrite16(val, chip->bmaddr + offset); } static inline void iputdword(struct intel8x0m *chip, u32 offset, u32 val) { iowrite32(val, chip->bmaddr + offset); } /* * Lowlevel I/O - AC'97 registers */ static inline u16 iagetword(struct intel8x0m *chip, u32 offset) { return ioread16(chip->addr + offset); } static inline void iaputword(struct intel8x0m *chip, u32 offset, u16 val) { iowrite16(val, chip->addr + offset); } /* * Basic I/O */ /* * access to AC97 codec via normal i/o (for ICH and SIS7013) */ /* return the GLOB_STA bit for the corresponding codec */ static unsigned int get_ich_codec_bit(struct intel8x0m *chip, unsigned int codec) { static unsigned int codec_bit[3] = { ICH_PCR, ICH_SCR, ICH_TCR }; if (snd_BUG_ON(codec >= 3)) return ICH_PCR; return codec_bit[codec]; } static int snd_intel8x0m_codec_semaphore(struct intel8x0m *chip, unsigned int codec) { int time; if (codec > 1) return -EIO; codec = get_ich_codec_bit(chip, codec); /* codec ready ? 
*/ if ((igetdword(chip, ICHREG(GLOB_STA)) & codec) == 0) return -EIO; /* Anyone holding a semaphore for 1 msec should be shot... */ time = 100; do { if (!(igetbyte(chip, ICHREG(ACC_SEMA)) & ICH_CAS)) return 0; udelay(10); } while (time--); /* access to some forbidden (non existent) ac97 registers will not * reset the semaphore. So even if you don't get the semaphore, still * continue the access. We don't need the semaphore anyway. */ snd_printk(KERN_ERR "codec_semaphore: semaphore is not ready [0x%x][0x%x]\n", igetbyte(chip, ICHREG(ACC_SEMA)), igetdword(chip, ICHREG(GLOB_STA))); iagetword(chip, 0); /* clear semaphore flag */ /* I don't care about the semaphore */ return -EBUSY; } static void snd_intel8x0m_codec_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct intel8x0m *chip = ac97->private_data; if (snd_intel8x0m_codec_semaphore(chip, ac97->num) < 0) { if (! chip->in_ac97_init) snd_printk(KERN_ERR "codec_write %d: semaphore is not ready for register 0x%x\n", ac97->num, reg); } iaputword(chip, reg + ac97->num * 0x80, val); } static unsigned short snd_intel8x0m_codec_read(struct snd_ac97 *ac97, unsigned short reg) { struct intel8x0m *chip = ac97->private_data; unsigned short res; unsigned int tmp; if (snd_intel8x0m_codec_semaphore(chip, ac97->num) < 0) { if (! chip->in_ac97_init) snd_printk(KERN_ERR "codec_read %d: semaphore is not ready for register 0x%x\n", ac97->num, reg); res = 0xffff; } else { res = iagetword(chip, reg + ac97->num * 0x80); if ((tmp = igetdword(chip, ICHREG(GLOB_STA))) & ICH_RCS) { /* reset RCS and preserve other R/WC bits */ iputdword(chip, ICHREG(GLOB_STA), tmp & ~(ICH_SRI|ICH_PRI|ICH_TRI|ICH_GSCI)); if (! 
chip->in_ac97_init) snd_printk(KERN_ERR "codec_read %d: read timeout for register 0x%x\n", ac97->num, reg); res = 0xffff; } } if (reg == AC97_GPIO_STATUS) iagetword(chip, 0); /* clear semaphore */ return res; } /* * DMA I/O */ static void snd_intel8x0m_setup_periods(struct intel8x0m *chip, struct ichdev *ichdev) { int idx; u32 *bdbar = ichdev->bdbar; unsigned long port = ichdev->reg_offset; iputdword(chip, port + ICH_REG_OFF_BDBAR, ichdev->bdbar_addr); if (ichdev->size == ichdev->fragsize) { ichdev->ack_reload = ichdev->ack = 2; ichdev->fragsize1 = ichdev->fragsize >> 1; for (idx = 0; idx < (ICH_REG_LVI_MASK + 1) * 2; idx += 4) { bdbar[idx + 0] = cpu_to_le32(ichdev->physbuf); bdbar[idx + 1] = cpu_to_le32(0x80000000 | /* interrupt on completion */ ichdev->fragsize1 >> chip->pcm_pos_shift); bdbar[idx + 2] = cpu_to_le32(ichdev->physbuf + (ichdev->size >> 1)); bdbar[idx + 3] = cpu_to_le32(0x80000000 | /* interrupt on completion */ ichdev->fragsize1 >> chip->pcm_pos_shift); } ichdev->frags = 2; } else { ichdev->ack_reload = ichdev->ack = 1; ichdev->fragsize1 = ichdev->fragsize; for (idx = 0; idx < (ICH_REG_LVI_MASK + 1) * 2; idx += 2) { bdbar[idx + 0] = cpu_to_le32(ichdev->physbuf + (((idx >> 1) * ichdev->fragsize) % ichdev->size)); bdbar[idx + 1] = cpu_to_le32(0x80000000 | /* interrupt on completion */ ichdev->fragsize >> chip->pcm_pos_shift); /* printk(KERN_DEBUG "bdbar[%i] = 0x%x [0x%x]\n", idx + 0, bdbar[idx + 0], bdbar[idx + 1]); */ } ichdev->frags = ichdev->size / ichdev->fragsize; } iputbyte(chip, port + ICH_REG_OFF_LVI, ichdev->lvi = ICH_REG_LVI_MASK); ichdev->civ = 0; iputbyte(chip, port + ICH_REG_OFF_CIV, 0); ichdev->lvi_frag = ICH_REG_LVI_MASK % ichdev->frags; ichdev->position = 0; #if 0 printk(KERN_DEBUG "lvi_frag = %i, frags = %i, period_size = 0x%x, " "period_size1 = 0x%x\n", ichdev->lvi_frag, ichdev->frags, ichdev->fragsize, ichdev->fragsize1); #endif /* clear interrupts */ iputbyte(chip, port + ichdev->roff_sr, ICH_FIFOE | ICH_BCIS | ICH_LVBCI); } /* * 
Interrupt handler */ static inline void snd_intel8x0m_update(struct intel8x0m *chip, struct ichdev *ichdev) { unsigned long port = ichdev->reg_offset; int civ, i, step; int ack = 0; civ = igetbyte(chip, port + ICH_REG_OFF_CIV); if (civ == ichdev->civ) { // snd_printd("civ same %d\n", civ); step = 1; ichdev->civ++; ichdev->civ &= ICH_REG_LVI_MASK; } else { step = civ - ichdev->civ; if (step < 0) step += ICH_REG_LVI_MASK + 1; // if (step != 1) // snd_printd("step = %d, %d -> %d\n", step, ichdev->civ, civ); ichdev->civ = civ; } ichdev->position += step * ichdev->fragsize1; ichdev->position %= ichdev->size; ichdev->lvi += step; ichdev->lvi &= ICH_REG_LVI_MASK; iputbyte(chip, port + ICH_REG_OFF_LVI, ichdev->lvi); for (i = 0; i < step; i++) { ichdev->lvi_frag++; ichdev->lvi_frag %= ichdev->frags; ichdev->bdbar[ichdev->lvi * 2] = cpu_to_le32(ichdev->physbuf + ichdev->lvi_frag * ichdev->fragsize1); #if 0 printk(KERN_DEBUG "new: bdbar[%i] = 0x%x [0x%x], " "prefetch = %i, all = 0x%x, 0x%x\n", ichdev->lvi * 2, ichdev->bdbar[ichdev->lvi * 2], ichdev->bdbar[ichdev->lvi * 2 + 1], inb(ICH_REG_OFF_PIV + port), inl(port + 4), inb(port + ICH_REG_OFF_CR)); #endif if (--ichdev->ack == 0) { ichdev->ack = ichdev->ack_reload; ack = 1; } } if (ack && ichdev->substream) { spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(ichdev->substream); spin_lock(&chip->reg_lock); } iputbyte(chip, port + ichdev->roff_sr, ICH_FIFOE | ICH_BCIS | ICH_LVBCI); } static irqreturn_t snd_intel8x0m_interrupt(int irq, void *dev_id) { struct intel8x0m *chip = dev_id; struct ichdev *ichdev; unsigned int status; unsigned int i; spin_lock(&chip->reg_lock); status = igetdword(chip, chip->int_sta_reg); if (status == 0xffffffff) { /* we are not yet resumed */ spin_unlock(&chip->reg_lock); return IRQ_NONE; } if ((status & chip->int_sta_mask) == 0) { if (status) iputdword(chip, chip->int_sta_reg, status); spin_unlock(&chip->reg_lock); return IRQ_NONE; } for (i = 0; i < chip->bdbars_count; i++) { ichdev = 
&chip->ichd[i]; if (status & ichdev->int_sta_mask) snd_intel8x0m_update(chip, ichdev); } /* ack them */ iputdword(chip, chip->int_sta_reg, status & chip->int_sta_mask); spin_unlock(&chip->reg_lock); return IRQ_HANDLED; } /* * PCM part */ static int snd_intel8x0m_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct intel8x0m *chip = snd_pcm_substream_chip(substream); struct ichdev *ichdev = get_ichdev(substream); unsigned char val = 0; unsigned long port = ichdev->reg_offset; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: val = ICH_IOCE | ICH_STARTBM; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: val = 0; break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: val = ICH_IOCE; break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: val = ICH_IOCE | ICH_STARTBM; break; default: return -EINVAL; } iputbyte(chip, port + ICH_REG_OFF_CR, val); if (cmd == SNDRV_PCM_TRIGGER_STOP) { /* wait until DMA stopped */ while (!(igetbyte(chip, port + ichdev->roff_sr) & ICH_DCH)) ; /* reset whole DMA things */ iputbyte(chip, port + ICH_REG_OFF_CR, ICH_RESETREGS); } return 0; } static int snd_intel8x0m_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_intel8x0m_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static snd_pcm_uframes_t snd_intel8x0m_pcm_pointer(struct snd_pcm_substream *substream) { struct intel8x0m *chip = snd_pcm_substream_chip(substream); struct ichdev *ichdev = get_ichdev(substream); size_t ptr1, ptr; ptr1 = igetword(chip, ichdev->reg_offset + ichdev->roff_picb) << chip->pcm_pos_shift; if (ptr1 != 0) ptr = ichdev->fragsize1 - ptr1; else ptr = 0; ptr += ichdev->position; if (ptr >= ichdev->size) return 0; return bytes_to_frames(substream->runtime, ptr); } static int snd_intel8x0m_pcm_prepare(struct snd_pcm_substream *substream) { struct intel8x0m *chip = 
snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ichdev *ichdev = get_ichdev(substream); ichdev->physbuf = runtime->dma_addr; ichdev->size = snd_pcm_lib_buffer_bytes(substream); ichdev->fragsize = snd_pcm_lib_period_bytes(substream); snd_ac97_write(ichdev->ac97, AC97_LINE1_RATE, runtime->rate); snd_ac97_write(ichdev->ac97, AC97_LINE1_LEVEL, 0); snd_intel8x0m_setup_periods(chip, ichdev); return 0; } static struct snd_pcm_hardware snd_intel8x0m_stream = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_KNOT, .rate_min = 8000, .rate_max = 16000, .channels_min = 1, .channels_max = 1, .buffer_bytes_max = 64 * 1024, .period_bytes_min = 32, .period_bytes_max = 64 * 1024, .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static int snd_intel8x0m_pcm_open(struct snd_pcm_substream *substream, struct ichdev *ichdev) { static unsigned int rates[] = { 8000, 9600, 12000, 16000 }; static struct snd_pcm_hw_constraint_list hw_constraints_rates = { .count = ARRAY_SIZE(rates), .list = rates, .mask = 0, }; struct snd_pcm_runtime *runtime = substream->runtime; int err; ichdev->substream = substream; runtime->hw = snd_intel8x0m_stream; err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates); if ( err < 0 ) return err; runtime->private_data = ichdev; return 0; } static int snd_intel8x0m_playback_open(struct snd_pcm_substream *substream) { struct intel8x0m *chip = snd_pcm_substream_chip(substream); return snd_intel8x0m_pcm_open(substream, &chip->ichd[ICHD_MDMOUT]); } static int snd_intel8x0m_playback_close(struct snd_pcm_substream *substream) { struct intel8x0m *chip = snd_pcm_substream_chip(substream); chip->ichd[ICHD_MDMOUT].substream = NULL; return 0; } static int 
snd_intel8x0m_capture_open(struct snd_pcm_substream *substream) { struct intel8x0m *chip = snd_pcm_substream_chip(substream); return snd_intel8x0m_pcm_open(substream, &chip->ichd[ICHD_MDMIN]); } static int snd_intel8x0m_capture_close(struct snd_pcm_substream *substream) { struct intel8x0m *chip = snd_pcm_substream_chip(substream); chip->ichd[ICHD_MDMIN].substream = NULL; return 0; } static struct snd_pcm_ops snd_intel8x0m_playback_ops = { .open = snd_intel8x0m_playback_open, .close = snd_intel8x0m_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_intel8x0m_hw_params, .hw_free = snd_intel8x0m_hw_free, .prepare = snd_intel8x0m_pcm_prepare, .trigger = snd_intel8x0m_pcm_trigger, .pointer = snd_intel8x0m_pcm_pointer, }; static struct snd_pcm_ops snd_intel8x0m_capture_ops = { .open = snd_intel8x0m_capture_open, .close = snd_intel8x0m_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_intel8x0m_hw_params, .hw_free = snd_intel8x0m_hw_free, .prepare = snd_intel8x0m_pcm_prepare, .trigger = snd_intel8x0m_pcm_trigger, .pointer = snd_intel8x0m_pcm_pointer, }; struct ich_pcm_table { char *suffix; struct snd_pcm_ops *playback_ops; struct snd_pcm_ops *capture_ops; size_t prealloc_size; size_t prealloc_max_size; int ac97_idx; }; static int __devinit snd_intel8x0m_pcm1(struct intel8x0m *chip, int device, struct ich_pcm_table *rec) { struct snd_pcm *pcm; int err; char name[32]; if (rec->suffix) sprintf(name, "Intel ICH - %s", rec->suffix); else strcpy(name, "Intel ICH"); err = snd_pcm_new(chip->card, name, device, rec->playback_ops ? 1 : 0, rec->capture_ops ? 
1 : 0, &pcm); if (err < 0) return err; if (rec->playback_ops) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, rec->playback_ops); if (rec->capture_ops) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, rec->capture_ops); pcm->private_data = chip; pcm->info_flags = 0; pcm->dev_class = SNDRV_PCM_CLASS_MODEM; if (rec->suffix) sprintf(pcm->name, "%s - %s", chip->card->shortname, rec->suffix); else strcpy(pcm->name, chip->card->shortname); chip->pcm[device] = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), rec->prealloc_size, rec->prealloc_max_size); return 0; } static struct ich_pcm_table intel_pcms[] __devinitdata = { { .suffix = "Modem", .playback_ops = &snd_intel8x0m_playback_ops, .capture_ops = &snd_intel8x0m_capture_ops, .prealloc_size = 32 * 1024, .prealloc_max_size = 64 * 1024, }, }; static int __devinit snd_intel8x0m_pcm(struct intel8x0m *chip) { int i, tblsize, device, err; struct ich_pcm_table *tbl, *rec; #if 1 tbl = intel_pcms; tblsize = 1; #else switch (chip->device_type) { case DEVICE_NFORCE: tbl = nforce_pcms; tblsize = ARRAY_SIZE(nforce_pcms); break; case DEVICE_ALI: tbl = ali_pcms; tblsize = ARRAY_SIZE(ali_pcms); break; default: tbl = intel_pcms; tblsize = 2; break; } #endif device = 0; for (i = 0; i < tblsize; i++) { rec = tbl + i; if (i > 0 && rec->ac97_idx) { /* activate PCM only when associated AC'97 codec */ if (! 
chip->ichd[rec->ac97_idx].ac97) continue; } err = snd_intel8x0m_pcm1(chip, device, rec); if (err < 0) return err; device++; } chip->pcm_devs = device; return 0; } /* * Mixer part */ static void snd_intel8x0m_mixer_free_ac97_bus(struct snd_ac97_bus *bus) { struct intel8x0m *chip = bus->private_data; chip->ac97_bus = NULL; } static void snd_intel8x0m_mixer_free_ac97(struct snd_ac97 *ac97) { struct intel8x0m *chip = ac97->private_data; chip->ac97 = NULL; } static int __devinit snd_intel8x0m_mixer(struct intel8x0m *chip, int ac97_clock) { struct snd_ac97_bus *pbus; struct snd_ac97_template ac97; struct snd_ac97 *x97; int err; unsigned int glob_sta = 0; static struct snd_ac97_bus_ops ops = { .write = snd_intel8x0m_codec_write, .read = snd_intel8x0m_codec_read, }; chip->in_ac97_init = 1; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = chip; ac97.private_free = snd_intel8x0m_mixer_free_ac97; ac97.scaps = AC97_SCAP_SKIP_AUDIO | AC97_SCAP_POWER_SAVE; glob_sta = igetdword(chip, ICHREG(GLOB_STA)); if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus)) < 0) goto __err; pbus->private_free = snd_intel8x0m_mixer_free_ac97_bus; if (ac97_clock >= 8000 && ac97_clock <= 48000) pbus->clock = ac97_clock; chip->ac97_bus = pbus; ac97.pci = chip->pci; ac97.num = glob_sta & ICH_SCR ? 
1 : 0; if ((err = snd_ac97_mixer(pbus, &ac97, &x97)) < 0) { snd_printk(KERN_ERR "Unable to initialize codec #%d\n", ac97.num); if (ac97.num == 0) goto __err; return err; } chip->ac97 = x97; if(ac97_is_modem(x97) && !chip->ichd[ICHD_MDMIN].ac97) { chip->ichd[ICHD_MDMIN].ac97 = x97; chip->ichd[ICHD_MDMOUT].ac97 = x97; } chip->in_ac97_init = 0; return 0; __err: /* clear the cold-reset bit for the next chance */ if (chip->device_type != DEVICE_ALI) iputdword(chip, ICHREG(GLOB_CNT), igetdword(chip, ICHREG(GLOB_CNT)) & ~ICH_AC97COLD); return err; } /* * */ static int snd_intel8x0m_ich_chip_init(struct intel8x0m *chip, int probing) { unsigned long end_time; unsigned int cnt, status, nstatus; /* put logic to right state */ /* first clear status bits */ status = ICH_RCS | ICH_MIINT | ICH_MOINT; cnt = igetdword(chip, ICHREG(GLOB_STA)); iputdword(chip, ICHREG(GLOB_STA), cnt & status); /* ACLink on, 2 channels */ cnt = igetdword(chip, ICHREG(GLOB_CNT)); cnt &= ~(ICH_ACLINK); /* finish cold or do warm reset */ cnt |= (cnt & ICH_AC97COLD) == 0 ? ICH_AC97COLD : ICH_AC97WARM; iputdword(chip, ICHREG(GLOB_CNT), cnt); usleep_range(500, 1000); /* give warm reset some time */ end_time = jiffies + HZ / 4; do { if ((igetdword(chip, ICHREG(GLOB_CNT)) & ICH_AC97WARM) == 0) goto __ok; schedule_timeout_uninterruptible(1); } while (time_after_eq(end_time, jiffies)); snd_printk(KERN_ERR "AC'97 warm reset still in progress? [0x%x]\n", igetdword(chip, ICHREG(GLOB_CNT))); return -EIO; __ok: if (probing) { /* wait for any codec ready status. * Once it becomes ready it should remain ready * as long as we do not disable the ac97 link. */ end_time = jiffies + HZ; do { status = igetdword(chip, ICHREG(GLOB_STA)) & (ICH_PCR | ICH_SCR | ICH_TCR); if (status) break; schedule_timeout_uninterruptible(1); } while (time_after_eq(end_time, jiffies)); if (! 
status) { /* no codec is found */ snd_printk(KERN_ERR "codec_ready: codec is not ready [0x%x]\n", igetdword(chip, ICHREG(GLOB_STA))); return -EIO; } /* up to two codecs (modem cannot be tertiary with ICH4) */ nstatus = ICH_PCR | ICH_SCR; /* wait for other codecs ready status. */ end_time = jiffies + HZ / 4; while (status != nstatus && time_after_eq(end_time, jiffies)) { schedule_timeout_uninterruptible(1); status |= igetdword(chip, ICHREG(GLOB_STA)) & nstatus; } } else { /* resume phase */ status = 0; if (chip->ac97) status |= get_ich_codec_bit(chip, chip->ac97->num); /* wait until all the probed codecs are ready */ end_time = jiffies + HZ; do { nstatus = igetdword(chip, ICHREG(GLOB_STA)) & (ICH_PCR | ICH_SCR | ICH_TCR); if (status == nstatus) break; schedule_timeout_uninterruptible(1); } while (time_after_eq(end_time, jiffies)); } if (chip->device_type == DEVICE_SIS) { /* unmute the output on SIS7012 */ iputword(chip, 0x4c, igetword(chip, 0x4c) | 1); } return 0; } static int snd_intel8x0m_chip_init(struct intel8x0m *chip, int probing) { unsigned int i; int err; if ((err = snd_intel8x0m_ich_chip_init(chip, probing)) < 0) return err; iagetword(chip, 0); /* clear semaphore flag */ /* disable interrupts */ for (i = 0; i < chip->bdbars_count; i++) iputbyte(chip, ICH_REG_OFF_CR + chip->ichd[i].reg_offset, 0x00); /* reset channels */ for (i = 0; i < chip->bdbars_count; i++) iputbyte(chip, ICH_REG_OFF_CR + chip->ichd[i].reg_offset, ICH_RESETREGS); /* initialize Buffer Descriptor Lists */ for (i = 0; i < chip->bdbars_count; i++) iputdword(chip, ICH_REG_OFF_BDBAR + chip->ichd[i].reg_offset, chip->ichd[i].bdbar_addr); return 0; } static int snd_intel8x0m_free(struct intel8x0m *chip) { unsigned int i; if (chip->irq < 0) goto __hw_end; /* disable interrupts */ for (i = 0; i < chip->bdbars_count; i++) iputbyte(chip, ICH_REG_OFF_CR + chip->ichd[i].reg_offset, 0x00); /* reset channels */ for (i = 0; i < chip->bdbars_count; i++) iputbyte(chip, ICH_REG_OFF_CR + 
chip->ichd[i].reg_offset, ICH_RESETREGS); __hw_end: if (chip->irq >= 0) free_irq(chip->irq, chip); if (chip->bdbars.area) snd_dma_free_pages(&chip->bdbars); if (chip->addr) pci_iounmap(chip->pci, chip->addr); if (chip->bmaddr) pci_iounmap(chip->pci, chip->bmaddr); pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); return 0; } #ifdef CONFIG_PM /* * power management */ static int intel8x0m_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct intel8x0m *chip = card->private_data; int i; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); for (i = 0; i < chip->pcm_devs; i++) snd_pcm_suspend_all(chip->pcm[i]); snd_ac97_suspend(chip->ac97); if (chip->irq >= 0) { free_irq(chip->irq, chip); chip->irq = -1; } pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int intel8x0m_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct intel8x0m *chip = card->private_data; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "intel8x0m: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); if (request_irq(pci->irq, snd_intel8x0m_interrupt, IRQF_SHARED, card->shortname, chip)) { printk(KERN_ERR "intel8x0m: unable to grab IRQ %d, " "disabling device\n", pci->irq); snd_card_disconnect(card); return -EIO; } chip->irq = pci->irq; snd_intel8x0m_chip_init(chip, 0); snd_ac97_resume(chip->ac97); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM */ #ifdef CONFIG_PROC_FS static void snd_intel8x0m_proc_read(struct snd_info_entry * entry, struct snd_info_buffer *buffer) { struct intel8x0m *chip = entry->private_data; unsigned int tmp; snd_iprintf(buffer, "Intel8x0m\n\n"); if (chip->device_type == DEVICE_ALI) return; tmp = igetdword(chip, ICHREG(GLOB_STA)); snd_iprintf(buffer, 
"Global control : 0x%08x\n", igetdword(chip, ICHREG(GLOB_CNT))); snd_iprintf(buffer, "Global status : 0x%08x\n", tmp); snd_iprintf(buffer, "AC'97 codecs ready :%s%s%s%s\n", tmp & ICH_PCR ? " primary" : "", tmp & ICH_SCR ? " secondary" : "", tmp & ICH_TCR ? " tertiary" : "", (tmp & (ICH_PCR | ICH_SCR | ICH_TCR)) == 0 ? " none" : ""); } static void __devinit snd_intel8x0m_proc_init(struct intel8x0m * chip) { struct snd_info_entry *entry; if (! snd_card_proc_new(chip->card, "intel8x0m", &entry)) snd_info_set_text_ops(entry, chip, snd_intel8x0m_proc_read); } #else /* !CONFIG_PROC_FS */ #define snd_intel8x0m_proc_init(chip) #endif /* CONFIG_PROC_FS */ static int snd_intel8x0m_dev_free(struct snd_device *device) { struct intel8x0m *chip = device->device_data; return snd_intel8x0m_free(chip); } struct ich_reg_info { unsigned int int_sta_mask; unsigned int offset; }; static int __devinit snd_intel8x0m_create(struct snd_card *card, struct pci_dev *pci, unsigned long device_type, struct intel8x0m **r_intel8x0m) { struct intel8x0m *chip; int err; unsigned int i; unsigned int int_sta_masks; struct ichdev *ichdev; static struct snd_device_ops ops = { .dev_free = snd_intel8x0m_dev_free, }; static struct ich_reg_info intel_regs[2] = { { ICH_MIINT, 0 }, { ICH_MOINT, 0x10 }, }; struct ich_reg_info *tbl; *r_intel8x0m = NULL; if ((err = pci_enable_device(pci)) < 0) return err; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&chip->reg_lock); chip->device_type = device_type; chip->card = card; chip->pci = pci; chip->irq = -1; if ((err = pci_request_regions(pci, card->shortname)) < 0) { kfree(chip); pci_disable_device(pci); return err; } if (device_type == DEVICE_ALI) { /* ALI5455 has no ac97 region */ chip->bmaddr = pci_iomap(pci, 0, 0); goto port_inited; } if (pci_resource_flags(pci, 2) & IORESOURCE_MEM) /* ICH4 and Nforce */ chip->addr = pci_iomap(pci, 2, 0); else chip->addr = pci_iomap(pci, 0, 0); if 
(!chip->addr) { snd_printk(KERN_ERR "AC'97 space ioremap problem\n"); snd_intel8x0m_free(chip); return -EIO; } if (pci_resource_flags(pci, 3) & IORESOURCE_MEM) /* ICH4 */ chip->bmaddr = pci_iomap(pci, 3, 0); else chip->bmaddr = pci_iomap(pci, 1, 0); if (!chip->bmaddr) { snd_printk(KERN_ERR "Controller space ioremap problem\n"); snd_intel8x0m_free(chip); return -EIO; } port_inited: if (request_irq(pci->irq, snd_intel8x0m_interrupt, IRQF_SHARED, card->shortname, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_intel8x0m_free(chip); return -EBUSY; } chip->irq = pci->irq; pci_set_master(pci); synchronize_irq(chip->irq); /* initialize offsets */ chip->bdbars_count = 2; tbl = intel_regs; for (i = 0; i < chip->bdbars_count; i++) { ichdev = &chip->ichd[i]; ichdev->ichd = i; ichdev->reg_offset = tbl[i].offset; ichdev->int_sta_mask = tbl[i].int_sta_mask; if (device_type == DEVICE_SIS) { /* SiS 7013 swaps the registers */ ichdev->roff_sr = ICH_REG_OFF_PICB; ichdev->roff_picb = ICH_REG_OFF_SR; } else { ichdev->roff_sr = ICH_REG_OFF_SR; ichdev->roff_picb = ICH_REG_OFF_PICB; } if (device_type == DEVICE_ALI) ichdev->ali_slot = (ichdev->reg_offset - 0x40) / 0x10; } /* SIS7013 handles the pcm data in bytes, others are in words */ chip->pcm_pos_shift = (device_type == DEVICE_SIS) ? 
0 : 1; /* allocate buffer descriptor lists */ /* the start of each lists must be aligned to 8 bytes */ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), chip->bdbars_count * sizeof(u32) * ICH_MAX_FRAGS * 2, &chip->bdbars) < 0) { snd_intel8x0m_free(chip); return -ENOMEM; } /* tables must be aligned to 8 bytes here, but the kernel pages are much bigger, so we don't care (on i386) */ int_sta_masks = 0; for (i = 0; i < chip->bdbars_count; i++) { ichdev = &chip->ichd[i]; ichdev->bdbar = ((u32 *)chip->bdbars.area) + (i * ICH_MAX_FRAGS * 2); ichdev->bdbar_addr = chip->bdbars.addr + (i * sizeof(u32) * ICH_MAX_FRAGS * 2); int_sta_masks |= ichdev->int_sta_mask; } chip->int_sta_reg = ICH_REG_GLOB_STA; chip->int_sta_mask = int_sta_masks; if ((err = snd_intel8x0m_chip_init(chip, 1)) < 0) { snd_intel8x0m_free(chip); return err; } if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_intel8x0m_free(chip); return err; } snd_card_set_dev(card, &pci->dev); *r_intel8x0m = chip; return 0; } static struct shortname_table { unsigned int id; const char *s; } shortnames[] __devinitdata = { { PCI_DEVICE_ID_INTEL_82801AA_6, "Intel 82801AA-ICH" }, { PCI_DEVICE_ID_INTEL_82801AB_6, "Intel 82901AB-ICH0" }, { PCI_DEVICE_ID_INTEL_82801BA_6, "Intel 82801BA-ICH2" }, { PCI_DEVICE_ID_INTEL_440MX_6, "Intel 440MX" }, { PCI_DEVICE_ID_INTEL_82801CA_6, "Intel 82801CA-ICH3" }, { PCI_DEVICE_ID_INTEL_82801DB_6, "Intel 82801DB-ICH4" }, { PCI_DEVICE_ID_INTEL_82801EB_6, "Intel ICH5" }, { PCI_DEVICE_ID_INTEL_ICH6_17, "Intel ICH6" }, { PCI_DEVICE_ID_INTEL_ICH7_19, "Intel ICH7" }, { 0x7446, "AMD AMD768" }, { PCI_DEVICE_ID_SI_7013, "SiS SI7013" }, { PCI_DEVICE_ID_NVIDIA_MCP1_MODEM, "NVidia nForce" }, { PCI_DEVICE_ID_NVIDIA_MCP2_MODEM, "NVidia nForce2" }, { PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM, "NVidia nForce2s" }, { PCI_DEVICE_ID_NVIDIA_MCP3_MODEM, "NVidia nForce3" }, { 0x746e, "AMD AMD8111" }, #if 0 { 0x5455, "ALi M5455" }, #endif { 0 }, }; static int __devinit 
snd_intel8x0m_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { struct snd_card *card; struct intel8x0m *chip; int err; struct shortname_table *name; err = snd_card_create(index, id, THIS_MODULE, 0, &card); if (err < 0) return err; strcpy(card->driver, "ICH-MODEM"); strcpy(card->shortname, "Intel ICH"); for (name = shortnames; name->id; name++) { if (pci->device == name->id) { strcpy(card->shortname, name->s); break; } } strcat(card->shortname," Modem"); if ((err = snd_intel8x0m_create(card, pci, pci_id->driver_data, &chip)) < 0) { snd_card_free(card); return err; } card->private_data = chip; if ((err = snd_intel8x0m_mixer(chip, ac97_clock)) < 0) { snd_card_free(card); return err; } if ((err = snd_intel8x0m_pcm(chip)) < 0) { snd_card_free(card); return err; } snd_intel8x0m_proc_init(chip); sprintf(card->longname, "%s at irq %i", card->shortname, chip->irq); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); return 0; } static void __devexit snd_intel8x0m_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = "Intel ICH Modem", .id_table = snd_intel8x0m_ids, .probe = snd_intel8x0m_probe, .remove = __devexit_p(snd_intel8x0m_remove), #ifdef CONFIG_PM .suspend = intel8x0m_suspend, .resume = intel8x0m_resume, #endif }; static int __init alsa_card_intel8x0m_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_intel8x0m_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_intel8x0m_init) module_exit(alsa_card_intel8x0m_exit)
gpl-2.0
dl12345/kernel_sony_kitakami
drivers/staging/wlan-ng/p80211netdev.c
2624
30780
/* src/p80211/p80211knetdev.c * * Linux Kernel net device interface * * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. * -------------------------------------------------------------------- * * linux-wlan * * The contents of this file are subject to the Mozilla Public * License Version 1.1 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or * implied. See the License for the specific language governing * rights and limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU Public License version 2 (the "GPL"), in which * case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use * your version of this file under the MPL, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete * the provisions above, a recipient may use your version of this * file under either the MPL or the GPL. * * -------------------------------------------------------------------- * * Inquiries regarding the linux-wlan Open Source project can be * made directly to: * * AbsoluteValue Systems Inc. * info@linux-wlan.com * http://www.linux-wlan.com * * -------------------------------------------------------------------- * * Portions of the development of this software were funded by * Intersil Corporation as part of PRISM(R) chipset product development. * * -------------------------------------------------------------------- * * The functions required for a Linux network device are defined here. 
* * -------------------------------------------------------------------- */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/kmod.h> #include <linux/if_arp.h> #include <linux/wireless.h> #include <linux/sockios.h> #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/byteorder/generic.h> #include <linux/bitops.h> #include <linux/uaccess.h> #include <asm/byteorder.h> #ifdef SIOCETHTOOL #include <linux/ethtool.h> #endif #include <net/iw_handler.h> #include <net/net_namespace.h> #include <net/cfg80211.h> #include "p80211types.h" #include "p80211hdr.h" #include "p80211conv.h" #include "p80211mgmt.h" #include "p80211msg.h" #include "p80211netdev.h" #include "p80211ioctl.h" #include "p80211req.h" #include "p80211metastruct.h" #include "p80211metadef.h" #include "cfg80211.c" /* Support functions */ static void p80211netdev_rx_bh(unsigned long arg); /* netdevice method functions */ static int p80211knetdev_init(netdevice_t *netdev); static struct net_device_stats *p80211knetdev_get_stats(netdevice_t *netdev); static int p80211knetdev_open(netdevice_t *netdev); static int p80211knetdev_stop(netdevice_t *netdev); static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, netdevice_t *netdev); static void p80211knetdev_set_multicast_list(netdevice_t *dev); static int p80211knetdev_do_ioctl(netdevice_t *dev, struct ifreq *ifr, int cmd); static int p80211knetdev_set_mac_address(netdevice_t *dev, void *addr); static void p80211knetdev_tx_timeout(netdevice_t *netdev); static int p80211_rx_typedrop(wlandevice_t *wlandev, u16 fc); int wlan_watchdog = 5000; module_param(wlan_watchdog, int, 0644); MODULE_PARM_DESC(wlan_watchdog, "transmit timeout in milliseconds"); int wlan_wext_write = 1; module_param(wlan_wext_write, int, 0644); 
MODULE_PARM_DESC(wlan_wext_write, "enable write wireless extensions"); /*---------------------------------------------------------------- * p80211knetdev_init * * Init method for a Linux netdevice. Called in response to * register_netdev. * * Arguments: * none * * Returns: * nothing ----------------------------------------------------------------*/ static int p80211knetdev_init(netdevice_t *netdev) { /* Called in response to register_netdev */ /* This is usually the probe function, but the probe has */ /* already been done by the MSD and the create_kdev */ /* function. All we do here is return success */ return 0; } /*---------------------------------------------------------------- * p80211knetdev_get_stats * * Statistics retrieval for linux netdevices. Here we're reporting * the Linux i/f level statistics. Hence, for the primary numbers, * we don't want to report the numbers from the MIB. Eventually, * it might be useful to collect some of the error counters though. * * Arguments: * netdev Linux netdevice * * Returns: * the address of the statistics structure ----------------------------------------------------------------*/ static struct net_device_stats *p80211knetdev_get_stats(netdevice_t *netdev) { wlandevice_t *wlandev = netdev->ml_priv; /* TODO: review the MIB stats for items that correspond to linux stats */ return &(wlandev->linux_stats); } /*---------------------------------------------------------------- * p80211knetdev_open * * Linux netdevice open method. Following a successful call here, * the device is supposed to be ready for tx and rx. In our * situation that may not be entirely true due to the state of the * MAC below. 
* * Arguments: * netdev Linux network device structure * * Returns: * zero on success, non-zero otherwise ----------------------------------------------------------------*/ static int p80211knetdev_open(netdevice_t *netdev) { int result = 0; /* success */ wlandevice_t *wlandev = netdev->ml_priv; /* Check to make sure the MSD is running */ if (wlandev->msdstate != WLAN_MSD_RUNNING) return -ENODEV; /* Tell the MSD to open */ if (wlandev->open != NULL) { result = wlandev->open(wlandev); if (result == 0) { netif_start_queue(wlandev->netdev); wlandev->state = WLAN_DEVICE_OPEN; } } else { result = -EAGAIN; } return result; } /*---------------------------------------------------------------- * p80211knetdev_stop * * Linux netdevice stop (close) method. Following this call, * no frames should go up or down through this interface. * * Arguments: * netdev Linux network device structure * * Returns: * zero on success, non-zero otherwise ----------------------------------------------------------------*/ static int p80211knetdev_stop(netdevice_t *netdev) { int result = 0; wlandevice_t *wlandev = netdev->ml_priv; if (wlandev->close != NULL) result = wlandev->close(wlandev); netif_stop_queue(wlandev->netdev); wlandev->state = WLAN_DEVICE_CLOSED; return result; } /*---------------------------------------------------------------- * p80211netdev_rx * * Frame receive function called by the mac specific driver. * * Arguments: * wlandev WLAN network device structure * skb skbuff containing a full 802.11 frame. * Returns: * nothing * Side effects: * ----------------------------------------------------------------*/ void p80211netdev_rx(wlandevice_t *wlandev, struct sk_buff *skb) { /* Enqueue for post-irq processing */ skb_queue_tail(&wlandev->nsd_rxq, skb); tasklet_schedule(&wlandev->rx_bh); } /*---------------------------------------------------------------- * p80211netdev_rx_bh * * Deferred processing of all received frames. 
* * Arguments: * wlandev WLAN network device structure * skb skbuff containing a full 802.11 frame. * Returns: * nothing * Side effects: * ----------------------------------------------------------------*/ static void p80211netdev_rx_bh(unsigned long arg) { wlandevice_t *wlandev = (wlandevice_t *) arg; struct sk_buff *skb = NULL; netdevice_t *dev = wlandev->netdev; struct p80211_hdr_a3 *hdr; u16 fc; /* Let's empty our our queue */ while ((skb = skb_dequeue(&wlandev->nsd_rxq))) { if (wlandev->state == WLAN_DEVICE_OPEN) { if (dev->type != ARPHRD_ETHER) { /* RAW frame; we shouldn't convert it */ /* XXX Append the Prism Header here instead. */ /* set up various data fields */ skb->dev = dev; skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_NONE; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_80211_RAW); dev->last_rx = jiffies; wlandev->linux_stats.rx_packets++; wlandev->linux_stats.rx_bytes += skb->len; netif_rx_ni(skb); continue; } else { hdr = (struct p80211_hdr_a3 *) skb->data; fc = le16_to_cpu(hdr->fc); if (p80211_rx_typedrop(wlandev, fc)) { dev_kfree_skb(skb); continue; } /* perform mcast filtering */ if (wlandev->netdev->flags & IFF_ALLMULTI) { /* allow my local address through */ if (memcmp (hdr->a1, wlandev->netdev->dev_addr, ETH_ALEN) != 0) { /* but reject anything else that isn't multicast */ if (!(hdr->a1[0] & 0x01)) { dev_kfree_skb(skb); continue; } } } if (skb_p80211_to_ether (wlandev, wlandev->ethconv, skb) == 0) { skb->dev->last_rx = jiffies; wlandev->linux_stats.rx_packets++; wlandev->linux_stats.rx_bytes += skb->len; netif_rx_ni(skb); continue; } pr_debug("p80211_to_ether failed.\n"); } } dev_kfree_skb(skb); } } /*---------------------------------------------------------------- * p80211knetdev_hard_start_xmit * * Linux netdevice method for transmitting a frame. * * Arguments: * skb Linux sk_buff containing the frame. * netdev Linux netdevice. * * Side effects: * If the lower layers report that buffers are full. 
netdev->tbusy * will be set to prevent higher layers from sending more traffic. * * Note: If this function returns non-zero, higher layers retain * ownership of the skb. * * Returns: * zero on success, non-zero on failure. ----------------------------------------------------------------*/ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, netdevice_t *netdev) { int result = 0; int txresult = -1; wlandevice_t *wlandev = netdev->ml_priv; union p80211_hdr p80211_hdr; struct p80211_metawep p80211_wep; p80211_wep.data = NULL; if (skb == NULL) return NETDEV_TX_OK; if (wlandev->state != WLAN_DEVICE_OPEN) { result = 1; goto failed; } memset(&p80211_hdr, 0, sizeof(union p80211_hdr)); memset(&p80211_wep, 0, sizeof(struct p80211_metawep)); if (netif_queue_stopped(netdev)) { pr_debug("called when queue stopped.\n"); result = 1; goto failed; } netif_stop_queue(netdev); /* Check to see that a valid mode is set */ switch (wlandev->macmode) { case WLAN_MACMODE_IBSS_STA: case WLAN_MACMODE_ESS_STA: case WLAN_MACMODE_ESS_AP: break; default: /* Mode isn't set yet, just drop the frame * and return success . 
* TODO: we need a saner way to handle this */ if (skb->protocol != ETH_P_80211_RAW) { netif_start_queue(wlandev->netdev); printk(KERN_NOTICE "Tx attempt prior to association, frame dropped.\n"); wlandev->linux_stats.tx_dropped++; result = 0; goto failed; } break; } /* Check for raw transmits */ if (skb->protocol == ETH_P_80211_RAW) { if (!capable(CAP_NET_ADMIN)) { result = 1; goto failed; } /* move the header over */ memcpy(&p80211_hdr, skb->data, sizeof(union p80211_hdr)); skb_pull(skb, sizeof(union p80211_hdr)); } else { if (skb_ether_to_p80211 (wlandev, wlandev->ethconv, skb, &p80211_hdr, &p80211_wep) != 0) { /* convert failed */ pr_debug("ether_to_80211(%d) failed.\n", wlandev->ethconv); result = 1; goto failed; } } if (wlandev->txframe == NULL) { result = 1; goto failed; } netdev->trans_start = jiffies; wlandev->linux_stats.tx_packets++; /* count only the packet payload */ wlandev->linux_stats.tx_bytes += skb->len; txresult = wlandev->txframe(wlandev, skb, &p80211_hdr, &p80211_wep); if (txresult == 0) { /* success and more buf */ /* avail, re: hw_txdata */ netif_wake_queue(wlandev->netdev); result = NETDEV_TX_OK; } else if (txresult == 1) { /* success, no more avail */ pr_debug("txframe success, no more bufs\n"); /* netdev->tbusy = 1; don't set here, irqhdlr */ /* may have already cleared it */ result = NETDEV_TX_OK; } else if (txresult == 2) { /* alloc failure, drop frame */ pr_debug("txframe returned alloc_fail\n"); result = NETDEV_TX_BUSY; } else { /* buffer full or queue busy, drop frame. */ pr_debug("txframe returned full or busy\n"); result = NETDEV_TX_BUSY; } failed: /* Free up the WEP buffer if it's not the same as the skb */ if ((p80211_wep.data) && (p80211_wep.data != skb->data)) kzfree(p80211_wep.data); /* we always free the skb here, never in a lower level. 
*/ if (!result) dev_kfree_skb(skb); return result; } /*---------------------------------------------------------------- * p80211knetdev_set_multicast_list * * Called from higher layers whenever there's a need to set/clear * promiscuous mode or rewrite the multicast list. * * Arguments: * none * * Returns: * nothing ----------------------------------------------------------------*/ static void p80211knetdev_set_multicast_list(netdevice_t *dev) { wlandevice_t *wlandev = dev->ml_priv; /* TODO: real multicast support as well */ if (wlandev->set_multicast_list) wlandev->set_multicast_list(wlandev, dev); } #ifdef SIOCETHTOOL static int p80211netdev_ethtool(wlandevice_t *wlandev, void __user *useraddr) { u32 ethcmd; struct ethtool_drvinfo info; struct ethtool_value edata; memset(&info, 0, sizeof(info)); memset(&edata, 0, sizeof(edata)); if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) return -EFAULT; switch (ethcmd) { case ETHTOOL_GDRVINFO: info.cmd = ethcmd; snprintf(info.driver, sizeof(info.driver), "p80211_%s", wlandev->nsdname); snprintf(info.version, sizeof(info.version), "%s", WLAN_RELEASE); if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; #ifdef ETHTOOL_GLINK case ETHTOOL_GLINK: edata.cmd = ethcmd; if (wlandev->linkstatus && (wlandev->macmode != WLAN_MACMODE_NONE)) { edata.data = 1; } else { edata.data = 0; } if (copy_to_user(useraddr, &edata, sizeof(edata))) return -EFAULT; return 0; #endif } return -EOPNOTSUPP; } #endif /*---------------------------------------------------------------- * p80211knetdev_do_ioctl * * Handle an ioctl call on one of our devices. Everything Linux * ioctl specific is done here. Then we pass the contents of the * ifr->data to the request message handler. * * Arguments: * dev Linux kernel netdevice * ifr Our private ioctl request structure, typed for the * generic struct ifreq so we can use ptr to func * w/o cast. * * Returns: * zero on success, a negative errno on failure. 
Possible values: * -ENETDOWN Device isn't up. * -EBUSY cmd already in progress * -ETIME p80211 cmd timed out (MSD may have its own timers) * -EFAULT memory fault copying msg from user buffer * -ENOMEM unable to allocate kernel msg buffer * -ENOSYS bad magic, it the cmd really for us? * -EintR sleeping on cmd, awakened by signal, cmd cancelled. * * Call Context: * Process thread (ioctl caller). TODO: SMP support may require * locks. ----------------------------------------------------------------*/ static int p80211knetdev_do_ioctl(netdevice_t *dev, struct ifreq *ifr, int cmd) { int result = 0; struct p80211ioctl_req *req = (struct p80211ioctl_req *) ifr; wlandevice_t *wlandev = dev->ml_priv; u8 *msgbuf; pr_debug("rx'd ioctl, cmd=%d, len=%d\n", cmd, req->len); #ifdef SIOCETHTOOL if (cmd == SIOCETHTOOL) { result = p80211netdev_ethtool(wlandev, (void __user *)ifr->ifr_data); goto bail; } #endif /* Test the magic, assume ifr is good if it's there */ if (req->magic != P80211_IOCTL_MAGIC) { result = -ENOSYS; goto bail; } if (cmd == P80211_IFTEST) { result = 0; goto bail; } else if (cmd != P80211_IFREQ) { result = -ENOSYS; goto bail; } /* Allocate a buf of size req->len */ msgbuf = kmalloc(req->len, GFP_KERNEL); if (msgbuf) { if (copy_from_user(msgbuf, (void __user *)req->data, req->len)) result = -EFAULT; else result = p80211req_dorequest(wlandev, msgbuf); if (result == 0) { if (copy_to_user ((void __user *)req->data, msgbuf, req->len)) { result = -EFAULT; } } kfree(msgbuf); } else { result = -ENOMEM; } bail: /* If allocate,copyfrom or copyto fails, return errno */ return result; } /*---------------------------------------------------------------- * p80211knetdev_set_mac_address * * Handles the ioctl for changing the MACAddress of a netdevice * * references: linux/netdevice.h and drivers/net/net_init.c * * NOTE: [MSM] We only prevent address changes when the netdev is * up. We don't control anything based on dot11 state. 
If the * address is changed on a STA that's currently associated, you * will probably lose the ability to send and receive data frames. * Just be aware. Therefore, this should usually only be done * prior to scan/join/auth/assoc. * * Arguments: * dev netdevice struct * addr the new MACAddress (a struct) * * Returns: * zero on success, a negative errno on failure. Possible values: * -EBUSY device is bussy (cmd not possible) * -and errors returned by: p80211req_dorequest(..) * * by: Collin R. Mulliner <collin@mulliner.org> ----------------------------------------------------------------*/ static int p80211knetdev_set_mac_address(netdevice_t *dev, void *addr) { struct sockaddr *new_addr = addr; struct p80211msg_dot11req_mibset dot11req; p80211item_unk392_t *mibattr; p80211item_pstr6_t *macaddr; p80211item_uint32_t *resultcode; int result; /* If we're running, we don't allow MAC address changes */ if (netif_running(dev)) return -EBUSY; /* Set up some convenience pointers. */ mibattr = &dot11req.mibattribute; macaddr = (p80211item_pstr6_t *) &mibattr->data; resultcode = &dot11req.resultcode; /* Set up a dot11req_mibset */ memset(&dot11req, 0, sizeof(struct p80211msg_dot11req_mibset)); dot11req.msgcode = DIDmsg_dot11req_mibset; dot11req.msglen = sizeof(struct p80211msg_dot11req_mibset); memcpy(dot11req.devname, ((wlandevice_t *) dev->ml_priv)->name, WLAN_DEVNAMELEN_MAX - 1); /* Set up the mibattribute argument */ mibattr->did = DIDmsg_dot11req_mibset_mibattribute; mibattr->status = P80211ENUM_msgitem_status_data_ok; mibattr->len = sizeof(mibattr->data); macaddr->did = DIDmib_dot11mac_dot11OperationTable_dot11MACAddress; macaddr->status = P80211ENUM_msgitem_status_data_ok; macaddr->len = sizeof(macaddr->data); macaddr->data.len = ETH_ALEN; memcpy(&macaddr->data.data, new_addr->sa_data, ETH_ALEN); /* Set up the resultcode argument */ resultcode->did = DIDmsg_dot11req_mibset_resultcode; resultcode->status = P80211ENUM_msgitem_status_no_value; resultcode->len = 
sizeof(resultcode->data); resultcode->data = 0; /* now fire the request */ result = p80211req_dorequest(dev->ml_priv, (u8 *) &dot11req); /* If the request wasn't successful, report an error and don't * change the netdev address */ if (result != 0 || resultcode->data != P80211ENUM_resultcode_success) { printk(KERN_ERR "Low-level driver failed dot11req_mibset(dot11MACAddress).\n"); result = -EADDRNOTAVAIL; } else { /* everything's ok, change the addr in netdev */ memcpy(dev->dev_addr, new_addr->sa_data, dev->addr_len); } return result; } static int wlan_change_mtu(netdevice_t *dev, int new_mtu) { /* 2312 is max 802.11 payload, 20 is overhead, (ether + llc +snap) and another 8 for wep. */ if ((new_mtu < 68) || (new_mtu > (2312 - 20 - 8))) return -EINVAL; dev->mtu = new_mtu; return 0; } static const struct net_device_ops p80211_netdev_ops = { .ndo_init = p80211knetdev_init, .ndo_open = p80211knetdev_open, .ndo_stop = p80211knetdev_stop, .ndo_get_stats = p80211knetdev_get_stats, .ndo_start_xmit = p80211knetdev_hard_start_xmit, .ndo_set_rx_mode = p80211knetdev_set_multicast_list, .ndo_do_ioctl = p80211knetdev_do_ioctl, .ndo_set_mac_address = p80211knetdev_set_mac_address, .ndo_tx_timeout = p80211knetdev_tx_timeout, .ndo_change_mtu = wlan_change_mtu, .ndo_validate_addr = eth_validate_addr, }; /*---------------------------------------------------------------- * wlan_setup * * Roughly matches the functionality of ether_setup. Here * we set up any members of the wlandevice structure that are common * to all devices. Additionally, we allocate a linux 'struct device' * and perform the same setup as ether_setup. * * Note: It's important that the caller have setup the wlandev->name * ptr prior to calling this function. * * Arguments: * wlandev ptr to the wlandev structure for the * interface. * physdev ptr to usb device * Returns: * zero on success, non-zero otherwise. * Call Context: * Should be process thread. We'll assume it might be * interrupt though. 
When we add support for statically * compiled drivers, this function will be called in the * context of the kernel startup code. ----------------------------------------------------------------*/ int wlan_setup(wlandevice_t *wlandev, struct device *physdev) { int result = 0; netdevice_t *netdev; struct wiphy *wiphy; struct wireless_dev *wdev; /* Set up the wlandev */ wlandev->state = WLAN_DEVICE_CLOSED; wlandev->ethconv = WLAN_ETHCONV_8021h; wlandev->macmode = WLAN_MACMODE_NONE; /* Set up the rx queue */ skb_queue_head_init(&wlandev->nsd_rxq); tasklet_init(&wlandev->rx_bh, p80211netdev_rx_bh, (unsigned long)wlandev); /* Allocate and initialize the wiphy struct */ wiphy = wlan_create_wiphy(physdev, wlandev); if (wiphy == NULL) { printk(KERN_ERR "Failed to alloc wiphy.\n"); return 1; } /* Allocate and initialize the struct device */ netdev = alloc_netdev(sizeof(struct wireless_dev), "wlan%d", ether_setup); if (netdev == NULL) { printk(KERN_ERR "Failed to alloc netdev.\n"); wlan_free_wiphy(wiphy); result = 1; } else { wlandev->netdev = netdev; netdev->ml_priv = wlandev; netdev->netdev_ops = &p80211_netdev_ops; wdev = netdev_priv(netdev); wdev->wiphy = wiphy; wdev->iftype = NL80211_IFTYPE_STATION; netdev->ieee80211_ptr = wdev; netif_stop_queue(netdev); netif_carrier_off(netdev); } return result; } /*---------------------------------------------------------------- * wlan_unsetup * * This function is paired with the wlan_setup routine. It should * be called after unregister_wlandev. Basically, all it does is * free the 'struct device' that's associated with the wlandev. * We do it here because the 'struct device' isn't allocated * explicitly in the driver code, it's done in wlan_setup. To * do the free in the driver might seem like 'magic'. * * Arguments: * wlandev ptr to the wlandev structure for the * interface. * Call Context: * Should be process thread. We'll assume it might be * interrupt though. 
When we add support for statically * compiled drivers, this function will be called in the * context of the kernel startup code. ----------------------------------------------------------------*/ void wlan_unsetup(wlandevice_t *wlandev) { struct wireless_dev *wdev; tasklet_kill(&wlandev->rx_bh); if (wlandev->netdev) { wdev = netdev_priv(wlandev->netdev); if (wdev->wiphy) wlan_free_wiphy(wdev->wiphy); free_netdev(wlandev->netdev); wlandev->netdev = NULL; } } /*---------------------------------------------------------------- * register_wlandev * * Roughly matches the functionality of register_netdev. This function * is called after the driver has successfully probed and set up the * resources for the device. It's now ready to become a named device * in the Linux system. * * First we allocate a name for the device (if not already set), then * we call the Linux function register_netdevice. * * Arguments: * wlandev ptr to the wlandev structure for the * interface. * Returns: * zero on success, non-zero otherwise. * Call Context: * Can be either interrupt or not. ----------------------------------------------------------------*/ int register_wlandev(wlandevice_t *wlandev) { return register_netdev(wlandev->netdev); } /*---------------------------------------------------------------- * unregister_wlandev * * Roughly matches the functionality of unregister_netdev. This * function is called to remove a named device from the system. * * First we tell linux that the device should no longer exist. * Then we remove it from the list of known wlan devices. * * Arguments: * wlandev ptr to the wlandev structure for the * interface. * Returns: * zero on success, non-zero otherwise. * Call Context: * Can be either interrupt or not. 
----------------------------------------------------------------*/ int unregister_wlandev(wlandevice_t *wlandev) { struct sk_buff *skb; unregister_netdev(wlandev->netdev); /* Now to clean out the rx queue */ while ((skb = skb_dequeue(&wlandev->nsd_rxq))) dev_kfree_skb(skb); return 0; } /*---------------------------------------------------------------- * p80211netdev_hwremoved * * Hardware removed notification. This function should be called * immediately after an MSD has detected that the underlying hardware * has been yanked out from under us. The primary things we need * to do are: * - Mark the wlandev * - Prevent any further traffic from the knetdev i/f * - Prevent any further requests from mgmt i/f * - If there are any waitq'd mgmt requests or mgmt-frame exchanges, * shut them down. * - Call the MSD hwremoved function. * * The remainder of the cleanup will be handled by unregister(). * Our primary goal here is to prevent as much tickling of the MSD * as possible since the MSD is already in a 'wounded' state. * * TODO: As new features are added, this function should be * updated. * * Arguments: * wlandev WLAN network device structure * Returns: * nothing * Side effects: * * Call context: * Usually interrupt. ----------------------------------------------------------------*/ void p80211netdev_hwremoved(wlandevice_t *wlandev) { wlandev->hwremoved = 1; if (wlandev->state == WLAN_DEVICE_OPEN) netif_stop_queue(wlandev->netdev); netif_device_detach(wlandev->netdev); } /*---------------------------------------------------------------- * p80211_rx_typedrop * * Classifies the frame, increments the appropriate counter, and * returns 0|1|2 indicating whether the driver should handle, ignore, or * drop the frame * * Arguments: * wlandev wlan device structure * fc frame control field * * Returns: * zero if the frame should be handled by the driver, * one if the frame should be ignored * anything else means we drop it. 
* * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ static int p80211_rx_typedrop(wlandevice_t *wlandev, u16 fc) { u16 ftype; u16 fstype; int drop = 0; /* Classify frame, increment counter */ ftype = WLAN_GET_FC_FTYPE(fc); fstype = WLAN_GET_FC_FSTYPE(fc); #if 0 pr_debug("rx_typedrop : ftype=%d fstype=%d.\n", ftype, fstype); #endif switch (ftype) { case WLAN_FTYPE_MGMT: if ((wlandev->netdev->flags & IFF_PROMISC) || (wlandev->netdev->flags & IFF_ALLMULTI)) { drop = 1; break; } pr_debug("rx'd mgmt:\n"); wlandev->rx.mgmt++; switch (fstype) { case WLAN_FSTYPE_ASSOCREQ: /* printk("assocreq"); */ wlandev->rx.assocreq++; break; case WLAN_FSTYPE_ASSOCRESP: /* printk("assocresp"); */ wlandev->rx.assocresp++; break; case WLAN_FSTYPE_REASSOCREQ: /* printk("reassocreq"); */ wlandev->rx.reassocreq++; break; case WLAN_FSTYPE_REASSOCRESP: /* printk("reassocresp"); */ wlandev->rx.reassocresp++; break; case WLAN_FSTYPE_PROBEREQ: /* printk("probereq"); */ wlandev->rx.probereq++; break; case WLAN_FSTYPE_PROBERESP: /* printk("proberesp"); */ wlandev->rx.proberesp++; break; case WLAN_FSTYPE_BEACON: /* printk("beacon"); */ wlandev->rx.beacon++; break; case WLAN_FSTYPE_ATIM: /* printk("atim"); */ wlandev->rx.atim++; break; case WLAN_FSTYPE_DISASSOC: /* printk("disassoc"); */ wlandev->rx.disassoc++; break; case WLAN_FSTYPE_AUTHEN: /* printk("authen"); */ wlandev->rx.authen++; break; case WLAN_FSTYPE_DEAUTHEN: /* printk("deauthen"); */ wlandev->rx.deauthen++; break; default: /* printk("unknown"); */ wlandev->rx.mgmt_unknown++; break; } /* printk("\n"); */ drop = 2; break; case WLAN_FTYPE_CTL: if ((wlandev->netdev->flags & IFF_PROMISC) || (wlandev->netdev->flags & IFF_ALLMULTI)) { drop = 1; break; } pr_debug("rx'd ctl:\n"); wlandev->rx.ctl++; switch (fstype) { case WLAN_FSTYPE_PSPOLL: /* printk("pspoll"); */ wlandev->rx.pspoll++; break; case WLAN_FSTYPE_RTS: /* printk("rts"); */ wlandev->rx.rts++; break; case WLAN_FSTYPE_CTS: /* 
printk("cts"); */ wlandev->rx.cts++; break; case WLAN_FSTYPE_ACK: /* printk("ack"); */ wlandev->rx.ack++; break; case WLAN_FSTYPE_CFEND: /* printk("cfend"); */ wlandev->rx.cfend++; break; case WLAN_FSTYPE_CFENDCFACK: /* printk("cfendcfack"); */ wlandev->rx.cfendcfack++; break; default: /* printk("unknown"); */ wlandev->rx.ctl_unknown++; break; } /* printk("\n"); */ drop = 2; break; case WLAN_FTYPE_DATA: wlandev->rx.data++; switch (fstype) { case WLAN_FSTYPE_DATAONLY: wlandev->rx.dataonly++; break; case WLAN_FSTYPE_DATA_CFACK: wlandev->rx.data_cfack++; break; case WLAN_FSTYPE_DATA_CFPOLL: wlandev->rx.data_cfpoll++; break; case WLAN_FSTYPE_DATA_CFACK_CFPOLL: wlandev->rx.data__cfack_cfpoll++; break; case WLAN_FSTYPE_NULL: pr_debug("rx'd data:null\n"); wlandev->rx.null++; break; case WLAN_FSTYPE_CFACK: pr_debug("rx'd data:cfack\n"); wlandev->rx.cfack++; break; case WLAN_FSTYPE_CFPOLL: pr_debug("rx'd data:cfpoll\n"); wlandev->rx.cfpoll++; break; case WLAN_FSTYPE_CFACK_CFPOLL: pr_debug("rx'd data:cfack_cfpoll\n"); wlandev->rx.cfack_cfpoll++; break; default: /* printk("unknown"); */ wlandev->rx.data_unknown++; break; } break; } return drop; } static void p80211knetdev_tx_timeout(netdevice_t *netdev) { wlandevice_t *wlandev = netdev->ml_priv; if (wlandev->tx_timeout) { wlandev->tx_timeout(wlandev); } else { printk(KERN_WARNING "Implement tx_timeout for %s\n", wlandev->nsdname); netif_wake_queue(wlandev->netdev); } }
gpl-2.0
TheBootloader/android_kernel_samsung_msm8930-common
drivers/block/virtio_blk.c
3904
17789
//#define DEBUG #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/hdreg.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/virtio.h> #include <linux/virtio_blk.h> #include <linux/scatterlist.h> #include <linux/string_helpers.h> #include <scsi/scsi_cmnd.h> #include <linux/idr.h> #define PART_BITS 4 static int major; static DEFINE_IDA(vd_index_ida); struct workqueue_struct *virtblk_wq; struct virtio_blk { spinlock_t lock; struct virtio_device *vdev; struct virtqueue *vq; /* The disk structure for the kernel. */ struct gendisk *disk; /* Request tracking. */ struct list_head reqs; mempool_t *pool; /* Process context for config space updates */ struct work_struct config_work; /* Lock for config space updates */ struct mutex config_lock; /* enable config space updates */ bool config_enable; /* What host tells us, plus 2 for header & tailer. */ unsigned int sg_elems; /* Ida index - used to track minor number allocations. */ int index; /* Scatterlist: can be too big for stack. 
*/ struct scatterlist sg[/*sg_elems*/]; }; struct virtblk_req { struct list_head list; struct request *req; struct virtio_blk_outhdr out_hdr; struct virtio_scsi_inhdr in_hdr; u8 status; }; static void blk_done(struct virtqueue *vq) { struct virtio_blk *vblk = vq->vdev->priv; struct virtblk_req *vbr; unsigned int len; unsigned long flags; spin_lock_irqsave(&vblk->lock, flags); while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { int error; switch (vbr->status) { case VIRTIO_BLK_S_OK: error = 0; break; case VIRTIO_BLK_S_UNSUPP: error = -ENOTTY; break; default: error = -EIO; break; } switch (vbr->req->cmd_type) { case REQ_TYPE_BLOCK_PC: vbr->req->resid_len = vbr->in_hdr.residual; vbr->req->sense_len = vbr->in_hdr.sense_len; vbr->req->errors = vbr->in_hdr.errors; break; case REQ_TYPE_SPECIAL: vbr->req->errors = (error != 0); break; default: break; } __blk_end_request_all(vbr->req, error); list_del(&vbr->list); mempool_free(vbr, vblk->pool); } /* In case queue is stopped waiting for more buffers. */ blk_start_queue(vblk->disk->queue); spin_unlock_irqrestore(&vblk->lock, flags); } static bool do_req(struct request_queue *q, struct virtio_blk *vblk, struct request *req) { unsigned long num, out = 0, in = 0; struct virtblk_req *vbr; vbr = mempool_alloc(vblk->pool, GFP_ATOMIC); if (!vbr) /* When another request finishes we'll try again. 
*/ return false; vbr->req = req; if (req->cmd_flags & REQ_FLUSH) { vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); } else { switch (req->cmd_type) { case REQ_TYPE_FS: vbr->out_hdr.type = 0; vbr->out_hdr.sector = blk_rq_pos(vbr->req); vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); break; case REQ_TYPE_BLOCK_PC: vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); break; case REQ_TYPE_SPECIAL: vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID; vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); break; default: /* We don't put anything else in the queue. */ BUG(); } } sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); /* * If this is a packet command we need a couple of additional headers. * Behind the normal outhdr we put a segment with the scsi command * block, and before the normal inhdr we put the sense data and the * inhdr with additional status information before the normal inhdr. 
*/ if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len); num = blk_rq_map_sg(q, vbr->req, vblk->sg + out); if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) { sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, SCSI_SENSE_BUFFERSIZE); sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr, sizeof(vbr->in_hdr)); } sg_set_buf(&vblk->sg[num + out + in++], &vbr->status, sizeof(vbr->status)); if (num) { if (rq_data_dir(vbr->req) == WRITE) { vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; out += num; } else { vbr->out_hdr.type |= VIRTIO_BLK_T_IN; in += num; } } if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr, GFP_ATOMIC)<0) { mempool_free(vbr, vblk->pool); return false; } list_add_tail(&vbr->list, &vblk->reqs); return true; } static void do_virtblk_request(struct request_queue *q) { struct virtio_blk *vblk = q->queuedata; struct request *req; unsigned int issued = 0; while ((req = blk_peek_request(q)) != NULL) { BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); /* If this request fails, stop queue and wait for something to finish to restart it. 
*/ if (!do_req(q, vblk, req)) { blk_stop_queue(q); break; } blk_start_request(req); issued++; } if (issued) virtqueue_kick(vblk->vq); } /* return id (s/n) string for *disk to *id_str */ static int virtblk_get_id(struct gendisk *disk, char *id_str) { struct virtio_blk *vblk = disk->private_data; struct request *req; struct bio *bio; int err; bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); if (IS_ERR(bio)) return PTR_ERR(bio); req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL); if (IS_ERR(req)) { bio_put(bio); return PTR_ERR(req); } req->cmd_type = REQ_TYPE_SPECIAL; err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); blk_put_request(req); return err; } static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long data) { struct gendisk *disk = bdev->bd_disk; struct virtio_blk *vblk = disk->private_data; /* * Only allow the generic SCSI ioctls if the host can support it. */ if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) return -ENOTTY; return scsi_cmd_blk_ioctl(bdev, mode, cmd, (void __user *)data); } /* We provide getgeo only to please some old bootloader/partitioning tools */ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) { struct virtio_blk *vblk = bd->bd_disk->private_data; struct virtio_blk_geometry vgeo; int err; /* see if the host passed in geometry config */ err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY, offsetof(struct virtio_blk_config, geometry), &vgeo); if (!err) { geo->heads = vgeo.heads; geo->sectors = vgeo.sectors; geo->cylinders = vgeo.cylinders; } else { /* some standard values, similar to sd */ geo->heads = 1 << 6; geo->sectors = 1 << 5; geo->cylinders = get_capacity(bd->bd_disk) >> 11; } return 0; } static const struct block_device_operations virtblk_fops = { .ioctl = virtblk_ioctl, .owner = THIS_MODULE, .getgeo = virtblk_getgeo, }; static int index_to_minor(int index) { return index << PART_BITS; } static int 
minor_to_index(int minor) { return minor >> PART_BITS; } static ssize_t virtblk_serial_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); int err; /* sysfs gives us a PAGE_SIZE buffer */ BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES); buf[VIRTIO_BLK_ID_BYTES] = '\0'; err = virtblk_get_id(disk, buf); if (!err) return strlen(buf); if (err == -EIO) /* Unsupported? Make it empty. */ return 0; return err; } DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL); static void virtblk_config_changed_work(struct work_struct *work) { struct virtio_blk *vblk = container_of(work, struct virtio_blk, config_work); struct virtio_device *vdev = vblk->vdev; struct request_queue *q = vblk->disk->queue; char cap_str_2[10], cap_str_10[10]; u64 capacity, size; mutex_lock(&vblk->config_lock); if (!vblk->config_enable) goto done; /* Host must always specify the capacity. */ vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity), &capacity, sizeof(capacity)); /* If capacity is too big, truncate with warning. */ if ((sector_t)capacity != capacity) { dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n", (unsigned long long)capacity); capacity = (sector_t)-1; } size = capacity * queue_logical_block_size(q); string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); dev_notice(&vdev->dev, "new size: %llu %d-byte logical blocks (%s/%s)\n", (unsigned long long)capacity, queue_logical_block_size(q), cap_str_10, cap_str_2); set_capacity(vblk->disk, capacity); revalidate_disk(vblk->disk); done: mutex_unlock(&vblk->config_lock); } static void virtblk_config_changed(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; queue_work(virtblk_wq, &vblk->config_work); } static int init_vq(struct virtio_blk *vblk) { int err = 0; /* We expect one virtqueue, for output. 
*/ vblk->vq = virtio_find_single_vq(vblk->vdev, blk_done, "requests"); if (IS_ERR(vblk->vq)) err = PTR_ERR(vblk->vq); return err; } /* * Legacy naming scheme used for virtio devices. We are stuck with it for * virtio blk but don't ever use it for any new driver. */ static int virtblk_name_format(char *prefix, int index, char *buf, int buflen) { const int base = 'z' - 'a' + 1; char *begin = buf + strlen(prefix); char *end = buf + buflen; char *p; int unit; p = end - 1; *p = '\0'; unit = base; do { if (p == begin) return -EINVAL; *--p = 'a' + (index % unit); index = (index / unit) - 1; } while (index >= 0); memmove(begin, p, end - p); memcpy(buf, prefix, strlen(prefix)); return 0; } static int __devinit virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; struct request_queue *q; int err, index; u64 cap; u32 v, blk_size, sg_elems, opt_io_size; u16 min_io_size; u8 physical_block_exp, alignment_offset; err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS), GFP_KERNEL); if (err < 0) goto out; index = err; /* We need to know how many segments before we allocate. */ err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX, offsetof(struct virtio_blk_config, seg_max), &sg_elems); /* We need at least one SG element, whatever they say. */ if (err || !sg_elems) sg_elems = 1; /* We need an extra sg elements at head and tail. */ sg_elems += 2; vdev->priv = vblk = kmalloc(sizeof(*vblk) + sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL); if (!vblk) { err = -ENOMEM; goto out_free_index; } INIT_LIST_HEAD(&vblk->reqs); spin_lock_init(&vblk->lock); vblk->vdev = vdev; vblk->sg_elems = sg_elems; sg_init_table(vblk->sg, vblk->sg_elems); mutex_init(&vblk->config_lock); INIT_WORK(&vblk->config_work, virtblk_config_changed_work); vblk->config_enable = true; err = init_vq(vblk); if (err) goto out_free_vblk; vblk->pool = mempool_create_kmalloc_pool(1,sizeof(struct virtblk_req)); if (!vblk->pool) { err = -ENOMEM; goto out_free_vq; } /* FIXME: How many partitions? 
How long is a piece of string? */ vblk->disk = alloc_disk(1 << PART_BITS); if (!vblk->disk) { err = -ENOMEM; goto out_mempool; } q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock); if (!q) { err = -ENOMEM; goto out_put_disk; } q->queuedata = vblk; virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); vblk->disk->major = major; vblk->disk->first_minor = index_to_minor(index); vblk->disk->private_data = vblk; vblk->disk->fops = &virtblk_fops; vblk->disk->driverfs_dev = &vdev->dev; vblk->index = index; /* configure queue flush support */ if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) blk_queue_flush(q, REQ_FLUSH); /* If disk is read-only in the host, the guest should obey */ if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) set_disk_ro(vblk->disk, 1); /* Host must always specify the capacity. */ vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity), &cap, sizeof(cap)); /* If capacity is too big, truncate with warning. */ if ((sector_t)cap != cap) { dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n", (unsigned long long)cap); cap = (sector_t)-1; } set_capacity(vblk->disk, cap); /* We can handle whatever the host told us to handle. */ blk_queue_max_segments(q, vblk->sg_elems-2); /* No need to bounce any requests */ blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); /* No real sector limit. */ blk_queue_max_hw_sectors(q, -1U); /* Host can optionally specify maximum segment size and number of * segments. 
*/ err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX, offsetof(struct virtio_blk_config, size_max), &v); if (!err) blk_queue_max_segment_size(q, v); else blk_queue_max_segment_size(q, -1U); /* Host can optionally specify the block size of the device */ err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE, offsetof(struct virtio_blk_config, blk_size), &blk_size); if (!err) blk_queue_logical_block_size(q, blk_size); else blk_size = queue_logical_block_size(q); /* Use topology information if available */ err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, offsetof(struct virtio_blk_config, physical_block_exp), &physical_block_exp); if (!err && physical_block_exp) blk_queue_physical_block_size(q, blk_size * (1 << physical_block_exp)); err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, offsetof(struct virtio_blk_config, alignment_offset), &alignment_offset); if (!err && alignment_offset) blk_queue_alignment_offset(q, blk_size * alignment_offset); err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, offsetof(struct virtio_blk_config, min_io_size), &min_io_size); if (!err && min_io_size) blk_queue_io_min(q, blk_size * min_io_size); err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, offsetof(struct virtio_blk_config, opt_io_size), &opt_io_size); if (!err && opt_io_size) blk_queue_io_opt(q, blk_size * opt_io_size); add_disk(vblk->disk); err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial); if (err) goto out_del_disk; return 0; out_del_disk: del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); out_put_disk: put_disk(vblk->disk); out_mempool: mempool_destroy(vblk->pool); out_free_vq: vdev->config->del_vqs(vdev); out_free_vblk: kfree(vblk); out_free_index: ida_simple_remove(&vd_index_ida, index); out: return err; } static void __devexit virtblk_remove(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; int index = vblk->index; /* Prevent config work handler from accessing the device. 
*/ mutex_lock(&vblk->config_lock); vblk->config_enable = false; mutex_unlock(&vblk->config_lock); /* Nothing should be pending. */ BUG_ON(!list_empty(&vblk->reqs)); /* Stop all the virtqueues. */ vdev->config->reset(vdev); flush_work(&vblk->config_work); del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); put_disk(vblk->disk); mempool_destroy(vblk->pool); vdev->config->del_vqs(vdev); kfree(vblk); ida_simple_remove(&vd_index_ida, index); } #ifdef CONFIG_PM static int virtblk_freeze(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; /* Ensure we don't receive any more interrupts */ vdev->config->reset(vdev); /* Prevent config work handler from accessing the device. */ mutex_lock(&vblk->config_lock); vblk->config_enable = false; mutex_unlock(&vblk->config_lock); flush_work(&vblk->config_work); spin_lock_irq(vblk->disk->queue->queue_lock); blk_stop_queue(vblk->disk->queue); spin_unlock_irq(vblk->disk->queue->queue_lock); blk_sync_queue(vblk->disk->queue); vdev->config->del_vqs(vdev); return 0; } static int virtblk_restore(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; int ret; vblk->config_enable = true; ret = init_vq(vdev->priv); if (!ret) { spin_lock_irq(vblk->disk->queue->queue_lock); blk_start_queue(vblk->disk->queue); spin_unlock_irq(vblk->disk->queue->queue_lock); } return ret; } #endif static const struct virtio_device_id id_table[] = { { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY }; /* * virtio_blk causes spurious section mismatch warning by * simultaneously referring to a __devinit and a __devexit function. * Use __refdata to avoid this warning. 
*/ static struct virtio_driver __refdata virtio_blk = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtblk_probe, .remove = __devexit_p(virtblk_remove), .config_changed = virtblk_config_changed, #ifdef CONFIG_PM .freeze = virtblk_freeze, .restore = virtblk_restore, #endif }; static int __init init(void) { int error; virtblk_wq = alloc_workqueue("virtio-blk", 0, 0); if (!virtblk_wq) return -ENOMEM; major = register_blkdev(0, "virtblk"); if (major < 0) { error = major; goto out_destroy_workqueue; } error = register_virtio_driver(&virtio_blk); if (error) goto out_unregister_blkdev; return 0; out_unregister_blkdev: unregister_blkdev(major, "virtblk"); out_destroy_workqueue: destroy_workqueue(virtblk_wq); return error; } static void __exit fini(void) { unregister_blkdev(major, "virtblk"); unregister_virtio_driver(&virtio_blk); destroy_workqueue(virtblk_wq); } module_init(init); module_exit(fini); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio block driver"); MODULE_LICENSE("GPL");
gpl-2.0
rutvik95/android_kernel_frostbite
net/ipv6/netfilter/ip6table_filter.c
4160
3022
/* * This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x. * * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <linux/slab.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("ip6tables filter table"); #define FILTER_VALID_HOOKS ((1 << NF_INET_LOCAL_IN) | \ (1 << NF_INET_FORWARD) | \ (1 << NF_INET_LOCAL_OUT)) static const struct xt_table packet_filter = { .name = "filter", .valid_hooks = FILTER_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV6, .priority = NF_IP6_PRI_FILTER, }; /* The work comes in here from netfilter.c. */ static unsigned int ip6table_filter_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { const struct net *net = dev_net((in != NULL) ? in : out); return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter); } static struct nf_hook_ops *filter_ops __read_mostly; /* Default to forward because I got too much mail already. 
*/ static int forward = NF_ACCEPT; module_param(forward, bool, 0000); static int __net_init ip6table_filter_net_init(struct net *net) { struct ip6t_replace *repl; repl = ip6t_alloc_initial_table(&packet_filter); if (repl == NULL) return -ENOMEM; /* Entry 1 is the FORWARD hook */ ((struct ip6t_standard *)repl->entries)[1].target.verdict = -forward - 1; net->ipv6.ip6table_filter = ip6t_register_table(net, &packet_filter, repl); kfree(repl); if (IS_ERR(net->ipv6.ip6table_filter)) return PTR_ERR(net->ipv6.ip6table_filter); return 0; } static void __net_exit ip6table_filter_net_exit(struct net *net) { ip6t_unregister_table(net, net->ipv6.ip6table_filter); } static struct pernet_operations ip6table_filter_net_ops = { .init = ip6table_filter_net_init, .exit = ip6table_filter_net_exit, }; static int __init ip6table_filter_init(void) { int ret; if (forward < 0 || forward > NF_MAX_VERDICT) { pr_err("iptables forward must be 0 or 1\n"); return -EINVAL; } ret = register_pernet_subsys(&ip6table_filter_net_ops); if (ret < 0) return ret; /* Register hooks */ filter_ops = xt_hook_link(&packet_filter, ip6table_filter_hook); if (IS_ERR(filter_ops)) { ret = PTR_ERR(filter_ops); goto cleanup_table; } return ret; cleanup_table: unregister_pernet_subsys(&ip6table_filter_net_ops); return ret; } static void __exit ip6table_filter_fini(void) { xt_hook_unlink(&packet_filter, filter_ops); unregister_pernet_subsys(&ip6table_filter_net_ops); } module_init(ip6table_filter_init); module_exit(ip6table_filter_fini);
gpl-2.0
bilalliberty/android_kernel_htc_zaraul
arch/sh/boards/mach-se/7724/setup.c
4416
22670
/* * linux/arch/sh/boards/se/7724/setup.c * * Copyright (C) 2009 Renesas Solutions Corp. * * Kuninori Morimoto <morimoto.kuninori@renesas.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/mmc/host.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/mtd/physmap.h> #include <linux/delay.h> #include <linux/smc91x.h> #include <linux/gpio.h> #include <linux/input.h> #include <linux/input/sh_keysc.h> #include <linux/usb/r8a66597.h> #include <linux/sh_eth.h> #include <linux/videodev2.h> #include <video/sh_mobile_lcdc.h> #include <media/sh_mobile_ceu.h> #include <sound/sh_fsi.h> #include <asm/io.h> #include <asm/heartbeat.h> #include <asm/clock.h> #include <asm/suspend.h> #include <cpu/sh7724.h> #include <mach-se/mach/se7724.h> /* * SWx 1234 5678 * ------------------------------------ * SW31 : 1001 1100 : default * SW32 : 0111 1111 : use on board flash * * SW41 : abxx xxxx -> a = 0 : Analog monitor * 1 : Digital monitor * b = 0 : VGA * 1 : 720p */ /* * about 720p * * When you use 1280 x 720 lcdc output, * you should change OSC6 lcdc clock from 25.175MHz to 74.25MHz, * and change SW41 to use 720p */ /* * about sound * * This setup.c supports FSI slave mode. * Please change J20, J21, J22 pin to 1-2 connection. 
*/ /* Heartbeat */ static struct resource heartbeat_resource = { .start = PA_LED, .end = PA_LED, .flags = IORESOURCE_MEM | IORESOURCE_MEM_16BIT, }; static struct platform_device heartbeat_device = { .name = "heartbeat", .id = -1, .num_resources = 1, .resource = &heartbeat_resource, }; /* LAN91C111 */ static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, }; static struct resource smc91x_eth_resources[] = { [0] = { .name = "SMC91C111" , .start = 0x1a300300, .end = 0x1a30030f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ0_SMC, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device smc91x_eth_device = { .name = "smc91x", .num_resources = ARRAY_SIZE(smc91x_eth_resources), .resource = smc91x_eth_resources, .dev = { .platform_data = &smc91x_info, }, }; /* MTD */ static struct mtd_partition nor_flash_partitions[] = { { .name = "uboot", .offset = 0, .size = (1 * 1024 * 1024), .mask_flags = MTD_WRITEABLE, /* Read-only */ }, { .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = (2 * 1024 * 1024), }, { .name = "free-area", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct physmap_flash_data nor_flash_data = { .width = 2, .parts = nor_flash_partitions, .nr_parts = ARRAY_SIZE(nor_flash_partitions), }; static struct resource nor_flash_resources[] = { [0] = { .name = "NOR Flash", .start = 0x00000000, .end = 0x01ffffff, .flags = IORESOURCE_MEM, } }; static struct platform_device nor_flash_device = { .name = "physmap-flash", .resource = nor_flash_resources, .num_resources = ARRAY_SIZE(nor_flash_resources), .dev = { .platform_data = &nor_flash_data, }, }; /* LCDC */ static const struct fb_videomode lcdc_720p_modes[] = { { .name = "LB070WV1", .sync = 0, /* hsync and vsync are active low */ .xres = 1280, .yres = 720, .left_margin = 220, .right_margin = 110, .hsync_len = 40, .upper_margin = 20, .lower_margin = 5, .vsync_len = 5, }, }; static const struct fb_videomode lcdc_vga_modes[] = 
{ { .name = "LB070WV1", .sync = 0, /* hsync and vsync are active low */ .xres = 640, .yres = 480, .left_margin = 105, .right_margin = 50, .hsync_len = 96, .upper_margin = 33, .lower_margin = 10, .vsync_len = 2, }, }; static struct sh_mobile_lcdc_info lcdc_info = { .clock_source = LCDC_CLK_EXTERNAL, .ch[0] = { .chan = LCDC_CHAN_MAINLCD, .fourcc = V4L2_PIX_FMT_RGB565, .clock_divider = 1, .panel_cfg = { /* 7.0 inch */ .width = 152, .height = 91, }, } }; static struct resource lcdc_resources[] = { [0] = { .name = "LCDC", .start = 0xfe940000, .end = 0xfe942fff, .flags = IORESOURCE_MEM, }, [1] = { .start = 106, .flags = IORESOURCE_IRQ, }, }; static struct platform_device lcdc_device = { .name = "sh_mobile_lcdc_fb", .num_resources = ARRAY_SIZE(lcdc_resources), .resource = lcdc_resources, .dev = { .platform_data = &lcdc_info, }, }; /* CEU0 */ static struct sh_mobile_ceu_info sh_mobile_ceu0_info = { .flags = SH_CEU_FLAG_USE_8BIT_BUS, }; static struct resource ceu0_resources[] = { [0] = { .name = "CEU0", .start = 0xfe910000, .end = 0xfe91009f, .flags = IORESOURCE_MEM, }, [1] = { .start = 52, .flags = IORESOURCE_IRQ, }, [2] = { /* place holder for contiguous memory */ }, }; static struct platform_device ceu0_device = { .name = "sh_mobile_ceu", .id = 0, /* "ceu0" clock */ .num_resources = ARRAY_SIZE(ceu0_resources), .resource = ceu0_resources, .dev = { .platform_data = &sh_mobile_ceu0_info, }, }; /* CEU1 */ static struct sh_mobile_ceu_info sh_mobile_ceu1_info = { .flags = SH_CEU_FLAG_USE_8BIT_BUS, }; static struct resource ceu1_resources[] = { [0] = { .name = "CEU1", .start = 0xfe914000, .end = 0xfe91409f, .flags = IORESOURCE_MEM, }, [1] = { .start = 63, .flags = IORESOURCE_IRQ, }, [2] = { /* place holder for contiguous memory */ }, }; static struct platform_device ceu1_device = { .name = "sh_mobile_ceu", .id = 1, /* "ceu1" clock */ .num_resources = ARRAY_SIZE(ceu1_resources), .resource = ceu1_resources, .dev = { .platform_data = &sh_mobile_ceu1_info, }, }; /* FSI */ /* change 
J20, J21, J22 pin to 1-2 connection to use slave mode */ static struct sh_fsi_platform_info fsi_info = { .port_a = { .flags = SH_FSI_BRS_INV, }, }; static struct resource fsi_resources[] = { [0] = { .name = "FSI", .start = 0xFE3C0000, .end = 0xFE3C021d, .flags = IORESOURCE_MEM, }, [1] = { .start = 108, .flags = IORESOURCE_IRQ, }, }; static struct platform_device fsi_device = { .name = "sh_fsi", .id = 0, .num_resources = ARRAY_SIZE(fsi_resources), .resource = fsi_resources, .dev = { .platform_data = &fsi_info, }, }; static struct fsi_ak4642_info fsi_ak4642_info = { .name = "AK4642", .card = "FSIA-AK4642", .cpu_dai = "fsia-dai", .codec = "ak4642-codec.0-0012", .platform = "sh_fsi.0", .id = FSI_PORT_A, }; static struct platform_device fsi_ak4642_device = { .name = "fsi-ak4642-audio", .dev = { .platform_data = &fsi_ak4642_info, }, }; /* KEYSC in SoC (Needs SW33-2 set to ON) */ static struct sh_keysc_info keysc_info = { .mode = SH_KEYSC_MODE_1, .scan_timing = 3, .delay = 50, .keycodes = { KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7, KEY_8, KEY_9, KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, KEY_G, KEY_H, KEY_I, KEY_K, KEY_L, KEY_M, KEY_N, KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U, KEY_V, }, }; static struct resource keysc_resources[] = { [0] = { .name = "KEYSC", .start = 0x044b0000, .end = 0x044b000f, .flags = IORESOURCE_MEM, }, [1] = { .start = 79, .flags = IORESOURCE_IRQ, }, }; static struct platform_device keysc_device = { .name = "sh_keysc", .id = 0, /* "keysc0" clock */ .num_resources = ARRAY_SIZE(keysc_resources), .resource = keysc_resources, .dev = { .platform_data = &keysc_info, }, }; /* SH Eth */ static struct resource sh_eth_resources[] = { [0] = { .start = SH_ETH_ADDR, .end = SH_ETH_ADDR + 0x1FC, .flags = IORESOURCE_MEM, }, [1] = { .start = 91, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct sh_eth_plat_data sh_eth_plat = { .phy = 0x1f, /* SMSC LAN8187 */ .edmac_endian = EDMAC_LITTLE_ENDIAN, }; static struct platform_device 
sh_eth_device = { .name = "sh-eth", .id = 0, .dev = { .platform_data = &sh_eth_plat, }, .num_resources = ARRAY_SIZE(sh_eth_resources), .resource = sh_eth_resources, }; static struct r8a66597_platdata sh7724_usb0_host_data = { .on_chip = 1, }; static struct resource sh7724_usb0_host_resources[] = { [0] = { .start = 0xa4d80000, .end = 0xa4d80124 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = 65, .end = 65, .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, }, }; static struct platform_device sh7724_usb0_host_device = { .name = "r8a66597_hcd", .id = 0, .dev = { .dma_mask = NULL, /* not use dma */ .coherent_dma_mask = 0xffffffff, .platform_data = &sh7724_usb0_host_data, }, .num_resources = ARRAY_SIZE(sh7724_usb0_host_resources), .resource = sh7724_usb0_host_resources, }; static struct r8a66597_platdata sh7724_usb1_gadget_data = { .on_chip = 1, }; static struct resource sh7724_usb1_gadget_resources[] = { [0] = { .start = 0xa4d90000, .end = 0xa4d90123, .flags = IORESOURCE_MEM, }, [1] = { .start = 66, .end = 66, .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, }, }; static struct platform_device sh7724_usb1_gadget_device = { .name = "r8a66597_udc", .id = 1, /* USB1 */ .dev = { .dma_mask = NULL, /* not use dma */ .coherent_dma_mask = 0xffffffff, .platform_data = &sh7724_usb1_gadget_data, }, .num_resources = ARRAY_SIZE(sh7724_usb1_gadget_resources), .resource = sh7724_usb1_gadget_resources, }; static struct resource sdhi0_cn7_resources[] = { [0] = { .name = "SDHI0", .start = 0x04ce0000, .end = 0x04ce00ff, .flags = IORESOURCE_MEM, }, [1] = { .start = 100, .flags = IORESOURCE_IRQ, }, }; static struct sh_mobile_sdhi_info sh7724_sdhi0_data = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, .tmio_caps = MMC_CAP_SDIO_IRQ, }; static struct platform_device sdhi0_cn7_device = { .name = "sh_mobile_sdhi", .id = 0, .num_resources = ARRAY_SIZE(sdhi0_cn7_resources), .resource = sdhi0_cn7_resources, .dev = { .platform_data = &sh7724_sdhi0_data, }, }; static struct 
resource sdhi1_cn8_resources[] = { [0] = { .name = "SDHI1", .start = 0x04cf0000, .end = 0x04cf00ff, .flags = IORESOURCE_MEM, }, [1] = { .start = 23, .flags = IORESOURCE_IRQ, }, }; static struct sh_mobile_sdhi_info sh7724_sdhi1_data = { .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, .tmio_caps = MMC_CAP_SDIO_IRQ, }; static struct platform_device sdhi1_cn8_device = { .name = "sh_mobile_sdhi", .id = 1, .num_resources = ARRAY_SIZE(sdhi1_cn8_resources), .resource = sdhi1_cn8_resources, .dev = { .platform_data = &sh7724_sdhi1_data, }, }; /* IrDA */ static struct resource irda_resources[] = { [0] = { .name = "IrDA", .start = 0xA45D0000, .end = 0xA45D0049, .flags = IORESOURCE_MEM, }, [1] = { .start = 20, .flags = IORESOURCE_IRQ, }, }; static struct platform_device irda_device = { .name = "sh_sir", .num_resources = ARRAY_SIZE(irda_resources), .resource = irda_resources, }; #include <media/ak881x.h> #include <media/sh_vou.h> static struct ak881x_pdata ak881x_pdata = { .flags = AK881X_IF_MODE_SLAVE, }; static struct i2c_board_info ak8813 = { /* With open J18 jumper address is 0x21 */ I2C_BOARD_INFO("ak8813", 0x20), .platform_data = &ak881x_pdata, }; static struct sh_vou_pdata sh_vou_pdata = { .bus_fmt = SH_VOU_BUS_8BIT, .flags = SH_VOU_HSYNC_LOW | SH_VOU_VSYNC_LOW, .board_info = &ak8813, .i2c_adap = 0, }; static struct resource sh_vou_resources[] = { [0] = { .start = 0xfe960000, .end = 0xfe962043, .flags = IORESOURCE_MEM, }, [1] = { .start = 55, .flags = IORESOURCE_IRQ, }, }; static struct platform_device vou_device = { .name = "sh-vou", .id = -1, .num_resources = ARRAY_SIZE(sh_vou_resources), .resource = sh_vou_resources, .dev = { .platform_data = &sh_vou_pdata, }, }; static struct platform_device *ms7724se_devices[] __initdata = { &heartbeat_device, &smc91x_eth_device, &lcdc_device, &nor_flash_device, &ceu0_device, &ceu1_device, &keysc_device, &sh_eth_device, &sh7724_usb0_host_device, &sh7724_usb1_gadget_device, &fsi_device, &fsi_ak4642_device, 
&sdhi0_cn7_device, &sdhi1_cn8_device, &irda_device, &vou_device, }; /* I2C device */ static struct i2c_board_info i2c0_devices[] = { { I2C_BOARD_INFO("ak4642", 0x12), }, }; #define EEPROM_OP 0xBA206000 #define EEPROM_ADR 0xBA206004 #define EEPROM_DATA 0xBA20600C #define EEPROM_STAT 0xBA206010 #define EEPROM_STRT 0xBA206014 static int __init sh_eth_is_eeprom_ready(void) { int t = 10000; while (t--) { if (!__raw_readw(EEPROM_STAT)) return 1; udelay(1); } printk(KERN_ERR "ms7724se can not access to eeprom\n"); return 0; } static void __init sh_eth_init(void) { int i; u16 mac; /* check EEPROM status */ if (!sh_eth_is_eeprom_ready()) return; /* read MAC addr from EEPROM */ for (i = 0 ; i < 3 ; i++) { __raw_writew(0x0, EEPROM_OP); /* read */ __raw_writew(i*2, EEPROM_ADR); __raw_writew(0x1, EEPROM_STRT); if (!sh_eth_is_eeprom_ready()) return; mac = __raw_readw(EEPROM_DATA); sh_eth_plat.mac_addr[i << 1] = mac & 0xff; sh_eth_plat.mac_addr[(i << 1) + 1] = mac >> 8; } } #define SW4140 0xBA201000 #define FPGA_OUT 0xBA200400 #define PORT_HIZA 0xA4050158 #define PORT_MSELCRB 0xA4050182 #define SW41_A 0x0100 #define SW41_B 0x0200 #define SW41_C 0x0400 #define SW41_D 0x0800 #define SW41_E 0x1000 #define SW41_F 0x2000 #define SW41_G 0x4000 #define SW41_H 0x8000 extern char ms7724se_sdram_enter_start; extern char ms7724se_sdram_enter_end; extern char ms7724se_sdram_leave_start; extern char ms7724se_sdram_leave_end; static int __init arch_setup(void) { /* enable I2C device */ i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); return 0; } arch_initcall(arch_setup); static int __init devices_setup(void) { u16 sw = __raw_readw(SW4140); /* select camera, monitor */ struct clk *clk; u16 fpga_out; /* register board specific self-refresh code */ sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF | SUSP_SH_RSTANDBY, &ms7724se_sdram_enter_start, &ms7724se_sdram_enter_end, &ms7724se_sdram_leave_start, &ms7724se_sdram_leave_end); /* Reset Release */ fpga_out = 
__raw_readw(FPGA_OUT); /* bit4: NTSC_PDN, bit5: NTSC_RESET */ fpga_out &= ~((1 << 1) | /* LAN */ (1 << 4) | /* AK8813 PDN */ (1 << 5) | /* AK8813 RESET */ (1 << 6) | /* VIDEO DAC */ (1 << 7) | /* AK4643 */ (1 << 8) | /* IrDA */ (1 << 12) | /* USB0 */ (1 << 14)); /* RMII */ __raw_writew(fpga_out | (1 << 4), FPGA_OUT); udelay(10); /* AK8813 RESET */ __raw_writew(fpga_out | (1 << 5), FPGA_OUT); udelay(10); __raw_writew(fpga_out, FPGA_OUT); /* turn on USB clocks, use external clock */ __raw_writew((__raw_readw(PORT_MSELCRB) & ~0xc000) | 0x8000, PORT_MSELCRB); /* Let LED9 show STATUS2 */ gpio_request(GPIO_FN_STATUS2, NULL); /* Lit LED10 show STATUS0 */ gpio_request(GPIO_FN_STATUS0, NULL); /* Lit LED11 show PDSTATUS */ gpio_request(GPIO_FN_PDSTATUS, NULL); /* enable USB0 port */ __raw_writew(0x0600, 0xa40501d4); /* enable USB1 port */ __raw_writew(0x0600, 0xa4050192); /* enable IRQ 0,1,2 */ gpio_request(GPIO_FN_INTC_IRQ0, NULL); gpio_request(GPIO_FN_INTC_IRQ1, NULL); gpio_request(GPIO_FN_INTC_IRQ2, NULL); /* enable SCIFA3 */ gpio_request(GPIO_FN_SCIF3_I_SCK, NULL); gpio_request(GPIO_FN_SCIF3_I_RXD, NULL); gpio_request(GPIO_FN_SCIF3_I_TXD, NULL); gpio_request(GPIO_FN_SCIF3_I_CTS, NULL); gpio_request(GPIO_FN_SCIF3_I_RTS, NULL); /* enable LCDC */ gpio_request(GPIO_FN_LCDD23, NULL); gpio_request(GPIO_FN_LCDD22, NULL); gpio_request(GPIO_FN_LCDD21, NULL); gpio_request(GPIO_FN_LCDD20, NULL); gpio_request(GPIO_FN_LCDD19, NULL); gpio_request(GPIO_FN_LCDD18, NULL); gpio_request(GPIO_FN_LCDD17, NULL); gpio_request(GPIO_FN_LCDD16, NULL); gpio_request(GPIO_FN_LCDD15, NULL); gpio_request(GPIO_FN_LCDD14, NULL); gpio_request(GPIO_FN_LCDD13, NULL); gpio_request(GPIO_FN_LCDD12, NULL); gpio_request(GPIO_FN_LCDD11, NULL); gpio_request(GPIO_FN_LCDD10, NULL); gpio_request(GPIO_FN_LCDD9, NULL); gpio_request(GPIO_FN_LCDD8, NULL); gpio_request(GPIO_FN_LCDD7, NULL); gpio_request(GPIO_FN_LCDD6, NULL); gpio_request(GPIO_FN_LCDD5, NULL); gpio_request(GPIO_FN_LCDD4, NULL); gpio_request(GPIO_FN_LCDD3, 
NULL); gpio_request(GPIO_FN_LCDD2, NULL); gpio_request(GPIO_FN_LCDD1, NULL); gpio_request(GPIO_FN_LCDD0, NULL); gpio_request(GPIO_FN_LCDDISP, NULL); gpio_request(GPIO_FN_LCDHSYN, NULL); gpio_request(GPIO_FN_LCDDCK, NULL); gpio_request(GPIO_FN_LCDVSYN, NULL); gpio_request(GPIO_FN_LCDDON, NULL); gpio_request(GPIO_FN_LCDVEPWC, NULL); gpio_request(GPIO_FN_LCDVCPWC, NULL); gpio_request(GPIO_FN_LCDRD, NULL); gpio_request(GPIO_FN_LCDLCLK, NULL); __raw_writew((__raw_readw(PORT_HIZA) & ~0x0001), PORT_HIZA); /* enable CEU0 */ gpio_request(GPIO_FN_VIO0_D15, NULL); gpio_request(GPIO_FN_VIO0_D14, NULL); gpio_request(GPIO_FN_VIO0_D13, NULL); gpio_request(GPIO_FN_VIO0_D12, NULL); gpio_request(GPIO_FN_VIO0_D11, NULL); gpio_request(GPIO_FN_VIO0_D10, NULL); gpio_request(GPIO_FN_VIO0_D9, NULL); gpio_request(GPIO_FN_VIO0_D8, NULL); gpio_request(GPIO_FN_VIO0_D7, NULL); gpio_request(GPIO_FN_VIO0_D6, NULL); gpio_request(GPIO_FN_VIO0_D5, NULL); gpio_request(GPIO_FN_VIO0_D4, NULL); gpio_request(GPIO_FN_VIO0_D3, NULL); gpio_request(GPIO_FN_VIO0_D2, NULL); gpio_request(GPIO_FN_VIO0_D1, NULL); gpio_request(GPIO_FN_VIO0_D0, NULL); gpio_request(GPIO_FN_VIO0_VD, NULL); gpio_request(GPIO_FN_VIO0_CLK, NULL); gpio_request(GPIO_FN_VIO0_FLD, NULL); gpio_request(GPIO_FN_VIO0_HD, NULL); platform_resource_setup_memory(&ceu0_device, "ceu0", 4 << 20); /* enable CEU1 */ gpio_request(GPIO_FN_VIO1_D7, NULL); gpio_request(GPIO_FN_VIO1_D6, NULL); gpio_request(GPIO_FN_VIO1_D5, NULL); gpio_request(GPIO_FN_VIO1_D4, NULL); gpio_request(GPIO_FN_VIO1_D3, NULL); gpio_request(GPIO_FN_VIO1_D2, NULL); gpio_request(GPIO_FN_VIO1_D1, NULL); gpio_request(GPIO_FN_VIO1_D0, NULL); gpio_request(GPIO_FN_VIO1_FLD, NULL); gpio_request(GPIO_FN_VIO1_HD, NULL); gpio_request(GPIO_FN_VIO1_VD, NULL); gpio_request(GPIO_FN_VIO1_CLK, NULL); platform_resource_setup_memory(&ceu1_device, "ceu1", 4 << 20); /* KEYSC */ gpio_request(GPIO_FN_KEYOUT5_IN5, NULL); gpio_request(GPIO_FN_KEYOUT4_IN6, NULL); gpio_request(GPIO_FN_KEYIN4, NULL); 
gpio_request(GPIO_FN_KEYIN3, NULL); gpio_request(GPIO_FN_KEYIN2, NULL); gpio_request(GPIO_FN_KEYIN1, NULL); gpio_request(GPIO_FN_KEYIN0, NULL); gpio_request(GPIO_FN_KEYOUT3, NULL); gpio_request(GPIO_FN_KEYOUT2, NULL); gpio_request(GPIO_FN_KEYOUT1, NULL); gpio_request(GPIO_FN_KEYOUT0, NULL); /* enable FSI */ gpio_request(GPIO_FN_FSIMCKA, NULL); gpio_request(GPIO_FN_FSIIASD, NULL); gpio_request(GPIO_FN_FSIOASD, NULL); gpio_request(GPIO_FN_FSIIABCK, NULL); gpio_request(GPIO_FN_FSIIALRCK, NULL); gpio_request(GPIO_FN_FSIOABCK, NULL); gpio_request(GPIO_FN_FSIOALRCK, NULL); gpio_request(GPIO_FN_CLKAUDIOAO, NULL); /* set SPU2 clock to 83.4 MHz */ clk = clk_get(NULL, "spu_clk"); if (!IS_ERR(clk)) { clk_set_rate(clk, clk_round_rate(clk, 83333333)); clk_put(clk); } /* change parent of FSI A */ clk = clk_get(NULL, "fsia_clk"); if (!IS_ERR(clk)) { /* 48kHz dummy clock was used to make sure 1/1 divide */ clk_set_rate(&sh7724_fsimcka_clk, 48000); clk_set_parent(clk, &sh7724_fsimcka_clk); clk_set_rate(clk, 48000); clk_put(clk); } /* SDHI0 connected to cn7 */ gpio_request(GPIO_FN_SDHI0CD, NULL); gpio_request(GPIO_FN_SDHI0WP, NULL); gpio_request(GPIO_FN_SDHI0D3, NULL); gpio_request(GPIO_FN_SDHI0D2, NULL); gpio_request(GPIO_FN_SDHI0D1, NULL); gpio_request(GPIO_FN_SDHI0D0, NULL); gpio_request(GPIO_FN_SDHI0CMD, NULL); gpio_request(GPIO_FN_SDHI0CLK, NULL); /* SDHI1 connected to cn8 */ gpio_request(GPIO_FN_SDHI1CD, NULL); gpio_request(GPIO_FN_SDHI1WP, NULL); gpio_request(GPIO_FN_SDHI1D3, NULL); gpio_request(GPIO_FN_SDHI1D2, NULL); gpio_request(GPIO_FN_SDHI1D1, NULL); gpio_request(GPIO_FN_SDHI1D0, NULL); gpio_request(GPIO_FN_SDHI1CMD, NULL); gpio_request(GPIO_FN_SDHI1CLK, NULL); /* enable IrDA */ gpio_request(GPIO_FN_IRDA_OUT, NULL); gpio_request(GPIO_FN_IRDA_IN, NULL); /* * enable SH-Eth * * please remove J33 pin from your board !! 
* * ms7724 board should not use GPIO_FN_LNKSTA pin * So, This time PTX5 is set to input pin */ gpio_request(GPIO_FN_RMII_RXD0, NULL); gpio_request(GPIO_FN_RMII_RXD1, NULL); gpio_request(GPIO_FN_RMII_TXD0, NULL); gpio_request(GPIO_FN_RMII_TXD1, NULL); gpio_request(GPIO_FN_RMII_REF_CLK, NULL); gpio_request(GPIO_FN_RMII_TX_EN, NULL); gpio_request(GPIO_FN_RMII_RX_ER, NULL); gpio_request(GPIO_FN_RMII_CRS_DV, NULL); gpio_request(GPIO_FN_MDIO, NULL); gpio_request(GPIO_FN_MDC, NULL); gpio_request(GPIO_PTX5, NULL); gpio_direction_input(GPIO_PTX5); sh_eth_init(); if (sw & SW41_B) { /* 720p */ lcdc_info.ch[0].lcd_modes = lcdc_720p_modes; lcdc_info.ch[0].num_modes = ARRAY_SIZE(lcdc_720p_modes); } else { /* VGA */ lcdc_info.ch[0].lcd_modes = lcdc_vga_modes; lcdc_info.ch[0].num_modes = ARRAY_SIZE(lcdc_vga_modes); } if (sw & SW41_A) { /* Digital monitor */ lcdc_info.ch[0].interface_type = RGB18; lcdc_info.ch[0].flags = 0; } else { /* Analog monitor */ lcdc_info.ch[0].interface_type = RGB24; lcdc_info.ch[0].flags = LCDC_FLAGS_DWPOL; } /* VOU */ gpio_request(GPIO_FN_DV_D15, NULL); gpio_request(GPIO_FN_DV_D14, NULL); gpio_request(GPIO_FN_DV_D13, NULL); gpio_request(GPIO_FN_DV_D12, NULL); gpio_request(GPIO_FN_DV_D11, NULL); gpio_request(GPIO_FN_DV_D10, NULL); gpio_request(GPIO_FN_DV_D9, NULL); gpio_request(GPIO_FN_DV_D8, NULL); gpio_request(GPIO_FN_DV_CLKI, NULL); gpio_request(GPIO_FN_DV_CLK, NULL); gpio_request(GPIO_FN_DV_VSYNC, NULL); gpio_request(GPIO_FN_DV_HSYNC, NULL); return platform_add_devices(ms7724se_devices, ARRAY_SIZE(ms7724se_devices)); } device_initcall(devices_setup); static struct sh_machine_vector mv_ms7724se __initmv = { .mv_name = "ms7724se", .mv_init_irq = init_se7724_IRQ, .mv_nr_irqs = SE7724_FPGA_IRQ_BASE + SE7724_FPGA_IRQ_NR, };
gpl-2.0
Orion116/kernel_samsung_lt03wifi
drivers/power/gpio-charger.c
4928
4993
/* * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> * Driver for chargers which report their online status through a GPIO pin * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/device.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/slab.h> #include <linux/power/gpio-charger.h> struct gpio_charger { const struct gpio_charger_platform_data *pdata; unsigned int irq; struct power_supply charger; }; static irqreturn_t gpio_charger_irq(int irq, void *devid) { struct power_supply *charger = devid; power_supply_changed(charger); return IRQ_HANDLED; } static inline struct gpio_charger *psy_to_gpio_charger(struct power_supply *psy) { return container_of(psy, struct gpio_charger, charger); } static int gpio_charger_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct gpio_charger *gpio_charger = psy_to_gpio_charger(psy); const struct gpio_charger_platform_data *pdata = gpio_charger->pdata; switch (psp) { case POWER_SUPPLY_PROP_ONLINE: val->intval = gpio_get_value(pdata->gpio); val->intval ^= pdata->gpio_active_low; break; default: return -EINVAL; } return 0; } static enum power_supply_property gpio_charger_properties[] = { POWER_SUPPLY_PROP_ONLINE, }; static int __devinit gpio_charger_probe(struct platform_device *pdev) { const struct gpio_charger_platform_data *pdata = pdev->dev.platform_data; struct gpio_charger *gpio_charger; struct 
power_supply *charger; int ret; int irq; if (!pdata) { dev_err(&pdev->dev, "No platform data\n"); return -EINVAL; } if (!gpio_is_valid(pdata->gpio)) { dev_err(&pdev->dev, "Invalid gpio pin\n"); return -EINVAL; } gpio_charger = kzalloc(sizeof(*gpio_charger), GFP_KERNEL); if (!gpio_charger) { dev_err(&pdev->dev, "Failed to alloc driver structure\n"); return -ENOMEM; } charger = &gpio_charger->charger; charger->name = pdata->name ? pdata->name : "gpio-charger"; charger->type = pdata->type; charger->properties = gpio_charger_properties; charger->num_properties = ARRAY_SIZE(gpio_charger_properties); charger->get_property = gpio_charger_get_property; charger->supplied_to = pdata->supplied_to; charger->num_supplicants = pdata->num_supplicants; ret = gpio_request(pdata->gpio, dev_name(&pdev->dev)); if (ret) { dev_err(&pdev->dev, "Failed to request gpio pin: %d\n", ret); goto err_free; } ret = gpio_direction_input(pdata->gpio); if (ret) { dev_err(&pdev->dev, "Failed to set gpio to input: %d\n", ret); goto err_gpio_free; } gpio_charger->pdata = pdata; ret = power_supply_register(&pdev->dev, charger); if (ret < 0) { dev_err(&pdev->dev, "Failed to register power supply: %d\n", ret); goto err_gpio_free; } irq = gpio_to_irq(pdata->gpio); if (irq > 0) { ret = request_any_context_irq(irq, gpio_charger_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), charger); if (ret < 0) dev_warn(&pdev->dev, "Failed to request irq: %d\n", ret); else gpio_charger->irq = irq; } platform_set_drvdata(pdev, gpio_charger); return 0; err_gpio_free: gpio_free(pdata->gpio); err_free: kfree(gpio_charger); return ret; } static int __devexit gpio_charger_remove(struct platform_device *pdev) { struct gpio_charger *gpio_charger = platform_get_drvdata(pdev); if (gpio_charger->irq) free_irq(gpio_charger->irq, &gpio_charger->charger); power_supply_unregister(&gpio_charger->charger); gpio_free(gpio_charger->pdata->gpio); platform_set_drvdata(pdev, NULL); kfree(gpio_charger); return 0; } 
#ifdef CONFIG_PM_SLEEP static int gpio_charger_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct gpio_charger *gpio_charger = platform_get_drvdata(pdev); power_supply_changed(&gpio_charger->charger); return 0; } #endif static SIMPLE_DEV_PM_OPS(gpio_charger_pm_ops, NULL, gpio_charger_resume); static struct platform_driver gpio_charger_driver = { .probe = gpio_charger_probe, .remove = __devexit_p(gpio_charger_remove), .driver = { .name = "gpio-charger", .owner = THIS_MODULE, .pm = &gpio_charger_pm_ops, }, }; module_platform_driver(gpio_charger_driver); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_DESCRIPTION("Driver for chargers which report their online status through a GPIO"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:gpio-charger");
gpl-2.0
VanirAOSP/kernel_htc_m7
drivers/staging/ramster/cluster/nodemanager.c
4928
25030
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/configfs.h> #include "tcp.h" #include "nodemanager.h" #include "heartbeat.h" #include "masklog.h" /* for now we operate under the assertion that there can be only one * cluster active at a time. 
Changing this will require trickling * cluster references throughout where nodes are looked up */ struct r2nm_cluster *r2nm_single_cluster; char *r2nm_fence_method_desc[R2NM_FENCE_METHODS] = { "reset", /* R2NM_FENCE_RESET */ "panic", /* R2NM_FENCE_PANIC */ }; struct r2nm_node *r2nm_get_node_by_num(u8 node_num) { struct r2nm_node *node = NULL; if (node_num >= R2NM_MAX_NODES || r2nm_single_cluster == NULL) goto out; read_lock(&r2nm_single_cluster->cl_nodes_lock); node = r2nm_single_cluster->cl_nodes[node_num]; if (node) config_item_get(&node->nd_item); read_unlock(&r2nm_single_cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(r2nm_get_node_by_num); int r2nm_configured_node_map(unsigned long *map, unsigned bytes) { struct r2nm_cluster *cluster = r2nm_single_cluster; BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); if (cluster == NULL) return -EINVAL; read_lock(&cluster->cl_nodes_lock); memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); read_unlock(&cluster->cl_nodes_lock); return 0; } EXPORT_SYMBOL_GPL(r2nm_configured_node_map); static struct r2nm_node *r2nm_node_ip_tree_lookup(struct r2nm_cluster *cluster, __be32 ip_needle, struct rb_node ***ret_p, struct rb_node **ret_parent) { struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; struct rb_node *parent = NULL; struct r2nm_node *node, *ret = NULL; while (*p) { int cmp; parent = *p; node = rb_entry(parent, struct r2nm_node, nd_ip_node); cmp = memcmp(&ip_needle, &node->nd_ipv4_address, sizeof(ip_needle)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { ret = node; break; } } if (ret_p != NULL) *ret_p = p; if (ret_parent != NULL) *ret_parent = parent; return ret; } struct r2nm_node *r2nm_get_node_by_ip(__be32 addr) { struct r2nm_node *node = NULL; struct r2nm_cluster *cluster = r2nm_single_cluster; if (cluster == NULL) goto out; read_lock(&cluster->cl_nodes_lock); node = r2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); if (node) 
config_item_get(&node->nd_item); read_unlock(&cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(r2nm_get_node_by_ip); void r2nm_node_put(struct r2nm_node *node) { config_item_put(&node->nd_item); } EXPORT_SYMBOL_GPL(r2nm_node_put); void r2nm_node_get(struct r2nm_node *node) { config_item_get(&node->nd_item); } EXPORT_SYMBOL_GPL(r2nm_node_get); u8 r2nm_this_node(void) { u8 node_num = R2NM_MAX_NODES; if (r2nm_single_cluster && r2nm_single_cluster->cl_has_local) node_num = r2nm_single_cluster->cl_local_node; return node_num; } EXPORT_SYMBOL_GPL(r2nm_this_node); /* node configfs bits */ static struct r2nm_cluster *to_r2nm_cluster(struct config_item *item) { return item ? container_of(to_config_group(item), struct r2nm_cluster, cl_group) : NULL; } static struct r2nm_node *to_r2nm_node(struct config_item *item) { return item ? container_of(item, struct r2nm_node, nd_item) : NULL; } static void r2nm_node_release(struct config_item *item) { struct r2nm_node *node = to_r2nm_node(item); kfree(node); } static ssize_t r2nm_node_num_read(struct r2nm_node *node, char *page) { return sprintf(page, "%d\n", node->nd_num); } static struct r2nm_cluster *to_r2nm_cluster_from_node(struct r2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == r2nm_cluster->r2nm_node_group->r2nm_node */ return to_r2nm_cluster(node->nd_item.ci_parent->ci_parent); } enum { R2NM_NODE_ATTR_NUM = 0, R2NM_NODE_ATTR_PORT, R2NM_NODE_ATTR_ADDRESS, R2NM_NODE_ATTR_LOCAL, }; static ssize_t r2nm_node_num_write(struct r2nm_node *node, const char *page, size_t count) { struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; int err; err = kstrtoul(p, 10, &tmp); if (err) return err; if (tmp >= R2NM_MAX_NODES) return -ERANGE; /* once we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to connect to this node.. 
make sure that they've been set * before writing the node attribute? */ if (!test_bit(R2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(R2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) p = NULL; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); if (p == NULL) return -EEXIST; return count; } static ssize_t r2nm_node_ipv4_port_read(struct r2nm_node *node, char *page) { return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port)); } static ssize_t r2nm_node_ipv4_port_write(struct r2nm_node *node, const char *page, size_t count) { unsigned long tmp; char *p = (char *)page; int err; err = kstrtoul(p, 10, &tmp); if (err) return err; if (tmp == 0) return -EINVAL; if (tmp >= (u16)-1) return -ERANGE; node->nd_ipv4_port = htons(tmp); return count; } static ssize_t r2nm_node_ipv4_address_read(struct r2nm_node *node, char *page) { return sprintf(page, "%pI4\n", &node->nd_ipv4_address); } static ssize_t r2nm_node_ipv4_address_write(struct r2nm_node *node, const char *page, size_t count) { struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node); int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); if (ret != 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } ret = 0; write_lock(&cluster->cl_nodes_lock); if (r2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) ret = -EEXIST; else { rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; } static ssize_t r2nm_node_local_read(struct 
r2nm_node *node, char *page) { return sprintf(page, "%d\n", node->nd_local); } static ssize_t r2nm_node_local_write(struct r2nm_node *node, const char *page, size_t count) { struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; ssize_t ret; int err; err = kstrtoul(p, 10, &tmp); if (err) return err; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(R2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(R2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(R2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) return -EBUSY; /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = r2net_start_listening(node); if (ret) return ret; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { r2net_stop_listening(node); cluster->cl_local_node = R2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } return count; } struct r2nm_node_attribute { struct configfs_attribute attr; ssize_t (*show)(struct r2nm_node *, char *); ssize_t (*store)(struct r2nm_node *, const char *, size_t); }; static struct r2nm_node_attribute r2nm_node_attr_num = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "num", .ca_mode = S_IRUGO | S_IWUSR }, .show = r2nm_node_num_read, .store = r2nm_node_num_write, }; static struct r2nm_node_attribute r2nm_node_attr_ipv4_port = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "ipv4_port", .ca_mode = S_IRUGO | S_IWUSR }, .show = r2nm_node_ipv4_port_read, .store = r2nm_node_ipv4_port_write, }; static struct 
r2nm_node_attribute r2nm_node_attr_ipv4_address = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "ipv4_address", .ca_mode = S_IRUGO | S_IWUSR }, .show = r2nm_node_ipv4_address_read, .store = r2nm_node_ipv4_address_write, }; static struct r2nm_node_attribute r2nm_node_attr_local = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "local", .ca_mode = S_IRUGO | S_IWUSR }, .show = r2nm_node_local_read, .store = r2nm_node_local_write, }; static struct configfs_attribute *r2nm_node_attrs[] = { [R2NM_NODE_ATTR_NUM] = &r2nm_node_attr_num.attr, [R2NM_NODE_ATTR_PORT] = &r2nm_node_attr_ipv4_port.attr, [R2NM_NODE_ATTR_ADDRESS] = &r2nm_node_attr_ipv4_address.attr, [R2NM_NODE_ATTR_LOCAL] = &r2nm_node_attr_local.attr, NULL, }; static int r2nm_attr_index(struct configfs_attribute *attr) { int i; for (i = 0; i < ARRAY_SIZE(r2nm_node_attrs); i++) { if (attr == r2nm_node_attrs[i]) return i; } BUG(); return 0; } static ssize_t r2nm_node_show(struct config_item *item, struct configfs_attribute *attr, char *page) { struct r2nm_node *node = to_r2nm_node(item); struct r2nm_node_attribute *r2nm_node_attr = container_of(attr, struct r2nm_node_attribute, attr); ssize_t ret = 0; if (r2nm_node_attr->show) ret = r2nm_node_attr->show(node, page); return ret; } static ssize_t r2nm_node_store(struct config_item *item, struct configfs_attribute *attr, const char *page, size_t count) { struct r2nm_node *node = to_r2nm_node(item); struct r2nm_node_attribute *r2nm_node_attr = container_of(attr, struct r2nm_node_attribute, attr); ssize_t ret; int attr_index = r2nm_attr_index(attr); if (r2nm_node_attr->store == NULL) { ret = -EINVAL; goto out; } if (test_bit(attr_index, &node->nd_set_attributes)) return -EBUSY; ret = r2nm_node_attr->store(node, page, count); if (ret < count) goto out; set_bit(attr_index, &node->nd_set_attributes); out: return ret; } static struct configfs_item_operations r2nm_node_item_ops = { .release = r2nm_node_release, .show_attribute = r2nm_node_show, .store_attribute = 
r2nm_node_store, }; static struct config_item_type r2nm_node_type = { .ct_item_ops = &r2nm_node_item_ops, .ct_attrs = r2nm_node_attrs, .ct_owner = THIS_MODULE, }; /* node set */ struct r2nm_node_group { struct config_group ns_group; /* some stuff? */ }; #if 0 static struct r2nm_node_group *to_r2nm_node_group(struct config_group *group) { return group ? container_of(group, struct r2nm_node_group, ns_group) : NULL; } #endif struct r2nm_cluster_attribute { struct configfs_attribute attr; ssize_t (*show)(struct r2nm_cluster *, char *); ssize_t (*store)(struct r2nm_cluster *, const char *, size_t); }; static ssize_t r2nm_cluster_attr_write(const char *page, ssize_t count, unsigned int *val) { unsigned long tmp; char *p = (char *)page; int err; err = kstrtoul(p, 10, &tmp); if (err) return err; if (tmp == 0) return -EINVAL; if (tmp >= (u32)-1) return -ERANGE; *val = tmp; return count; } static ssize_t r2nm_cluster_attr_idle_timeout_ms_read( struct r2nm_cluster *cluster, char *page) { return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms); } static ssize_t r2nm_cluster_attr_idle_timeout_ms_write( struct r2nm_cluster *cluster, const char *page, size_t count) { ssize_t ret; unsigned int val = 0; ret = r2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_idle_timeout_ms != val && r2net_num_connected_peers()) { mlog(ML_NOTICE, "r2net: cannot change idle timeout after " "the first peer has agreed to it." 
" %d connected peers\n", r2net_num_connected_peers()); ret = -EINVAL; } else if (val <= cluster->cl_keepalive_delay_ms) { mlog(ML_NOTICE, "r2net: idle timeout must be larger " "than keepalive delay\n"); ret = -EINVAL; } else { cluster->cl_idle_timeout_ms = val; } } return ret; } static ssize_t r2nm_cluster_attr_keepalive_delay_ms_read( struct r2nm_cluster *cluster, char *page) { return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms); } static ssize_t r2nm_cluster_attr_keepalive_delay_ms_write( struct r2nm_cluster *cluster, const char *page, size_t count) { ssize_t ret; unsigned int val = 0; ret = r2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_keepalive_delay_ms != val && r2net_num_connected_peers()) { mlog(ML_NOTICE, "r2net: cannot change keepalive delay after" " the first peer has agreed to it." " %d connected peers\n", r2net_num_connected_peers()); ret = -EINVAL; } else if (val >= cluster->cl_idle_timeout_ms) { mlog(ML_NOTICE, "r2net: keepalive delay must be " "smaller than idle timeout\n"); ret = -EINVAL; } else { cluster->cl_keepalive_delay_ms = val; } } return ret; } static ssize_t r2nm_cluster_attr_reconnect_delay_ms_read( struct r2nm_cluster *cluster, char *page) { return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms); } static ssize_t r2nm_cluster_attr_reconnect_delay_ms_write( struct r2nm_cluster *cluster, const char *page, size_t count) { return r2nm_cluster_attr_write(page, count, &cluster->cl_reconnect_delay_ms); } static ssize_t r2nm_cluster_attr_fence_method_read( struct r2nm_cluster *cluster, char *page) { ssize_t ret = 0; if (cluster) ret = sprintf(page, "%s\n", r2nm_fence_method_desc[cluster->cl_fence_method]); return ret; } static ssize_t r2nm_cluster_attr_fence_method_write( struct r2nm_cluster *cluster, const char *page, size_t count) { unsigned int i; if (page[count - 1] != '\n') goto bail; for (i = 0; i < R2NM_FENCE_METHODS; ++i) { if (count != strlen(r2nm_fence_method_desc[i]) + 1) continue; if 
(strncasecmp(page, r2nm_fence_method_desc[i], count - 1)) continue; if (cluster->cl_fence_method != i) { printk(KERN_INFO "ramster: Changing fence method to %s\n", r2nm_fence_method_desc[i]); cluster->cl_fence_method = i; } return count; } bail: return -EINVAL; } static struct r2nm_cluster_attribute r2nm_cluster_attr_idle_timeout_ms = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "idle_timeout_ms", .ca_mode = S_IRUGO | S_IWUSR }, .show = r2nm_cluster_attr_idle_timeout_ms_read, .store = r2nm_cluster_attr_idle_timeout_ms_write, }; static struct r2nm_cluster_attribute r2nm_cluster_attr_keepalive_delay_ms = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "keepalive_delay_ms", .ca_mode = S_IRUGO | S_IWUSR }, .show = r2nm_cluster_attr_keepalive_delay_ms_read, .store = r2nm_cluster_attr_keepalive_delay_ms_write, }; static struct r2nm_cluster_attribute r2nm_cluster_attr_reconnect_delay_ms = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "reconnect_delay_ms", .ca_mode = S_IRUGO | S_IWUSR }, .show = r2nm_cluster_attr_reconnect_delay_ms_read, .store = r2nm_cluster_attr_reconnect_delay_ms_write, }; static struct r2nm_cluster_attribute r2nm_cluster_attr_fence_method = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "fence_method", .ca_mode = S_IRUGO | S_IWUSR }, .show = r2nm_cluster_attr_fence_method_read, .store = r2nm_cluster_attr_fence_method_write, }; static struct configfs_attribute *r2nm_cluster_attrs[] = { &r2nm_cluster_attr_idle_timeout_ms.attr, &r2nm_cluster_attr_keepalive_delay_ms.attr, &r2nm_cluster_attr_reconnect_delay_ms.attr, &r2nm_cluster_attr_fence_method.attr, NULL, }; static ssize_t r2nm_cluster_show(struct config_item *item, struct configfs_attribute *attr, char *page) { struct r2nm_cluster *cluster = to_r2nm_cluster(item); struct r2nm_cluster_attribute *r2nm_cluster_attr = container_of(attr, struct r2nm_cluster_attribute, attr); ssize_t ret = 0; if (r2nm_cluster_attr->show) ret = r2nm_cluster_attr->show(cluster, page); return ret; } static ssize_t 
r2nm_cluster_store(struct config_item *item, struct configfs_attribute *attr, const char *page, size_t count) { struct r2nm_cluster *cluster = to_r2nm_cluster(item); struct r2nm_cluster_attribute *r2nm_cluster_attr = container_of(attr, struct r2nm_cluster_attribute, attr); ssize_t ret; if (r2nm_cluster_attr->store == NULL) { ret = -EINVAL; goto out; } ret = r2nm_cluster_attr->store(cluster, page, count); if (ret < count) goto out; out: return ret; } static struct config_item *r2nm_node_group_make_item(struct config_group *group, const char *name) { struct r2nm_node *node = NULL; if (strlen(name) > R2NM_MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); node = kzalloc(sizeof(struct r2nm_node), GFP_KERNEL); if (node == NULL) return ERR_PTR(-ENOMEM); strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ config_item_init_type_name(&node->nd_item, name, &r2nm_node_type); spin_lock_init(&node->nd_lock); mlog(ML_CLUSTER, "r2nm: Registering node %s\n", name); return &node->nd_item; } static void r2nm_node_group_drop_item(struct config_group *group, struct config_item *item) { struct r2nm_node *node = to_r2nm_node(item); struct r2nm_cluster *cluster = to_r2nm_cluster(group->cg_item.ci_parent); r2net_disconnect_node(node); if (cluster->cl_has_local && (cluster->cl_local_node == node->nd_num)) { cluster->cl_has_local = 0; cluster->cl_local_node = R2NM_INVALID_NODE_NUM; r2net_stop_listening(node); } /* XXX call into net to stop this node from trading messages */ write_lock(&cluster->cl_nodes_lock); /* XXX sloppy */ if (node->nd_ipv4_address) rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); /* nd_num might be 0 if the node number hasn't been set.. 
*/ if (cluster->cl_nodes[node->nd_num] == node) { cluster->cl_nodes[node->nd_num] = NULL; clear_bit(node->nd_num, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); mlog(ML_CLUSTER, "r2nm: Unregistered node %s\n", config_item_name(&node->nd_item)); config_item_put(item); } static struct configfs_group_operations r2nm_node_group_group_ops = { .make_item = r2nm_node_group_make_item, .drop_item = r2nm_node_group_drop_item, }; static struct config_item_type r2nm_node_group_type = { .ct_group_ops = &r2nm_node_group_group_ops, .ct_owner = THIS_MODULE, }; /* cluster */ static void r2nm_cluster_release(struct config_item *item) { struct r2nm_cluster *cluster = to_r2nm_cluster(item); kfree(cluster->cl_group.default_groups); kfree(cluster); } static struct configfs_item_operations r2nm_cluster_item_ops = { .release = r2nm_cluster_release, .show_attribute = r2nm_cluster_show, .store_attribute = r2nm_cluster_store, }; static struct config_item_type r2nm_cluster_type = { .ct_item_ops = &r2nm_cluster_item_ops, .ct_attrs = r2nm_cluster_attrs, .ct_owner = THIS_MODULE, }; /* cluster set */ struct r2nm_cluster_group { struct configfs_subsystem cs_subsys; /* some stuff? */ }; #if 0 static struct r2nm_cluster_group * to_r2nm_cluster_group(struct config_group *group) { return group ? 
container_of(to_configfs_subsystem(group), struct r2nm_cluster_group, cs_subsys) : NULL; } #endif static struct config_group * r2nm_cluster_group_make_group(struct config_group *group, const char *name) { struct r2nm_cluster *cluster = NULL; struct r2nm_node_group *ns = NULL; struct config_group *r2hb_group = NULL, *ret = NULL; void *defs = NULL; /* this runs under the parent dir's i_mutex; there can be only * one caller in here at a time */ if (r2nm_single_cluster) return ERR_PTR(-ENOSPC); cluster = kzalloc(sizeof(struct r2nm_cluster), GFP_KERNEL); ns = kzalloc(sizeof(struct r2nm_node_group), GFP_KERNEL); defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL); r2hb_group = r2hb_alloc_hb_set(); if (cluster == NULL || ns == NULL || r2hb_group == NULL || defs == NULL) goto out; config_group_init_type_name(&cluster->cl_group, name, &r2nm_cluster_type); config_group_init_type_name(&ns->ns_group, "node", &r2nm_node_group_type); cluster->cl_group.default_groups = defs; cluster->cl_group.default_groups[0] = &ns->ns_group; cluster->cl_group.default_groups[1] = r2hb_group; cluster->cl_group.default_groups[2] = NULL; rwlock_init(&cluster->cl_nodes_lock); cluster->cl_node_ip_tree = RB_ROOT; cluster->cl_reconnect_delay_ms = R2NET_RECONNECT_DELAY_MS_DEFAULT; cluster->cl_idle_timeout_ms = R2NET_IDLE_TIMEOUT_MS_DEFAULT; cluster->cl_keepalive_delay_ms = R2NET_KEEPALIVE_DELAY_MS_DEFAULT; cluster->cl_fence_method = R2NM_FENCE_RESET; ret = &cluster->cl_group; r2nm_single_cluster = cluster; out: if (ret == NULL) { kfree(cluster); kfree(ns); r2hb_free_hb_set(r2hb_group); kfree(defs); ret = ERR_PTR(-ENOMEM); } return ret; } static void r2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item) { struct r2nm_cluster *cluster = to_r2nm_cluster(item); int i; struct config_item *killme; BUG_ON(r2nm_single_cluster != cluster); r2nm_single_cluster = NULL; for (i = 0; cluster->cl_group.default_groups[i]; i++) { killme = 
&cluster->cl_group.default_groups[i]->cg_item; cluster->cl_group.default_groups[i] = NULL; config_item_put(killme); } config_item_put(item); } static struct configfs_group_operations r2nm_cluster_group_group_ops = { .make_group = r2nm_cluster_group_make_group, .drop_item = r2nm_cluster_group_drop_item, }; static struct config_item_type r2nm_cluster_group_type = { .ct_group_ops = &r2nm_cluster_group_group_ops, .ct_owner = THIS_MODULE, }; static struct r2nm_cluster_group r2nm_cluster_group = { .cs_subsys = { .su_group = { .cg_item = { .ci_namebuf = "cluster", .ci_type = &r2nm_cluster_group_type, }, }, }, }; int r2nm_depend_item(struct config_item *item) { return configfs_depend_item(&r2nm_cluster_group.cs_subsys, item); } void r2nm_undepend_item(struct config_item *item) { configfs_undepend_item(&r2nm_cluster_group.cs_subsys, item); } int r2nm_depend_this_node(void) { int ret = 0; struct r2nm_node *local_node; local_node = r2nm_get_node_by_num(r2nm_this_node()); if (!local_node) { ret = -EINVAL; goto out; } ret = r2nm_depend_item(&local_node->nd_item); r2nm_node_put(local_node); out: return ret; } void r2nm_undepend_this_node(void) { struct r2nm_node *local_node; local_node = r2nm_get_node_by_num(r2nm_this_node()); BUG_ON(!local_node); r2nm_undepend_item(&local_node->nd_item); r2nm_node_put(local_node); } static void __exit exit_r2nm(void) { /* XXX sync with hb callbacks and shut down hb? 
*/ r2net_unregister_hb_callbacks(); configfs_unregister_subsystem(&r2nm_cluster_group.cs_subsys); r2net_exit(); r2hb_exit(); } static int __init init_r2nm(void) { int ret = -1; ret = r2hb_init(); if (ret) goto out; ret = r2net_init(); if (ret) goto out_r2hb; ret = r2net_register_hb_callbacks(); if (ret) goto out_r2net; config_group_init(&r2nm_cluster_group.cs_subsys.su_group); mutex_init(&r2nm_cluster_group.cs_subsys.su_mutex); ret = configfs_register_subsystem(&r2nm_cluster_group.cs_subsys); if (ret) { printk(KERN_ERR "nodemanager: Registration returned %d\n", ret); goto out_callbacks; } if (!ret) goto out; configfs_unregister_subsystem(&r2nm_cluster_group.cs_subsys); out_callbacks: r2net_unregister_hb_callbacks(); out_r2net: r2net_exit(); out_r2hb: r2hb_exit(); out: return ret; } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); module_init(init_r2nm) module_exit(exit_r2nm)
gpl-2.0
Renzo-Olivares/android_kernel_htc_m7wlv
drivers/net/wireless/ath/carl9170/debug.c
5184
24159
/* * Atheros CARL9170 driver * * debug(fs) probing * * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, see * http://www.gnu.org/licenses/. * * This file incorporates work covered by the following copyright and * permission notice: * Copyright (c) 2008-2009 Atheros Communications, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/vmalloc.h> #include "carl9170.h" #include "cmd.h" #define ADD(buf, off, max, fmt, args...) 
\ off += snprintf(&buf[off], max - off, fmt, ##args); struct carl9170_debugfs_fops { unsigned int read_bufsize; umode_t attr; char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize, ssize_t *len); ssize_t (*write)(struct ar9170 *aru, const char *buf, size_t size); const struct file_operations fops; enum carl9170_device_state req_dev_state; }; static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct carl9170_debugfs_fops *dfops; struct ar9170 *ar; char *buf = NULL, *res_buf = NULL; ssize_t ret = 0; int err = 0; if (!count) return 0; ar = file->private_data; if (!ar) return -ENODEV; dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops); if (!dfops->read) return -ENOSYS; if (dfops->read_bufsize) { buf = vmalloc(dfops->read_bufsize); if (!buf) return -ENOMEM; } mutex_lock(&ar->mutex); if (!CHK_DEV_STATE(ar, dfops->req_dev_state)) { err = -ENODEV; res_buf = buf; goto out_free; } res_buf = dfops->read(ar, buf, dfops->read_bufsize, &ret); if (ret > 0) err = simple_read_from_buffer(userbuf, count, ppos, res_buf, ret); else err = ret; WARN_ON_ONCE(dfops->read_bufsize && (res_buf != buf)); out_free: vfree(res_buf); mutex_unlock(&ar->mutex); return err; } static ssize_t carl9170_debugfs_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct carl9170_debugfs_fops *dfops; struct ar9170 *ar; char *buf = NULL; int err = 0; if (!count) return 0; if (count > PAGE_SIZE) return -E2BIG; ar = file->private_data; if (!ar) return -ENODEV; dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops); if (!dfops->write) return -ENOSYS; buf = vmalloc(count); if (!buf) return -ENOMEM; if (copy_from_user(buf, userbuf, count)) { err = -EFAULT; goto out_free; } if (mutex_trylock(&ar->mutex) == 0) { err = -EAGAIN; goto out_free; } if (!CHK_DEV_STATE(ar, dfops->req_dev_state)) { err = -ENODEV; goto out_unlock; } err = dfops->write(ar, buf, count); if (err) goto out_unlock; 
out_unlock: mutex_unlock(&ar->mutex); out_free: vfree(buf); return err; } #define __DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, \ _attr, _dstate) \ static const struct carl9170_debugfs_fops carl_debugfs_##name ##_ops = {\ .read_bufsize = _read_bufsize, \ .read = _read, \ .write = _write, \ .attr = _attr, \ .req_dev_state = _dstate, \ .fops = { \ .open = simple_open, \ .read = carl9170_debugfs_read, \ .write = carl9170_debugfs_write, \ .owner = THIS_MODULE \ }, \ } #define DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, _attr) \ __DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, \ _attr, CARL9170_STARTED) \ #define DEBUGFS_DECLARE_RO_FILE(name, _read_bufsize) \ DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \ NULL, _read_bufsize, S_IRUSR) #define DEBUGFS_DECLARE_WO_FILE(name) \ DEBUGFS_DECLARE_FILE(name, NULL, carl9170_debugfs_##name ##_write,\ 0, S_IWUSR) #define DEBUGFS_DECLARE_RW_FILE(name, _read_bufsize) \ DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \ carl9170_debugfs_##name ##_write, \ _read_bufsize, S_IRUSR | S_IWUSR) #define __DEBUGFS_DECLARE_RW_FILE(name, _read_bufsize, _dstate) \ __DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \ carl9170_debugfs_##name ##_write, \ _read_bufsize, S_IRUSR | S_IWUSR, _dstate) #define DEBUGFS_READONLY_FILE(name, _read_bufsize, fmt, value...) 
\ static char *carl9170_debugfs_ ##name ## _read(struct ar9170 *ar, \ char *buf, size_t buf_size,\ ssize_t *len) \ { \ ADD(buf, *len, buf_size, fmt "\n", ##value); \ return buf; \ } \ DEBUGFS_DECLARE_RO_FILE(name, _read_bufsize) static char *carl9170_debugfs_mem_usage_read(struct ar9170 *ar, char *buf, size_t bufsize, ssize_t *len) { ADD(buf, *len, bufsize, "jar: ["); spin_lock_bh(&ar->mem_lock); *len += bitmap_scnprintf(&buf[*len], bufsize - *len, ar->mem_bitmap, ar->fw.mem_blocks); ADD(buf, *len, bufsize, "]\n"); ADD(buf, *len, bufsize, "cookies: used:%3d / total:%3d, allocs:%d\n", bitmap_weight(ar->mem_bitmap, ar->fw.mem_blocks), ar->fw.mem_blocks, atomic_read(&ar->mem_allocs)); ADD(buf, *len, bufsize, "memory: free:%3d (%3d KiB) / total:%3d KiB)\n", atomic_read(&ar->mem_free_blocks), (atomic_read(&ar->mem_free_blocks) * ar->fw.mem_block_size) / 1024, (ar->fw.mem_blocks * ar->fw.mem_block_size) / 1024); spin_unlock_bh(&ar->mem_lock); return buf; } DEBUGFS_DECLARE_RO_FILE(mem_usage, 512); static char *carl9170_debugfs_qos_stat_read(struct ar9170 *ar, char *buf, size_t bufsize, ssize_t *len) { ADD(buf, *len, bufsize, "%s QoS AC\n", modparam_noht ? 
"Hardware" : "Software"); ADD(buf, *len, bufsize, "[ VO VI " " BE BK ]\n"); spin_lock_bh(&ar->tx_stats_lock); ADD(buf, *len, bufsize, "[length/limit length/limit " "length/limit length/limit ]\n" "[ %3d/%3d %3d/%3d " " %3d/%3d %3d/%3d ]\n\n", ar->tx_stats[0].len, ar->tx_stats[0].limit, ar->tx_stats[1].len, ar->tx_stats[1].limit, ar->tx_stats[2].len, ar->tx_stats[2].limit, ar->tx_stats[3].len, ar->tx_stats[3].limit); ADD(buf, *len, bufsize, "[ total total " " total total ]\n" "[%10d %10d %10d %10d ]\n\n", ar->tx_stats[0].count, ar->tx_stats[1].count, ar->tx_stats[2].count, ar->tx_stats[3].count); spin_unlock_bh(&ar->tx_stats_lock); ADD(buf, *len, bufsize, "[ pend/waittx pend/waittx " " pend/waittx pend/waittx]\n" "[ %3d/%3d %3d/%3d " " %3d/%3d %3d/%3d ]\n\n", skb_queue_len(&ar->tx_pending[0]), skb_queue_len(&ar->tx_status[0]), skb_queue_len(&ar->tx_pending[1]), skb_queue_len(&ar->tx_status[1]), skb_queue_len(&ar->tx_pending[2]), skb_queue_len(&ar->tx_status[2]), skb_queue_len(&ar->tx_pending[3]), skb_queue_len(&ar->tx_status[3])); return buf; } DEBUGFS_DECLARE_RO_FILE(qos_stat, 512); static void carl9170_debugfs_format_frame(struct ar9170 *ar, struct sk_buff *skb, const char *prefix, char *buf, ssize_t *off, ssize_t bufsize) { struct _carl9170_tx_superframe *txc = (void *) skb->data; struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb); struct carl9170_tx_info *arinfo = (void *) txinfo->rate_driver_data; struct ieee80211_hdr *hdr = (void *) txc->frame_data; ADD(buf, *off, bufsize, "%s %p, c:%2x, DA:%pM, sq:%4d, mc:%.4x, " "pc:%.8x, to:%d ms\n", prefix, skb, txc->s.cookie, ieee80211_get_DA(hdr), get_seq_h(hdr), le16_to_cpu(txc->f.mac_control), le32_to_cpu(txc->f.phy_control), jiffies_to_msecs(jiffies - arinfo->timeout)); } static char *carl9170_debugfs_ampdu_state_read(struct ar9170 *ar, char *buf, size_t bufsize, ssize_t *len) { struct carl9170_sta_tid *iter; struct sk_buff *skb; int cnt = 0, fc; int offset; rcu_read_lock(); list_for_each_entry_rcu(iter, 
&ar->tx_ampdu_list, list) { spin_lock_bh(&iter->lock); ADD(buf, *len, bufsize, "Entry: #%2d TID:%1d, BSN:%4d, " "SNX:%4d, HSN:%4d, BAW:%2d, state:%1d, toggles:%d\n", cnt, iter->tid, iter->bsn, iter->snx, iter->hsn, iter->max, iter->state, iter->counter); ADD(buf, *len, bufsize, "\tWindow: ["); *len += bitmap_scnprintf(&buf[*len], bufsize - *len, iter->bitmap, CARL9170_BAW_BITS); #define BM_STR_OFF(offset) \ ((CARL9170_BAW_BITS - (offset) - 1) / 4 + \ (CARL9170_BAW_BITS - (offset) - 1) / 32 + 1) ADD(buf, *len, bufsize, ",W]\n"); offset = BM_STR_OFF(0); ADD(buf, *len, bufsize, "\tBase Seq: %*s\n", offset, "T"); offset = BM_STR_OFF(SEQ_DIFF(iter->snx, iter->bsn)); ADD(buf, *len, bufsize, "\tNext Seq: %*s\n", offset, "W"); offset = BM_STR_OFF(((int)iter->hsn - (int)iter->bsn) % CARL9170_BAW_BITS); ADD(buf, *len, bufsize, "\tLast Seq: %*s\n", offset, "N"); ADD(buf, *len, bufsize, "\tPre-Aggregation reorder buffer: " " currently queued:%d\n", skb_queue_len(&iter->queue)); fc = 0; skb_queue_walk(&iter->queue, skb) { char prefix[32]; snprintf(prefix, sizeof(prefix), "\t\t%3d :", fc); carl9170_debugfs_format_frame(ar, skb, prefix, buf, len, bufsize); fc++; } spin_unlock_bh(&iter->lock); cnt++; } rcu_read_unlock(); return buf; } DEBUGFS_DECLARE_RO_FILE(ampdu_state, 8000); static void carl9170_debugfs_queue_dump(struct ar9170 *ar, char *buf, ssize_t *len, size_t bufsize, struct sk_buff_head *queue) { struct sk_buff *skb; char prefix[16]; int fc = 0; spin_lock_bh(&queue->lock); skb_queue_walk(queue, skb) { snprintf(prefix, sizeof(prefix), "%3d :", fc); carl9170_debugfs_format_frame(ar, skb, prefix, buf, len, bufsize); fc++; } spin_unlock_bh(&queue->lock); } #define DEBUGFS_QUEUE_DUMP(q, qi) \ static char *carl9170_debugfs_##q ##_##qi ##_read(struct ar9170 *ar, \ char *buf, size_t bufsize, ssize_t *len) \ { \ carl9170_debugfs_queue_dump(ar, buf, len, bufsize, &ar->q[qi]); \ return buf; \ } \ DEBUGFS_DECLARE_RO_FILE(q##_##qi, 8000); static char 
*carl9170_debugfs_sta_psm_read(struct ar9170 *ar, char *buf, size_t bufsize, ssize_t *len) { ADD(buf, *len, bufsize, "psm state: %s\n", (ar->ps.off_override ? "FORCE CAM" : (ar->ps.state ? "PSM" : "CAM"))); ADD(buf, *len, bufsize, "sleep duration: %d ms.\n", ar->ps.sleep_ms); ADD(buf, *len, bufsize, "last power-state transition: %d ms ago.\n", jiffies_to_msecs(jiffies - ar->ps.last_action)); ADD(buf, *len, bufsize, "last CAM->PSM transition: %d ms ago.\n", jiffies_to_msecs(jiffies - ar->ps.last_slept)); return buf; } DEBUGFS_DECLARE_RO_FILE(sta_psm, 160); static char *carl9170_debugfs_tx_stuck_read(struct ar9170 *ar, char *buf, size_t bufsize, ssize_t *len) { int i; for (i = 0; i < ar->hw->queues; i++) { ADD(buf, *len, bufsize, "TX queue [%d]: %10d max:%10d ms.\n", i, ieee80211_queue_stopped(ar->hw, i) ? jiffies_to_msecs(jiffies - ar->queue_stop_timeout[i]) : 0, jiffies_to_msecs(ar->max_queue_stop_timeout[i])); ar->max_queue_stop_timeout[i] = 0; } return buf; } DEBUGFS_DECLARE_RO_FILE(tx_stuck, 180); static char *carl9170_debugfs_phy_noise_read(struct ar9170 *ar, char *buf, size_t bufsize, ssize_t *len) { int err; err = carl9170_get_noisefloor(ar); if (err) { *len = err; return buf; } ADD(buf, *len, bufsize, "Chain 0: %10d dBm, ext. chan.:%10d dBm\n", ar->noise[0], ar->noise[2]); ADD(buf, *len, bufsize, "Chain 2: %10d dBm, ext. 
chan.:%10d dBm\n", ar->noise[1], ar->noise[3]); return buf; } DEBUGFS_DECLARE_RO_FILE(phy_noise, 180); static char *carl9170_debugfs_vif_dump_read(struct ar9170 *ar, char *buf, size_t bufsize, ssize_t *len) { struct carl9170_vif_info *iter; int i = 0; ADD(buf, *len, bufsize, "registered VIFs:%d \\ %d\n", ar->vifs, ar->fw.vif_num); ADD(buf, *len, bufsize, "VIF bitmap: ["); *len += bitmap_scnprintf(&buf[*len], bufsize - *len, &ar->vif_bitmap, ar->fw.vif_num); ADD(buf, *len, bufsize, "]\n"); rcu_read_lock(); list_for_each_entry_rcu(iter, &ar->vif_list, list) { struct ieee80211_vif *vif = carl9170_get_vif(iter); ADD(buf, *len, bufsize, "\t%d = [%s VIF, id:%d, type:%x " " mac:%pM %s]\n", i, (carl9170_get_main_vif(ar) == vif ? "Master" : " Slave"), iter->id, vif->type, vif->addr, iter->enable_beacon ? "beaconing " : ""); i++; } rcu_read_unlock(); return buf; } DEBUGFS_DECLARE_RO_FILE(vif_dump, 8000); #define UPDATE_COUNTER(ar, name) ({ \ u32 __tmp[ARRAY_SIZE(name##_regs)]; \ unsigned int __i, __err = -ENODEV; \ \ for (__i = 0; __i < ARRAY_SIZE(name##_regs); __i++) { \ __tmp[__i] = name##_regs[__i].reg; \ ar->debug.stats.name##_counter[__i] = 0; \ } \ \ if (IS_STARTED(ar)) \ __err = carl9170_read_mreg(ar, ARRAY_SIZE(name##_regs), \ __tmp, ar->debug.stats.name##_counter); \ (__err); }) #define TALLY_SUM_UP(ar, name) do { \ unsigned int __i; \ \ for (__i = 0; __i < ARRAY_SIZE(name##_regs); __i++) { \ ar->debug.stats.name##_sum[__i] += \ ar->debug.stats.name##_counter[__i]; \ } \ } while (0) #define DEBUGFS_HW_TALLY_FILE(name, f) \ static char *carl9170_debugfs_##name ## _read(struct ar9170 *ar, \ char *dum, size_t bufsize, ssize_t *ret) \ { \ char *buf; \ int i, max_len, err; \ \ max_len = ARRAY_SIZE(name##_regs) * 80; \ buf = vmalloc(max_len); \ if (!buf) \ return NULL; \ \ err = UPDATE_COUNTER(ar, name); \ if (err) { \ *ret = err; \ return buf; \ } \ \ TALLY_SUM_UP(ar, name); \ \ for (i = 0; i < ARRAY_SIZE(name##_regs); i++) { \ ADD(buf, *ret, max_len, "%22s = %" f "[+%" 
f "]\n", \ name##_regs[i].nreg, ar->debug.stats.name ##_sum[i],\ ar->debug.stats.name ##_counter[i]); \ } \ \ return buf; \ } \ DEBUGFS_DECLARE_RO_FILE(name, 0); #define DEBUGFS_HW_REG_FILE(name, f) \ static char *carl9170_debugfs_##name ## _read(struct ar9170 *ar, \ char *dum, size_t bufsize, ssize_t *ret) \ { \ char *buf; \ int i, max_len, err; \ \ max_len = ARRAY_SIZE(name##_regs) * 80; \ buf = vmalloc(max_len); \ if (!buf) \ return NULL; \ \ err = UPDATE_COUNTER(ar, name); \ if (err) { \ *ret = err; \ return buf; \ } \ \ for (i = 0; i < ARRAY_SIZE(name##_regs); i++) { \ ADD(buf, *ret, max_len, "%22s = %" f "\n", \ name##_regs[i].nreg, \ ar->debug.stats.name##_counter[i]); \ } \ \ return buf; \ } \ DEBUGFS_DECLARE_RO_FILE(name, 0); static ssize_t carl9170_debugfs_hw_ioread32_write(struct ar9170 *ar, const char *buf, size_t count) { int err = 0, i, n = 0, max_len = 32, res; unsigned int reg, tmp; if (!count) return 0; if (count > max_len) return -E2BIG; res = sscanf(buf, "0x%X %d", &reg, &n); if (res < 1) { err = -EINVAL; goto out; } if (res == 1) n = 1; if (n > 15) { err = -EMSGSIZE; goto out; } if ((reg >= 0x280000) || ((reg + (n << 2)) >= 0x280000)) { err = -EADDRNOTAVAIL; goto out; } if (reg & 3) { err = -EINVAL; goto out; } for (i = 0; i < n; i++) { err = carl9170_read_reg(ar, reg + (i << 2), &tmp); if (err) goto out; ar->debug.ring[ar->debug.ring_tail].reg = reg + (i << 2); ar->debug.ring[ar->debug.ring_tail].value = tmp; ar->debug.ring_tail++; ar->debug.ring_tail %= CARL9170_DEBUG_RING_SIZE; } out: return err ? 
err : count; } static char *carl9170_debugfs_hw_ioread32_read(struct ar9170 *ar, char *buf, size_t bufsize, ssize_t *ret) { int i = 0; while (ar->debug.ring_head != ar->debug.ring_tail) { ADD(buf, *ret, bufsize, "%.8x = %.8x\n", ar->debug.ring[ar->debug.ring_head].reg, ar->debug.ring[ar->debug.ring_head].value); ar->debug.ring_head++; ar->debug.ring_head %= CARL9170_DEBUG_RING_SIZE; if (i++ == 64) break; } ar->debug.ring_head = ar->debug.ring_tail; return buf; } DEBUGFS_DECLARE_RW_FILE(hw_ioread32, CARL9170_DEBUG_RING_SIZE * 40); static ssize_t carl9170_debugfs_bug_write(struct ar9170 *ar, const char *buf, size_t count) { int err; if (count < 1) return -EINVAL; switch (buf[0]) { case 'F': ar->needs_full_reset = true; break; case 'R': if (!IS_STARTED(ar)) { err = -EAGAIN; goto out; } ar->needs_full_reset = false; break; case 'M': err = carl9170_mac_reset(ar); if (err < 0) count = err; goto out; case 'P': err = carl9170_set_channel(ar, ar->hw->conf.channel, ar->hw->conf.channel_type, CARL9170_RFI_COLD); if (err < 0) count = err; goto out; default: return -EINVAL; } carl9170_restart(ar, CARL9170_RR_USER_REQUEST); out: return count; } static char *carl9170_debugfs_bug_read(struct ar9170 *ar, char *buf, size_t bufsize, ssize_t *ret) { ADD(buf, *ret, bufsize, "[P]hy reinit, [R]estart, [F]ull usb reset, " "[M]ac reset\n"); ADD(buf, *ret, bufsize, "firmware restarts:%d, last reason:%d\n", ar->restart_counter, ar->last_reason); ADD(buf, *ret, bufsize, "phy reinit errors:%d (%d)\n", ar->total_chan_fail, ar->chan_fail); ADD(buf, *ret, bufsize, "reported firmware errors:%d\n", ar->fw.err_counter); ADD(buf, *ret, bufsize, "reported firmware BUGs:%d\n", ar->fw.bug_counter); ADD(buf, *ret, bufsize, "pending restart requests:%d\n", atomic_read(&ar->pending_restarts)); return buf; } __DEBUGFS_DECLARE_RW_FILE(bug, 400, CARL9170_STOPPED); static const char *const erp_modes[] = { [CARL9170_ERP_INVALID] = "INVALID", [CARL9170_ERP_AUTO] = "Automatic", [CARL9170_ERP_MAC80211] = "Set by 
MAC80211", [CARL9170_ERP_OFF] = "Force Off", [CARL9170_ERP_RTS] = "Force RTS", [CARL9170_ERP_CTS] = "Force CTS" }; static char *carl9170_debugfs_erp_read(struct ar9170 *ar, char *buf, size_t bufsize, ssize_t *ret) { ADD(buf, *ret, bufsize, "ERP Setting: (%d) -> %s\n", ar->erp_mode, erp_modes[ar->erp_mode]); return buf; } static ssize_t carl9170_debugfs_erp_write(struct ar9170 *ar, const char *buf, size_t count) { int res, val; if (count < 1) return -EINVAL; res = sscanf(buf, "%d", &val); if (res != 1) return -EINVAL; if (!((val > CARL9170_ERP_INVALID) && (val < __CARL9170_ERP_NUM))) return -EINVAL; ar->erp_mode = val; return count; } DEBUGFS_DECLARE_RW_FILE(erp, 80); static ssize_t carl9170_debugfs_hw_iowrite32_write(struct ar9170 *ar, const char *buf, size_t count) { int err = 0, max_len = 22, res; u32 reg, val; if (!count) return 0; if (count > max_len) return -E2BIG; res = sscanf(buf, "0x%X 0x%X", &reg, &val); if (res != 2) { err = -EINVAL; goto out; } if (reg <= 0x100000 || reg >= 0x280000) { err = -EADDRNOTAVAIL; goto out; } if (reg & 3) { err = -EINVAL; goto out; } err = carl9170_write_reg(ar, reg, val); if (err) goto out; out: return err ? 
err : count; } DEBUGFS_DECLARE_WO_FILE(hw_iowrite32); DEBUGFS_HW_TALLY_FILE(hw_tx_tally, "u"); DEBUGFS_HW_TALLY_FILE(hw_rx_tally, "u"); DEBUGFS_HW_TALLY_FILE(hw_phy_errors, "u"); DEBUGFS_HW_REG_FILE(hw_wlan_queue, ".8x"); DEBUGFS_HW_REG_FILE(hw_pta_queue, ".8x"); DEBUGFS_HW_REG_FILE(hw_ampdu_info, ".8x"); DEBUGFS_QUEUE_DUMP(tx_status, 0); DEBUGFS_QUEUE_DUMP(tx_status, 1); DEBUGFS_QUEUE_DUMP(tx_status, 2); DEBUGFS_QUEUE_DUMP(tx_status, 3); DEBUGFS_QUEUE_DUMP(tx_pending, 0); DEBUGFS_QUEUE_DUMP(tx_pending, 1); DEBUGFS_QUEUE_DUMP(tx_pending, 2); DEBUGFS_QUEUE_DUMP(tx_pending, 3); DEBUGFS_READONLY_FILE(usb_tx_anch_urbs, 20, "%d", atomic_read(&ar->tx_anch_urbs)); DEBUGFS_READONLY_FILE(usb_rx_anch_urbs, 20, "%d", atomic_read(&ar->rx_anch_urbs)); DEBUGFS_READONLY_FILE(usb_rx_work_urbs, 20, "%d", atomic_read(&ar->rx_work_urbs)); DEBUGFS_READONLY_FILE(usb_rx_pool_urbs, 20, "%d", atomic_read(&ar->rx_pool_urbs)); DEBUGFS_READONLY_FILE(tx_total_queued, 20, "%d", atomic_read(&ar->tx_total_queued)); DEBUGFS_READONLY_FILE(tx_ampdu_scheduler, 20, "%d", atomic_read(&ar->tx_ampdu_scheduler)); DEBUGFS_READONLY_FILE(tx_total_pending, 20, "%d", atomic_read(&ar->tx_total_pending)); DEBUGFS_READONLY_FILE(tx_ampdu_list_len, 20, "%d", ar->tx_ampdu_list_len); DEBUGFS_READONLY_FILE(tx_ampdu_upload, 20, "%d", atomic_read(&ar->tx_ampdu_upload)); DEBUGFS_READONLY_FILE(tx_janitor_last_run, 64, "last run:%d ms ago", jiffies_to_msecs(jiffies - ar->tx_janitor_last_run)); DEBUGFS_READONLY_FILE(tx_dropped, 20, "%d", ar->tx_dropped); DEBUGFS_READONLY_FILE(rx_dropped, 20, "%d", ar->rx_dropped); DEBUGFS_READONLY_FILE(sniffer_enabled, 20, "%d", ar->sniffer_enabled); DEBUGFS_READONLY_FILE(rx_software_decryption, 20, "%d", ar->rx_software_decryption); DEBUGFS_READONLY_FILE(ampdu_factor, 20, "%d", ar->current_factor); DEBUGFS_READONLY_FILE(ampdu_density, 20, "%d", ar->current_density); DEBUGFS_READONLY_FILE(beacon_int, 20, "%d TU", ar->global_beacon_int); DEBUGFS_READONLY_FILE(pretbtt, 20, "%d TU", 
ar->global_pretbtt); void carl9170_debugfs_register(struct ar9170 *ar) { ar->debug_dir = debugfs_create_dir(KBUILD_MODNAME, ar->hw->wiphy->debugfsdir); #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, carl_debugfs_##name ##_ops.attr, \ ar->debug_dir, ar, \ &carl_debugfs_##name ## _ops.fops); DEBUGFS_ADD(usb_tx_anch_urbs); DEBUGFS_ADD(usb_rx_pool_urbs); DEBUGFS_ADD(usb_rx_anch_urbs); DEBUGFS_ADD(usb_rx_work_urbs); DEBUGFS_ADD(tx_total_queued); DEBUGFS_ADD(tx_total_pending); DEBUGFS_ADD(tx_dropped); DEBUGFS_ADD(tx_stuck); DEBUGFS_ADD(tx_ampdu_upload); DEBUGFS_ADD(tx_ampdu_scheduler); DEBUGFS_ADD(tx_ampdu_list_len); DEBUGFS_ADD(rx_dropped); DEBUGFS_ADD(sniffer_enabled); DEBUGFS_ADD(rx_software_decryption); DEBUGFS_ADD(mem_usage); DEBUGFS_ADD(qos_stat); DEBUGFS_ADD(sta_psm); DEBUGFS_ADD(ampdu_state); DEBUGFS_ADD(hw_tx_tally); DEBUGFS_ADD(hw_rx_tally); DEBUGFS_ADD(hw_phy_errors); DEBUGFS_ADD(phy_noise); DEBUGFS_ADD(hw_wlan_queue); DEBUGFS_ADD(hw_pta_queue); DEBUGFS_ADD(hw_ampdu_info); DEBUGFS_ADD(ampdu_density); DEBUGFS_ADD(ampdu_factor); DEBUGFS_ADD(tx_janitor_last_run); DEBUGFS_ADD(tx_status_0); DEBUGFS_ADD(tx_status_1); DEBUGFS_ADD(tx_status_2); DEBUGFS_ADD(tx_status_3); DEBUGFS_ADD(tx_pending_0); DEBUGFS_ADD(tx_pending_1); DEBUGFS_ADD(tx_pending_2); DEBUGFS_ADD(tx_pending_3); DEBUGFS_ADD(hw_ioread32); DEBUGFS_ADD(hw_iowrite32); DEBUGFS_ADD(bug); DEBUGFS_ADD(erp); DEBUGFS_ADD(vif_dump); DEBUGFS_ADD(beacon_int); DEBUGFS_ADD(pretbtt); #undef DEBUGFS_ADD } void carl9170_debugfs_unregister(struct ar9170 *ar) { debugfs_remove_recursive(ar->debug_dir); }
gpl-2.0
jianC/android_kernel_htc_msm7x30-3.0
drivers/pci/pcie/portdrv_acpi.c
8512
1817
/* * PCIe Port Native Services Support, ACPI-Related Part * * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. * * This file is subject to the terms and conditions of the GNU General Public * License V2. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/pci.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/acpi.h> #include <linux/pci-acpi.h> #include <linux/pcieport_if.h> #include "aer/aerdrv.h" #include "../pci.h" /** * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services. * @port: PCIe Port service for a root port or event collector. * @srv_mask: Bit mask of services that can be enabled for @port. * * Invoked when @port is identified as a PCIe port device. To avoid conflicts * with the BIOS PCIe port native services support requires the BIOS to yield * control of these services to the kernel. The mask of services that the BIOS * allows to be enabled for @port is written to @srv_mask. * * NOTE: It turns out that we cannot do that for individual port services * separately, because that would make some systems work incorrectly. */ int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask) { struct acpi_pci_root *root; acpi_handle handle; u32 flags; if (acpi_pci_disabled) return 0; handle = acpi_find_root_bridge_handle(port); if (!handle) return -EINVAL; root = acpi_pci_find_root(handle); if (!root) return -ENODEV; flags = root->osc_control_set; *srv_mask = PCIE_PORT_SERVICE_VC; if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL) *srv_mask |= PCIE_PORT_SERVICE_HP; if (flags & OSC_PCI_EXPRESS_PME_CONTROL) *srv_mask |= PCIE_PORT_SERVICE_PME; if (flags & OSC_PCI_EXPRESS_AER_CONTROL) *srv_mask |= PCIE_PORT_SERVICE_AER; return 0; }
gpl-2.0
tycoo/moto_x_kernel
drivers/media/rc/keymaps/rc-digittrade.c
9536
2708
/*
 * Digittrade DVB-T USB Stick remote controller keytable
 *
 * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Digittrade DVB-T USB Stick remote controller. */
/* Imported from af9015.h.
   Initial keytable was from Alain Kalker <miki@dds.nl> */

/*
 * Digittrade DVB-T USB Stick
 *
 * Scancode -> Linux input keycode pairs; the scancodes are NEC-protocol
 * codes (see .rc_type below).
 */
static struct rc_map_table digittrade[] = {
	{ 0x0000, KEY_9 },
	{ 0x0001, KEY_EPG },		/* EPG */
	{ 0x0002, KEY_VOLUMEDOWN },	/* Vol Dn */
	{ 0x0003, KEY_TEXT },		/* TELETEXT */
	{ 0x0004, KEY_8 },
	{ 0x0005, KEY_MUTE },		/* MUTE */
	{ 0x0006, KEY_POWER2 },		/* POWER */
	{ 0x0009, KEY_ZOOM },		/* FULLSCREEN */
	{ 0x000a, KEY_RECORD },		/* RECORD */
	{ 0x000d, KEY_SUBTITLE },	/* SUBTITLE */
	{ 0x000e, KEY_STOP },		/* STOP */
	{ 0x0010, KEY_OK },		/* RETURN */
	{ 0x0011, KEY_2 },
	{ 0x0012, KEY_4 },
	{ 0x0015, KEY_3 },
	{ 0x0016, KEY_5 },
	{ 0x0017, KEY_CHANNELDOWN },	/* Ch Dn */
	{ 0x0019, KEY_CHANNELUP },	/* CH Up */
	{ 0x001a, KEY_PAUSE },		/* PAUSE */
	{ 0x001b, KEY_1 },
	{ 0x001d, KEY_AUDIO },		/* DUAL SOUND */
	{ 0x001e, KEY_PLAY },		/* PLAY */
	{ 0x001f, KEY_CAMERA },		/* SNAPSHOT */
	{ 0x0040, KEY_VOLUMEUP },	/* Vol Up */
	{ 0x0048, KEY_7 },
	{ 0x004c, KEY_6 },
	{ 0x004d, KEY_PLAYPAUSE },	/* TIMESHIFT */
	{ 0x0054, KEY_0 },
};

/* Wrapper handed to the rc-core keymap registry. */
static struct rc_map_list digittrade_map = {
	.map = {
		.scan    = digittrade,
		.size    = ARRAY_SIZE(digittrade),
		.rc_type = RC_TYPE_NEC,
		.name    = RC_MAP_DIGITTRADE,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_digittrade(void)
{
	return rc_map_register(&digittrade_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_digittrade(void)
{
	rc_map_unregister(&digittrade_map);
}

module_init(init_rc_map_digittrade)
module_exit(exit_rc_map_digittrade)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
gpl-2.0
Elite-Kernels/elite_shamu
drivers/media/rc/keymaps/rc-digittrade.c
9536
2708
/*
 * Digittrade DVB-T USB Stick remote controller keytable
 *
 * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Digittrade DVB-T USB Stick remote controller. */
/* Imported from af9015.h.
   Initial keytable was from Alain Kalker <miki@dds.nl> */

/*
 * Digittrade DVB-T USB Stick
 *
 * Scancode -> Linux input keycode pairs; the scancodes are NEC-protocol
 * codes (see .rc_type below).
 */
static struct rc_map_table digittrade[] = {
	{ 0x0000, KEY_9 },
	{ 0x0001, KEY_EPG },		/* EPG */
	{ 0x0002, KEY_VOLUMEDOWN },	/* Vol Dn */
	{ 0x0003, KEY_TEXT },		/* TELETEXT */
	{ 0x0004, KEY_8 },
	{ 0x0005, KEY_MUTE },		/* MUTE */
	{ 0x0006, KEY_POWER2 },		/* POWER */
	{ 0x0009, KEY_ZOOM },		/* FULLSCREEN */
	{ 0x000a, KEY_RECORD },		/* RECORD */
	{ 0x000d, KEY_SUBTITLE },	/* SUBTITLE */
	{ 0x000e, KEY_STOP },		/* STOP */
	{ 0x0010, KEY_OK },		/* RETURN */
	{ 0x0011, KEY_2 },
	{ 0x0012, KEY_4 },
	{ 0x0015, KEY_3 },
	{ 0x0016, KEY_5 },
	{ 0x0017, KEY_CHANNELDOWN },	/* Ch Dn */
	{ 0x0019, KEY_CHANNELUP },	/* CH Up */
	{ 0x001a, KEY_PAUSE },		/* PAUSE */
	{ 0x001b, KEY_1 },
	{ 0x001d, KEY_AUDIO },		/* DUAL SOUND */
	{ 0x001e, KEY_PLAY },		/* PLAY */
	{ 0x001f, KEY_CAMERA },		/* SNAPSHOT */
	{ 0x0040, KEY_VOLUMEUP },	/* Vol Up */
	{ 0x0048, KEY_7 },
	{ 0x004c, KEY_6 },
	{ 0x004d, KEY_PLAYPAUSE },	/* TIMESHIFT */
	{ 0x0054, KEY_0 },
};

/* Wrapper handed to the rc-core keymap registry. */
static struct rc_map_list digittrade_map = {
	.map = {
		.scan    = digittrade,
		.size    = ARRAY_SIZE(digittrade),
		.rc_type = RC_TYPE_NEC,
		.name    = RC_MAP_DIGITTRADE,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_digittrade(void)
{
	return rc_map_register(&digittrade_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_digittrade(void)
{
	rc_map_unregister(&digittrade_map);
}

module_init(init_rc_map_digittrade)
module_exit(exit_rc_map_digittrade)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
gpl-2.0
bgat/linux-udoo
arch/parisc/math-emu/driver.c
13888
3850
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * linux/arch/math-emu/driver.c.c * * decodes and dispatches unimplemented FPU instructions * * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org> * Copyright (C) 2001 Hewlett-Packard <bame@debian.org> */ #include <linux/sched.h> #include "float.h" #include "math-emu.h" #define fptpos 31 #define fpr1pos 10 #define extru(r,pos,len) (((r) >> (31-(pos))) & (( 1 << (len)) - 1)) #define FPUDEBUG 0 /* Format of the floating-point exception registers. */ struct exc_reg { unsigned int exception : 6; unsigned int ei : 26; }; /* Macros for grabbing bits of the instruction format from the 'ei' field above. */ /* Major opcode 0c and 0e */ #define FP0CE_UID(i) (((i) >> 6) & 3) #define FP0CE_CLASS(i) (((i) >> 9) & 3) #define FP0CE_SUBOP(i) (((i) >> 13) & 7) #define FP0CE_SUBOP1(i) (((i) >> 15) & 7) /* Class 1 subopcode */ #define FP0C_FORMAT(i) (((i) >> 11) & 3) #define FP0E_FORMAT(i) (((i) >> 11) & 1) /* Major opcode 0c, uid 2 (performance monitoring) */ #define FPPM_SUBOP(i) (((i) >> 9) & 0x1f) /* Major opcode 2e (fused operations). 
*/ #define FP2E_SUBOP(i) (((i) >> 5) & 1) #define FP2E_FORMAT(i) (((i) >> 11) & 1) /* Major opcode 26 (FMPYSUB) */ /* Major opcode 06 (FMPYADD) */ #define FPx6_FORMAT(i) ((i) & 0x1f) /* Flags and enable bits of the status word. */ #define FPSW_FLAGS(w) ((w) >> 27) #define FPSW_ENABLE(w) ((w) & 0x1f) #define FPSW_V (1<<4) #define FPSW_Z (1<<3) #define FPSW_O (1<<2) #define FPSW_U (1<<1) #define FPSW_I (1<<0) /* Handle a floating point exception. Return zero if the faulting instruction can be completed successfully. */ int handle_fpe(struct pt_regs *regs) { extern void printbinary(unsigned long x, int nbits); struct siginfo si; unsigned int orig_sw, sw; int signalcode; /* need an intermediate copy of float regs because FPU emulation * code expects an artificial last entry which contains zero * * also, the passed in fr registers contain one word that defines * the fpu type. the fpu type information is constructed * inside the emulation code */ __u64 frcopy[36]; memcpy(frcopy, regs->fr, sizeof regs->fr); frcopy[32] = 0; memcpy(&orig_sw, frcopy, sizeof(orig_sw)); if (FPUDEBUG) { printk(KERN_DEBUG "FP VZOUICxxxxCQCQCQCQCQCRMxxTDVZOUI ->\n "); printbinary(orig_sw, 32); printk(KERN_DEBUG "\n"); } signalcode = decode_fpu(frcopy, 0x666); /* Status word = FR0L. */ memcpy(&sw, frcopy, sizeof(sw)); if (FPUDEBUG) { printk(KERN_DEBUG "VZOUICxxxxCQCQCQCQCQCRMxxTDVZOUI decode_fpu returns %d|0x%x\n", signalcode >> 24, signalcode & 0xffffff); printbinary(sw, 32); printk(KERN_DEBUG "\n"); } memcpy(regs->fr, frcopy, sizeof regs->fr); if (signalcode != 0) { si.si_signo = signalcode >> 24; si.si_errno = 0; si.si_code = signalcode & 0xffffff; si.si_addr = (void __user *) regs->iaoq[0]; force_sig_info(si.si_signo, &si, current); return -1; } return signalcode ? -1 : 0; }
gpl-2.0
sdwuyawen/u-boot
arch/sandbox/cpu/os.c
65
6115
/* * Copyright (c) 2011 The Chromium OS Authors. * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <errno.h> #include <fcntl.h> #include <getopt.h> #include <stdlib.h> #include <termios.h> #include <time.h> #include <unistd.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/time.h> #include <sys/types.h> #include <linux/types.h> #include <asm/getopt.h> #include <asm/sections.h> #include <asm/state.h> #include <os.h> /* Operating System Interface */ ssize_t os_read(int fd, void *buf, size_t count) { return read(fd, buf, count); } ssize_t os_write(int fd, const void *buf, size_t count) { return write(fd, buf, count); } off_t os_lseek(int fd, off_t offset, int whence) { if (whence == OS_SEEK_SET) whence = SEEK_SET; else if (whence == OS_SEEK_CUR) whence = SEEK_CUR; else if (whence == OS_SEEK_END) whence = SEEK_END; else os_exit(1); return lseek(fd, offset, whence); } int os_open(const char *pathname, int os_flags) { int flags; switch (os_flags & OS_O_MASK) { case OS_O_RDONLY: default: flags = O_RDONLY; break; case OS_O_WRONLY: flags = O_WRONLY; break; case OS_O_RDWR: flags = O_RDWR; break; } if (os_flags & OS_O_CREAT) flags |= O_CREAT; return open(pathname, flags, 0777); } int os_close(int fd) { return close(fd); } void os_exit(int 
exit_code) { exit(exit_code); } /* Restore tty state when we exit */ static struct termios orig_term; static void os_fd_restore(void) { tcsetattr(0, TCSANOW, &orig_term); } /* Put tty into raw mode so <tab> and <ctrl+c> work */ void os_tty_raw(int fd) { static int setup = 0; struct termios term; if (setup) return; setup = 1; /* If not a tty, don't complain */ if (tcgetattr(fd, &orig_term)) return; term = orig_term; term.c_iflag = IGNBRK | IGNPAR; term.c_oflag = OPOST | ONLCR; term.c_cflag = CS8 | CREAD | CLOCAL; term.c_lflag = 0; if (tcsetattr(fd, TCSANOW, &term)) return; atexit(os_fd_restore); } void *os_malloc(size_t length) { return mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); } void os_usleep(unsigned long usec) { usleep(usec); } u64 os_get_nsec(void) { #if defined(CLOCK_MONOTONIC) && defined(_POSIX_MONOTONIC_CLOCK) struct timespec tp; if (EINVAL == clock_gettime(CLOCK_MONOTONIC, &tp)) { struct timeval tv; gettimeofday(&tv, NULL); tp.tv_sec = tv.tv_sec; tp.tv_nsec = tv.tv_usec * 1000; } return tp.tv_sec * 1000000000ULL + tp.tv_nsec; #else struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec * 1000000000ULL + tv.tv_usec * 1000; #endif } static char *short_opts; static struct option *long_opts; int os_parse_args(struct sandbox_state *state, int argc, char *argv[]) { struct sb_cmdline_option **sb_opt = __u_boot_sandbox_option_start; size_t num_options = __u_boot_sandbox_option_count(); size_t i; int hidden_short_opt; size_t si; int c; if (short_opts || long_opts) return 1; state->argc = argc; state->argv = argv; /* dynamically construct the arguments to the system getopt_long */ short_opts = os_malloc(sizeof(*short_opts) * num_options * 2 + 1); long_opts = os_malloc(sizeof(*long_opts) * num_options); if (!short_opts || !long_opts) return 1; /* * getopt_long requires "val" to be unique (since that is what the * func returns), so generate unique values automatically for flags * that don't have a short option. 
pick 0x100 as that is above the * single byte range (where ASCII/ISO-XXXX-X charsets live). */ hidden_short_opt = 0x100; si = 0; for (i = 0; i < num_options; ++i) { long_opts[i].name = sb_opt[i]->flag; long_opts[i].has_arg = sb_opt[i]->has_arg ? required_argument : no_argument; long_opts[i].flag = NULL; if (sb_opt[i]->flag_short) { short_opts[si++] = long_opts[i].val = sb_opt[i]->flag_short; if (long_opts[i].has_arg == required_argument) short_opts[si++] = ':'; } else long_opts[i].val = sb_opt[i]->flag_short = hidden_short_opt++; } short_opts[si] = '\0'; /* we need to handle output ourselves since u-boot provides printf */ opterr = 0; /* * walk all of the options the user gave us on the command line, * figure out what u-boot option structure they belong to (via * the unique short val key), and call the appropriate callback. */ while ((c = getopt_long(argc, argv, short_opts, long_opts, NULL)) != -1) { for (i = 0; i < num_options; ++i) { if (sb_opt[i]->flag_short == c) { if (sb_opt[i]->callback(state, optarg)) { state->parse_err = sb_opt[i]->flag; return 0; } break; } } if (i == num_options) { /* * store the faulting flag for later display. we have to * store the flag itself as the getopt parsing itself is * tricky: need to handle the following flags (assume all * of the below are unknown): * -a optopt='a' optind=<next> * -abbbb optopt='a' optind=<this> * -aaaaa optopt='a' optind=<this> * --a optopt=0 optind=<this> * as you can see, it is impossible to determine the exact * faulting flag without doing the parsing ourselves, so * we just report the specific flag that failed. */ if (optopt) { static char parse_err[3] = { '-', 0, '\0', }; parse_err[1] = optopt; state->parse_err = parse_err; } else state->parse_err = argv[optind - 1]; break; } } return 0; }
gpl-2.0
KonstaT/zte-kernel-msm7x27a
drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
65
28556
/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "vcd_ddl_shared_mem.h" #define VIDC_SM_EXTENDED_DECODE_STATUS_ADDR 0x0000 #define VIDC_SM_EXT_DEC_STATUS_RESOLUTION_CHANGE_BMSK 0x1 #define VIDC_SM_EXT_DEC_STATUS_RESOLUTION_CHANGE_SHFT 0x0 #define VIDC_SM_EXT_DEC_STATUS_MORE_FIELD_NEEDED_BMSK 0x4 #define VIDC_SM_EXT_DEC_STATUS_MORE_FIELD_NEEDED_SHFT 0x2 #define VIDC_SM_SET_FRAME_TAG_ADDR 0x0004 #define VIDC_SM_GET_FRAME_TAG_TOP_ADDR 0x0008 #define VIDC_SM_GET_FRAME_TAG_BOTTOM_ADDR 0x000c #define VIDC_SM_PIC_TIME_TOP_ADDR 0x0010 #define VIDC_SM_PIC_TIME_BOTTOM_ADDR 0x0014 #define VIDC_SM_START_BYTE_NUM_ADDR 0x0018 #define VIDC_SM_CROP_INFO1_ADDR 0x0020 #define VIDC_SM_CROP_INFO1_RIGHT_OFFSET_BMSK 0xffff0000 #define VIDC_SM_CROP_INFO1_RIGHT_OFFSET_SHFT 16 #define VIDC_SM_CROP_INFO1_LEFT_OFFSET_BMSK 0x0000ffff #define VIDC_SM_CROP_INFO1_LEFT_OFFSET_SHFT 0 #define VIDC_SM_CROP_INFO2_ADDR 0x0024 #define VIDC_SM_CROP_INFO2_BOTTOM_OFFSET_BMSK 0xffff0000 #define VIDC_SM_CROP_INFO2_BOTTOM_OFFSET_SHFT 16 #define VIDC_SM_CROP_INFO2_TOP_OFFSET_BMSK 0x0000ffff #define VIDC_SM_CROP_INFO2_TOP_OFFSET_SHFT 0 #define VIDC_SM_DISP_PIC_PROFILE_ADDR 0x007c #define VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_LEVEL_BMASK 0x0000ff00 #define VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_LEVEL_SHFT 8 #define VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_PROFILE_BMASK 0x0000001f #define VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_PROFILE_SHFT 0 #define VIDC_SM_DISP_PIC_FRAME_TYPE_ADDR 0x00c0 #define VIDC_SM_DISP_PIC_FRAME_TYPE_BMSK 0x00000003 #define 
VIDC_SM_DISP_PIC_FRAME_TYPE_SHFT 0 #define VIDC_SM_FREE_LUMA_DPB_ADDR 0x00c4 #define VIDC_SM_FREE_LUMA_DPB_BMSK 0xffffffff #define VIDC_SM_FREE_LUMA_DPB_SHFT 0 #define VIDC_SM_FREE_LUMA_DPB_DEC_ORDER_ADDR 0x00fc #define VIDC_SM_FREE_LUMA_DPB_DEC_ORDER_BMSK 0xffffffff #define VIDC_SM_FREE_LUMA_DPB_DEC_ORDER_SHFT 0 #define VIDC_SM_DEC_ORDER_WIDTH_ADDR 0x00e8 #define VIDC_SM_DEC_ORDER_WIDTH_BMSK 0xffffffff #define VIDC_SM_DEC_ORDER_WIDTH_SHFT 0 #define VIDC_SM_DEC_ORDER_HEIGHT_ADDR 0x00ec #define VIDC_SM_DEC_ORDER_HEIGHT_BMSK 0xffffffff #define VIDC_SM_DEC_ORDER_HEIGHT_SHFT 0 #define VIDC_SM_DEC_CROP_INFO1_ADDR 0x00f4 #define VIDC_SM_DEC_CROP_INFO1_RIGHT_OFFSET_BMSK 0xffff0000 #define VIDC_SM_DEC_CROP_INFO1_RIGHT_OFFSET_SHFT 16 #define VIDC_SM_DEC_CROP_INFO1_LEFT_OFFSET_BMSK 0x0000ffff #define VIDC_SM_DEC_CROP_INFO1_LEFT_OFFSET_SHFT 0 #define VIDC_SM_DEC_CROP_INFO2_ADDR 0x00f8 #define VIDC_SM_DEC_CROP_INFO2_BOTTOM_OFFSET_BMSK 0xffff0000 #define VIDC_SM_DEC_CROP_INFO2_BOTTOM_OFFSET_SHFT 16 #define VIDC_SM_DEC_CROP_INFO2_TOP_OFFSET_BMSK 0x0000ffff #define VIDC_SM_DEC_CROP_INFO2_TOP_OFFSET_SHFT 0 #define VIDC_SM_IDR_DECODING_ONLY_ADDR 0x0108 #define VIDC_SM_IDR_DECODING_ONLY_BMSK 0x00000001 #define VIDC_SM_IDR_DECODING_ONLY_SHIFT 0 #define VIDC_SM_ENC_EXT_CTRL_ADDR 0x0028 #define VIDC_SM_ENC_EXT_CTRL_VBV_BUFFER_SIZE_BMSK 0xffff0000 #define VIDC_SM_ENC_EXT_CTRL_VBV_BUFFER_SIZE_SHFT 16 #define VIDC_SM_ENC_EXT_CTRL_H263_CPCFC_ENABLE_BMSK 0x80 #define VIDC_SM_ENC_EXT_CTRL_H263_CPCFC_ENABLE_SHFT 7 #define VIDC_SM_ENC_EXT_CTRL_SPS_PPS_CONTROL_BMSK 0X100 #define VIDC_SM_ENC_EXT_CTRL_SPS_PPS_CONTROL_SHFT 8 #define VIDC_SM_ENC_EXT_CTRL_SEQ_HDR_CTRL_BMSK 0x8 #define VIDC_SM_ENC_EXT_CTRL_SEQ_HDR_CTRL_SHFT 3 #define VIDC_SM_ENC_EXT_CTRL_FRAME_SKIP_ENABLE_BMSK 0x6 #define VIDC_SM_ENC_EXT_CTRL_FRAME_SKIP_ENABLE_SHFT 1 #define VIDC_SM_ENC_EXT_CTRL_HEC_ENABLE_BMSK 0x1 #define VIDC_SM_ENC_EXT_CTRL_HEC_ENABLE_SHFT 0 #define VIDC_SM_ENC_PARAM_CHANGE_ADDR 0x002c #define 
VIDC_SM_ENC_PARAM_CHANGE_RC_BIT_RATE_BMSK 0x4 #define VIDC_SM_ENC_PARAM_CHANGE_RC_BIT_RATE_SHFT 2 #define VIDC_SM_ENC_PARAM_CHANGE_RC_FRAME_RATE_BMSK 0x2 #define VIDC_SM_ENC_PARAM_CHANGE_RC_FRAME_RATE_SHFT 1 #define VIDC_SM_ENC_PARAM_CHANGE_I_PERIOD_BMSK 0x1 #define VIDC_SM_ENC_PARAM_CHANGE_I_PERIOD_SHFT 0 #define VIDC_SM_ENC_VOP_TIMING_ADDR 0x0030 #define VIDC_SM_ENC_VOP_TIMING_ENABLE_BMSK 0x80000000 #define VIDC_SM_ENC_VOP_TIMING_ENABLE_SHFT 31 #define VIDC_SM_ENC_VOP_TIMING_TIME_RESOLUTION_BMSK 0x7fff0000 #define VIDC_SM_ENC_VOP_TIMING_TIME_RESOLUTION_SHFT 16 #define VIDC_SM_ENC_VOP_TIMING_FRAME_DELTA_BMSK 0x0000ffff #define VIDC_SM_ENC_VOP_TIMING_FRAME_DELTA_SHFT 0 #define VIDC_SM_ENC_HEC_PERIOD_ADDR 0x0034 #define VIDC_SM_H264_REF_L0_ADDR 0x005c #define VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_1_BMSK 0x80000000 #define VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_1_SHFT 31 #define VIDC_SM_H264_REF_L0_CHRO_REF_1_BMSK 0x7f000000 #define VIDC_SM_H264_REF_L0_CHRO_REF_1_SHFT 24 #define VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_0_BMSK 0x00800000 #define VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_0_SHFT 23 #define VIDC_SM_H264_REF_L0_CHRO_REF_0_BMSK 0x007f0000 #define VIDC_SM_H264_REF_L0_CHRO_REF_0_SHFT 16 #define VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_1_BMSK 0x00008000 #define VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_1_SHFT 15 #define VIDC_SM_H264_REF_L0_LUMA_REF_1_BMSK 0x00007f00 #define VIDC_SM_H264_REF_L0_LUMA_REF_1_SHFT 8 #define VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_0_BMSK 0x00000080 #define VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_0_SHFT 7 #define VIDC_SM_H264_REF_L0_LUMA_REF_0_BMSK 0x0000007f #define VIDC_SM_H264_REF_L0_LUMA_REF_0_SHFT 0 #define VIDC_SM_H264_REF_L1_ADDR 0x0060 #define VIDC_SM_H264_REF_L1_CHRO_BTM_FLG_0_BMSK 0x00800000 #define VIDC_SM_H264_REF_L1_CHRO_BTM_FLG_0_SHFT 23 #define VIDC_SM_H264_REF_L1_CHRO_REF_0_BMSK 0x007f0000 #define VIDC_SM_H264_REF_L1_CHRO_REF_0_SHFT 16 #define VIDC_SM_H264_REF_L1_LUMA_BTM_FLG_0_BMSK 0x00000080 #define VIDC_SM_H264_REF_L1_LUMA_BTM_FLG_0_SHFT 7 #define 
VIDC_SM_H264_REF_L1_LUMA_REF_0_BMSK 0x0000007f #define VIDC_SM_H264_REF_L1_LUMA_REF_0_SHFT 0 #define VIDC_SM_P_B_FRAME_QP_ADDR 0x0070 #define VIDC_SM_P_B_FRAME_QP_B_FRAME_QP_BMASK 0x00000fc0 #define VIDC_SM_P_B_FRAME_QP_B_FRAME_QP_SHFT 6 #define VIDC_SM_P_B_FRAME_QP_P_FRAME_QP_BMASK 0x0000003f #define VIDC_SM_P_B_FRAME_QP_P_FRAME_QP_SHFT 0 #define VIDC_SM_NEW_RC_BIT_RATE_ADDR 0x0090 #define VIDC_SM_NEW_RC_BIT_RATE_VALUE_BMASK 0xffffffff #define VIDC_SM_NEW_RC_BIT_RATE_VALUE_SHFT 0 #define VIDC_SM_NEW_RC_FRAME_RATE_ADDR 0x0094 #define VIDC_SM_NEW_RC_FRAME_RATE_VALUE_BMASK 0xffffffff #define VIDC_SM_NEW_RC_FRAME_RATE_VALUE_SHFT 0 #define VIDC_SM_NEW_I_PERIOD_ADDR 0x0098 #define VIDC_SM_NEW_I_PERIOD_VALUE_BMASK 0xffffffff #define VIDC_SM_NEW_I_PERIOD_VALUE_SHFT 0 #define VIDC_SM_ALLOCATED_LUMA_DPB_SIZE_ADDR 0x0064 #define VIDC_SM_ALLOCATED_CHROMA_DPB_SIZE_ADDR 0x0068 #define VIDC_SM_ALLOCATED_MV_SIZE_ADDR 0x006c #define VIDC_SM_FLUSH_CMD_TYPE_ADDR 0x0080 #define VIDC_SM_FLUSH_CMD_INBUF1_ADDR 0x0084 #define VIDC_SM_FLUSH_CMD_INBUF2_ADDR 0x0088 #define VIDC_SM_FLUSH_CMD_OUTBUF_ADDR 0x008c #define VIDC_SM_MIN_LUMA_DPB_SIZE_ADDR 0x00b0 #define VIDC_SM_MIN_CHROMA_DPB_SIZE_ADDR 0x00bc #define VIDC_SM_METADATA_ENABLE_ADDR 0x0038 #define VIDC_SM_METADATA_ENABLE_EXTRADATA_BMSK 0x40 #define VIDC_SM_METADATA_ENABLE_EXTRADATA_SHFT 6 #define VIDC_SM_METADATA_ENABLE_ENC_SLICE_SIZE_BMSK 0x20 #define VIDC_SM_METADATA_ENABLE_ENC_SLICE_SIZE_SHFT 5 #define VIDC_SM_METADATA_ENABLE_VUI_BMSK 0x10 #define VIDC_SM_METADATA_ENABLE_VUI_SHFT 4 #define VIDC_SM_METADATA_ENABLE_SEI_VIDC_BMSK 0x8 #define VIDC_SM_METADATA_ENABLE_SEI_VIDC_SHFT 3 #define VIDC_SM_METADATA_ENABLE_VC1_PARAM_BMSK 0x4 #define VIDC_SM_METADATA_ENABLE_VC1_PARAM_SHFT 2 #define VIDC_SM_METADATA_ENABLE_CONCEALED_MB_BMSK 0x2 #define VIDC_SM_METADATA_ENABLE_CONCEALED_MB_SHFT 1 #define VIDC_SM_METADATA_ENABLE_QP_BMSK 0x1 #define VIDC_SM_METADATA_ENABLE_QP_SHFT 0 #define VIDC_SM_METADATA_STATUS_ADDR 0x003c #define 
VIDC_SM_METADATA_STATUS_STATUS_BMSK 0x1 #define VIDC_SM_METADATA_STATUS_STATUS_SHFT 0 #define VIDC_SM_METADATA_DISPLAY_INDEX_ADDR 0x0040 #define VIDC_SM_EXT_METADATA_START_ADDR_ADDR 0x0044 #define VIDC_SM_PUT_EXTRADATA_ADDR 0x0048 #define VIDC_SM_PUT_EXTRADATA_PUT_BMSK 0x1 #define VIDC_SM_PUT_EXTRADATA_PUT_SHFT 0 #define VIDC_SM_EXTRADATA_ADDR_ADDR 0x004c #define VIDC_SM_CHROMA_ADDR_CHANGE_ADDR 0x0148 #define VIDC_SM_CHROMA_ADDR_CHANGE_BMASK 0x00000001 #define VIDC_SM_CHROMA_ADDR_CHANGE_SHFT 0 #define VIDC_SM_ERROR_CONCEALMENT_CONFIG_ADDR 0x0154 #define VIDC_SM_ERROR_CONCEALMENT_CONFIG_INTER_SLICE_BMSK 0x0c #define VIDC_SM_ERROR_CONCEALMENT_CONFIG_INTER_SLICE_SHFT 2 #define VIDC_SM_ERROR_CONCEALMENT_CONFIG_INTRA_SLICE_BMSK 0X02 #define VIDC_SM_ERROR_CONCEALMENT_CONFIG_INTRA_SLICE_SHFT 1 #define VIDC_SM_ERROR_CONCEALMENT_CONFIG_CONCEAL_ENABLE_BMSK 0x01 #define VIDC_SM_ERROR_CONCEALMENT_CONFIG_CONCEAL_ENABLE_SHFT 0 #define VIDC_SM_SEI_ENABLE_ADDR 0x0180 #define VIDC_SM_SEI_ENABLE_RECOVERY_POINT_SEI_BMSK 0x00000001 #define VIDC_SM_SEI_ENABLE_RECOVERY_POINT_SEI_SHFT 0 #define VIDC_SM_NUM_STUFF_BYTES_CONSUME_ADDR 0X01ac #define VIDC_SM_TIMEOUT_VALUE_ADDR 0x0158 #define VIDC_SM_TIMEOUT_VALUE_BMSK 0xffffffff #define VIDC_SM_TIMEOUT_VALUE_SHFT 0 #define VIDC_SM_ENC_EXT_CTRL_CLOSED_GOP_ENABLE_BMSK 0x40 #define VIDC_SM_ENC_EXT_CTRL_CLOSED_GOP_ENABLE_SHFT 6 #define DDL_MEM_WRITE_32(base, offset, val) ddl_mem_write_32(\ (u32 *) ((u8 *) (base)->align_virtual_addr + (offset)), (val)) #define DDL_MEM_READ_32(base, offset) ddl_mem_read_32(\ (u32 *) ((u8 *) (base)->align_virtual_addr + (offset))) #define DDL_SHARED_MEM_11BIT_RIGHT_SHIFT 11 static void ddl_mem_write_32(u32 *addr, u32 data) { *addr = data; } static u32 ddl_mem_read_32(u32 *addr) { return *addr; } void vidc_sm_get_extended_decode_status(struct ddl_buf_addr *shared_mem, u32 *more_field_needed, u32 *resl_change) { u32 decode_status = DDL_MEM_READ_32(shared_mem, VIDC_SM_EXTENDED_DECODE_STATUS_ADDR); if 
(more_field_needed) *more_field_needed = VIDC_GETFIELD(decode_status, VIDC_SM_EXT_DEC_STATUS_MORE_FIELD_NEEDED_BMSK, VIDC_SM_EXT_DEC_STATUS_MORE_FIELD_NEEDED_SHFT); if (resl_change) *resl_change = VIDC_GETFIELD(decode_status, VIDC_SM_EXT_DEC_STATUS_RESOLUTION_CHANGE_BMSK, VIDC_SM_EXT_DEC_STATUS_RESOLUTION_CHANGE_SHFT); } void vidc_sm_set_frame_tag(struct ddl_buf_addr *shared_mem, u32 frame_tag) { DDL_MEM_WRITE_32(shared_mem, VIDC_SM_SET_FRAME_TAG_ADDR, frame_tag); } void vidc_sm_get_frame_tags(struct ddl_buf_addr *shared_mem, u32 *pn_frame_tag_top, u32 *pn_frame_tag_bottom) { *pn_frame_tag_top = DDL_MEM_READ_32(shared_mem, VIDC_SM_GET_FRAME_TAG_TOP_ADDR); *pn_frame_tag_bottom = DDL_MEM_READ_32(shared_mem, VIDC_SM_GET_FRAME_TAG_BOTTOM_ADDR); } void vidc_sm_get_picture_times(struct ddl_buf_addr *shared_mem, u32 *pn_time_top, u32 *pn_time_bottom) { *pn_time_top = DDL_MEM_READ_32(shared_mem, VIDC_SM_PIC_TIME_TOP_ADDR); *pn_time_bottom = DDL_MEM_READ_32(shared_mem, VIDC_SM_PIC_TIME_BOTTOM_ADDR); } void vidc_sm_set_start_byte_number(struct ddl_buf_addr *shared_mem, u32 byte_num) { DDL_MEM_WRITE_32(shared_mem, VIDC_SM_START_BYTE_NUM_ADDR, byte_num); } void vidc_sm_get_crop_info(struct ddl_buf_addr *shared_mem, u32 *pn_left, u32 *pn_right, u32 *pn_top, u32 *pn_bottom) { u32 info1, info2; info1 = DDL_MEM_READ_32(shared_mem, VIDC_SM_CROP_INFO1_ADDR); *pn_left = VIDC_GETFIELD(info1, VIDC_SM_CROP_INFO1_LEFT_OFFSET_BMSK, VIDC_SM_CROP_INFO1_LEFT_OFFSET_SHFT); *pn_right = VIDC_GETFIELD(info1, VIDC_SM_CROP_INFO1_RIGHT_OFFSET_BMSK, VIDC_SM_CROP_INFO1_RIGHT_OFFSET_SHFT); info2 = DDL_MEM_READ_32(shared_mem, VIDC_SM_CROP_INFO2_ADDR); *pn_top = VIDC_GETFIELD(info2, VIDC_SM_CROP_INFO2_TOP_OFFSET_BMSK, VIDC_SM_CROP_INFO2_TOP_OFFSET_SHFT); *pn_bottom = VIDC_GETFIELD(info2, VIDC_SM_CROP_INFO2_BOTTOM_OFFSET_BMSK, VIDC_SM_CROP_INFO2_BOTTOM_OFFSET_SHFT); } void vidc_sm_get_displayed_picture_frame(struct ddl_buf_addr *shared_mem, u32 *n_disp_picture_frame) { u32 disp_pict_frame; 
disp_pict_frame = DDL_MEM_READ_32(shared_mem, VIDC_SM_DISP_PIC_FRAME_TYPE_ADDR); *n_disp_picture_frame = VIDC_GETFIELD(disp_pict_frame, VIDC_SM_DISP_PIC_FRAME_TYPE_BMSK, VIDC_SM_DISP_PIC_FRAME_TYPE_SHFT); } void vidc_sm_get_available_luma_dpb_address(struct ddl_buf_addr *shared_mem, u32 *pn_free_luma_dpb_address) { *pn_free_luma_dpb_address = DDL_MEM_READ_32(shared_mem, VIDC_SM_FREE_LUMA_DPB_ADDR); } void vidc_sm_get_available_luma_dpb_dec_order_address( struct ddl_buf_addr *shared_mem, u32 *pn_free_luma_dpb_address) { *pn_free_luma_dpb_address = DDL_MEM_READ_32(shared_mem, VIDC_SM_FREE_LUMA_DPB_DEC_ORDER_ADDR); } void vidc_sm_get_dec_order_resl( struct ddl_buf_addr *shared_mem, u32 *width, u32 *height) { *width = DDL_MEM_READ_32(shared_mem, VIDC_SM_DEC_ORDER_WIDTH_ADDR); *height = DDL_MEM_READ_32(shared_mem, VIDC_SM_DEC_ORDER_HEIGHT_ADDR); } void vidc_sm_get_dec_order_crop_info( struct ddl_buf_addr *shared_mem, u32 *left, u32 *right, u32 *top, u32 *bottom) { u32 crop_data; crop_data = DDL_MEM_READ_32(shared_mem, VIDC_SM_DEC_CROP_INFO1_ADDR); *left = VIDC_GETFIELD(crop_data, VIDC_SM_DEC_CROP_INFO1_LEFT_OFFSET_BMSK, VIDC_SM_DEC_CROP_INFO1_LEFT_OFFSET_SHFT); *right = VIDC_GETFIELD(crop_data, VIDC_SM_DEC_CROP_INFO1_RIGHT_OFFSET_BMSK, VIDC_SM_DEC_CROP_INFO1_RIGHT_OFFSET_SHFT); crop_data = DDL_MEM_READ_32(shared_mem, VIDC_SM_DEC_CROP_INFO2_ADDR); *top = VIDC_GETFIELD(crop_data, VIDC_SM_DEC_CROP_INFO2_TOP_OFFSET_BMSK, VIDC_SM_DEC_CROP_INFO2_TOP_OFFSET_SHFT); *bottom = VIDC_GETFIELD(crop_data, VIDC_SM_DEC_CROP_INFO2_BOTTOM_OFFSET_BMSK, VIDC_SM_DEC_CROP_INFO2_BOTTOM_OFFSET_SHFT); } void vidc_sm_set_extended_encoder_control(struct ddl_buf_addr *shared_mem, u32 hec_enable, enum VIDC_SM_frame_skip frame_skip_mode, u32 seq_hdr_in_band, u32 vbv_buffer_size, u32 cpcfc_enable, u32 sps_pps_control, u32 closed_gop_enable) { u32 enc_ctrl; enc_ctrl = VIDC_SETFIELD((hec_enable) ? 
1 : 0, VIDC_SM_ENC_EXT_CTRL_HEC_ENABLE_SHFT, VIDC_SM_ENC_EXT_CTRL_HEC_ENABLE_BMSK) | VIDC_SETFIELD((u32) frame_skip_mode, VIDC_SM_ENC_EXT_CTRL_FRAME_SKIP_ENABLE_SHFT, VIDC_SM_ENC_EXT_CTRL_FRAME_SKIP_ENABLE_BMSK) | VIDC_SETFIELD((seq_hdr_in_band) ? 1 : 0 , VIDC_SM_ENC_EXT_CTRL_SEQ_HDR_CTRL_SHFT , VIDC_SM_ENC_EXT_CTRL_SEQ_HDR_CTRL_BMSK) | VIDC_SETFIELD(vbv_buffer_size, VIDC_SM_ENC_EXT_CTRL_VBV_BUFFER_SIZE_SHFT, VIDC_SM_ENC_EXT_CTRL_VBV_BUFFER_SIZE_BMSK) | VIDC_SETFIELD((cpcfc_enable) ? 1 : 0, VIDC_SM_ENC_EXT_CTRL_H263_CPCFC_ENABLE_SHFT, VIDC_SM_ENC_EXT_CTRL_H263_CPCFC_ENABLE_BMSK) | VIDC_SETFIELD((sps_pps_control) ? 1 : 0, VIDC_SM_ENC_EXT_CTRL_SPS_PPS_CONTROL_SHFT, VIDC_SM_ENC_EXT_CTRL_SPS_PPS_CONTROL_BMSK) | VIDC_SETFIELD(closed_gop_enable, VIDC_SM_ENC_EXT_CTRL_CLOSED_GOP_ENABLE_SHFT, VIDC_SM_ENC_EXT_CTRL_CLOSED_GOP_ENABLE_BMSK); DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ENC_EXT_CTRL_ADDR, enc_ctrl); } void vidc_sm_set_encoder_param_change(struct ddl_buf_addr *shared_mem, u32 bit_rate_chg, u32 frame_rate_chg, u32 i_period_chg) { u32 enc_param_chg; enc_param_chg = VIDC_SETFIELD((bit_rate_chg) ? 1 : 0, VIDC_SM_ENC_PARAM_CHANGE_RC_BIT_RATE_SHFT, VIDC_SM_ENC_PARAM_CHANGE_RC_BIT_RATE_BMSK) | VIDC_SETFIELD((frame_rate_chg) ? 1 : 0, VIDC_SM_ENC_PARAM_CHANGE_RC_FRAME_RATE_SHFT, VIDC_SM_ENC_PARAM_CHANGE_RC_FRAME_RATE_BMSK) | VIDC_SETFIELD((i_period_chg) ? 1 : 0, VIDC_SM_ENC_PARAM_CHANGE_I_PERIOD_SHFT, VIDC_SM_ENC_PARAM_CHANGE_I_PERIOD_BMSK); DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ENC_PARAM_CHANGE_ADDR, enc_param_chg); } void vidc_sm_set_encoder_vop_time(struct ddl_buf_addr *shared_mem, u32 vop_time_enable, u32 time_resolution, u32 frame_delta) { u32 vop_time; vop_time = VIDC_SETFIELD((vop_time_enable) ? 
1 : 0, VIDC_SM_ENC_VOP_TIMING_ENABLE_SHFT , VIDC_SM_ENC_VOP_TIMING_ENABLE_BMSK) | VIDC_SETFIELD(time_resolution , VIDC_SM_ENC_VOP_TIMING_TIME_RESOLUTION_SHFT, VIDC_SM_ENC_VOP_TIMING_TIME_RESOLUTION_BMSK) | VIDC_SETFIELD(frame_delta, VIDC_SM_ENC_VOP_TIMING_FRAME_DELTA_SHFT, VIDC_SM_ENC_VOP_TIMING_FRAME_DELTA_BMSK); DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ENC_VOP_TIMING_ADDR, vop_time); } void vidc_sm_set_encoder_hec_period(struct ddl_buf_addr *shared_mem, u32 hec_period) { DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ENC_HEC_PERIOD_ADDR, hec_period); } void vidc_sm_get_h264_encoder_reference_list0(struct ddl_buf_addr *shared_mem, enum VIDC_SM_ref_picture *pe_luma_picture0, u32 *pn_luma_picture_index0, enum VIDC_SM_ref_picture *pe_luma_picture1, u32 *pn_luma_picture_index1, enum VIDC_SM_ref_picture *pe_chroma_picture0, u32 *pn_chroma_picture_index0, enum VIDC_SM_ref_picture *pe_chroma_picture1, u32 *pn_chroma_picture_index1) { u32 ref_list; ref_list = DDL_MEM_READ_32(shared_mem, VIDC_SM_H264_REF_L0_ADDR); *pe_luma_picture0 = (enum VIDC_SM_ref_picture) VIDC_GETFIELD(ref_list, VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_0_BMSK, VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_0_SHFT); *pn_luma_picture_index0 = VIDC_GETFIELD(ref_list, VIDC_SM_H264_REF_L0_LUMA_REF_0_BMSK, VIDC_SM_H264_REF_L0_LUMA_REF_0_SHFT); *pe_luma_picture1 = (enum VIDC_SM_ref_picture) VIDC_GETFIELD(ref_list, VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_1_BMSK, VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_1_SHFT); *pn_luma_picture_index1 = VIDC_GETFIELD(ref_list, VIDC_SM_H264_REF_L0_LUMA_REF_1_BMSK, VIDC_SM_H264_REF_L0_LUMA_REF_1_SHFT); *pe_chroma_picture0 = (enum VIDC_SM_ref_picture) VIDC_GETFIELD(ref_list, VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_0_BMSK, VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_0_SHFT); *pn_chroma_picture_index0 = VIDC_GETFIELD(ref_list, VIDC_SM_H264_REF_L0_CHRO_REF_0_BMSK, VIDC_SM_H264_REF_L0_CHRO_REF_0_SHFT); *pe_chroma_picture1 = (enum VIDC_SM_ref_picture) VIDC_GETFIELD(ref_list, VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_1_BMSK, 
VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_1_SHFT); *pn_chroma_picture_index1 = VIDC_GETFIELD(ref_list, VIDC_SM_H264_REF_L0_CHRO_REF_1_BMSK, VIDC_SM_H264_REF_L0_CHRO_REF_1_SHFT); } void vidc_sm_get_h264_encoder_reference_list1(struct ddl_buf_addr *shared_mem, enum VIDC_SM_ref_picture *pe_luma_picture, u32 *pn_luma_picture_index, enum VIDC_SM_ref_picture *pe_chroma_picture, u32 *pn_chroma_picture_index) { u32 ref_list; ref_list = DDL_MEM_READ_32(shared_mem, VIDC_SM_H264_REF_L1_ADDR); *pe_luma_picture = (enum VIDC_SM_ref_picture) VIDC_GETFIELD(ref_list, VIDC_SM_H264_REF_L1_LUMA_BTM_FLG_0_BMSK, VIDC_SM_H264_REF_L1_LUMA_BTM_FLG_0_SHFT); *pn_luma_picture_index = VIDC_GETFIELD(ref_list, VIDC_SM_H264_REF_L1_LUMA_REF_0_BMSK, VIDC_SM_H264_REF_L1_LUMA_REF_0_SHFT); *pe_chroma_picture = (enum VIDC_SM_ref_picture) VIDC_GETFIELD(ref_list, VIDC_SM_H264_REF_L1_CHRO_BTM_FLG_0_BMSK, VIDC_SM_H264_REF_L1_CHRO_BTM_FLG_0_SHFT); *pn_chroma_picture_index = VIDC_GETFIELD(ref_list, VIDC_SM_H264_REF_L1_CHRO_REF_0_BMSK, VIDC_SM_H264_REF_L1_CHRO_REF_0_SHFT); } void vidc_sm_set_allocated_dpb_size(struct ddl_buf_addr *shared_mem, u32 y_size, u32 c_size) { DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ALLOCATED_LUMA_DPB_SIZE_ADDR, y_size); DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ALLOCATED_CHROMA_DPB_SIZE_ADDR, c_size); } void vidc_sm_set_allocated_h264_mv_size(struct ddl_buf_addr *shared_mem, u32 mv_size) { DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ALLOCATED_MV_SIZE_ADDR, mv_size); } void vidc_sm_get_min_yc_dpb_sizes(struct ddl_buf_addr *shared_mem, u32 *pn_min_luma_dpb_size, u32 *pn_min_chroma_dpb_size) { *pn_min_luma_dpb_size = DDL_MEM_READ_32(shared_mem, VIDC_SM_MIN_LUMA_DPB_SIZE_ADDR); *pn_min_chroma_dpb_size = DDL_MEM_READ_32(shared_mem, VIDC_SM_MIN_CHROMA_DPB_SIZE_ADDR); } void vidc_sm_set_concealment_color(struct ddl_buf_addr *shared_mem, u32 conceal_ycolor, u32 conceal_ccolor) { u32 conceal_color; conceal_color = (((conceal_ycolor << 8) & 0xff00) | (conceal_ccolor & 0xff)); DDL_MEM_WRITE_32(shared_mem, 0x00f0, 
conceal_color); } void vidc_sm_set_metadata_enable(struct ddl_buf_addr *shared_mem, u32 extradata_enable, u32 qp_enable, u32 concealed_mb_enable, u32 vc1Param_enable, u32 sei_nal_enable, u32 vui_enable, u32 enc_slice_size_enable) { u32 metadata_enable; metadata_enable = VIDC_SETFIELD((extradata_enable) ? 1 : 0, VIDC_SM_METADATA_ENABLE_EXTRADATA_SHFT, VIDC_SM_METADATA_ENABLE_EXTRADATA_BMSK) | VIDC_SETFIELD((enc_slice_size_enable) ? 1 : 0, VIDC_SM_METADATA_ENABLE_ENC_SLICE_SIZE_SHFT, VIDC_SM_METADATA_ENABLE_ENC_SLICE_SIZE_BMSK) | VIDC_SETFIELD((vui_enable) ? 1 : 0, VIDC_SM_METADATA_ENABLE_VUI_SHFT, VIDC_SM_METADATA_ENABLE_VUI_BMSK) | VIDC_SETFIELD((sei_nal_enable) ? 1 : 0, VIDC_SM_METADATA_ENABLE_SEI_VIDC_SHFT, VIDC_SM_METADATA_ENABLE_SEI_VIDC_BMSK) | VIDC_SETFIELD((vc1Param_enable) ? 1 : 0, VIDC_SM_METADATA_ENABLE_VC1_PARAM_SHFT, VIDC_SM_METADATA_ENABLE_VC1_PARAM_BMSK) | VIDC_SETFIELD((concealed_mb_enable) ? 1 : 0, VIDC_SM_METADATA_ENABLE_CONCEALED_MB_SHFT, VIDC_SM_METADATA_ENABLE_CONCEALED_MB_BMSK) | VIDC_SETFIELD((qp_enable) ? 
1 : 0, VIDC_SM_METADATA_ENABLE_QP_SHFT, VIDC_SM_METADATA_ENABLE_QP_BMSK); DDL_MEM_WRITE_32(shared_mem, VIDC_SM_METADATA_ENABLE_ADDR, metadata_enable); } void vidc_sm_get_metadata_status(struct ddl_buf_addr *shared_mem, u32 *pb_metadata_present) { u32 status; status = DDL_MEM_READ_32(shared_mem, VIDC_SM_METADATA_STATUS_ADDR); *pb_metadata_present = (u32) VIDC_GETFIELD(status, VIDC_SM_METADATA_STATUS_STATUS_BMSK, VIDC_SM_METADATA_STATUS_STATUS_SHFT); } void vidc_sm_get_metadata_display_index(struct ddl_buf_addr *shared_mem, u32 *pn_dixplay_index) { *pn_dixplay_index = DDL_MEM_READ_32(shared_mem, VIDC_SM_METADATA_DISPLAY_INDEX_ADDR); } void vidc_sm_set_metadata_start_address(struct ddl_buf_addr *shared_mem, u32 address) { u32 address_shift = address; DDL_MEM_WRITE_32(shared_mem, VIDC_SM_EXT_METADATA_START_ADDR_ADDR, address_shift); } void vidc_sm_set_extradata_presence(struct ddl_buf_addr *shared_mem, u32 extradata_present) { u32 put_extradata; put_extradata = VIDC_SETFIELD((extradata_present) ? 
1 : 0, VIDC_SM_PUT_EXTRADATA_PUT_SHFT, VIDC_SM_PUT_EXTRADATA_PUT_BMSK); DDL_MEM_WRITE_32(shared_mem, VIDC_SM_PUT_EXTRADATA_ADDR, put_extradata); } void vidc_sm_set_extradata_addr(struct ddl_buf_addr *shared_mem, u32 extradata_addr) { u32 address_shift = extradata_addr; DDL_MEM_WRITE_32(shared_mem, VIDC_SM_EXTRADATA_ADDR_ADDR, address_shift); } void vidc_sm_set_pand_b_frame_qp(struct ddl_buf_addr *shared_mem, u32 b_frame_qp, u32 p_frame_qp) { u32 nP_B_frame_qp; nP_B_frame_qp = VIDC_SETFIELD(b_frame_qp, VIDC_SM_P_B_FRAME_QP_B_FRAME_QP_SHFT, VIDC_SM_P_B_FRAME_QP_B_FRAME_QP_BMASK); nP_B_frame_qp |= VIDC_SETFIELD(p_frame_qp, VIDC_SM_P_B_FRAME_QP_P_FRAME_QP_SHFT, VIDC_SM_P_B_FRAME_QP_P_FRAME_QP_BMASK); DDL_MEM_WRITE_32(shared_mem , VIDC_SM_P_B_FRAME_QP_ADDR, nP_B_frame_qp); } void vidc_sm_get_profile_info(struct ddl_buf_addr *shared_mem, struct ddl_profile_info_type *ddl_profile_info) { u32 disp_pic_profile; disp_pic_profile = DDL_MEM_READ_32(shared_mem, VIDC_SM_DISP_PIC_PROFILE_ADDR); ddl_profile_info->bit_depth_chroma_minus8 = (disp_pic_profile & 0x00380000) >> 19; ddl_profile_info->bit_depth_luma_minus8 = (disp_pic_profile & 0x00070000) >> 16; ddl_profile_info->pic_profile = VIDC_GETFIELD( disp_pic_profile, VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_PROFILE_BMASK, VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_PROFILE_SHFT); ddl_profile_info->pic_level = VIDC_GETFIELD( disp_pic_profile, VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_LEVEL_BMASK, VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_LEVEL_SHFT); ddl_profile_info->chroma_format_idc = (disp_pic_profile & 0x60) >> 5; } void vidc_sm_set_encoder_new_bit_rate(struct ddl_buf_addr *shared_mem, u32 new_bit_rate) { DDL_MEM_WRITE_32(shared_mem, VIDC_SM_NEW_RC_BIT_RATE_ADDR, new_bit_rate); } void vidc_sm_set_encoder_new_frame_rate(struct ddl_buf_addr *shared_mem, u32 new_frame_rate) { DDL_MEM_WRITE_32(shared_mem, VIDC_SM_NEW_RC_FRAME_RATE_ADDR, new_frame_rate); } void vidc_sm_set_encoder_new_i_period(struct ddl_buf_addr *shared_mem, u32 new_i_period) { 
DDL_MEM_WRITE_32(shared_mem, VIDC_SM_NEW_I_PERIOD_ADDR, new_i_period); } void vidc_sm_set_encoder_init_rc_value(struct ddl_buf_addr *shared_mem, u32 new_rc_value) { DDL_MEM_WRITE_32(shared_mem, 0x011C, new_rc_value); } void vidc_sm_set_idr_decode_only(struct ddl_buf_addr *shared_mem, u32 enable) { u32 idr_decode_only = VIDC_SETFIELD((enable) ? 1 : 0, VIDC_SM_IDR_DECODING_ONLY_SHIFT, VIDC_SM_IDR_DECODING_ONLY_BMSK ); DDL_MEM_WRITE_32(shared_mem, VIDC_SM_IDR_DECODING_ONLY_ADDR, idr_decode_only); } void vidc_sm_set_chroma_addr_change(struct ddl_buf_addr *shared_mem, u32 addr_change) { u32 chroma_addr_change = VIDC_SETFIELD((addr_change) ? 1 : 0, VIDC_SM_CHROMA_ADDR_CHANGE_SHFT, VIDC_SM_CHROMA_ADDR_CHANGE_BMASK); DDL_MEM_WRITE_32(shared_mem, VIDC_SM_CHROMA_ADDR_CHANGE_ADDR, chroma_addr_change); } void vidc_sm_set_mpeg4_profile_override(struct ddl_buf_addr *shared_mem, enum vidc_sm_mpeg4_profileinfo profile_info) { u32 profile_enforce = 0; if (shared_mem != NULL) { profile_enforce = 1; switch (profile_info) { case VIDC_SM_PROFILE_INFO_ASP: profile_enforce |= 4; break; case VIDC_SM_PROFILE_INFO_SP: profile_enforce |= 2; break; case VIDC_SM_PROFILE_INFO_DISABLE: default: profile_enforce = 0; break; } DDL_MEM_WRITE_32(shared_mem, 0x15c, profile_enforce); } } void vidc_sm_set_decoder_sei_enable(struct ddl_buf_addr *shared_mem, u32 sei_enable) { DDL_MEM_WRITE_32(shared_mem, VIDC_SM_SEI_ENABLE_ADDR, sei_enable); } void vidc_sm_get_decoder_sei_enable(struct ddl_buf_addr *shared_mem, u32 *sei_enable) { *sei_enable = DDL_MEM_READ_32(shared_mem, VIDC_SM_SEI_ENABLE_ADDR); } void vidc_sm_set_decoder_stuff_bytes_consumption( struct ddl_buf_addr *shared_mem, enum vidc_sm_num_stuff_bytes_consume_info consume_info) { DDL_MEM_WRITE_32(shared_mem, VIDC_SM_NUM_STUFF_BYTES_CONSUME_ADDR, consume_info); } void vidc_sm_set_video_core_timeout_value(struct ddl_buf_addr *shared_mem, u32 timeout) { DDL_MEM_WRITE_32(shared_mem, VIDC_SM_TIMEOUT_VALUE_ADDR, timeout); } void 
vidc_sm_set_error_concealment_config(struct ddl_buf_addr *shared_mem, u32 inter_slice, u32 intra_slice, u32 conceal_config_enable) { u32 error_conceal_config = 0; error_conceal_config = VIDC_SETFIELD(inter_slice, VIDC_SM_ERROR_CONCEALMENT_CONFIG_INTER_SLICE_SHFT, VIDC_SM_ERROR_CONCEALMENT_CONFIG_INTER_SLICE_BMSK); error_conceal_config |= VIDC_SETFIELD(intra_slice, VIDC_SM_ERROR_CONCEALMENT_CONFIG_INTRA_SLICE_SHFT, VIDC_SM_ERROR_CONCEALMENT_CONFIG_INTRA_SLICE_BMSK); error_conceal_config |= VIDC_SETFIELD(conceal_config_enable, VIDC_SM_ERROR_CONCEALMENT_CONFIG_CONCEAL_ENABLE_SHFT, VIDC_SM_ERROR_CONCEALMENT_CONFIG_CONCEAL_ENABLE_BMSK); DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ERROR_CONCEALMENT_CONFIG_ADDR, error_conceal_config); }
gpl-2.0
zaclimon/Quanta-Mako
ipc/mqueue.c
65
31208
/* * POSIX message queues filesystem for Linux. * * Copyright (C) 2003,2004 Krzysztof Benedyczak (golbi@mat.uni.torun.pl) * Michal Wronski (michal.wronski@gmail.com) * * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com) * Lockless receive & send, fd based notify: * Manfred Spraul (manfred@colorfullife.com) * * Audit: George Wilson (ltcgcw@us.ibm.com) * * This file is released under the GPL. */ #include <linux/capability.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/sysctl.h> #include <linux/poll.h> #include <linux/mqueue.h> #include <linux/msg.h> #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/signal.h> #include <linux/mutex.h> #include <linux/nsproxy.h> #include <linux/pid.h> #include <linux/ipc_namespace.h> #include <linux/user_namespace.h> #include <linux/slab.h> #include <net/sock.h> #include "util.h" #define MQUEUE_MAGIC 0x19800202 #define DIRENT_SIZE 20 #define FILENT_SIZE 80 #define SEND 0 #define RECV 1 #define STATE_NONE 0 #define STATE_PENDING 1 #define STATE_READY 2 struct ext_wait_queue { /* queue of sleeping tasks */ struct task_struct *task; struct list_head list; struct msg_msg *msg; /* ptr of loaded message */ int state; /* one of STATE_* values */ }; struct mqueue_inode_info { spinlock_t lock; struct inode vfs_inode; wait_queue_head_t wait_q; struct msg_msg **messages; struct mq_attr attr; struct sigevent notify; struct pid* notify_owner; struct user_struct *user; /* user who created, for accounting */ struct sock *notify_sock; struct sk_buff *notify_cookie; /* for tasks waiting for free space and messages, respectively */ struct ext_wait_queue e_wait_q[2]; unsigned long qsize; /* size of queue in memory (sum of all msgs) */ }; static const struct inode_operations mqueue_dir_inode_operations; static const struct file_operations mqueue_file_operations; static const struct 
super_operations mqueue_super_ops; static void remove_notification(struct mqueue_inode_info *info); static struct kmem_cache *mqueue_inode_cachep; static struct ctl_table_header * mq_sysctl_table; static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode) { return container_of(inode, struct mqueue_inode_info, vfs_inode); } /* * This routine should be called with the mq_lock held. */ static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode) { return get_ipc_ns(inode->i_sb->s_fs_info); } static struct ipc_namespace *get_ns_from_inode(struct inode *inode) { struct ipc_namespace *ns; spin_lock(&mq_lock); ns = __get_ns_from_inode(inode); spin_unlock(&mq_lock); return ns; } static struct inode *mqueue_get_inode(struct super_block *sb, struct ipc_namespace *ipc_ns, umode_t mode, struct mq_attr *attr) { struct user_struct *u = current_user(); struct inode *inode; int ret = -ENOMEM; inode = new_inode(sb); if (!inode) goto err; inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME; if (S_ISREG(mode)) { struct mqueue_inode_info *info; unsigned long mq_bytes, mq_msg_tblsz; inode->i_fop = &mqueue_file_operations; inode->i_size = FILENT_SIZE; /* mqueue specific info */ info = MQUEUE_I(inode); spin_lock_init(&info->lock); init_waitqueue_head(&info->wait_q); INIT_LIST_HEAD(&info->e_wait_q[0].list); INIT_LIST_HEAD(&info->e_wait_q[1].list); info->notify_owner = NULL; info->qsize = 0; info->user = NULL; /* set when all is ok */ memset(&info->attr, 0, sizeof(info->attr)); info->attr.mq_maxmsg = ipc_ns->mq_msg_max; info->attr.mq_msgsize = ipc_ns->mq_msgsize_max; if (attr) { info->attr.mq_maxmsg = attr->mq_maxmsg; info->attr.mq_msgsize = attr->mq_msgsize; } mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *); info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL); if (!info->messages) goto out_inode; mq_bytes = (mq_msg_tblsz 
+ (info->attr.mq_maxmsg * info->attr.mq_msgsize)); spin_lock(&mq_lock); if (u->mq_bytes + mq_bytes < u->mq_bytes || u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { spin_unlock(&mq_lock); /* mqueue_evict_inode() releases info->messages */ ret = -EMFILE; goto out_inode; } u->mq_bytes += mq_bytes; spin_unlock(&mq_lock); /* all is ok */ info->user = get_uid(u); } else if (S_ISDIR(mode)) { inc_nlink(inode); /* Some things misbehave if size == 0 on a directory */ inode->i_size = 2 * DIRENT_SIZE; inode->i_op = &mqueue_dir_inode_operations; inode->i_fop = &simple_dir_operations; } return inode; out_inode: iput(inode); err: return ERR_PTR(ret); } static int mqueue_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; struct ipc_namespace *ns = data; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = MQUEUE_MAGIC; sb->s_op = &mqueue_super_ops; inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); if (IS_ERR(inode)) return PTR_ERR(inode); sb->s_root = d_make_root(inode); if (!sb->s_root) return -ENOMEM; return 0; } static struct dentry *mqueue_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { if (!(flags & MS_KERNMOUNT)) data = current->nsproxy->ipc_ns; return mount_ns(fs_type, flags, data, mqueue_fill_super); } static void init_once(void *foo) { struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; inode_init_once(&p->vfs_inode); } static struct inode *mqueue_alloc_inode(struct super_block *sb) { struct mqueue_inode_info *ei; ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } static void mqueue_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode)); } static void mqueue_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, mqueue_i_callback); } static void mqueue_evict_inode(struct inode 
*inode) { struct mqueue_inode_info *info; struct user_struct *user; unsigned long mq_bytes; int i; struct ipc_namespace *ipc_ns; clear_inode(inode); if (S_ISDIR(inode->i_mode)) return; ipc_ns = get_ns_from_inode(inode); info = MQUEUE_I(inode); spin_lock(&info->lock); for (i = 0; i < info->attr.mq_curmsgs; i++) free_msg(info->messages[i]); kfree(info->messages); spin_unlock(&info->lock); /* Total amount of bytes accounted for the mqueue */ mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *) + info->attr.mq_msgsize); user = info->user; if (user) { spin_lock(&mq_lock); user->mq_bytes -= mq_bytes; /* * get_ns_from_inode() ensures that the * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns * to which we now hold a reference, or it is NULL. * We can't put it here under mq_lock, though. */ if (ipc_ns) ipc_ns->mq_queues_count--; spin_unlock(&mq_lock); free_uid(user); } if (ipc_ns) put_ipc_ns(ipc_ns); } static int mqueue_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { struct inode *inode; struct mq_attr *attr = dentry->d_fsdata; int error; struct ipc_namespace *ipc_ns; spin_lock(&mq_lock); ipc_ns = __get_ns_from_inode(dir); if (!ipc_ns) { error = -EACCES; goto out_unlock; } if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && !capable(CAP_SYS_RESOURCE)) { error = -ENOSPC; goto out_unlock; } ipc_ns->mq_queues_count++; spin_unlock(&mq_lock); inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr); if (IS_ERR(inode)) { error = PTR_ERR(inode); spin_lock(&mq_lock); ipc_ns->mq_queues_count--; goto out_unlock; } put_ipc_ns(ipc_ns); dir->i_size += DIRENT_SIZE; dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; d_instantiate(dentry, inode); dget(dentry); return 0; out_unlock: spin_unlock(&mq_lock); if (ipc_ns) put_ipc_ns(ipc_ns); return error; } static int mqueue_unlink(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; dir->i_size -= 
DIRENT_SIZE; drop_nlink(inode); dput(dentry); return 0; } /* * This is routine for system read from queue file. * To avoid mess with doing here some sort of mq_receive we allow * to read only queue size & notification info (the only values * that are interesting from user point of view and aren't accessible * through std routines) */ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, size_t count, loff_t *off) { struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); char buffer[FILENT_SIZE]; ssize_t ret; spin_lock(&info->lock); snprintf(buffer, sizeof(buffer), "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n", info->qsize, info->notify_owner ? info->notify.sigev_notify : 0, (info->notify_owner && info->notify.sigev_notify == SIGEV_SIGNAL) ? info->notify.sigev_signo : 0, pid_vnr(info->notify_owner)); spin_unlock(&info->lock); buffer[sizeof(buffer)-1] = '\0'; ret = simple_read_from_buffer(u_data, count, off, buffer, strlen(buffer)); if (ret <= 0) return ret; filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME; return ret; } static int mqueue_flush_file(struct file *filp, fl_owner_t id) { struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); spin_lock(&info->lock); if (task_tgid(current) == info->notify_owner) remove_notification(info); spin_unlock(&info->lock); return 0; } static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab) { struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); int retval = 0; poll_wait(filp, &info->wait_q, poll_tab); spin_lock(&info->lock); if (info->attr.mq_curmsgs) retval = POLLIN | POLLRDNORM; if (info->attr.mq_curmsgs < info->attr.mq_maxmsg) retval |= POLLOUT | POLLWRNORM; spin_unlock(&info->lock); return retval; } /* Adds current to info->e_wait_q[sr] before element with smaller prio */ static void wq_add(struct mqueue_inode_info *info, int sr, struct ext_wait_queue *ewp) { struct 
ext_wait_queue *walk; ewp->task = current; list_for_each_entry(walk, &info->e_wait_q[sr].list, list) { if (walk->task->static_prio <= current->static_prio) { list_add_tail(&ewp->list, &walk->list); return; } } list_add_tail(&ewp->list, &info->e_wait_q[sr].list); } /* * Puts current task to sleep. Caller must hold queue lock. After return * lock isn't held. * sr: SEND or RECV */ static int wq_sleep(struct mqueue_inode_info *info, int sr, ktime_t *timeout, struct ext_wait_queue *ewp) { int retval; signed long time; wq_add(info, sr, ewp); for (;;) { set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&info->lock); time = schedule_hrtimeout_range_clock(timeout, 0, HRTIMER_MODE_ABS, CLOCK_REALTIME); while (ewp->state == STATE_PENDING) cpu_relax(); if (ewp->state == STATE_READY) { retval = 0; goto out; } spin_lock(&info->lock); if (ewp->state == STATE_READY) { retval = 0; goto out_unlock; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (time == 0) { retval = -ETIMEDOUT; break; } } list_del(&ewp->list); out_unlock: spin_unlock(&info->lock); out: return retval; } /* * Returns waiting task that should be serviced first or NULL if none exists */ static struct ext_wait_queue *wq_get_first_waiter( struct mqueue_inode_info *info, int sr) { struct list_head *ptr; ptr = info->e_wait_q[sr].list.prev; if (ptr == &info->e_wait_q[sr].list) return NULL; return list_entry(ptr, struct ext_wait_queue, list); } /* Auxiliary functions to manipulate messages' list */ static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info) { int k; k = info->attr.mq_curmsgs - 1; while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) { info->messages[k + 1] = info->messages[k]; k--; } info->attr.mq_curmsgs++; info->qsize += ptr->m_ts; info->messages[k + 1] = ptr; } static inline struct msg_msg *msg_get(struct mqueue_inode_info *info) { info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts; return info->messages[info->attr.mq_curmsgs]; } static inline void 
set_cookie(struct sk_buff *skb, char code) { ((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code; } /* * The next function is only to split too long sys_mq_timedsend */ static void __do_notify(struct mqueue_inode_info *info) { /* notification * invoked when there is registered process and there isn't process * waiting synchronously for message AND state of queue changed from * empty to not empty. Here we are sure that no one is waiting * synchronously. */ if (info->notify_owner && info->attr.mq_curmsgs == 1) { struct siginfo sig_i; switch (info->notify.sigev_notify) { case SIGEV_NONE: break; case SIGEV_SIGNAL: /* sends signal */ sig_i.si_signo = info->notify.sigev_signo; sig_i.si_errno = 0; sig_i.si_code = SI_MESGQ; sig_i.si_value = info->notify.sigev_value; /* map current pid/uid into info->owner's namespaces */ rcu_read_lock(); sig_i.si_pid = task_tgid_nr_ns(current, ns_of_pid(info->notify_owner)); sig_i.si_uid = user_ns_map_uid(info->user->user_ns, current_cred(), current_uid()); rcu_read_unlock(); kill_pid_info(info->notify.sigev_signo, &sig_i, info->notify_owner); break; case SIGEV_THREAD: set_cookie(info->notify_cookie, NOTIFY_WOKENUP); netlink_sendskb(info->notify_sock, info->notify_cookie); break; } /* after notification unregisters process */ put_pid(info->notify_owner); info->notify_owner = NULL; } wake_up(&info->wait_q); } static int prepare_timeout(const struct timespec __user *u_abs_timeout, ktime_t *expires, struct timespec *ts) { if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec))) return -EFAULT; if (!timespec_valid(ts)) return -EINVAL; *expires = timespec_to_ktime(*ts); return 0; } static void remove_notification(struct mqueue_inode_info *info) { if (info->notify_owner != NULL && info->notify.sigev_notify == SIGEV_THREAD) { set_cookie(info->notify_cookie, NOTIFY_REMOVED); netlink_sendskb(info->notify_sock, info->notify_cookie); } put_pid(info->notify_owner); info->notify_owner = NULL; } static int mq_attr_ok(struct ipc_namespace *ipc_ns, 
struct mq_attr *attr) { if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0) return 0; if (capable(CAP_SYS_RESOURCE)) { if (attr->mq_maxmsg > HARD_MSGMAX) return 0; } else { if (attr->mq_maxmsg > ipc_ns->mq_msg_max || attr->mq_msgsize > ipc_ns->mq_msgsize_max) return 0; } /* check for overflow */ if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg) return 0; if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize + sizeof (struct msg_msg *))) < (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize)) return 0; return 1; } /* * Invoked when creating a new queue via sys_mq_open */ static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir, struct dentry *dentry, int oflag, umode_t mode, struct mq_attr *attr) { const struct cred *cred = current_cred(); struct file *result; int ret; if (attr) { if (!mq_attr_ok(ipc_ns, attr)) { ret = -EINVAL; goto out; } /* store for use during create */ dentry->d_fsdata = attr; } mode &= ~current_umask(); ret = mnt_want_write(ipc_ns->mq_mnt); if (ret) goto out; ret = vfs_create(dir->d_inode, dentry, mode, NULL); dentry->d_fsdata = NULL; if (ret) goto out_drop_write; result = dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred); /* * dentry_open() took a persistent mnt_want_write(), * so we can now drop this one. 
*/ mnt_drop_write(ipc_ns->mq_mnt); return result; out_drop_write: mnt_drop_write(ipc_ns->mq_mnt); out: dput(dentry); mntput(ipc_ns->mq_mnt); return ERR_PTR(ret); } /* Opens existing queue */ static struct file *do_open(struct ipc_namespace *ipc_ns, struct dentry *dentry, int oflag) { int ret; const struct cred *cred = current_cred(); static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, MAY_READ | MAY_WRITE }; if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) { ret = -EINVAL; goto err; } if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) { ret = -EACCES; goto err; } return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred); err: dput(dentry); mntput(ipc_ns->mq_mnt); return ERR_PTR(ret); } SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, struct mq_attr __user *, u_attr) { struct dentry *dentry; struct file *filp; char *name; struct mq_attr attr; int fd, error; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) return -EFAULT; audit_mq_open(oflag, mode, u_attr ? &attr : NULL); if (IS_ERR(name = getname(u_name))) return PTR_ERR(name); fd = get_unused_fd_flags(O_CLOEXEC); if (fd < 0) goto out_putname; mutex_lock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex); dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name)); if (IS_ERR(dentry)) { error = PTR_ERR(dentry); goto out_putfd; } mntget(ipc_ns->mq_mnt); if (oflag & O_CREAT) { if (dentry->d_inode) { /* entry already exists */ audit_inode(name, dentry); if (oflag & O_EXCL) { error = -EEXIST; goto out; } filp = do_open(ipc_ns, dentry, oflag); } else { filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root, dentry, oflag, mode, u_attr ? 
&attr : NULL); } } else { if (!dentry->d_inode) { error = -ENOENT; goto out; } audit_inode(name, dentry); filp = do_open(ipc_ns, dentry, oflag); } if (IS_ERR(filp)) { error = PTR_ERR(filp); goto out_putfd; } fd_install(fd, filp); goto out_upsem; out: dput(dentry); mntput(ipc_ns->mq_mnt); out_putfd: put_unused_fd(fd); fd = error; out_upsem: mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex); out_putname: putname(name); return fd; } SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) { int err; char *name; struct dentry *dentry; struct inode *inode = NULL; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; name = getname(u_name); if (IS_ERR(name)) return PTR_ERR(name); mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT); dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name)); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto out_unlock; } if (!dentry->d_inode) { err = -ENOENT; goto out_err; } inode = dentry->d_inode; if (inode) ihold(inode); err = mnt_want_write(ipc_ns->mq_mnt); if (err) goto out_err; err = vfs_unlink(dentry->d_parent->d_inode, dentry); mnt_drop_write(ipc_ns->mq_mnt); out_err: dput(dentry); out_unlock: mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex); putname(name); if (inode) iput(inode); return err; } /* Pipelined send and receive functions. * * If a receiver finds no waiting message, then it registers itself in the * list of waiting receivers. A sender checks that list before adding the new * message into the message array. If there is a waiting receiver, then it * bypasses the message array and directly hands the message over to the * receiver. * The receiver accepts the message and returns without grabbing the queue * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers * are necessary. The same algorithm is used for sysv semaphores, see * ipc/sem.c for more details. * * The same algorithm is used for senders. 
*/ /* pipelined_send() - send a message directly to the task waiting in * sys_mq_timedreceive() (without inserting message into a queue). */ static inline void pipelined_send(struct mqueue_inode_info *info, struct msg_msg *message, struct ext_wait_queue *receiver) { receiver->msg = message; list_del(&receiver->list); receiver->state = STATE_PENDING; wake_up_process(receiver->task); smp_wmb(); receiver->state = STATE_READY; } /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() * gets its message and put to the queue (we have one free place for sure). */ static inline void pipelined_receive(struct mqueue_inode_info *info) { struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND); if (!sender) { /* for poll */ wake_up_interruptible(&info->wait_q); return; } msg_insert(sender->msg, info); list_del(&sender->list); sender->state = STATE_PENDING; wake_up_process(sender->task); smp_wmb(); sender->state = STATE_READY; } SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, size_t, msg_len, unsigned int, msg_prio, const struct timespec __user *, u_abs_timeout) { struct file *filp; struct inode *inode; struct ext_wait_queue wait; struct ext_wait_queue *receiver; struct msg_msg *msg_ptr; struct mqueue_inode_info *info; ktime_t expires, *timeout = NULL; struct timespec ts; int ret; if (u_abs_timeout) { int res = prepare_timeout(u_abs_timeout, &expires, &ts); if (res) return res; timeout = &expires; } if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) return -EINVAL; audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? 
&ts : NULL); filp = fget(mqdes); if (unlikely(!filp)) { ret = -EBADF; goto out; } inode = filp->f_path.dentry->d_inode; if (unlikely(filp->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); audit_inode(NULL, filp->f_path.dentry); if (unlikely(!(filp->f_mode & FMODE_WRITE))) { ret = -EBADF; goto out_fput; } if (unlikely(msg_len > info->attr.mq_msgsize)) { ret = -EMSGSIZE; goto out_fput; } /* First try to allocate memory, before doing anything with * existing queues. */ msg_ptr = load_msg(u_msg_ptr, msg_len); if (IS_ERR(msg_ptr)) { ret = PTR_ERR(msg_ptr); goto out_fput; } msg_ptr->m_ts = msg_len; msg_ptr->m_type = msg_prio; spin_lock(&info->lock); if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { if (filp->f_flags & O_NONBLOCK) { spin_unlock(&info->lock); ret = -EAGAIN; } else { wait.task = current; wait.msg = (void *) msg_ptr; wait.state = STATE_NONE; ret = wq_sleep(info, SEND, timeout, &wait); } if (ret < 0) free_msg(msg_ptr); } else { receiver = wq_get_first_waiter(info, RECV); if (receiver) { pipelined_send(info, msg_ptr, receiver); } else { /* adds message to the queue */ msg_insert(msg_ptr, info); __do_notify(info); } inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; spin_unlock(&info->lock); ret = 0; } out_fput: fput(filp); out: return ret; } SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, size_t, msg_len, unsigned int __user *, u_msg_prio, const struct timespec __user *, u_abs_timeout) { ssize_t ret; struct msg_msg *msg_ptr; struct file *filp; struct inode *inode; struct mqueue_inode_info *info; struct ext_wait_queue wait; ktime_t expires, *timeout = NULL; struct timespec ts; if (u_abs_timeout) { int res = prepare_timeout(u_abs_timeout, &expires, &ts); if (res) return res; timeout = &expires; } audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? 
&ts : NULL); filp = fget(mqdes); if (unlikely(!filp)) { ret = -EBADF; goto out; } inode = filp->f_path.dentry->d_inode; if (unlikely(filp->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); audit_inode(NULL, filp->f_path.dentry); if (unlikely(!(filp->f_mode & FMODE_READ))) { ret = -EBADF; goto out_fput; } /* checks if buffer is big enough */ if (unlikely(msg_len < info->attr.mq_msgsize)) { ret = -EMSGSIZE; goto out_fput; } spin_lock(&info->lock); if (info->attr.mq_curmsgs == 0) { if (filp->f_flags & O_NONBLOCK) { spin_unlock(&info->lock); ret = -EAGAIN; } else { wait.task = current; wait.state = STATE_NONE; ret = wq_sleep(info, RECV, timeout, &wait); msg_ptr = wait.msg; } } else { msg_ptr = msg_get(info); inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; /* There is now free space in queue. */ pipelined_receive(info); spin_unlock(&info->lock); ret = 0; } if (ret == 0) { ret = msg_ptr->m_ts; if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) || store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) { ret = -EFAULT; } free_msg(msg_ptr); } out_fput: fput(filp); out: return ret; } /* * Notes: the case when user wants us to deregister (with NULL as pointer) * and he isn't currently owner of notification, will be silently discarded. * It isn't explicitly defined in the POSIX. */ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, const struct sigevent __user *, u_notification) { int ret; struct file *filp; struct sock *sock; struct inode *inode; struct sigevent notification; struct mqueue_inode_info *info; struct sk_buff *nc; if (u_notification) { if (copy_from_user(&notification, u_notification, sizeof(struct sigevent))) return -EFAULT; } audit_mq_notify(mqdes, u_notification ? 
&notification : NULL); nc = NULL; sock = NULL; if (u_notification != NULL) { if (unlikely(notification.sigev_notify != SIGEV_NONE && notification.sigev_notify != SIGEV_SIGNAL && notification.sigev_notify != SIGEV_THREAD)) return -EINVAL; if (notification.sigev_notify == SIGEV_SIGNAL && !valid_signal(notification.sigev_signo)) { return -EINVAL; } if (notification.sigev_notify == SIGEV_THREAD) { long timeo; /* create the notify skb */ nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); if (!nc) { ret = -ENOMEM; goto out; } if (copy_from_user(nc->data, notification.sigev_value.sival_ptr, NOTIFY_COOKIE_LEN)) { ret = -EFAULT; goto out; } /* TODO: add a header? */ skb_put(nc, NOTIFY_COOKIE_LEN); /* and attach it to the socket */ retry: filp = fget(notification.sigev_signo); if (!filp) { ret = -EBADF; goto out; } sock = netlink_getsockbyfilp(filp); fput(filp); if (IS_ERR(sock)) { ret = PTR_ERR(sock); sock = NULL; goto out; } timeo = MAX_SCHEDULE_TIMEOUT; ret = netlink_attachskb(sock, nc, &timeo, NULL); if (ret == 1) goto retry; if (ret) { sock = NULL; nc = NULL; goto out; } } } filp = fget(mqdes); if (!filp) { ret = -EBADF; goto out; } inode = filp->f_path.dentry->d_inode; if (unlikely(filp->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); ret = 0; spin_lock(&info->lock); if (u_notification == NULL) { if (info->notify_owner == task_tgid(current)) { remove_notification(info); inode->i_atime = inode->i_ctime = CURRENT_TIME; } } else if (info->notify_owner != NULL) { ret = -EBUSY; } else { switch (notification.sigev_notify) { case SIGEV_NONE: info->notify.sigev_notify = SIGEV_NONE; break; case SIGEV_THREAD: info->notify_sock = sock; info->notify_cookie = nc; sock = NULL; nc = NULL; info->notify.sigev_notify = SIGEV_THREAD; break; case SIGEV_SIGNAL: info->notify.sigev_signo = notification.sigev_signo; info->notify.sigev_value = notification.sigev_value; info->notify.sigev_notify = SIGEV_SIGNAL; break; } info->notify_owner = 
get_pid(task_tgid(current)); inode->i_atime = inode->i_ctime = CURRENT_TIME; } spin_unlock(&info->lock); out_fput: fput(filp); out: if (sock) { netlink_detachskb(sock, nc); } else if (nc) { dev_kfree_skb(nc); } return ret; } SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, const struct mq_attr __user *, u_mqstat, struct mq_attr __user *, u_omqstat) { int ret; struct mq_attr mqstat, omqstat; struct file *filp; struct inode *inode; struct mqueue_inode_info *info; if (u_mqstat != NULL) { if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr))) return -EFAULT; if (mqstat.mq_flags & (~O_NONBLOCK)) return -EINVAL; } filp = fget(mqdes); if (!filp) { ret = -EBADF; goto out; } inode = filp->f_path.dentry->d_inode; if (unlikely(filp->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); spin_lock(&info->lock); omqstat = info->attr; omqstat.mq_flags = filp->f_flags & O_NONBLOCK; if (u_mqstat) { audit_mq_getsetattr(mqdes, &mqstat); spin_lock(&filp->f_lock); if (mqstat.mq_flags & O_NONBLOCK) filp->f_flags |= O_NONBLOCK; else filp->f_flags &= ~O_NONBLOCK; spin_unlock(&filp->f_lock); inode->i_atime = inode->i_ctime = CURRENT_TIME; } spin_unlock(&info->lock); ret = 0; if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat, sizeof(struct mq_attr))) ret = -EFAULT; out_fput: fput(filp); out: return ret; } static const struct inode_operations mqueue_dir_inode_operations = { .lookup = simple_lookup, .create = mqueue_create, .unlink = mqueue_unlink, }; static const struct file_operations mqueue_file_operations = { .flush = mqueue_flush_file, .poll = mqueue_poll_file, .read = mqueue_read_file, .llseek = default_llseek, }; static const struct super_operations mqueue_super_ops = { .alloc_inode = mqueue_alloc_inode, .destroy_inode = mqueue_destroy_inode, .evict_inode = mqueue_evict_inode, .statfs = simple_statfs, }; static struct file_system_type mqueue_fs_type = { .name = "mqueue", .mount = mqueue_mount, .kill_sb = kill_litter_super, }; int 
mq_init_ns(struct ipc_namespace *ns) { ns->mq_queues_count = 0; ns->mq_queues_max = DFLT_QUEUESMAX; ns->mq_msg_max = DFLT_MSGMAX; ns->mq_msgsize_max = DFLT_MSGSIZEMAX; ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns); if (IS_ERR(ns->mq_mnt)) { int err = PTR_ERR(ns->mq_mnt); ns->mq_mnt = NULL; return err; } return 0; } void mq_clear_sbinfo(struct ipc_namespace *ns) { ns->mq_mnt->mnt_sb->s_fs_info = NULL; } void mq_put_mnt(struct ipc_namespace *ns) { kern_unmount(ns->mq_mnt); } static int __init init_mqueue_fs(void) { int error; mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache", sizeof(struct mqueue_inode_info), 0, SLAB_HWCACHE_ALIGN, init_once); if (mqueue_inode_cachep == NULL) return -ENOMEM; /* ignore failures - they are not fatal */ mq_sysctl_table = mq_register_sysctl_table(); error = register_filesystem(&mqueue_fs_type); if (error) goto out_sysctl; spin_lock_init(&mq_lock); error = mq_init_ns(&init_ipc_ns); if (error) goto out_filesystem; return 0; out_filesystem: unregister_filesystem(&mqueue_fs_type); out_sysctl: if (mq_sysctl_table) unregister_sysctl_table(mq_sysctl_table); kmem_cache_destroy(mqueue_inode_cachep); return error; } __initcall(init_mqueue_fs);
gpl-2.0
PhSchmitt/mptcp
drivers/watchdog/sc520_wdt.c
321
11102
/* * AMD Elan SC520 processor Watchdog Timer driver * * Based on acquirewdt.c by Alan Cox, * and sbc60xxwdt.c by Jakob Oestergaard <jakob@unthought.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * The authors do NOT admit liability nor provide warranty for * any of this software. This material is provided "AS-IS" in * the hope that it may be useful for others. * * (c) Copyright 2001 Scott Jennings <linuxdrivers@oro.net> * 9/27 - 2001 [Initial release] * * Additional fixes Alan Cox * - Fixed formatting * - Removed debug printks * - Fixed SMP built kernel deadlock * - Switched to private locks not lock_kernel * - Used ioremap/writew/readw * - Added NOWAYOUT support * 4/12 - 2002 Changes by Rob Radez <rob@osinvestor.com> * - Change comments * - Eliminate fop_llseek * - Change CONFIG_WATCHDOG_NOWAYOUT semantics * - Add KERN_* tags to printks * - fix possible wdt_is_open race * - Report proper capabilities in watchdog_info * - Add WDIOC_{GETSTATUS, GETBOOTSTATUS, SETTIMEOUT, * GETTIMEOUT, SETOPTIONS} ioctls * 09/8 - 2003 Changes by Wim Van Sebroeck <wim@iguana.be> * - cleanup of trailing spaces * - added extra printk's for startup problems * - use module_param * - made timeout (the emulated heartbeat) a module_param * - made the keepalive ping an internal subroutine * 3/27 - 2004 Changes by Sean Young <sean@mess.org> * - set MMCR_BASE to 0xfffef000 * - CBAR does not need to be read * - removed debugging printks * * This WDT driver is different from most other Linux WDT * drivers in that the driver will ping the watchdog by itself, * because this particular WDT has a very short timeout (1.6 * seconds) and it would be insane to count on any userspace * daemon always getting scheduled within that time frame. * * This driver uses memory mapped IO, and spinlock. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/timer.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/fs.h> #include <linux/ioport.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/io.h> #include <linux/uaccess.h> /* * The AMD Elan SC520 timeout value is 492us times a power of 2 (0-7) * * 0: 492us 2: 1.01s 4: 4.03s 6: 16.22s * 1: 503ms 3: 2.01s 5: 8.05s 7: 32.21s * * We will program the SC520 watchdog for a timeout of 2.01s. * If we reset the watchdog every ~250ms we should be safe. */ #define WDT_INTERVAL (HZ/4+1) /* * We must not require too good response from the userspace daemon. * Here we require the userspace daemon to send us a heartbeat * char to /dev/watchdog every 30 seconds. */ #define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */ /* in seconds, will be multiplied by HZ to get seconds to wait for a ping */ static int timeout = WATCHDOG_TIMEOUT; module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1 <= timeout <= 3600, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* * AMD Elan SC520 - Watchdog Timer Registers */ #define MMCR_BASE 0xfffef000 /* The default base address */ #define OFFS_WDTMRCTL 0xCB0 /* Watchdog Timer Control Register */ /* WDT Control Register bit definitions */ #define WDT_EXP_SEL_01 0x0001 /* [01] Time-out = 496 us (with 33 Mhz clk). */ #define WDT_EXP_SEL_02 0x0002 /* [02] Time-out = 508 ms (with 33 Mhz clk). */ #define WDT_EXP_SEL_03 0x0004 /* [03] Time-out = 1.02 s (with 33 Mhz clk). */ #define WDT_EXP_SEL_04 0x0008 /* [04] Time-out = 2.03 s (with 33 Mhz clk). 
*/ #define WDT_EXP_SEL_05 0x0010 /* [05] Time-out = 4.07 s (with 33 Mhz clk). */ #define WDT_EXP_SEL_06 0x0020 /* [06] Time-out = 8.13 s (with 33 Mhz clk). */ #define WDT_EXP_SEL_07 0x0040 /* [07] Time-out = 16.27s (with 33 Mhz clk). */ #define WDT_EXP_SEL_08 0x0080 /* [08] Time-out = 32.54s (with 33 Mhz clk). */ #define WDT_IRQ_FLG 0x1000 /* [12] Interrupt Request Flag */ #define WDT_WRST_ENB 0x4000 /* [14] Watchdog Timer Reset Enable */ #define WDT_ENB 0x8000 /* [15] Watchdog Timer Enable */ static __u16 __iomem *wdtmrctl; static void wdt_timer_ping(unsigned long); static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0); static unsigned long next_heartbeat; static unsigned long wdt_is_open; static char wdt_expect_close; static DEFINE_SPINLOCK(wdt_spinlock); /* * Whack the dog */ static void wdt_timer_ping(unsigned long data) { /* If we got a heartbeat pulse within the WDT_US_INTERVAL * we agree to ping the WDT */ if (time_before(jiffies, next_heartbeat)) { /* Ping the WDT */ spin_lock(&wdt_spinlock); writew(0xAAAA, wdtmrctl); writew(0x5555, wdtmrctl); spin_unlock(&wdt_spinlock); /* Re-set the timer interval */ mod_timer(&timer, jiffies + WDT_INTERVAL); } else pr_warn("Heartbeat lost! 
Will not ping the watchdog\n"); } /* * Utility routines */ static void wdt_config(int writeval) { __u16 dummy; unsigned long flags; /* buy some time (ping) */ spin_lock_irqsave(&wdt_spinlock, flags); dummy = readw(wdtmrctl); /* ensure write synchronization */ writew(0xAAAA, wdtmrctl); writew(0x5555, wdtmrctl); /* unlock WDT = make WDT configuration register writable one time */ writew(0x3333, wdtmrctl); writew(0xCCCC, wdtmrctl); /* write WDT configuration register */ writew(writeval, wdtmrctl); spin_unlock_irqrestore(&wdt_spinlock, flags); } static int wdt_startup(void) { next_heartbeat = jiffies + (timeout * HZ); /* Start the timer */ mod_timer(&timer, jiffies + WDT_INTERVAL); /* Start the watchdog */ wdt_config(WDT_ENB | WDT_WRST_ENB | WDT_EXP_SEL_04); pr_info("Watchdog timer is now enabled\n"); return 0; } static int wdt_turnoff(void) { /* Stop the timer */ del_timer(&timer); /* Stop the watchdog */ wdt_config(0); pr_info("Watchdog timer is now disabled...\n"); return 0; } static int wdt_keepalive(void) { /* user land ping */ next_heartbeat = jiffies + (timeout * HZ); return 0; } static int wdt_set_heartbeat(int t) { if ((t < 1) || (t > 3600)) /* arbitrary upper limit */ return -EINVAL; timeout = t; return 0; } /* * /dev/watchdog handling */ static ssize_t fop_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (count) { if (!nowayout) { size_t ofs; /* note: just in case someone wrote the magic character * five months ago... */ wdt_expect_close = 0; /* now scan */ for (ofs = 0; ofs != count; ofs++) { char c; if (get_user(c, buf + ofs)) return -EFAULT; if (c == 'V') wdt_expect_close = 42; } } /* Well, anyhow someone wrote to us, we should return that favour */ wdt_keepalive(); } return count; } static int fop_open(struct inode *inode, struct file *file) { /* Just in case we're already talking to someone... 
*/ if (test_and_set_bit(0, &wdt_is_open)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); /* Good, fire up the show */ wdt_startup(); return nonseekable_open(inode, file); } static int fop_close(struct inode *inode, struct file *file) { if (wdt_expect_close == 42) wdt_turnoff(); else { pr_crit("Unexpected close, not stopping watchdog!\n"); wdt_keepalive(); } clear_bit(0, &wdt_is_open); wdt_expect_close = 0; return 0; } static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = "SC520", }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_SETOPTIONS: { int new_options, retval = -EINVAL; if (get_user(new_options, p)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { wdt_turnoff(); retval = 0; } if (new_options & WDIOS_ENABLECARD) { wdt_startup(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: wdt_keepalive(); return 0; case WDIOC_SETTIMEOUT: { int new_timeout; if (get_user(new_timeout, p)) return -EFAULT; if (wdt_set_heartbeat(new_timeout)) return -EINVAL; wdt_keepalive(); /* Fall through */ } case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: return -ENOTTY; } } static const struct file_operations wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = fop_write, .open = fop_open, .release = fop_close, .unlocked_ioctl = fop_ioctl, }; static struct miscdevice wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &wdt_fops, }; /* * Notifier for system down */ static int wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) wdt_turnoff(); return NOTIFY_DONE; } /* * The WDT needs to learn 
about soft shutdowns in order to * turn the timebomb registers off. */ static struct notifier_block wdt_notifier = { .notifier_call = wdt_notify_sys, }; static void __exit sc520_wdt_unload(void) { if (!nowayout) wdt_turnoff(); /* Deregister */ misc_deregister(&wdt_miscdev); unregister_reboot_notifier(&wdt_notifier); iounmap(wdtmrctl); } static int __init sc520_wdt_init(void) { int rc = -EBUSY; /* Check that the timeout value is within it's range ; if not reset to the default */ if (wdt_set_heartbeat(timeout)) { wdt_set_heartbeat(WATCHDOG_TIMEOUT); pr_info("timeout value must be 1 <= timeout <= 3600, using %d\n", WATCHDOG_TIMEOUT); } wdtmrctl = ioremap(MMCR_BASE + OFFS_WDTMRCTL, 2); if (!wdtmrctl) { pr_err("Unable to remap memory\n"); rc = -ENOMEM; goto err_out_region2; } rc = register_reboot_notifier(&wdt_notifier); if (rc) { pr_err("cannot register reboot notifier (err=%d)\n", rc); goto err_out_ioremap; } rc = misc_register(&wdt_miscdev); if (rc) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, rc); goto err_out_notifier; } pr_info("WDT driver for SC520 initialised. timeout=%d sec (nowayout=%d)\n", timeout, nowayout); return 0; err_out_notifier: unregister_reboot_notifier(&wdt_notifier); err_out_ioremap: iounmap(wdtmrctl); err_out_region2: return rc; } module_init(sc520_wdt_init); module_exit(sc520_wdt_unload); MODULE_AUTHOR("Scott and Bill Jennings"); MODULE_DESCRIPTION( "Driver for watchdog timer in AMD \"Elan\" SC520 uProcessor"); MODULE_LICENSE("GPL");
gpl-2.0
sysexits/cs530
fs/ocfs2/cluster/netdebug.c
1601
13872
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * netdebug.c * * debug functionality for o2net * * Copyright (C) 2005, 2008 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * */ #ifdef CONFIG_DEBUG_FS #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/idr.h> #include <linux/kref.h> #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include "tcp.h" #include "nodemanager.h" #define MLOG_MASK_PREFIX ML_TCP #include "masklog.h" #include "tcp_internal.h" #define O2NET_DEBUG_DIR "o2net" #define SC_DEBUG_NAME "sock_containers" #define NST_DEBUG_NAME "send_tracking" #define STATS_DEBUG_NAME "stats" #define NODES_DEBUG_NAME "connected_nodes" #define SHOW_SOCK_CONTAINERS 0 #define SHOW_SOCK_STATS 1 static struct dentry *o2net_dentry; static struct dentry *sc_dentry; static struct dentry *nst_dentry; static struct dentry *stats_dentry; static struct dentry *nodes_dentry; static DEFINE_SPINLOCK(o2net_debug_lock); static LIST_HEAD(sock_containers); static LIST_HEAD(send_tracking); void o2net_debug_add_nst(struct o2net_send_tracking *nst) { spin_lock(&o2net_debug_lock); list_add(&nst->st_net_debug_item, &send_tracking); spin_unlock(&o2net_debug_lock); } void o2net_debug_del_nst(struct o2net_send_tracking 
*nst) { spin_lock(&o2net_debug_lock); if (!list_empty(&nst->st_net_debug_item)) list_del_init(&nst->st_net_debug_item); spin_unlock(&o2net_debug_lock); } static struct o2net_send_tracking *next_nst(struct o2net_send_tracking *nst_start) { struct o2net_send_tracking *nst, *ret = NULL; assert_spin_locked(&o2net_debug_lock); list_for_each_entry(nst, &nst_start->st_net_debug_item, st_net_debug_item) { /* discover the head of the list */ if (&nst->st_net_debug_item == &send_tracking) break; /* use st_task to detect real nsts in the list */ if (nst->st_task != NULL) { ret = nst; break; } } return ret; } static void *nst_seq_start(struct seq_file *seq, loff_t *pos) { struct o2net_send_tracking *nst, *dummy_nst = seq->private; spin_lock(&o2net_debug_lock); nst = next_nst(dummy_nst); spin_unlock(&o2net_debug_lock); return nst; } static void *nst_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct o2net_send_tracking *nst, *dummy_nst = seq->private; spin_lock(&o2net_debug_lock); nst = next_nst(dummy_nst); list_del_init(&dummy_nst->st_net_debug_item); if (nst) list_add(&dummy_nst->st_net_debug_item, &nst->st_net_debug_item); spin_unlock(&o2net_debug_lock); return nst; /* unused, just needs to be null when done */ } static int nst_seq_show(struct seq_file *seq, void *v) { struct o2net_send_tracking *nst, *dummy_nst = seq->private; ktime_t now; s64 sock, send, status; spin_lock(&o2net_debug_lock); nst = next_nst(dummy_nst); if (!nst) goto out; now = ktime_get(); sock = ktime_to_us(ktime_sub(now, nst->st_sock_time)); send = ktime_to_us(ktime_sub(now, nst->st_send_time)); status = ktime_to_us(ktime_sub(now, nst->st_status_time)); /* get_task_comm isn't exported. oh well. 
*/ seq_printf(seq, "%p:\n" " pid: %lu\n" " tgid: %lu\n" " process name: %s\n" " node: %u\n" " sc: %p\n" " message id: %d\n" " message type: %u\n" " message key: 0x%08x\n" " sock acquiry: %lld usecs ago\n" " send start: %lld usecs ago\n" " wait start: %lld usecs ago\n", nst, (unsigned long)task_pid_nr(nst->st_task), (unsigned long)nst->st_task->tgid, nst->st_task->comm, nst->st_node, nst->st_sc, nst->st_id, nst->st_msg_type, nst->st_msg_key, (long long)sock, (long long)send, (long long)status); out: spin_unlock(&o2net_debug_lock); return 0; } static void nst_seq_stop(struct seq_file *seq, void *v) { } static const struct seq_operations nst_seq_ops = { .start = nst_seq_start, .next = nst_seq_next, .stop = nst_seq_stop, .show = nst_seq_show, }; static int nst_fop_open(struct inode *inode, struct file *file) { struct o2net_send_tracking *dummy_nst; dummy_nst = __seq_open_private(file, &nst_seq_ops, sizeof(*dummy_nst)); if (!dummy_nst) return -ENOMEM; o2net_debug_add_nst(dummy_nst); return 0; } static int nst_fop_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; struct o2net_send_tracking *dummy_nst = seq->private; o2net_debug_del_nst(dummy_nst); return seq_release_private(inode, file); } static const struct file_operations nst_seq_fops = { .open = nst_fop_open, .read = seq_read, .llseek = seq_lseek, .release = nst_fop_release, }; void o2net_debug_add_sc(struct o2net_sock_container *sc) { spin_lock(&o2net_debug_lock); list_add(&sc->sc_net_debug_item, &sock_containers); spin_unlock(&o2net_debug_lock); } void o2net_debug_del_sc(struct o2net_sock_container *sc) { spin_lock(&o2net_debug_lock); list_del_init(&sc->sc_net_debug_item); spin_unlock(&o2net_debug_lock); } struct o2net_sock_debug { int dbg_ctxt; struct o2net_sock_container *dbg_sock; }; static struct o2net_sock_container *next_sc(struct o2net_sock_container *sc_start) { struct o2net_sock_container *sc, *ret = NULL; assert_spin_locked(&o2net_debug_lock); 
list_for_each_entry(sc, &sc_start->sc_net_debug_item, sc_net_debug_item) { /* discover the head of the list miscast as a sc */ if (&sc->sc_net_debug_item == &sock_containers) break; /* use sc_page to detect real scs in the list */ if (sc->sc_page != NULL) { ret = sc; break; } } return ret; } static void *sc_seq_start(struct seq_file *seq, loff_t *pos) { struct o2net_sock_debug *sd = seq->private; struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock; spin_lock(&o2net_debug_lock); sc = next_sc(dummy_sc); spin_unlock(&o2net_debug_lock); return sc; } static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct o2net_sock_debug *sd = seq->private; struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock; spin_lock(&o2net_debug_lock); sc = next_sc(dummy_sc); list_del_init(&dummy_sc->sc_net_debug_item); if (sc) list_add(&dummy_sc->sc_net_debug_item, &sc->sc_net_debug_item); spin_unlock(&o2net_debug_lock); return sc; /* unused, just needs to be null when done */ } #ifdef CONFIG_OCFS2_FS_STATS # define sc_send_count(_s) ((_s)->sc_send_count) # define sc_recv_count(_s) ((_s)->sc_recv_count) # define sc_tv_acquiry_total_ns(_s) (ktime_to_ns((_s)->sc_tv_acquiry_total)) # define sc_tv_send_total_ns(_s) (ktime_to_ns((_s)->sc_tv_send_total)) # define sc_tv_status_total_ns(_s) (ktime_to_ns((_s)->sc_tv_status_total)) # define sc_tv_process_total_ns(_s) (ktime_to_ns((_s)->sc_tv_process_total)) #else # define sc_send_count(_s) (0U) # define sc_recv_count(_s) (0U) # define sc_tv_acquiry_total_ns(_s) (0LL) # define sc_tv_send_total_ns(_s) (0LL) # define sc_tv_status_total_ns(_s) (0LL) # define sc_tv_process_total_ns(_s) (0LL) #endif /* So that debugfs.ocfs2 can determine which format is being used */ #define O2NET_STATS_STR_VERSION 1 static void sc_show_sock_stats(struct seq_file *seq, struct o2net_sock_container *sc) { if (!sc) return; seq_printf(seq, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld\n", O2NET_STATS_STR_VERSION, sc->sc_node->nd_num, (unsigned 
long)sc_send_count(sc), (long long)sc_tv_acquiry_total_ns(sc), (long long)sc_tv_send_total_ns(sc), (long long)sc_tv_status_total_ns(sc), (unsigned long)sc_recv_count(sc), (long long)sc_tv_process_total_ns(sc)); } static void sc_show_sock_container(struct seq_file *seq, struct o2net_sock_container *sc) { struct inet_sock *inet = NULL; __be32 saddr = 0, daddr = 0; __be16 sport = 0, dport = 0; if (!sc) return; if (sc->sc_sock) { inet = inet_sk(sc->sc_sock->sk); /* the stack's structs aren't sparse endian clean */ saddr = (__force __be32)inet->inet_saddr; daddr = (__force __be32)inet->inet_daddr; sport = (__force __be16)inet->inet_sport; dport = (__force __be16)inet->inet_dport; } /* XXX sigh, inet-> doesn't have sparse annotation so any * use of it here generates a warning with -Wbitwise */ seq_printf(seq, "%p:\n" " krefs: %d\n" " sock: %pI4:%u -> " "%pI4:%u\n" " remote node: %s\n" " page off: %zu\n" " handshake ok: %u\n" " timer: %lld usecs\n" " data ready: %lld usecs\n" " advance start: %lld usecs\n" " advance stop: %lld usecs\n" " func start: %lld usecs\n" " func stop: %lld usecs\n" " func key: 0x%08x\n" " func type: %u\n", sc, atomic_read(&sc->sc_kref.refcount), &saddr, inet ? ntohs(sport) : 0, &daddr, inet ? 
ntohs(dport) : 0, sc->sc_node->nd_name, sc->sc_page_off, sc->sc_handshake_ok, (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(sc->sc_tv_data_ready), (long long)ktime_to_us(sc->sc_tv_advance_start), (long long)ktime_to_us(sc->sc_tv_advance_stop), (long long)ktime_to_us(sc->sc_tv_func_start), (long long)ktime_to_us(sc->sc_tv_func_stop), sc->sc_msg_key, sc->sc_msg_type); } static int sc_seq_show(struct seq_file *seq, void *v) { struct o2net_sock_debug *sd = seq->private; struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock; spin_lock(&o2net_debug_lock); sc = next_sc(dummy_sc); if (sc) { if (sd->dbg_ctxt == SHOW_SOCK_CONTAINERS) sc_show_sock_container(seq, sc); else sc_show_sock_stats(seq, sc); } spin_unlock(&o2net_debug_lock); return 0; } static void sc_seq_stop(struct seq_file *seq, void *v) { } static const struct seq_operations sc_seq_ops = { .start = sc_seq_start, .next = sc_seq_next, .stop = sc_seq_stop, .show = sc_seq_show, }; static int sc_common_open(struct file *file, int ctxt) { struct o2net_sock_debug *sd; struct o2net_sock_container *dummy_sc; dummy_sc = kzalloc(sizeof(*dummy_sc), GFP_KERNEL); if (!dummy_sc) return -ENOMEM; sd = __seq_open_private(file, &sc_seq_ops, sizeof(*sd)); if (!sd) { kfree(dummy_sc); return -ENOMEM; } sd->dbg_ctxt = ctxt; sd->dbg_sock = dummy_sc; o2net_debug_add_sc(dummy_sc); return 0; } static int sc_fop_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; struct o2net_sock_debug *sd = seq->private; struct o2net_sock_container *dummy_sc = sd->dbg_sock; o2net_debug_del_sc(dummy_sc); return seq_release_private(inode, file); } static int stats_fop_open(struct inode *inode, struct file *file) { return sc_common_open(file, SHOW_SOCK_STATS); } static const struct file_operations stats_seq_fops = { .open = stats_fop_open, .read = seq_read, .llseek = seq_lseek, .release = sc_fop_release, }; static int sc_fop_open(struct inode *inode, struct file *file) { return 
sc_common_open(file, SHOW_SOCK_CONTAINERS); } static const struct file_operations sc_seq_fops = { .open = sc_fop_open, .read = seq_read, .llseek = seq_lseek, .release = sc_fop_release, }; static int o2net_fill_bitmap(char *buf, int len) { unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; int i = -1, out = 0; o2net_fill_node_map(map, sizeof(map)); while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i); out += snprintf(buf + out, PAGE_SIZE - out, "\n"); return out; } static int nodes_fop_open(struct inode *inode, struct file *file) { char *buf; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE)); file->private_data = buf; return 0; } static int o2net_debug_release(struct inode *inode, struct file *file) { kfree(file->private_data); return 0; } static ssize_t o2net_debug_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { return simple_read_from_buffer(buf, nbytes, ppos, file->private_data, i_size_read(file->f_mapping->host)); } static const struct file_operations nodes_fops = { .open = nodes_fop_open, .release = o2net_debug_release, .read = o2net_debug_read, .llseek = generic_file_llseek, }; void o2net_debugfs_exit(void) { debugfs_remove(nodes_dentry); debugfs_remove(stats_dentry); debugfs_remove(sc_dentry); debugfs_remove(nst_dentry); debugfs_remove(o2net_dentry); } int o2net_debugfs_init(void) { umode_t mode = S_IFREG|S_IRUSR; o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL); if (o2net_dentry) nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode, o2net_dentry, NULL, &nst_seq_fops); if (nst_dentry) sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode, o2net_dentry, NULL, &sc_seq_fops); if (sc_dentry) stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, mode, o2net_dentry, NULL, &stats_seq_fops); if (stats_dentry) nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode, o2net_dentry, NULL, 
&nodes_fops); if (nodes_dentry) return 0; o2net_debugfs_exit(); mlog_errno(-ENOMEM); return -ENOMEM; } #endif /* CONFIG_DEBUG_FS */
gpl-2.0
CunningLogic/vivo-2.6.35
arch/powerpc/kernel/udbg_16550.c
1601
6562
/* * udbg for NS16550 compatable serial ports * * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/types.h> #include <asm/udbg.h> #include <asm/io.h> extern u8 real_readb(volatile u8 __iomem *addr); extern void real_writeb(u8 data, volatile u8 __iomem *addr); extern u8 real_205_readb(volatile u8 __iomem *addr); extern void real_205_writeb(u8 data, volatile u8 __iomem *addr); struct NS16550 { /* this struct must be packed */ unsigned char rbr; /* 0 */ unsigned char ier; /* 1 */ unsigned char fcr; /* 2 */ unsigned char lcr; /* 3 */ unsigned char mcr; /* 4 */ unsigned char lsr; /* 5 */ unsigned char msr; /* 6 */ unsigned char scr; /* 7 */ }; #define thr rbr #define iir fcr #define dll rbr #define dlm ier #define dlab lcr #define LSR_DR 0x01 /* Data ready */ #define LSR_OE 0x02 /* Overrun */ #define LSR_PE 0x04 /* Parity error */ #define LSR_FE 0x08 /* Framing error */ #define LSR_BI 0x10 /* Break */ #define LSR_THRE 0x20 /* Xmit holding register empty */ #define LSR_TEMT 0x40 /* Xmitter empty */ #define LSR_ERR 0x80 /* Error */ #define LCR_DLAB 0x80 static struct NS16550 __iomem *udbg_comport; static void udbg_550_flush(void) { if (udbg_comport) { while ((in_8(&udbg_comport->lsr) & LSR_THRE) == 0) /* wait for idle */; } } static void udbg_550_putc(char c) { if (udbg_comport) { if (c == '\n') udbg_550_putc('\r'); udbg_550_flush(); out_8(&udbg_comport->thr, c); } } static int udbg_550_getc_poll(void) { if (udbg_comport) { if ((in_8(&udbg_comport->lsr) & LSR_DR) != 0) return in_8(&udbg_comport->rbr); else return -1; } return -1; } static int udbg_550_getc(void) { if (udbg_comport) { while ((in_8(&udbg_comport->lsr) & LSR_DR) == 0) /* wait for char */; return in_8(&udbg_comport->rbr); } return -1; } void 
udbg_init_uart(void __iomem *comport, unsigned int speed, unsigned int clock) { unsigned int dll, base_bauds; if (clock == 0) clock = 1843200; if (speed == 0) speed = 9600; base_bauds = clock / 16; dll = base_bauds / speed; if (comport) { udbg_comport = (struct NS16550 __iomem *)comport; out_8(&udbg_comport->lcr, 0x00); out_8(&udbg_comport->ier, 0xff); out_8(&udbg_comport->ier, 0x00); out_8(&udbg_comport->lcr, LCR_DLAB); out_8(&udbg_comport->dll, dll & 0xff); out_8(&udbg_comport->dlm, dll >> 8); /* 8 data, 1 stop, no parity */ out_8(&udbg_comport->lcr, 0x03); /* RTS/DTR */ out_8(&udbg_comport->mcr, 0x03); /* Clear & enable FIFOs */ out_8(&udbg_comport->fcr ,0x07); udbg_putc = udbg_550_putc; udbg_flush = udbg_550_flush; udbg_getc = udbg_550_getc; udbg_getc_poll = udbg_550_getc_poll; } } unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock) { unsigned int dll, dlm, divisor, prescaler, speed; u8 old_lcr; struct NS16550 __iomem *port = comport; old_lcr = in_8(&port->lcr); /* select divisor latch registers. 
*/ out_8(&port->lcr, LCR_DLAB); /* now, read the divisor */ dll = in_8(&port->dll); dlm = in_8(&port->dlm); divisor = dlm << 8 | dll; /* check prescaling */ if (in_8(&port->mcr) & 0x80) prescaler = 4; else prescaler = 1; /* restore the LCR */ out_8(&port->lcr, old_lcr); /* calculate speed */ speed = (clock / prescaler) / (divisor * 16); /* sanity check */ if (speed > (clock / 16)) speed = 9600; return speed; } #ifdef CONFIG_PPC_MAPLE void udbg_maple_real_flush(void) { if (udbg_comport) { while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0) /* wait for idle */; } } void udbg_maple_real_putc(char c) { if (udbg_comport) { if (c == '\n') udbg_maple_real_putc('\r'); udbg_maple_real_flush(); real_writeb(c, &udbg_comport->thr); eieio(); } } void __init udbg_init_maple_realmode(void) { udbg_comport = (struct NS16550 __iomem *)0xf40003f8; udbg_putc = udbg_maple_real_putc; udbg_flush = udbg_maple_real_flush; udbg_getc = NULL; udbg_getc_poll = NULL; } #endif /* CONFIG_PPC_MAPLE */ #ifdef CONFIG_PPC_PASEMI void udbg_pas_real_flush(void) { if (udbg_comport) { while ((real_205_readb(&udbg_comport->lsr) & LSR_THRE) == 0) /* wait for idle */; } } void udbg_pas_real_putc(char c) { if (udbg_comport) { if (c == '\n') udbg_pas_real_putc('\r'); udbg_pas_real_flush(); real_205_writeb(c, &udbg_comport->thr); eieio(); } } void udbg_init_pas_realmode(void) { udbg_comport = (struct NS16550 __iomem *)0xfcff03f8UL; udbg_putc = udbg_pas_real_putc; udbg_flush = udbg_pas_real_flush; udbg_getc = NULL; udbg_getc_poll = NULL; } #endif /* CONFIG_PPC_MAPLE */ #ifdef CONFIG_PPC_EARLY_DEBUG_44x #include <platforms/44x/44x.h> static void udbg_44x_as1_flush(void) { if (udbg_comport) { while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0) /* wait for idle */; } } static void udbg_44x_as1_putc(char c) { if (udbg_comport) { if (c == '\n') udbg_44x_as1_putc('\r'); udbg_44x_as1_flush(); as1_writeb(c, &udbg_comport->thr); eieio(); } } static int udbg_44x_as1_getc(void) { if (udbg_comport) { while 
((as1_readb(&udbg_comport->lsr) & LSR_DR) == 0) ; /* wait for char */ return as1_readb(&udbg_comport->rbr); } return -1; } void __init udbg_init_44x_as1(void) { udbg_comport = (struct NS16550 __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR; udbg_putc = udbg_44x_as1_putc; udbg_flush = udbg_44x_as1_flush; udbg_getc = udbg_44x_as1_getc; } #endif /* CONFIG_PPC_EARLY_DEBUG_44x */ #ifdef CONFIG_PPC_EARLY_DEBUG_40x static void udbg_40x_real_flush(void) { if (udbg_comport) { while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0) /* wait for idle */; } } static void udbg_40x_real_putc(char c) { if (udbg_comport) { if (c == '\n') udbg_40x_real_putc('\r'); udbg_40x_real_flush(); real_writeb(c, &udbg_comport->thr); eieio(); } } static int udbg_40x_real_getc(void) { if (udbg_comport) { while ((real_readb(&udbg_comport->lsr) & LSR_DR) == 0) ; /* wait for char */ return real_readb(&udbg_comport->rbr); } return -1; } void __init udbg_init_40x_realmode(void) { udbg_comport = (struct NS16550 __iomem *) CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR; udbg_putc = udbg_40x_real_putc; udbg_flush = udbg_40x_real_flush; udbg_getc = udbg_40x_real_getc; udbg_getc_poll = NULL; } #endif /* CONFIG_PPC_EARLY_DEBUG_40x */
gpl-2.0
venkatkamesh/android_kernel_sony_msm8994
arch/arm/mach-shmobile/board-ag5evm.c
2113
17290
/* * arch/arm/mach-shmobile/board-ag5evm.c * * Copyright (C) 2010 Takashi Yoshii <yoshii.takashi.zj@renesas.com> * Copyright (C) 2009 Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/pinctrl/machine.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/serial_sci.h> #include <linux/smsc911x.h> #include <linux/gpio.h> #include <linux/videodev2.h> #include <linux/input.h> #include <linux/input/sh_keysc.h> #include <linux/mmc/host.h> #include <linux/mmc/sh_mmcif.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/mfd/tmio.h> #include <linux/sh_clk.h> #include <linux/irqchip/arm-gic.h> #include <video/sh_mobile_lcdc.h> #include <video/sh_mipi_dsi.h> #include <sound/sh_fsi.h> #include <mach/hardware.h> #include <mach/irqs.h> #include <mach/sh73a0.h> #include <mach/common.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/hardware/cache-l2x0.h> #include <asm/traps.h> /* Dummy supplies, where voltage doesn't matter */ static struct regulator_consumer_supply dummy_supplies[] = { 
REGULATOR_SUPPLY("vddvario", "smsc911x"), REGULATOR_SUPPLY("vdd33a", "smsc911x"), }; static struct resource smsc9220_resources[] = { [0] = { .start = 0x14000000, .end = 0x14000000 + SZ_64K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = SH73A0_PINT0_IRQ(2), /* PINTA2 */ .flags = IORESOURCE_IRQ, }, }; static struct smsc911x_platform_config smsc9220_platdata = { .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, .phy_interface = PHY_INTERFACE_MODE_MII, .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, }; static struct platform_device eth_device = { .name = "smsc911x", .id = 0, .dev = { .platform_data = &smsc9220_platdata, }, .resource = smsc9220_resources, .num_resources = ARRAY_SIZE(smsc9220_resources), }; static struct sh_keysc_info keysc_platdata = { .mode = SH_KEYSC_MODE_6, .scan_timing = 3, .delay = 100, .keycodes = { KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, KEY_G, KEY_H, KEY_I, KEY_J, KEY_K, KEY_L, KEY_M, KEY_N, KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U, KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z, KEY_HOME, KEY_SLEEP, KEY_SPACE, KEY_9, KEY_6, KEY_3, KEY_WAKEUP, KEY_RIGHT, \ KEY_COFFEE, KEY_0, KEY_8, KEY_5, KEY_2, KEY_DOWN, KEY_ENTER, KEY_UP, KEY_KPASTERISK, KEY_7, KEY_4, KEY_1, KEY_STOP, KEY_LEFT, \ KEY_COMPUTER, }, }; static struct resource keysc_resources[] = { [0] = { .name = "KEYSC", .start = 0xe61b0000, .end = 0xe61b0098 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = gic_spi(71), .flags = IORESOURCE_IRQ, }, }; static struct platform_device keysc_device = { .name = "sh_keysc", .id = 0, .num_resources = ARRAY_SIZE(keysc_resources), .resource = keysc_resources, .dev = { .platform_data = &keysc_platdata, }, }; /* FSI A */ static struct resource fsi_resources[] = { [0] = { .name = "FSI", .start = 0xEC230000, .end = 0xEC230400 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = gic_spi(146), .flags = IORESOURCE_IRQ, }, }; static struct platform_device fsi_device = { .name = "sh_fsi2", .id = -1, .num_resources = 
ARRAY_SIZE(fsi_resources), .resource = fsi_resources, }; /* Fixed 1.8V regulator to be used by MMCIF */ static struct regulator_consumer_supply fixed1v8_power_consumers[] = { REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"), REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"), }; static struct resource sh_mmcif_resources[] = { [0] = { .name = "MMCIF", .start = 0xe6bd0000, .end = 0xe6bd00ff, .flags = IORESOURCE_MEM, }, [1] = { .start = gic_spi(141), .flags = IORESOURCE_IRQ, }, [2] = { .start = gic_spi(140), .flags = IORESOURCE_IRQ, }, }; static struct sh_mmcif_plat_data sh_mmcif_platdata = { .sup_pclk = 0, .ocr = MMC_VDD_165_195, .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, .slave_id_tx = SHDMA_SLAVE_MMCIF_TX, .slave_id_rx = SHDMA_SLAVE_MMCIF_RX, }; static struct platform_device mmc_device = { .name = "sh_mmcif", .id = 0, .dev = { .dma_mask = NULL, .coherent_dma_mask = 0xffffffff, .platform_data = &sh_mmcif_platdata, }, .num_resources = ARRAY_SIZE(sh_mmcif_resources), .resource = sh_mmcif_resources, }; /* IrDA */ static struct resource irda_resources[] = { [0] = { .start = 0xE6D00000, .end = 0xE6D01FD4 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = gic_spi(95), .flags = IORESOURCE_IRQ, }, }; static struct platform_device irda_device = { .name = "sh_irda", .id = 0, .resource = irda_resources, .num_resources = ARRAY_SIZE(irda_resources), }; /* MIPI-DSI */ static struct resource mipidsi0_resources[] = { [0] = { .name = "DSI0", .start = 0xfeab0000, .end = 0xfeab3fff, .flags = IORESOURCE_MEM, }, [1] = { .name = "DSI0", .start = 0xfeab4000, .end = 0xfeab7fff, .flags = IORESOURCE_MEM, }, }; static int sh_mipi_set_dot_clock(struct platform_device *pdev, void __iomem *base, int enable) { struct clk *pck, *phy; int ret; pck = clk_get(&pdev->dev, "dsip_clk"); if (IS_ERR(pck)) { ret = PTR_ERR(pck); goto sh_mipi_set_dot_clock_pck_err; } phy = clk_get(&pdev->dev, "dsiphy_clk"); if (IS_ERR(phy)) { ret = PTR_ERR(phy); goto sh_mipi_set_dot_clock_phy_err; } if (enable) { clk_set_rate(pck, 
clk_round_rate(pck, 24000000)); clk_set_rate(phy, clk_round_rate(pck, 510000000)); clk_enable(pck); clk_enable(phy); } else { clk_disable(pck); clk_disable(phy); } ret = 0; clk_put(phy); sh_mipi_set_dot_clock_phy_err: clk_put(pck); sh_mipi_set_dot_clock_pck_err: return ret; } static struct sh_mipi_dsi_info mipidsi0_info = { .data_format = MIPI_RGB888, .channel = LCDC_CHAN_MAINLCD, .lane = 2, .vsynw_offset = 20, .clksrc = 1, .flags = SH_MIPI_DSI_HSABM | SH_MIPI_DSI_SYNC_PULSES_MODE | SH_MIPI_DSI_HSbyteCLK, .set_dot_clock = sh_mipi_set_dot_clock, }; static struct platform_device mipidsi0_device = { .name = "sh-mipi-dsi", .num_resources = ARRAY_SIZE(mipidsi0_resources), .resource = mipidsi0_resources, .id = 0, .dev = { .platform_data = &mipidsi0_info, }, }; static unsigned char lcd_backlight_seq[3][2] = { { 0x04, 0x07 }, { 0x23, 0x80 }, { 0x03, 0x01 }, }; static int lcd_backlight_set_brightness(int brightness) { struct i2c_adapter *adap; struct i2c_msg msg; unsigned int i; int ret; if (brightness == 0) { /* Reset the chip */ gpio_set_value(235, 0); mdelay(24); gpio_set_value(235, 1); return 0; } adap = i2c_get_adapter(1); if (adap == NULL) return -ENODEV; for (i = 0; i < ARRAY_SIZE(lcd_backlight_seq); i++) { msg.addr = 0x6d; msg.buf = &lcd_backlight_seq[i][0]; msg.len = 2; msg.flags = 0; ret = i2c_transfer(adap, &msg, 1); if (ret < 0) break; } i2c_put_adapter(adap); return ret < 0 ? 
ret : 0; } /* LCDC0 */ static const struct fb_videomode lcdc0_modes[] = { { .name = "R63302(QHD)", .xres = 544, .yres = 961, .left_margin = 72, .right_margin = 600, .hsync_len = 16, .upper_margin = 8, .lower_margin = 8, .vsync_len = 2, .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT, }, }; static struct sh_mobile_lcdc_info lcdc0_info = { .clock_source = LCDC_CLK_PERIPHERAL, .ch[0] = { .chan = LCDC_CHAN_MAINLCD, .interface_type = RGB24, .clock_divider = 1, .flags = LCDC_FLAGS_DWPOL, .fourcc = V4L2_PIX_FMT_RGB565, .lcd_modes = lcdc0_modes, .num_modes = ARRAY_SIZE(lcdc0_modes), .panel_cfg = { .width = 44, .height = 79, }, .bl_info = { .name = "sh_mobile_lcdc_bl", .max_brightness = 1, .set_brightness = lcd_backlight_set_brightness, }, .tx_dev = &mipidsi0_device, } }; static struct resource lcdc0_resources[] = { [0] = { .name = "LCDC0", .start = 0xfe940000, /* P4-only space */ .end = 0xfe943fff, .flags = IORESOURCE_MEM, }, [1] = { .start = intcs_evt2irq(0x580), .flags = IORESOURCE_IRQ, }, }; static struct platform_device lcdc0_device = { .name = "sh_mobile_lcdc_fb", .num_resources = ARRAY_SIZE(lcdc0_resources), .resource = lcdc0_resources, .id = 0, .dev = { .platform_data = &lcdc0_info, .coherent_dma_mask = ~0, }, }; /* Fixed 2.8V regulators to be used by SDHI0 */ static struct regulator_consumer_supply fixed2v8_power_consumers[] = { REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), }; /* SDHI0 */ static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD, .tmio_caps = MMC_CAP_SD_HIGHSPEED, .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, .cd_gpio = 251, }; static struct resource sdhi0_resources[] = { [0] = { .name = "SDHI0", .start = 0xee100000, .end = 0xee1000ff, .flags = IORESOURCE_MEM, }, [1] = { .name = SH_MOBILE_SDHI_IRQ_CARD_DETECT, .start = gic_spi(83), .flags = IORESOURCE_IRQ, }, [2] = { 
.name = SH_MOBILE_SDHI_IRQ_SDCARD, .start = gic_spi(84), .flags = IORESOURCE_IRQ, }, [3] = { .name = SH_MOBILE_SDHI_IRQ_SDIO, .start = gic_spi(85), .flags = IORESOURCE_IRQ, }, }; static struct platform_device sdhi0_device = { .name = "sh_mobile_sdhi", .id = 0, .num_resources = ARRAY_SIZE(sdhi0_resources), .resource = sdhi0_resources, .dev = { .platform_data = &sdhi0_info, }, }; /* Fixed 3.3V regulator to be used by SDHI1 */ static struct regulator_consumer_supply cn4_power_consumers[] = { REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), }; static struct regulator_init_data cn4_power_init_data = { .constraints = { .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(cn4_power_consumers), .consumer_supplies = cn4_power_consumers, }; static struct fixed_voltage_config cn4_power_info = { .supply_name = "CN4 SD/MMC Vdd", .microvolts = 3300000, .gpio = 114, .enable_high = 1, .init_data = &cn4_power_init_data, }; static struct platform_device cn4_power = { .name = "reg-fixed-voltage", .id = 2, .dev = { .platform_data = &cn4_power_info, }, }; static void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state) { static int power_gpio = -EINVAL; if (power_gpio < 0) { int ret = gpio_request_one(114, GPIOF_OUT_INIT_LOW, "sdhi1_power"); if (!ret) power_gpio = 114; } /* * If requesting the GPIO above failed, it means, that the regulator got * probed and grabbed the GPIO, but we don't know, whether the sdhi * driver already uses the regulator. If it doesn't, we have to toggle * the GPIO ourselves, even though it is now owned by the fixed * regulator driver. We have to live with the race in case the driver * gets unloaded and the GPIO freed between these two steps. 
*/ gpio_set_value(114, state); } static struct sh_mobile_sdhi_info sh_sdhi1_info = { .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT, .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ, .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, .set_pwr = ag5evm_sdhi1_set_pwr, }; static struct resource sdhi1_resources[] = { [0] = { .name = "SDHI1", .start = 0xee120000, .end = 0xee1200ff, .flags = IORESOURCE_MEM, }, [1] = { .name = SH_MOBILE_SDHI_IRQ_CARD_DETECT, .start = gic_spi(87), .flags = IORESOURCE_IRQ, }, [2] = { .name = SH_MOBILE_SDHI_IRQ_SDCARD, .start = gic_spi(88), .flags = IORESOURCE_IRQ, }, [3] = { .name = SH_MOBILE_SDHI_IRQ_SDIO, .start = gic_spi(89), .flags = IORESOURCE_IRQ, }, }; static struct platform_device sdhi1_device = { .name = "sh_mobile_sdhi", .id = 1, .dev = { .platform_data = &sh_sdhi1_info, }, .num_resources = ARRAY_SIZE(sdhi1_resources), .resource = sdhi1_resources, }; static struct platform_device *ag5evm_devices[] __initdata = { &cn4_power, &eth_device, &keysc_device, &fsi_device, &mmc_device, &irda_device, &mipidsi0_device, &lcdc0_device, &sdhi0_device, &sdhi1_device, }; static unsigned long pin_pullup_conf[] = { PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 0), }; static const struct pinctrl_map ag5evm_pinctrl_map[] = { /* FSIA */ PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2.0", "pfc-sh73a0", "fsia_mclk_in", "fsia"), PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2.0", "pfc-sh73a0", "fsia_sclk_in", "fsia"), PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2.0", "pfc-sh73a0", "fsia_data_in", "fsia"), PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2.0", "pfc-sh73a0", "fsia_data_out", "fsia"), /* I2C2 & I2C3 */ PIN_MAP_MUX_GROUP_DEFAULT("i2c-sh_mobile.2", "pfc-sh73a0", "i2c2_0", "i2c2"), PIN_MAP_MUX_GROUP_DEFAULT("i2c-sh_mobile.3", "pfc-sh73a0", "i2c3_0", "i2c3"), /* IrDA */ PIN_MAP_MUX_GROUP_DEFAULT("sh_irda.0", "pfc-sh73a0", "irda_0", "irda"), /* KEYSC */ PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0", "keysc_in8", "keysc"), PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0", 
"keysc_out04", "keysc"), PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0", "keysc_out5", "keysc"), PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0", "keysc_out6_0", "keysc"), PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0", "keysc_out7_0", "keysc"), PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0", "keysc_out8_0", "keysc"), PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0", "keysc_out9_2", "keysc"), PIN_MAP_CONFIGS_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0", "keysc_in8", pin_pullup_conf), /* MMCIF */ PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0", "mmc0_data8_0", "mmc0"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0", "mmc0_ctrl_0", "mmc0"), PIN_MAP_CONFIGS_PIN_DEFAULT("sh_mmcif.0", "pfc-sh73a0", "PORT279", pin_pullup_conf), PIN_MAP_CONFIGS_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0", "mmc0_data8_0", pin_pullup_conf), /* SCIFA2 */ PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.2", "pfc-sh73a0", "scifa2_data_0", "scifa2"), PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.2", "pfc-sh73a0", "scifa2_ctrl_0", "scifa2"), /* SDHI0 (CN15 [SD I/F]) */ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0", "sdhi0_data4", "sdhi0"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0", "sdhi0_ctrl", "sdhi0"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0", "sdhi0_wp", "sdhi0"), /* SDHI1 (CN4 [WLAN I/F]) */ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-sh73a0", "sdhi1_data4", "sdhi1"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-sh73a0", "sdhi1_ctrl", "sdhi1"), PIN_MAP_CONFIGS_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-sh73a0", "sdhi1_data4", pin_pullup_conf), PIN_MAP_CONFIGS_PIN_DEFAULT("sh_mobile_sdhi.1", "pfc-sh73a0", "PORT263", pin_pullup_conf), }; static void __init ag5evm_init(void) { regulator_register_always_on(0, "fixed-1.8V", fixed1v8_power_consumers, ARRAY_SIZE(fixed1v8_power_consumers), 1800000); regulator_register_always_on(1, "fixed-2.8V", fixed2v8_power_consumers, ARRAY_SIZE(fixed2v8_power_consumers), 3300000); 
regulator_register_fixed(3, dummy_supplies, ARRAY_SIZE(dummy_supplies)); pinctrl_register_mappings(ag5evm_pinctrl_map, ARRAY_SIZE(ag5evm_pinctrl_map)); sh73a0_pinmux_init(); /* enable MMCIF */ gpio_request_one(208, GPIOF_OUT_INIT_HIGH, NULL); /* Reset */ /* enable SMSC911X */ gpio_request_one(144, GPIOF_IN, NULL); /* PINTA2 */ gpio_request_one(145, GPIOF_OUT_INIT_HIGH, NULL); /* RESET */ /* LCD panel */ gpio_request_one(217, GPIOF_OUT_INIT_LOW, NULL); /* RESET */ mdelay(1); gpio_set_value(217, 1); mdelay(100); /* LCD backlight controller */ gpio_request_one(235, GPIOF_OUT_INIT_LOW, NULL); /* RESET */ lcd_backlight_set_brightness(0); #ifdef CONFIG_CACHE_L2X0 /* Shared attribute override enable, 64K*8way */ l2x0_init(IOMEM(0xf0100000), 0x00460000, 0xc2000fff); #endif sh73a0_add_standard_devices(); platform_add_devices(ag5evm_devices, ARRAY_SIZE(ag5evm_devices)); } MACHINE_START(AG5EVM, "ag5evm") .smp = smp_ops(sh73a0_smp_ops), .map_io = sh73a0_map_io, .init_early = sh73a0_add_early_devices, .nr_irqs = NR_IRQS_LEGACY, .init_irq = sh73a0_init_irq, .init_machine = ag5evm_init, .init_late = shmobile_init_late, .init_time = sh73a0_earlytimer_init, MACHINE_END
gpl-2.0
AndroidDeveloperAlliance/ZenKernel_Grouper
fs/nfs/mount_clnt.c
2369
11934
/* * In-kernel MOUNT protocol client * * Copyright (C) 1997, Olaf Kirch <okir@monad.swb.de> */ #include <linux/types.h> #include <linux/socket.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/uio.h> #include <linux/net.h> #include <linux/in.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/sched.h> #include <linux/nfs_fs.h> #include "internal.h" #ifdef RPC_DEBUG # define NFSDBG_FACILITY NFSDBG_MOUNT #endif /* * Defined by RFC 1094, section A.3; and RFC 1813, section 5.1.4 */ #define MNTPATHLEN (1024) /* * XDR data type sizes */ #define encode_dirpath_sz (1 + XDR_QUADLEN(MNTPATHLEN)) #define MNT_status_sz (1) #define MNT_fhs_status_sz (1) #define MNT_fhandle_sz XDR_QUADLEN(NFS2_FHSIZE) #define MNT_fhandle3_sz (1 + XDR_QUADLEN(NFS3_FHSIZE)) #define MNT_authflav3_sz (1 + NFS_MAX_SECFLAVORS) /* * XDR argument and result sizes */ #define MNT_enc_dirpath_sz encode_dirpath_sz #define MNT_dec_mountres_sz (MNT_status_sz + MNT_fhandle_sz) #define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandle_sz + \ MNT_authflav3_sz) /* * Defined by RFC 1094, section A.5 */ enum { MOUNTPROC_NULL = 0, MOUNTPROC_MNT = 1, MOUNTPROC_DUMP = 2, MOUNTPROC_UMNT = 3, MOUNTPROC_UMNTALL = 4, MOUNTPROC_EXPORT = 5, }; /* * Defined by RFC 1813, section 5.2 */ enum { MOUNTPROC3_NULL = 0, MOUNTPROC3_MNT = 1, MOUNTPROC3_DUMP = 2, MOUNTPROC3_UMNT = 3, MOUNTPROC3_UMNTALL = 4, MOUNTPROC3_EXPORT = 5, }; static struct rpc_program mnt_program; /* * Defined by OpenGroup XNFS Version 3W, chapter 8 */ enum mountstat { MNT_OK = 0, MNT_EPERM = 1, MNT_ENOENT = 2, MNT_EACCES = 13, MNT_EINVAL = 22, }; static struct { u32 status; int errno; } mnt_errtbl[] = { { .status = MNT_OK, .errno = 0, }, { .status = MNT_EPERM, .errno = -EPERM, }, { .status = MNT_ENOENT, .errno = -ENOENT, }, { .status = MNT_EACCES, .errno = -EACCES, }, { .status = MNT_EINVAL, .errno = -EINVAL, }, }; /* * Defined by RFC 1813, section 5.1.5 */ enum mountstat3 { MNT3_OK = 0, /* no error */ MNT3ERR_PERM = 1, /* Not owner */ 
MNT3ERR_NOENT = 2, /* No such file or directory */ MNT3ERR_IO = 5, /* I/O error */ MNT3ERR_ACCES = 13, /* Permission denied */ MNT3ERR_NOTDIR = 20, /* Not a directory */ MNT3ERR_INVAL = 22, /* Invalid argument */ MNT3ERR_NAMETOOLONG = 63, /* Filename too long */ MNT3ERR_NOTSUPP = 10004, /* Operation not supported */ MNT3ERR_SERVERFAULT = 10006, /* A failure on the server */ }; static struct { u32 status; int errno; } mnt3_errtbl[] = { { .status = MNT3_OK, .errno = 0, }, { .status = MNT3ERR_PERM, .errno = -EPERM, }, { .status = MNT3ERR_NOENT, .errno = -ENOENT, }, { .status = MNT3ERR_IO, .errno = -EIO, }, { .status = MNT3ERR_ACCES, .errno = -EACCES, }, { .status = MNT3ERR_NOTDIR, .errno = -ENOTDIR, }, { .status = MNT3ERR_INVAL, .errno = -EINVAL, }, { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, }, { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, }, { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, }, }; struct mountres { int errno; struct nfs_fh *fh; unsigned int *auth_count; rpc_authflavor_t *auth_flavors; }; struct mnt_fhstatus { u32 status; struct nfs_fh *fh; }; /** * nfs_mount - Obtain an NFS file handle for the given host and path * @info: pointer to mount request arguments * * Uses default timeout parameters specified by underlying transport. */ int nfs_mount(struct nfs_mount_request *info) { struct mountres result = { .fh = info->fh, .auth_count = info->auth_flav_len, .auth_flavors = info->auth_flavs, }; struct rpc_message msg = { .rpc_argp = info->dirpath, .rpc_resp = &result, }; struct rpc_create_args args = { .net = &init_net, .protocol = info->protocol, .address = info->sap, .addrsize = info->salen, .servername = info->hostname, .program = &mnt_program, .version = info->version, .authflavor = RPC_AUTH_UNIX, }; struct rpc_clnt *mnt_clnt; int status; dprintk("NFS: sending MNT request for %s:%s\n", (info->hostname ? 
info->hostname : "server"), info->dirpath); if (info->noresvport) args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; mnt_clnt = rpc_create(&args); if (IS_ERR(mnt_clnt)) goto out_clnt_err; if (info->version == NFS_MNT3_VERSION) msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC3_MNT]; else msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC_MNT]; status = rpc_call_sync(mnt_clnt, &msg, 0); rpc_shutdown_client(mnt_clnt); if (status < 0) goto out_call_err; if (result.errno != 0) goto out_mnt_err; dprintk("NFS: MNT request succeeded\n"); status = 0; out: return status; out_clnt_err: status = PTR_ERR(mnt_clnt); dprintk("NFS: failed to create MNT RPC client, status=%d\n", status); goto out; out_call_err: dprintk("NFS: MNT request failed, status=%d\n", status); goto out; out_mnt_err: dprintk("NFS: MNT server returned result %d\n", result.errno); status = result.errno; goto out; } /** * nfs_umount - Notify a server that we have unmounted this export * @info: pointer to umount request arguments * * MOUNTPROC_UMNT is advisory, so we set a short timeout, and always * use UDP. */ void nfs_umount(const struct nfs_mount_request *info) { static const struct rpc_timeout nfs_umnt_timeout = { .to_initval = 1 * HZ, .to_maxval = 3 * HZ, .to_retries = 2, }; struct rpc_create_args args = { .net = &init_net, .protocol = IPPROTO_UDP, .address = info->sap, .addrsize = info->salen, .timeout = &nfs_umnt_timeout, .servername = info->hostname, .program = &mnt_program, .version = info->version, .authflavor = RPC_AUTH_UNIX, .flags = RPC_CLNT_CREATE_NOPING, }; struct rpc_message msg = { .rpc_argp = info->dirpath, }; struct rpc_clnt *clnt; int status; if (info->noresvport) args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; clnt = rpc_create(&args); if (IS_ERR(clnt)) goto out_clnt_err; dprintk("NFS: sending UMNT request for %s:%s\n", (info->hostname ? 
info->hostname : "server"), info->dirpath); if (info->version == NFS_MNT3_VERSION) msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC3_UMNT]; else msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC_UMNT]; status = rpc_call_sync(clnt, &msg, 0); rpc_shutdown_client(clnt); if (unlikely(status < 0)) goto out_call_err; return; out_clnt_err: dprintk("NFS: failed to create UMNT RPC client, status=%ld\n", PTR_ERR(clnt)); return; out_call_err: dprintk("NFS: UMNT request failed, status=%d\n", status); } /* * XDR encode/decode functions for MOUNT */ static void encode_mntdirpath(struct xdr_stream *xdr, const char *pathname) { const u32 pathname_len = strlen(pathname); __be32 *p; BUG_ON(pathname_len > MNTPATHLEN); p = xdr_reserve_space(xdr, 4 + pathname_len); xdr_encode_opaque(p, pathname, pathname_len); } static void mnt_xdr_enc_dirpath(struct rpc_rqst *req, struct xdr_stream *xdr, const char *dirpath) { encode_mntdirpath(xdr, dirpath); } /* * RFC 1094: "A non-zero status indicates some sort of error. In this * case, the status is a UNIX error number." This can be problematic * if the server and client use different errno values for the same * error. * * However, the OpenGroup XNFS spec provides a simple mapping that is * independent of local errno values on the server and the client. 
*/ static int decode_status(struct xdr_stream *xdr, struct mountres *res) { unsigned int i; u32 status; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) return -EIO; status = be32_to_cpup(p); for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) { if (mnt_errtbl[i].status == status) { res->errno = mnt_errtbl[i].errno; return 0; } } dprintk("NFS: unrecognized MNT status code: %u\n", status); res->errno = -EACCES; return 0; } static int decode_fhandle(struct xdr_stream *xdr, struct mountres *res) { struct nfs_fh *fh = res->fh; __be32 *p; p = xdr_inline_decode(xdr, NFS2_FHSIZE); if (unlikely(p == NULL)) return -EIO; fh->size = NFS2_FHSIZE; memcpy(fh->data, p, NFS2_FHSIZE); return 0; } static int mnt_xdr_dec_mountres(struct rpc_rqst *req, struct xdr_stream *xdr, struct mountres *res) { int status; status = decode_status(xdr, res); if (unlikely(status != 0 || res->errno != 0)) return status; return decode_fhandle(xdr, res); } static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res) { unsigned int i; u32 status; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) return -EIO; status = be32_to_cpup(p); for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) { if (mnt3_errtbl[i].status == status) { res->errno = mnt3_errtbl[i].errno; return 0; } } dprintk("NFS: unrecognized MNT3 status code: %u\n", status); res->errno = -EACCES; return 0; } static int decode_fhandle3(struct xdr_stream *xdr, struct mountres *res) { struct nfs_fh *fh = res->fh; u32 size; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) return -EIO; size = be32_to_cpup(p); if (size > NFS3_FHSIZE || size == 0) return -EIO; p = xdr_inline_decode(xdr, size); if (unlikely(p == NULL)) return -EIO; fh->size = size; memcpy(fh->data, p, size); return 0; } static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res) { rpc_authflavor_t *flavors = res->auth_flavors; unsigned int *count = res->auth_count; u32 entries, i; __be32 *p; if (*count == 0) return 
0; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) return -EIO; entries = be32_to_cpup(p); dprintk("NFS: received %u auth flavors\n", entries); if (entries > NFS_MAX_SECFLAVORS) entries = NFS_MAX_SECFLAVORS; p = xdr_inline_decode(xdr, 4 * entries); if (unlikely(p == NULL)) return -EIO; if (entries > *count) entries = *count; for (i = 0; i < entries; i++) { flavors[i] = be32_to_cpup(p++); dprintk("NFS: auth flavor[%u]: %d\n", i, flavors[i]); } *count = i; return 0; } static int mnt_xdr_dec_mountres3(struct rpc_rqst *req, struct xdr_stream *xdr, struct mountres *res) { int status; status = decode_fhs_status(xdr, res); if (unlikely(status != 0 || res->errno != 0)) return status; status = decode_fhandle3(xdr, res); if (unlikely(status != 0)) { res->errno = -EBADHANDLE; return 0; } return decode_auth_flavors(xdr, res); } static struct rpc_procinfo mnt_procedures[] = { [MOUNTPROC_MNT] = { .p_proc = MOUNTPROC_MNT, .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath, .p_decode = (kxdrdproc_t)mnt_xdr_dec_mountres, .p_arglen = MNT_enc_dirpath_sz, .p_replen = MNT_dec_mountres_sz, .p_statidx = MOUNTPROC_MNT, .p_name = "MOUNT", }, [MOUNTPROC_UMNT] = { .p_proc = MOUNTPROC_UMNT, .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath, .p_arglen = MNT_enc_dirpath_sz, .p_statidx = MOUNTPROC_UMNT, .p_name = "UMOUNT", }, }; static struct rpc_procinfo mnt3_procedures[] = { [MOUNTPROC3_MNT] = { .p_proc = MOUNTPROC3_MNT, .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath, .p_decode = (kxdrdproc_t)mnt_xdr_dec_mountres3, .p_arglen = MNT_enc_dirpath_sz, .p_replen = MNT_dec_mountres3_sz, .p_statidx = MOUNTPROC3_MNT, .p_name = "MOUNT", }, [MOUNTPROC3_UMNT] = { .p_proc = MOUNTPROC3_UMNT, .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath, .p_arglen = MNT_enc_dirpath_sz, .p_statidx = MOUNTPROC3_UMNT, .p_name = "UMOUNT", }, }; static struct rpc_version mnt_version1 = { .number = 1, .nrprocs = ARRAY_SIZE(mnt_procedures), .procs = mnt_procedures, }; static struct rpc_version mnt_version3 = { .number = 3, .nrprocs = 
ARRAY_SIZE(mnt3_procedures), .procs = mnt3_procedures, }; static struct rpc_version *mnt_version[] = { NULL, &mnt_version1, NULL, &mnt_version3, }; static struct rpc_stat mnt_stats; static struct rpc_program mnt_program = { .name = "mount", .number = NFS_MNT_PROGRAM, .nrvers = ARRAY_SIZE(mnt_version), .version = mnt_version, .stats = &mnt_stats, };
gpl-2.0
Buckmarble/Elite_Kernel
sound/pci/hda/patch_cmedia.c
2369
22604
/* * Universal Interface for Intel High Definition Audio Codec * * HD audio interface patch for C-Media CMI9880 * * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de> * * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pci.h> #include <sound/core.h> #include "hda_codec.h" #include "hda_local.h" #define NUM_PINS 11 /* board config type */ enum { CMI_MINIMAL, /* back 3-jack */ CMI_MIN_FP, /* back 3-jack + front-panel 2-jack */ CMI_FULL, /* back 6-jack + front-panel 2-jack */ CMI_FULL_DIG, /* back 6-jack + front-panel 2-jack + digital I/O */ CMI_ALLOUT, /* back 5-jack + front-panel 2-jack + digital out */ CMI_AUTO, /* let driver guess it */ CMI_MODELS }; struct cmi_spec { int board_config; unsigned int no_line_in: 1; /* no line-in (5-jack) */ unsigned int front_panel: 1; /* has front-panel 2-jack */ /* playback */ struct hda_multi_out multiout; hda_nid_t dac_nids[AUTO_CFG_MAX_OUTS]; /* NID for each DAC */ int num_dacs; /* capture */ const hda_nid_t *adc_nids; hda_nid_t dig_in_nid; /* capture source */ const struct hda_input_mux *input_mux; unsigned int cur_mux[2]; /* channel mode */ int num_channel_modes; const struct hda_channel_mode *channel_modes; struct hda_pcm pcm_rec[2]; /* PCM information */ /* pin default configuration */ hda_nid_t 
pin_nid[NUM_PINS]; unsigned int def_conf[NUM_PINS]; unsigned int pin_def_confs; /* multichannel pins */ struct hda_verb multi_init[9]; /* 2 verbs for each pin + terminator */ }; /* * input MUX */ static int cmi_mux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct cmi_spec *spec = codec->spec; return snd_hda_input_mux_info(spec->input_mux, uinfo); } static int cmi_mux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct cmi_spec *spec = codec->spec; unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); ucontrol->value.enumerated.item[0] = spec->cur_mux[adc_idx]; return 0; } static int cmi_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct cmi_spec *spec = codec->spec; unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol, spec->adc_nids[adc_idx], &spec->cur_mux[adc_idx]); } /* * shared line-in, mic for surrounds */ /* 3-stack / 2 channel */ static const struct hda_verb cmi9880_ch2_init[] = { /* set line-in PIN for input */ { 0x0c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN }, /* set mic PIN for input, also enable vref */ { 0x0d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 }, /* route front PCM (DAC1) to HP */ { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x00 }, {} }; /* 3-stack / 6 channel */ static const struct hda_verb cmi9880_ch6_init[] = { /* set line-in PIN for output */ { 0x0c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT }, /* set mic PIN for output */ { 0x0d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT }, /* route front PCM (DAC1) to HP */ { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x00 }, {} }; /* 3-stack+front / 8 channel */ static const struct hda_verb cmi9880_ch8_init[] = { /* set line-in PIN for output */ { 0x0c, 
AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT }, /* set mic PIN for output */ { 0x0d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT }, /* route rear-surround PCM (DAC4) to HP */ { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x03 }, {} }; static const struct hda_channel_mode cmi9880_channel_modes[3] = { { 2, cmi9880_ch2_init }, { 6, cmi9880_ch6_init }, { 8, cmi9880_ch8_init }, }; static int cmi_ch_mode_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct cmi_spec *spec = codec->spec; return snd_hda_ch_mode_info(codec, uinfo, spec->channel_modes, spec->num_channel_modes); } static int cmi_ch_mode_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct cmi_spec *spec = codec->spec; return snd_hda_ch_mode_get(codec, ucontrol, spec->channel_modes, spec->num_channel_modes, spec->multiout.max_channels); } static int cmi_ch_mode_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct cmi_spec *spec = codec->spec; return snd_hda_ch_mode_put(codec, ucontrol, spec->channel_modes, spec->num_channel_modes, &spec->multiout.max_channels); } /* */ static const struct snd_kcontrol_new cmi9880_basic_mixer[] = { /* CMI9880 has no playback volumes! */ HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT), /* front */ HDA_CODEC_MUTE("Surround Playback Switch", 0x04, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE_MONO("Center Playback Switch", 0x05, 1, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE_MONO("LFE Playback Switch", 0x05, 2, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Side Playback Switch", 0x06, 0x0, HDA_OUTPUT), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, /* The multiple "Capture Source" controls confuse alsamixer * So call somewhat different.. 
*/ /* .name = "Capture Source", */ .name = "Input Source", .count = 2, .info = cmi_mux_enum_info, .get = cmi_mux_enum_get, .put = cmi_mux_enum_put, }, HDA_CODEC_VOLUME("Capture Volume", 0x08, 0, HDA_INPUT), HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x09, 0, HDA_INPUT), HDA_CODEC_MUTE("Capture Switch", 0x08, 0, HDA_INPUT), HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x09, 0, HDA_INPUT), HDA_CODEC_VOLUME("Beep Playback Volume", 0x23, 0, HDA_OUTPUT), HDA_CODEC_MUTE("Beep Playback Switch", 0x23, 0, HDA_OUTPUT), { } /* end */ }; /* * shared I/O pins */ static const struct snd_kcontrol_new cmi9880_ch_mode_mixer[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Channel Mode", .info = cmi_ch_mode_info, .get = cmi_ch_mode_get, .put = cmi_ch_mode_put, }, { } /* end */ }; /* AUD-in selections: * 0x0b 0x0c 0x0d 0x0e 0x0f 0x10 0x11 0x1f 0x20 */ static const struct hda_input_mux cmi9880_basic_mux = { .num_items = 4, .items = { { "Front Mic", 0x5 }, { "Rear Mic", 0x2 }, { "Line", 0x1 }, { "CD", 0x7 }, } }; static const struct hda_input_mux cmi9880_no_line_mux = { .num_items = 3, .items = { { "Front Mic", 0x5 }, { "Rear Mic", 0x2 }, { "CD", 0x7 }, } }; /* front, rear, clfe, rear_surr */ static const hda_nid_t cmi9880_dac_nids[4] = { 0x03, 0x04, 0x05, 0x06 }; /* ADC0, ADC1 */ static const hda_nid_t cmi9880_adc_nids[2] = { 0x08, 0x09 }; #define CMI_DIG_OUT_NID 0x07 #define CMI_DIG_IN_NID 0x0a /* */ static const struct hda_verb cmi9880_basic_init[] = { /* port-D for line out (rear panel) */ { 0x0b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP }, /* port-E for HP out (front panel) */ { 0x0f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP }, /* route front PCM to HP */ { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x00 }, /* port-A for surround (rear panel) */ { 0x0e, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP }, /* port-G for CLFE (rear panel) */ { 0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP }, { 0x1f, AC_VERB_SET_CONNECT_SEL, 0x02 }, /* port-H for side (rear panel) */ { 0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, 
PIN_HP }, { 0x20, AC_VERB_SET_CONNECT_SEL, 0x01 }, /* port-C for line-in (rear panel) */ { 0x0c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN }, /* port-B for mic-in (rear panel) with vref */ { 0x0d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 }, /* port-F for mic-in (front panel) with vref */ { 0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 }, /* CD-in */ { 0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN }, /* route front mic to ADC1/2 */ { 0x08, AC_VERB_SET_CONNECT_SEL, 0x05 }, { 0x09, AC_VERB_SET_CONNECT_SEL, 0x05 }, {} /* terminator */ }; static const struct hda_verb cmi9880_allout_init[] = { /* port-D for line out (rear panel) */ { 0x0b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP }, /* port-E for HP out (front panel) */ { 0x0f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP }, /* route front PCM to HP */ { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x00 }, /* port-A for side (rear panel) */ { 0x0e, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP }, /* port-G for CLFE (rear panel) */ { 0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP }, { 0x1f, AC_VERB_SET_CONNECT_SEL, 0x02 }, /* port-H for side (rear panel) */ { 0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP }, { 0x20, AC_VERB_SET_CONNECT_SEL, 0x01 }, /* port-C for surround (rear panel) */ { 0x0c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP }, /* port-B for mic-in (rear panel) with vref */ { 0x0d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 }, /* port-F for mic-in (front panel) with vref */ { 0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 }, /* CD-in */ { 0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN }, /* route front mic to ADC1/2 */ { 0x08, AC_VERB_SET_CONNECT_SEL, 0x05 }, { 0x09, AC_VERB_SET_CONNECT_SEL, 0x05 }, {} /* terminator */ }; /* */ static int cmi9880_build_controls(struct hda_codec *codec) { struct cmi_spec *spec = codec->spec; struct snd_kcontrol *kctl; int i, err; err = snd_hda_add_new_ctls(codec, cmi9880_basic_mixer); if (err < 0) return err; if (spec->channel_modes) { err = snd_hda_add_new_ctls(codec, cmi9880_ch_mode_mixer); if (err < 0) return 
err; } if (spec->multiout.dig_out_nid) { err = snd_hda_create_spdif_out_ctls(codec, spec->multiout.dig_out_nid); if (err < 0) return err; err = snd_hda_create_spdif_share_sw(codec, &spec->multiout); if (err < 0) return err; spec->multiout.share_spdif = 1; } if (spec->dig_in_nid) { err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in_nid); if (err < 0) return err; } /* assign Capture Source enums to NID */ kctl = snd_hda_find_mixer_ctl(codec, "Capture Source"); for (i = 0; kctl && i < kctl->count; i++) { err = snd_hda_add_nid(codec, kctl, i, spec->adc_nids[i]); if (err < 0) return err; } return 0; } /* fill in the multi_dac_nids table, which will decide which audio widget to use for each channel */ static int cmi9880_fill_multi_dac_nids(struct hda_codec *codec, const struct auto_pin_cfg *cfg) { struct cmi_spec *spec = codec->spec; hda_nid_t nid; int assigned[4]; int i, j; /* clear the table, only one c-media dac assumed here */ memset(spec->dac_nids, 0, sizeof(spec->dac_nids)); memset(assigned, 0, sizeof(assigned)); /* check the pins we found */ for (i = 0; i < cfg->line_outs; i++) { nid = cfg->line_out_pins[i]; /* nid 0x0b~0x0e is hardwired to audio widget 0x3~0x6 */ if (nid >= 0x0b && nid <= 0x0e) { spec->dac_nids[i] = (nid - 0x0b) + 0x03; assigned[nid - 0x0b] = 1; } } /* left pin can be connect to any audio widget */ for (i = 0; i < cfg->line_outs; i++) { nid = cfg->line_out_pins[i]; if (nid <= 0x0e) continue; /* search for an empty channel */ for (j = 0; j < cfg->line_outs; j++) { if (! 
assigned[j]) { spec->dac_nids[i] = j + 0x03; assigned[j] = 1; break; } } } spec->num_dacs = cfg->line_outs; return 0; } /* create multi_init table, which is used for multichannel initialization */ static int cmi9880_fill_multi_init(struct hda_codec *codec, const struct auto_pin_cfg *cfg) { struct cmi_spec *spec = codec->spec; hda_nid_t nid; int i, j, k, len; /* clear the table, only one c-media dac assumed here */ memset(spec->multi_init, 0, sizeof(spec->multi_init)); for (j = 0, i = 0; i < cfg->line_outs; i++) { hda_nid_t conn[4]; nid = cfg->line_out_pins[i]; /* set as output */ spec->multi_init[j].nid = nid; spec->multi_init[j].verb = AC_VERB_SET_PIN_WIDGET_CONTROL; spec->multi_init[j].param = PIN_OUT; j++; if (nid > 0x0e) { /* set connection */ spec->multi_init[j].nid = nid; spec->multi_init[j].verb = AC_VERB_SET_CONNECT_SEL; spec->multi_init[j].param = 0; /* find the index in connect list */ len = snd_hda_get_connections(codec, nid, conn, 4); for (k = 0; k < len; k++) if (conn[k] == spec->dac_nids[i]) { spec->multi_init[j].param = k; break; } j++; } } return 0; } static int cmi9880_init(struct hda_codec *codec) { struct cmi_spec *spec = codec->spec; if (spec->board_config == CMI_ALLOUT) snd_hda_sequence_write(codec, cmi9880_allout_init); else snd_hda_sequence_write(codec, cmi9880_basic_init); if (spec->board_config == CMI_AUTO) snd_hda_sequence_write(codec, spec->multi_init); return 0; } /* * Analog playback callbacks */ static int cmi9880_playback_pcm_open(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct cmi_spec *spec = codec->spec; return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream, hinfo); } static int cmi9880_playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct cmi_spec *spec = codec->spec; return snd_hda_multi_out_analog_prepare(codec, &spec->multiout, stream_tag, format, 
substream); } static int cmi9880_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct cmi_spec *spec = codec->spec; return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout); } /* * Digital out */ static int cmi9880_dig_playback_pcm_open(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct cmi_spec *spec = codec->spec; return snd_hda_multi_out_dig_open(codec, &spec->multiout); } static int cmi9880_dig_playback_pcm_close(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct cmi_spec *spec = codec->spec; return snd_hda_multi_out_dig_close(codec, &spec->multiout); } static int cmi9880_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct cmi_spec *spec = codec->spec; return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, stream_tag, format, substream); } /* * Analog capture */ static int cmi9880_capture_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct cmi_spec *spec = codec->spec; snd_hda_codec_setup_stream(codec, spec->adc_nids[substream->number], stream_tag, 0, format); return 0; } static int cmi9880_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct cmi_spec *spec = codec->spec; snd_hda_codec_cleanup_stream(codec, spec->adc_nids[substream->number]); return 0; } /* */ static const struct hda_pcm_stream cmi9880_pcm_analog_playback = { .substreams = 1, .channels_min = 2, .channels_max = 8, .nid = 0x03, /* NID to query formats and rates */ .ops = { .open = cmi9880_playback_pcm_open, .prepare = cmi9880_playback_pcm_prepare, .cleanup = cmi9880_playback_pcm_cleanup }, }; static const struct hda_pcm_stream 
cmi9880_pcm_analog_capture = { .substreams = 2, .channels_min = 2, .channels_max = 2, .nid = 0x08, /* NID to query formats and rates */ .ops = { .prepare = cmi9880_capture_pcm_prepare, .cleanup = cmi9880_capture_pcm_cleanup }, }; static const struct hda_pcm_stream cmi9880_pcm_digital_playback = { .substreams = 1, .channels_min = 2, .channels_max = 2, /* NID is set in cmi9880_build_pcms */ .ops = { .open = cmi9880_dig_playback_pcm_open, .close = cmi9880_dig_playback_pcm_close, .prepare = cmi9880_dig_playback_pcm_prepare }, }; static const struct hda_pcm_stream cmi9880_pcm_digital_capture = { .substreams = 1, .channels_min = 2, .channels_max = 2, /* NID is set in cmi9880_build_pcms */ }; static int cmi9880_build_pcms(struct hda_codec *codec) { struct cmi_spec *spec = codec->spec; struct hda_pcm *info = spec->pcm_rec; codec->num_pcms = 1; codec->pcm_info = info; info->name = "CMI9880"; info->stream[SNDRV_PCM_STREAM_PLAYBACK] = cmi9880_pcm_analog_playback; info->stream[SNDRV_PCM_STREAM_CAPTURE] = cmi9880_pcm_analog_capture; if (spec->multiout.dig_out_nid || spec->dig_in_nid) { codec->num_pcms++; info++; info->name = "CMI9880 Digital"; info->pcm_type = HDA_PCM_TYPE_SPDIF; if (spec->multiout.dig_out_nid) { info->stream[SNDRV_PCM_STREAM_PLAYBACK] = cmi9880_pcm_digital_playback; info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->multiout.dig_out_nid; } if (spec->dig_in_nid) { info->stream[SNDRV_PCM_STREAM_CAPTURE] = cmi9880_pcm_digital_capture; info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in_nid; } } return 0; } static void cmi9880_free(struct hda_codec *codec) { kfree(codec->spec); } /* */ static const char * const cmi9880_models[CMI_MODELS] = { [CMI_MINIMAL] = "minimal", [CMI_MIN_FP] = "min_fp", [CMI_FULL] = "full", [CMI_FULL_DIG] = "full_dig", [CMI_ALLOUT] = "allout", [CMI_AUTO] = "auto", }; static const struct snd_pci_quirk cmi9880_cfg_tbl[] = { SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", CMI_FULL_DIG), SND_PCI_QUIRK(0x1854, 0x002b, "LG LS75", CMI_MINIMAL), 
SND_PCI_QUIRK(0x1854, 0x0032, "LG", CMI_FULL_DIG), {} /* terminator */ }; static const struct hda_codec_ops cmi9880_patch_ops = { .build_controls = cmi9880_build_controls, .build_pcms = cmi9880_build_pcms, .init = cmi9880_init, .free = cmi9880_free, }; static int patch_cmi9880(struct hda_codec *codec) { struct cmi_spec *spec; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->spec = spec; spec->board_config = snd_hda_check_board_config(codec, CMI_MODELS, cmi9880_models, cmi9880_cfg_tbl); if (spec->board_config < 0) { snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); spec->board_config = CMI_AUTO; /* try everything */ } /* copy default DAC NIDs */ memcpy(spec->dac_nids, cmi9880_dac_nids, sizeof(spec->dac_nids)); spec->num_dacs = 4; switch (spec->board_config) { case CMI_MINIMAL: case CMI_MIN_FP: spec->channel_modes = cmi9880_channel_modes; if (spec->board_config == CMI_MINIMAL) spec->num_channel_modes = 2; else { spec->front_panel = 1; spec->num_channel_modes = 3; } spec->multiout.max_channels = cmi9880_channel_modes[0].channels; spec->input_mux = &cmi9880_basic_mux; break; case CMI_FULL: case CMI_FULL_DIG: spec->front_panel = 1; spec->multiout.max_channels = 8; spec->input_mux = &cmi9880_basic_mux; if (spec->board_config == CMI_FULL_DIG) { spec->multiout.dig_out_nid = CMI_DIG_OUT_NID; spec->dig_in_nid = CMI_DIG_IN_NID; } break; case CMI_ALLOUT: spec->front_panel = 1; spec->multiout.max_channels = 8; spec->no_line_in = 1; spec->input_mux = &cmi9880_no_line_mux; spec->multiout.dig_out_nid = CMI_DIG_OUT_NID; break; case CMI_AUTO: { unsigned int port_e, port_f, port_g, port_h; unsigned int port_spdifi, port_spdifo; struct auto_pin_cfg cfg; /* collect pin default configuration */ port_e = snd_hda_codec_get_pincfg(codec, 0x0f); port_f = snd_hda_codec_get_pincfg(codec, 0x10); spec->front_panel = 1; if (get_defcfg_connect(port_e) == AC_JACK_PORT_NONE || get_defcfg_connect(port_f) == AC_JACK_PORT_NONE) { port_g 
= snd_hda_codec_get_pincfg(codec, 0x1f); port_h = snd_hda_codec_get_pincfg(codec, 0x20); spec->channel_modes = cmi9880_channel_modes; /* no front panel */ if (get_defcfg_connect(port_g) == AC_JACK_PORT_NONE || get_defcfg_connect(port_h) == AC_JACK_PORT_NONE) { /* no optional rear panel */ spec->board_config = CMI_MINIMAL; spec->front_panel = 0; spec->num_channel_modes = 2; } else { spec->board_config = CMI_MIN_FP; spec->num_channel_modes = 3; } spec->input_mux = &cmi9880_basic_mux; spec->multiout.max_channels = cmi9880_channel_modes[0].channels; } else { spec->input_mux = &cmi9880_basic_mux; port_spdifi = snd_hda_codec_get_pincfg(codec, 0x13); port_spdifo = snd_hda_codec_get_pincfg(codec, 0x12); if (get_defcfg_connect(port_spdifo) != AC_JACK_PORT_NONE) spec->multiout.dig_out_nid = CMI_DIG_OUT_NID; if (get_defcfg_connect(port_spdifi) != AC_JACK_PORT_NONE) spec->dig_in_nid = CMI_DIG_IN_NID; spec->multiout.max_channels = 8; } snd_hda_parse_pin_def_config(codec, &cfg, NULL); if (cfg.line_outs) { spec->multiout.max_channels = cfg.line_outs * 2; cmi9880_fill_multi_dac_nids(codec, &cfg); cmi9880_fill_multi_init(codec, &cfg); } else snd_printd("patch_cmedia: cannot detect association in defcfg\n"); break; } } spec->multiout.num_dacs = spec->num_dacs; spec->multiout.dac_nids = spec->dac_nids; spec->adc_nids = cmi9880_adc_nids; codec->patch_ops = cmi9880_patch_ops; return 0; } /* * patch entries */ static const struct hda_codec_preset snd_hda_preset_cmedia[] = { { .id = 0x13f69880, .name = "CMI9880", .patch = patch_cmi9880 }, { .id = 0x434d4980, .name = "CMI9880", .patch = patch_cmi9880 }, {} /* terminator */ }; MODULE_ALIAS("snd-hda-codec-id:13f69880"); MODULE_ALIAS("snd-hda-codec-id:434d4980"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("C-Media HD-audio codec"); static struct hda_codec_preset_list cmedia_list = { .preset = snd_hda_preset_cmedia, .owner = THIS_MODULE, }; static int __init patch_cmedia_init(void) { return snd_hda_add_codec_preset(&cmedia_list); } static void 
__exit patch_cmedia_exit(void) { snd_hda_delete_codec_preset(&cmedia_list); } module_init(patch_cmedia_init) module_exit(patch_cmedia_exit)
gpl-2.0
ffolkes/android_kernel_samsung_trlte
fs/jfs/namei.c
2369
38016
/* * Copyright (C) International Business Machines Corp., 2000-2004 * Portions Copyright (C) Christoph Hellwig, 2001-2002 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/namei.h> #include <linux/ctype.h> #include <linux/quotaops.h> #include <linux/exportfs.h> #include "jfs_incore.h" #include "jfs_superblock.h" #include "jfs_inode.h" #include "jfs_dinode.h" #include "jfs_dmap.h" #include "jfs_unicode.h" #include "jfs_metapage.h" #include "jfs_xattr.h" #include "jfs_acl.h" #include "jfs_debug.h" /* * forward references */ const struct dentry_operations jfs_ci_dentry_operations; static s64 commitZeroLink(tid_t, struct inode *); /* * NAME: free_ea_wmap(inode) * * FUNCTION: free uncommitted extended attributes from working map * */ static inline void free_ea_wmap(struct inode *inode) { dxd_t *ea = &JFS_IP(inode)->ea; if (ea->flag & DXD_EXTENT) { /* free EA pages from cache */ invalidate_dxd_metapages(inode, *ea); dbFree(inode, addressDXD(ea), lengthDXD(ea)); } ea->flag = 0; } /* * NAME: jfs_create(dip, dentry, mode) * * FUNCTION: create a regular file in the parent directory <dip> * with name = <from dentry> and mode = <mode> * * PARAMETER: dip - parent directory vnode * dentry - dentry of new file * mode - create mode (rwxrwxrwx). 
* nd- nd struct * * RETURN: Errors from subroutines * */ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode, bool excl) { int rc = 0; tid_t tid; /* transaction id */ struct inode *ip = NULL; /* child directory inode */ ino_t ino; struct component_name dname; /* child directory name */ struct btstack btstack; struct inode *iplist[2]; struct tblock *tblk; jfs_info("jfs_create: dip:0x%p name:%s", dip, dentry->d_name.name); dquot_initialize(dip); /* * search parent directory for entry/freespace * (dtSearch() returns parent directory page pinned) */ if ((rc = get_UCSname(&dname, dentry))) goto out1; /* * Either iAlloc() or txBegin() may block. Deadlock can occur if we * block there while holding dtree page, so we allocate the inode & * begin the transaction before we search the directory. */ ip = ialloc(dip, mode); if (IS_ERR(ip)) { rc = PTR_ERR(ip); goto out2; } tid = txBegin(dip->i_sb, 0); mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); rc = jfs_init_acl(tid, ip, dip); if (rc) goto out3; rc = jfs_init_security(tid, ip, dip, &dentry->d_name); if (rc) { txAbort(tid, 0); goto out3; } if ((rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE))) { jfs_err("jfs_create: dtSearch returned %d", rc); txAbort(tid, 0); goto out3; } tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_CREATE; tblk->ino = ip->i_ino; tblk->u.ixpxd = JFS_IP(ip)->ixpxd; iplist[0] = dip; iplist[1] = ip; /* * initialize the child XAD tree root in-line in inode */ xtInitRoot(tid, ip); /* * create entry in parent directory for child directory * (dtInsert() releases parent directory page) */ ino = ip->i_ino; if ((rc = dtInsert(tid, dip, &dname, &ino, &btstack))) { if (rc == -EIO) { jfs_err("jfs_create: dtInsert returned -EIO"); txAbort(tid, 1); /* Marks Filesystem dirty */ } else txAbort(tid, 0); /* Filesystem full */ goto out3; } ip->i_op = &jfs_file_inode_operations; ip->i_fop = 
&jfs_file_operations; ip->i_mapping->a_ops = &jfs_aops; mark_inode_dirty(ip); dip->i_ctime = dip->i_mtime = CURRENT_TIME; mark_inode_dirty(dip); rc = txCommit(tid, 2, &iplist[0], 0); out3: txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); if (rc) { free_ea_wmap(ip); clear_nlink(ip); unlock_new_inode(ip); iput(ip); } else { unlock_new_inode(ip); d_instantiate(dentry, ip); } out2: free_UCSname(&dname); out1: jfs_info("jfs_create: rc:%d", rc); return rc; } /* * NAME: jfs_mkdir(dip, dentry, mode) * * FUNCTION: create a child directory in the parent directory <dip> * with name = <from dentry> and mode = <mode> * * PARAMETER: dip - parent directory vnode * dentry - dentry of child directory * mode - create mode (rwxrwxrwx). * * RETURN: Errors from subroutines * * note: * EACCESS: user needs search+write permission on the parent directory */ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode) { int rc = 0; tid_t tid; /* transaction id */ struct inode *ip = NULL; /* child directory inode */ ino_t ino; struct component_name dname; /* child directory name */ struct btstack btstack; struct inode *iplist[2]; struct tblock *tblk; jfs_info("jfs_mkdir: dip:0x%p name:%s", dip, dentry->d_name.name); dquot_initialize(dip); /* * search parent directory for entry/freespace * (dtSearch() returns parent directory page pinned) */ if ((rc = get_UCSname(&dname, dentry))) goto out1; /* * Either iAlloc() or txBegin() may block. Deadlock can occur if we * block there while holding dtree page, so we allocate the inode & * begin the transaction before we search the directory. 
*/ ip = ialloc(dip, S_IFDIR | mode); if (IS_ERR(ip)) { rc = PTR_ERR(ip); goto out2; } tid = txBegin(dip->i_sb, 0); mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); rc = jfs_init_acl(tid, ip, dip); if (rc) goto out3; rc = jfs_init_security(tid, ip, dip, &dentry->d_name); if (rc) { txAbort(tid, 0); goto out3; } if ((rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE))) { jfs_err("jfs_mkdir: dtSearch returned %d", rc); txAbort(tid, 0); goto out3; } tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_CREATE; tblk->ino = ip->i_ino; tblk->u.ixpxd = JFS_IP(ip)->ixpxd; iplist[0] = dip; iplist[1] = ip; /* * initialize the child directory in-line in inode */ dtInitRoot(tid, ip, dip->i_ino); /* * create entry in parent directory for child directory * (dtInsert() releases parent directory page) */ ino = ip->i_ino; if ((rc = dtInsert(tid, dip, &dname, &ino, &btstack))) { if (rc == -EIO) { jfs_err("jfs_mkdir: dtInsert returned -EIO"); txAbort(tid, 1); /* Marks Filesystem dirty */ } else txAbort(tid, 0); /* Filesystem full */ goto out3; } set_nlink(ip, 2); /* for '.' */ ip->i_op = &jfs_dir_inode_operations; ip->i_fop = &jfs_dir_operations; mark_inode_dirty(ip); /* update parent directory inode */ inc_nlink(dip); /* for '..' from child directory */ dip->i_ctime = dip->i_mtime = CURRENT_TIME; mark_inode_dirty(dip); rc = txCommit(tid, 2, &iplist[0], 0); out3: txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); if (rc) { free_ea_wmap(ip); clear_nlink(ip); unlock_new_inode(ip); iput(ip); } else { unlock_new_inode(ip); d_instantiate(dentry, ip); } out2: free_UCSname(&dname); out1: jfs_info("jfs_mkdir: rc:%d", rc); return rc; } /* * NAME: jfs_rmdir(dip, dentry) * * FUNCTION: remove a link to child directory * * PARAMETER: dip - parent inode * dentry - child directory dentry * * RETURN: -EINVAL - if name is . or .. * -EINVAL - if . or .. exist but are invalid. 
* errors from subroutines * * note: * if other threads have the directory open when the last link * is removed, the "." and ".." entries, if present, are removed before * rmdir() returns and no new entries may be created in the directory, * but the directory is not removed until the last reference to * the directory is released (cf.unlink() of regular file). */ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) { int rc; tid_t tid; /* transaction id */ struct inode *ip = dentry->d_inode; ino_t ino; struct component_name dname; struct inode *iplist[2]; struct tblock *tblk; jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name); /* Init inode for quota operations. */ dquot_initialize(dip); dquot_initialize(ip); /* directory must be empty to be removed */ if (!dtEmpty(ip)) { rc = -ENOTEMPTY; goto out; } if ((rc = get_UCSname(&dname, dentry))) { goto out; } tid = txBegin(dip->i_sb, 0); mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); iplist[0] = dip; iplist[1] = ip; tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_DELETE; tblk->u.ip = ip; /* * delete the entry of target directory from parent directory */ ino = ip->i_ino; if ((rc = dtDelete(tid, dip, &dname, &ino, JFS_REMOVE))) { jfs_err("jfs_rmdir: dtDelete returned %d", rc); if (rc == -EIO) txAbort(tid, 1); txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); goto out2; } /* update parent directory's link count corresponding * to ".." 
entry of the target directory deleted */ dip->i_ctime = dip->i_mtime = CURRENT_TIME; inode_dec_link_count(dip); /* * OS/2 could have created EA and/or ACL */ /* free EA from both persistent and working map */ if (JFS_IP(ip)->ea.flag & DXD_EXTENT) { /* free EA pages */ txEA(tid, ip, &JFS_IP(ip)->ea, NULL); } JFS_IP(ip)->ea.flag = 0; /* free ACL from both persistent and working map */ if (JFS_IP(ip)->acl.flag & DXD_EXTENT) { /* free ACL pages */ txEA(tid, ip, &JFS_IP(ip)->acl, NULL); } JFS_IP(ip)->acl.flag = 0; /* mark the target directory as deleted */ clear_nlink(ip); mark_inode_dirty(ip); rc = txCommit(tid, 2, &iplist[0], 0); txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); /* * Truncating the directory index table is not guaranteed. It * may need to be done iteratively */ if (test_cflag(COMMIT_Stale, dip)) { if (dip->i_size > 1) jfs_truncate_nolock(dip, 0); clear_cflag(COMMIT_Stale, dip); } out2: free_UCSname(&dname); out: jfs_info("jfs_rmdir: rc:%d", rc); return rc; } /* * NAME: jfs_unlink(dip, dentry) * * FUNCTION: remove a link to object <vp> named by <name> * from parent directory <dvp> * * PARAMETER: dip - inode of parent directory * dentry - dentry of object to be removed * * RETURN: errors from subroutines * * note: * temporary file: if one or more processes have the file open * when the last link is removed, the link will be removed before * unlink() returns, but the removal of the file contents will be * postponed until all references to the files are closed. * * JFS does NOT support unlink() on directories. * */ static int jfs_unlink(struct inode *dip, struct dentry *dentry) { int rc; tid_t tid; /* transaction id */ struct inode *ip = dentry->d_inode; ino_t ino; struct component_name dname; /* object name */ struct inode *iplist[2]; struct tblock *tblk; s64 new_size = 0; int commit_flag; jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name); /* Init inode for quota operations. 
*/ dquot_initialize(dip); dquot_initialize(ip); if ((rc = get_UCSname(&dname, dentry))) goto out; IWRITE_LOCK(ip, RDWRLOCK_NORMAL); tid = txBegin(dip->i_sb, 0); mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); iplist[0] = dip; iplist[1] = ip; /* * delete the entry of target file from parent directory */ ino = ip->i_ino; if ((rc = dtDelete(tid, dip, &dname, &ino, JFS_REMOVE))) { jfs_err("jfs_unlink: dtDelete returned %d", rc); if (rc == -EIO) txAbort(tid, 1); /* Marks FS Dirty */ txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); IWRITE_UNLOCK(ip); goto out1; } ASSERT(ip->i_nlink); ip->i_ctime = dip->i_ctime = dip->i_mtime = CURRENT_TIME; mark_inode_dirty(dip); /* update target's inode */ inode_dec_link_count(ip); /* * commit zero link count object */ if (ip->i_nlink == 0) { assert(!test_cflag(COMMIT_Nolink, ip)); /* free block resources */ if ((new_size = commitZeroLink(tid, ip)) < 0) { txAbort(tid, 1); /* Marks FS Dirty */ txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); IWRITE_UNLOCK(ip); rc = new_size; goto out1; } tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_DELETE; tblk->u.ip = ip; } /* * Incomplete truncate of file data can * result in timing problems unless we synchronously commit the * transaction. 
*/ if (new_size) commit_flag = COMMIT_SYNC; else commit_flag = 0; /* * If xtTruncate was incomplete, commit synchronously to avoid * timing complications */ rc = txCommit(tid, 2, &iplist[0], commit_flag); txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); while (new_size && (rc == 0)) { tid = txBegin(dip->i_sb, 0); mutex_lock(&JFS_IP(ip)->commit_mutex); new_size = xtTruncate_pmap(tid, ip, new_size); if (new_size < 0) { txAbort(tid, 1); /* Marks FS Dirty */ rc = new_size; } else rc = txCommit(tid, 2, &iplist[0], COMMIT_SYNC); txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); } if (ip->i_nlink == 0) set_cflag(COMMIT_Nolink, ip); IWRITE_UNLOCK(ip); /* * Truncating the directory index table is not guaranteed. It * may need to be done iteratively */ if (test_cflag(COMMIT_Stale, dip)) { if (dip->i_size > 1) jfs_truncate_nolock(dip, 0); clear_cflag(COMMIT_Stale, dip); } out1: free_UCSname(&dname); out: jfs_info("jfs_unlink: rc:%d", rc); return rc; } /* * NAME: commitZeroLink() * * FUNCTION: for non-directory, called by jfs_remove(), * truncate a regular file, directory or symbolic * link to zero length. return 0 if type is not * one of these. * * if the file is currently associated with a VM segment * only permanent disk and inode map resources are freed, * and neither the inode nor indirect blocks are modified * so that the resources can be later freed in the work * map by ctrunc1. * if there is no VM segment on entry, the resources are * freed in both work and permanent map. * (? for temporary file - memory object is cached even * after no reference: * reference count > 0 - ) * * PARAMETERS: cd - pointer to commit data structure. * current inode is the one to truncate. 
* * RETURN: Errors from subroutines */ static s64 commitZeroLink(tid_t tid, struct inode *ip) { int filetype; struct tblock *tblk; jfs_info("commitZeroLink: tid = %d, ip = 0x%p", tid, ip); filetype = ip->i_mode & S_IFMT; switch (filetype) { case S_IFREG: break; case S_IFLNK: /* fast symbolic link */ if (ip->i_size < IDATASIZE) { ip->i_size = 0; return 0; } break; default: assert(filetype != S_IFDIR); return 0; } set_cflag(COMMIT_Freewmap, ip); /* mark transaction of block map update type */ tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_PMAP; /* * free EA */ if (JFS_IP(ip)->ea.flag & DXD_EXTENT) /* acquire maplock on EA to be freed from block map */ txEA(tid, ip, &JFS_IP(ip)->ea, NULL); /* * free ACL */ if (JFS_IP(ip)->acl.flag & DXD_EXTENT) /* acquire maplock on EA to be freed from block map */ txEA(tid, ip, &JFS_IP(ip)->acl, NULL); /* * free xtree/data (truncate to zero length): * free xtree/data pages from cache if COMMIT_PWMAP, * free xtree/data blocks from persistent block map, and * free xtree/data blocks from working block map if COMMIT_PWMAP; */ if (ip->i_size) return xtTruncate_pmap(tid, ip, 0); return 0; } /* * NAME: jfs_free_zero_link() * * FUNCTION: for non-directory, called by iClose(), * free resources of a file from cache and WORKING map * for a file previously committed with zero link count * while associated with a pager object, * * PARAMETER: ip - pointer to inode of file. */ void jfs_free_zero_link(struct inode *ip) { int type; jfs_info("jfs_free_zero_link: ip = 0x%p", ip); /* return if not reg or symbolic link or if size is * already ok. 
*/ type = ip->i_mode & S_IFMT; switch (type) { case S_IFREG: break; case S_IFLNK: /* if its contained in inode nothing to do */ if (ip->i_size < IDATASIZE) return; break; default: return; } /* * free EA */ if (JFS_IP(ip)->ea.flag & DXD_EXTENT) { s64 xaddr = addressDXD(&JFS_IP(ip)->ea); int xlen = lengthDXD(&JFS_IP(ip)->ea); struct maplock maplock; /* maplock for COMMIT_WMAP */ struct pxd_lock *pxdlock; /* maplock for COMMIT_WMAP */ /* free EA pages from cache */ invalidate_dxd_metapages(ip, JFS_IP(ip)->ea); /* free EA extent from working block map */ maplock.index = 1; pxdlock = (struct pxd_lock *) & maplock; pxdlock->flag = mlckFREEPXD; PXDaddress(&pxdlock->pxd, xaddr); PXDlength(&pxdlock->pxd, xlen); txFreeMap(ip, pxdlock, NULL, COMMIT_WMAP); } /* * free ACL */ if (JFS_IP(ip)->acl.flag & DXD_EXTENT) { s64 xaddr = addressDXD(&JFS_IP(ip)->acl); int xlen = lengthDXD(&JFS_IP(ip)->acl); struct maplock maplock; /* maplock for COMMIT_WMAP */ struct pxd_lock *pxdlock; /* maplock for COMMIT_WMAP */ invalidate_dxd_metapages(ip, JFS_IP(ip)->acl); /* free ACL extent from working block map */ maplock.index = 1; pxdlock = (struct pxd_lock *) & maplock; pxdlock->flag = mlckFREEPXD; PXDaddress(&pxdlock->pxd, xaddr); PXDlength(&pxdlock->pxd, xlen); txFreeMap(ip, pxdlock, NULL, COMMIT_WMAP); } /* * free xtree/data (truncate to zero length): * free xtree/data pages from cache, and * free xtree/data blocks from working block map; */ if (ip->i_size) xtTruncate(0, ip, 0, COMMIT_WMAP); } /* * NAME: jfs_link(vp, dvp, name, crp) * * FUNCTION: create a link to <vp> by the name = <name> * in the parent directory <dvp> * * PARAMETER: vp - target object * dvp - parent directory of new link * name - name of new link to target object * crp - credential * * RETURN: Errors from subroutines * * note: * JFS does NOT support link() on directories (to prevent circular * path in the directory hierarchy); * EPERM: the target object is a directory, and either the caller * does not have appropriate 
privileges or the implementation prohibits * using link() on directories [XPG4.2]. * * JFS does NOT support links between file systems: * EXDEV: target object and new link are on different file systems and * implementation does not support links between file systems [XPG4.2]. */ static int jfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { int rc; tid_t tid; struct inode *ip = old_dentry->d_inode; ino_t ino; struct component_name dname; struct btstack btstack; struct inode *iplist[2]; jfs_info("jfs_link: %s %s", old_dentry->d_name.name, dentry->d_name.name); dquot_initialize(dir); tid = txBegin(ip->i_sb, 0); mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); /* * scan parent directory for entry/freespace */ if ((rc = get_UCSname(&dname, dentry))) goto out; if ((rc = dtSearch(dir, &dname, &ino, &btstack, JFS_CREATE))) goto free_dname; /* * create entry for new link in parent directory */ ino = ip->i_ino; if ((rc = dtInsert(tid, dir, &dname, &ino, &btstack))) goto free_dname; /* update object inode */ inc_nlink(ip); /* for new link */ ip->i_ctime = CURRENT_TIME; dir->i_ctime = dir->i_mtime = CURRENT_TIME; mark_inode_dirty(dir); ihold(ip); iplist[0] = ip; iplist[1] = dir; rc = txCommit(tid, 2, &iplist[0], 0); if (rc) { drop_nlink(ip); /* never instantiated */ iput(ip); } else d_instantiate(dentry, ip); free_dname: free_UCSname(&dname); out: txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dir)->commit_mutex); jfs_info("jfs_link: rc:%d", rc); return rc; } /* * NAME: jfs_symlink(dip, dentry, name) * * FUNCTION: creates a symbolic link to <symlink> by name <name> * in directory <dip> * * PARAMETER: dip - parent directory vnode * dentry - dentry of symbolic link * name - the path name of the existing object * that will be the source of the link * * RETURN: errors from subroutines * * note: * ENAMETOOLONG: pathname resolution of a 
symbolic link produced * an intermediate result whose length exceeds PATH_MAX [XPG4.2] */ static int jfs_symlink(struct inode *dip, struct dentry *dentry, const char *name) { int rc; tid_t tid; ino_t ino = 0; struct component_name dname; int ssize; /* source pathname size */ struct btstack btstack; struct inode *ip = dentry->d_inode; unchar *i_fastsymlink; s64 xlen = 0; int bmask = 0, xsize; s64 xaddr; struct metapage *mp; struct super_block *sb; struct tblock *tblk; struct inode *iplist[2]; jfs_info("jfs_symlink: dip:0x%p name:%s", dip, name); dquot_initialize(dip); ssize = strlen(name) + 1; /* * search parent directory for entry/freespace * (dtSearch() returns parent directory page pinned) */ if ((rc = get_UCSname(&dname, dentry))) goto out1; /* * allocate on-disk/in-memory inode for symbolic link: * (iAlloc() returns new, locked inode) */ ip = ialloc(dip, S_IFLNK | 0777); if (IS_ERR(ip)) { rc = PTR_ERR(ip); goto out2; } tid = txBegin(dip->i_sb, 0); mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); rc = jfs_init_security(tid, ip, dip, &dentry->d_name); if (rc) goto out3; tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_CREATE; tblk->ino = ip->i_ino; tblk->u.ixpxd = JFS_IP(ip)->ixpxd; /* fix symlink access permission * (dir_create() ANDs in the u.u_cmask, * but symlinks really need to be 777 access) */ ip->i_mode |= 0777; /* * write symbolic link target path name */ xtInitRoot(tid, ip); /* * write source path name inline in on-disk inode (fast symbolic link) */ if (ssize <= IDATASIZE) { ip->i_op = &jfs_fast_symlink_inode_operations; i_fastsymlink = JFS_IP(ip)->i_inline; memcpy(i_fastsymlink, name, ssize); ip->i_size = ssize - 1; /* * if symlink is > 128 bytes, we don't have the space to * store inline extended attributes */ if (ssize > sizeof (JFS_IP(ip)->i_inline)) JFS_IP(ip)->mode2 &= ~INLINEEA; jfs_info("jfs_symlink: fast symlink added ssize:%d name:%s ", ssize, name); } /* * 
write source path name in a single extent */ else { jfs_info("jfs_symlink: allocate extent ip:0x%p", ip); ip->i_op = &jfs_symlink_inode_operations; ip->i_mapping->a_ops = &jfs_aops; /* * even though the data of symlink object (source * path name) is treated as non-journaled user data, * it is read/written thru buffer cache for performance. */ sb = ip->i_sb; bmask = JFS_SBI(sb)->bsize - 1; xsize = (ssize + bmask) & ~bmask; xaddr = 0; xlen = xsize >> JFS_SBI(sb)->l2bsize; if ((rc = xtInsert(tid, ip, 0, 0, xlen, &xaddr, 0))) { txAbort(tid, 0); goto out3; } ip->i_size = ssize - 1; while (ssize) { /* This is kind of silly since PATH_MAX == 4K */ int copy_size = min(ssize, PSIZE); mp = get_metapage(ip, xaddr, PSIZE, 1); if (mp == NULL) { xtTruncate(tid, ip, 0, COMMIT_PWMAP); rc = -EIO; txAbort(tid, 0); goto out3; } memcpy(mp->data, name, copy_size); flush_metapage(mp); ssize -= copy_size; name += copy_size; xaddr += JFS_SBI(sb)->nbperpage; } } /* * create entry for symbolic link in parent directory */ rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE); if (rc == 0) { ino = ip->i_ino; rc = dtInsert(tid, dip, &dname, &ino, &btstack); } if (rc) { if (xlen) xtTruncate(tid, ip, 0, COMMIT_PWMAP); txAbort(tid, 0); /* discard new inode */ goto out3; } mark_inode_dirty(ip); dip->i_ctime = dip->i_mtime = CURRENT_TIME; mark_inode_dirty(dip); /* * commit update of parent directory and link object */ iplist[0] = dip; iplist[1] = ip; rc = txCommit(tid, 2, &iplist[0], 0); out3: txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); if (rc) { free_ea_wmap(ip); clear_nlink(ip); unlock_new_inode(ip); iput(ip); } else { unlock_new_inode(ip); d_instantiate(dentry, ip); } out2: free_UCSname(&dname); out1: jfs_info("jfs_symlink: rc:%d", rc); return rc; } /* * NAME: jfs_rename * * FUNCTION: rename a file or directory */ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct 
btstack btstack; ino_t ino; struct component_name new_dname; struct inode *new_ip; struct component_name old_dname; struct inode *old_ip; int rc; tid_t tid; struct tlock *tlck; struct dt_lock *dtlck; struct lv *lv; int ipcount; struct inode *iplist[4]; struct tblock *tblk; s64 new_size = 0; int commit_flag; jfs_info("jfs_rename: %s %s", old_dentry->d_name.name, new_dentry->d_name.name); dquot_initialize(old_dir); dquot_initialize(new_dir); old_ip = old_dentry->d_inode; new_ip = new_dentry->d_inode; if ((rc = get_UCSname(&old_dname, old_dentry))) goto out1; if ((rc = get_UCSname(&new_dname, new_dentry))) goto out2; /* * Make sure source inode number is what we think it is */ rc = dtSearch(old_dir, &old_dname, &ino, &btstack, JFS_LOOKUP); if (rc || (ino != old_ip->i_ino)) { rc = -ENOENT; goto out3; } /* * Make sure dest inode number (if any) is what we think it is */ rc = dtSearch(new_dir, &new_dname, &ino, &btstack, JFS_LOOKUP); if (!rc) { if ((!new_ip) || (ino != new_ip->i_ino)) { rc = -ESTALE; goto out3; } } else if (rc != -ENOENT) goto out3; else if (new_ip) { /* no entry exists, but one was expected */ rc = -ESTALE; goto out3; } if (S_ISDIR(old_ip->i_mode)) { if (new_ip) { if (!dtEmpty(new_ip)) { rc = -ENOTEMPTY; goto out3; } } } else if (new_ip) { IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL); /* Init inode for quota operations. */ dquot_initialize(new_ip); } /* * The real work starts here */ tid = txBegin(new_dir->i_sb, 0); /* * How do we know the locking is safe from deadlocks? * The vfs does the hard part for us. Any time we are taking nested * commit_mutexes, the vfs already has i_mutex held on the parent. * Here, the vfs has already taken i_mutex on both old_dir and new_dir. 
*/ mutex_lock_nested(&JFS_IP(new_dir)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(old_ip)->commit_mutex, COMMIT_MUTEX_CHILD); if (old_dir != new_dir) mutex_lock_nested(&JFS_IP(old_dir)->commit_mutex, COMMIT_MUTEX_SECOND_PARENT); if (new_ip) { mutex_lock_nested(&JFS_IP(new_ip)->commit_mutex, COMMIT_MUTEX_VICTIM); /* * Change existing directory entry to new inode number */ ino = new_ip->i_ino; rc = dtModify(tid, new_dir, &new_dname, &ino, old_ip->i_ino, JFS_RENAME); if (rc) goto out4; drop_nlink(new_ip); if (S_ISDIR(new_ip->i_mode)) { drop_nlink(new_ip); if (new_ip->i_nlink) { mutex_unlock(&JFS_IP(new_ip)->commit_mutex); if (old_dir != new_dir) mutex_unlock(&JFS_IP(old_dir)->commit_mutex); mutex_unlock(&JFS_IP(old_ip)->commit_mutex); mutex_unlock(&JFS_IP(new_dir)->commit_mutex); if (!S_ISDIR(old_ip->i_mode) && new_ip) IWRITE_UNLOCK(new_ip); jfs_error(new_ip->i_sb, "jfs_rename: new_ip->i_nlink != 0"); return -EIO; } tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_DELETE; tblk->u.ip = new_ip; } else if (new_ip->i_nlink == 0) { assert(!test_cflag(COMMIT_Nolink, new_ip)); /* free block resources */ if ((new_size = commitZeroLink(tid, new_ip)) < 0) { txAbort(tid, 1); /* Marks FS Dirty */ rc = new_size; goto out4; } tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_DELETE; tblk->u.ip = new_ip; } else { new_ip->i_ctime = CURRENT_TIME; mark_inode_dirty(new_ip); } } else { /* * Add new directory entry */ rc = dtSearch(new_dir, &new_dname, &ino, &btstack, JFS_CREATE); if (rc) { jfs_err("jfs_rename didn't expect dtSearch to fail " "w/rc = %d", rc); goto out4; } ino = old_ip->i_ino; rc = dtInsert(tid, new_dir, &new_dname, &ino, &btstack); if (rc) { if (rc == -EIO) jfs_err("jfs_rename: dtInsert returned -EIO"); goto out4; } if (S_ISDIR(old_ip->i_mode)) inc_nlink(new_dir); } /* * Remove old directory entry */ ino = old_ip->i_ino; rc = dtDelete(tid, old_dir, &old_dname, &ino, JFS_REMOVE); if (rc) { jfs_err("jfs_rename did not expect dtDelete to return rc = %d", 
rc); txAbort(tid, 1); /* Marks Filesystem dirty */ goto out4; } if (S_ISDIR(old_ip->i_mode)) { drop_nlink(old_dir); if (old_dir != new_dir) { /* * Change inode number of parent for moved directory */ JFS_IP(old_ip)->i_dtroot.header.idotdot = cpu_to_le32(new_dir->i_ino); /* Linelock header of dtree */ tlck = txLock(tid, old_ip, (struct metapage *) &JFS_IP(old_ip)->bxflag, tlckDTREE | tlckBTROOT | tlckRELINK); dtlck = (struct dt_lock *) & tlck->lock; ASSERT(dtlck->index == 0); lv = & dtlck->lv[0]; lv->offset = 0; lv->length = 1; dtlck->index++; } } /* * Update ctime on changed/moved inodes & mark dirty */ old_ip->i_ctime = CURRENT_TIME; mark_inode_dirty(old_ip); new_dir->i_ctime = new_dir->i_mtime = current_fs_time(new_dir->i_sb); mark_inode_dirty(new_dir); /* Build list of inodes modified by this transaction */ ipcount = 0; iplist[ipcount++] = old_ip; if (new_ip) iplist[ipcount++] = new_ip; iplist[ipcount++] = old_dir; if (old_dir != new_dir) { iplist[ipcount++] = new_dir; old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME; mark_inode_dirty(old_dir); } /* * Incomplete truncate of file data can * result in timing problems unless we synchronously commit the * transaction. 
*/ if (new_size) commit_flag = COMMIT_SYNC; else commit_flag = 0; rc = txCommit(tid, ipcount, iplist, commit_flag); out4: txEnd(tid); if (new_ip) mutex_unlock(&JFS_IP(new_ip)->commit_mutex); if (old_dir != new_dir) mutex_unlock(&JFS_IP(old_dir)->commit_mutex); mutex_unlock(&JFS_IP(old_ip)->commit_mutex); mutex_unlock(&JFS_IP(new_dir)->commit_mutex); while (new_size && (rc == 0)) { tid = txBegin(new_ip->i_sb, 0); mutex_lock(&JFS_IP(new_ip)->commit_mutex); new_size = xtTruncate_pmap(tid, new_ip, new_size); if (new_size < 0) { txAbort(tid, 1); rc = new_size; } else rc = txCommit(tid, 1, &new_ip, COMMIT_SYNC); txEnd(tid); mutex_unlock(&JFS_IP(new_ip)->commit_mutex); } if (new_ip && (new_ip->i_nlink == 0)) set_cflag(COMMIT_Nolink, new_ip); out3: free_UCSname(&new_dname); out2: free_UCSname(&old_dname); out1: if (new_ip && !S_ISDIR(new_ip->i_mode)) IWRITE_UNLOCK(new_ip); /* * Truncating the directory index table is not guaranteed. It * may need to be done iteratively */ if (test_cflag(COMMIT_Stale, old_dir)) { if (old_dir->i_size > 1) jfs_truncate_nolock(old_dir, 0); clear_cflag(COMMIT_Stale, old_dir); } jfs_info("jfs_rename: returning %d", rc); return rc; } /* * NAME: jfs_mknod * * FUNCTION: Create a special file (device) */ static int jfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct jfs_inode_info *jfs_ip; struct btstack btstack; struct component_name dname; ino_t ino; struct inode *ip; struct inode *iplist[2]; int rc; tid_t tid; struct tblock *tblk; if (!new_valid_dev(rdev)) return -EINVAL; jfs_info("jfs_mknod: %s", dentry->d_name.name); dquot_initialize(dir); if ((rc = get_UCSname(&dname, dentry))) goto out; ip = ialloc(dir, mode); if (IS_ERR(ip)) { rc = PTR_ERR(ip); goto out1; } jfs_ip = JFS_IP(ip); tid = txBegin(dir->i_sb, 0); mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); rc = jfs_init_acl(tid, ip, dir); if (rc) goto out3; rc = 
jfs_init_security(tid, ip, dir, &dentry->d_name); if (rc) { txAbort(tid, 0); goto out3; } if ((rc = dtSearch(dir, &dname, &ino, &btstack, JFS_CREATE))) { txAbort(tid, 0); goto out3; } tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_CREATE; tblk->ino = ip->i_ino; tblk->u.ixpxd = JFS_IP(ip)->ixpxd; ino = ip->i_ino; if ((rc = dtInsert(tid, dir, &dname, &ino, &btstack))) { txAbort(tid, 0); goto out3; } ip->i_op = &jfs_file_inode_operations; jfs_ip->dev = new_encode_dev(rdev); init_special_inode(ip, ip->i_mode, rdev); mark_inode_dirty(ip); dir->i_ctime = dir->i_mtime = CURRENT_TIME; mark_inode_dirty(dir); iplist[0] = dir; iplist[1] = ip; rc = txCommit(tid, 2, iplist, 0); out3: txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dir)->commit_mutex); if (rc) { free_ea_wmap(ip); clear_nlink(ip); unlock_new_inode(ip); iput(ip); } else { unlock_new_inode(ip); d_instantiate(dentry, ip); } out1: free_UCSname(&dname); out: jfs_info("jfs_mknod: returning %d", rc); return rc; } static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, unsigned int flags) { struct btstack btstack; ino_t inum; struct inode *ip; struct component_name key; int rc; jfs_info("jfs_lookup: name = %s", dentry->d_name.name); if ((rc = get_UCSname(&key, dentry))) return ERR_PTR(rc); rc = dtSearch(dip, &key, &inum, &btstack, JFS_LOOKUP); free_UCSname(&key); if (rc == -ENOENT) { ip = NULL; } else if (rc) { jfs_err("jfs_lookup: dtSearch returned %d", rc); ip = ERR_PTR(rc); } else { ip = jfs_iget(dip->i_sb, inum); if (IS_ERR(ip)) jfs_err("jfs_lookup: iget failed on inum %d", (uint)inum); } return d_splice_alias(ip, dentry); } static struct inode *jfs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct inode *inode; if (ino == 0) return ERR_PTR(-ESTALE); inode = jfs_iget(sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); if (generation && inode->i_generation != generation) { iput(inode); return ERR_PTR(-ESTALE); } return inode; } struct dentry 
*jfs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, jfs_nfs_get_inode); } struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, jfs_nfs_get_inode); } struct dentry *jfs_get_parent(struct dentry *dentry) { unsigned long parent_ino; parent_ino = le32_to_cpu(JFS_IP(dentry->d_inode)->i_dtroot.header.idotdot); return d_obtain_alias(jfs_iget(dentry->d_inode->i_sb, parent_ino)); } const struct inode_operations jfs_dir_inode_operations = { .create = jfs_create, .lookup = jfs_lookup, .link = jfs_link, .unlink = jfs_unlink, .symlink = jfs_symlink, .mkdir = jfs_mkdir, .rmdir = jfs_rmdir, .mknod = jfs_mknod, .rename = jfs_rename, .setxattr = jfs_setxattr, .getxattr = jfs_getxattr, .listxattr = jfs_listxattr, .removexattr = jfs_removexattr, .setattr = jfs_setattr, #ifdef CONFIG_JFS_POSIX_ACL .get_acl = jfs_get_acl, #endif }; const struct file_operations jfs_dir_operations = { .read = generic_read_dir, .readdir = jfs_readdir, .fsync = jfs_fsync, .unlocked_ioctl = jfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = jfs_compat_ioctl, #endif .llseek = generic_file_llseek, }; static int jfs_ci_hash(const struct dentry *dir, const struct inode *inode, struct qstr *this) { unsigned long hash; int i; hash = init_name_hash(); for (i=0; i < this->len; i++) hash = partial_name_hash(tolower(this->name[i]), hash); this->hash = end_name_hash(hash); return 0; } static int jfs_ci_compare(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { int i, result = 1; if (len != name->len) goto out; for (i=0; i < len; i++) { if (tolower(str[i]) != tolower(name->name[i])) goto out; } result = 0; out: return result; } static int jfs_ci_revalidate(struct dentry *dentry, unsigned int flags) { /* * This is 
not negative dentry. Always valid. * * Note, rename() to existing directory entry will have ->d_inode, * and will use existing name which isn't specified name by user. * * We may be able to drop this positive dentry here. But dropping * positive dentry isn't good idea. So it's unsupported like * rename("filename", "FILENAME") for now. */ if (dentry->d_inode) return 1; /* * This may be nfsd (or something), anyway, we can't see the * intent of this. So, since this can be for creation, drop it. */ if (!flags) return 0; /* * Drop the negative dentry, in order to make sure to use the * case sensitive name which is specified by user if this is * for creation. */ if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; return 1; } const struct dentry_operations jfs_ci_dentry_operations = { .d_hash = jfs_ci_hash, .d_compare = jfs_ci_compare, .d_revalidate = jfs_ci_revalidate, };
gpl-2.0
MoKee/android_kernel_htc_villec2
drivers/media/video/et61x251/et61x251_core.c
2625
65497
/*************************************************************************** * V4L2 driver for ET61X[12]51 PC Camera Controllers * * * * Copyright (C) 2006-2007 by Luca Risolia <luca.risolia@studio.unibo.it> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ***************************************************************************/ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/delay.h> #include <linux/compiler.h> #include <linux/ioctl.h> #include <linux/poll.h> #include <linux/stat.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/page-flags.h> #include <media/v4l2-ioctl.h> #include <asm/byteorder.h> #include <asm/page.h> #include <asm/uaccess.h> #include "et61x251.h" /*****************************************************************************/ #define ET61X251_MODULE_NAME "V4L2 driver for ET61X[12]51 " \ "PC Camera Controllers" #define ET61X251_MODULE_AUTHOR "(C) 2006-2007 Luca Risolia" #define ET61X251_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>" #define ET61X251_MODULE_LICENSE "GPL" #define ET61X251_MODULE_VERSION "1:1.09" #define ET61X251_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 9) 
/*****************************************************************************/ MODULE_DEVICE_TABLE(usb, et61x251_id_table); MODULE_AUTHOR(ET61X251_MODULE_AUTHOR " " ET61X251_AUTHOR_EMAIL); MODULE_DESCRIPTION(ET61X251_MODULE_NAME); MODULE_VERSION(ET61X251_MODULE_VERSION); MODULE_LICENSE(ET61X251_MODULE_LICENSE); static short video_nr[] = {[0 ... ET61X251_MAX_DEVICES-1] = -1}; module_param_array(video_nr, short, NULL, 0444); MODULE_PARM_DESC(video_nr, "\n<-1|n[,...]> Specify V4L2 minor mode number." "\n -1 = use next available (default)" "\n n = use minor number n (integer >= 0)" "\nYou can specify up to " __MODULE_STRING(ET61X251_MAX_DEVICES) " cameras this way." "\nFor example:" "\nvideo_nr=-1,2,-1 would assign minor number 2 to" "\nthe second registered camera and use auto for the first" "\none and for every other camera." "\n"); static short force_munmap[] = {[0 ... ET61X251_MAX_DEVICES-1] = ET61X251_FORCE_MUNMAP}; module_param_array(force_munmap, bool, NULL, 0444); MODULE_PARM_DESC(force_munmap, "\n<0|1[,...]> Force the application to unmap previously" "\nmapped buffer memory before calling any VIDIOC_S_CROP or" "\nVIDIOC_S_FMT ioctl's. Not all the applications support" "\nthis feature. This parameter is specific for each" "\ndetected camera." "\n 0 = do not force memory unmapping" "\n 1 = force memory unmapping (save memory)" "\nDefault value is "__MODULE_STRING(ET61X251_FORCE_MUNMAP)"." "\n"); static unsigned int frame_timeout[] = {[0 ... ET61X251_MAX_DEVICES-1] = ET61X251_FRAME_TIMEOUT}; module_param_array(frame_timeout, uint, NULL, 0644); MODULE_PARM_DESC(frame_timeout, "\n<n[,...]> Timeout for a video frame in seconds." "\nThis parameter is specific for each detected camera." "\nDefault value is " __MODULE_STRING(ET61X251_FRAME_TIMEOUT)"." 
"\n"); #ifdef ET61X251_DEBUG static unsigned short debug = ET61X251_DEBUG_LEVEL; module_param(debug, ushort, 0644); MODULE_PARM_DESC(debug, "\n<n> Debugging information level, from 0 to 3:" "\n0 = none (use carefully)" "\n1 = critical errors" "\n2 = significant informations" "\n3 = more verbose messages" "\nLevel 3 is useful for testing only, when only " "one device is used." "\nDefault value is "__MODULE_STRING(ET61X251_DEBUG_LEVEL)"." "\n"); #endif /*****************************************************************************/ static u32 et61x251_request_buffers(struct et61x251_device* cam, u32 count, enum et61x251_io_method io) { struct v4l2_pix_format* p = &(cam->sensor.pix_format); struct v4l2_rect* r = &(cam->sensor.cropcap.bounds); const size_t imagesize = cam->module_param.force_munmap || io == IO_READ ? (p->width * p->height * p->priv) / 8 : (r->width * r->height * p->priv) / 8; void* buff = NULL; u32 i; if (count > ET61X251_MAX_FRAMES) count = ET61X251_MAX_FRAMES; cam->nbuffers = count; while (cam->nbuffers > 0) { if ((buff = vmalloc_32_user(cam->nbuffers * PAGE_ALIGN(imagesize)))) break; cam->nbuffers--; } for (i = 0; i < cam->nbuffers; i++) { cam->frame[i].bufmem = buff + i*PAGE_ALIGN(imagesize); cam->frame[i].buf.index = i; cam->frame[i].buf.m.offset = i*PAGE_ALIGN(imagesize); cam->frame[i].buf.length = imagesize; cam->frame[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; cam->frame[i].buf.sequence = 0; cam->frame[i].buf.field = V4L2_FIELD_NONE; cam->frame[i].buf.memory = V4L2_MEMORY_MMAP; cam->frame[i].buf.flags = 0; } return cam->nbuffers; } static void et61x251_release_buffers(struct et61x251_device* cam) { if (cam->nbuffers) { vfree(cam->frame[0].bufmem); cam->nbuffers = 0; } cam->frame_current = NULL; } static void et61x251_empty_framequeues(struct et61x251_device* cam) { u32 i; INIT_LIST_HEAD(&cam->inqueue); INIT_LIST_HEAD(&cam->outqueue); for (i = 0; i < ET61X251_MAX_FRAMES; i++) { cam->frame[i].state = F_UNUSED; cam->frame[i].buf.bytesused = 0; } } 
static void et61x251_requeue_outqueue(struct et61x251_device* cam) { struct et61x251_frame_t *i; list_for_each_entry(i, &cam->outqueue, frame) { i->state = F_QUEUED; list_add(&i->frame, &cam->inqueue); } INIT_LIST_HEAD(&cam->outqueue); } static void et61x251_queue_unusedframes(struct et61x251_device* cam) { unsigned long lock_flags; u32 i; for (i = 0; i < cam->nbuffers; i++) if (cam->frame[i].state == F_UNUSED) { cam->frame[i].state = F_QUEUED; spin_lock_irqsave(&cam->queue_lock, lock_flags); list_add_tail(&cam->frame[i].frame, &cam->inqueue); spin_unlock_irqrestore(&cam->queue_lock, lock_flags); } } /*****************************************************************************/ int et61x251_write_reg(struct et61x251_device* cam, u8 value, u16 index) { struct usb_device* udev = cam->usbdev; u8* buff = cam->control_buffer; int res; *buff = value; res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41, 0, index, buff, 1, ET61X251_CTRL_TIMEOUT); if (res < 0) { DBG(3, "Failed to write a register (value 0x%02X, index " "0x%02X, error %d)", value, index, res); return -1; } return 0; } static int et61x251_read_reg(struct et61x251_device* cam, u16 index) { struct usb_device* udev = cam->usbdev; u8* buff = cam->control_buffer; int res; res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00, 0xc1, 0, index, buff, 1, ET61X251_CTRL_TIMEOUT); if (res < 0) DBG(3, "Failed to read a register (index 0x%02X, error %d)", index, res); return (res >= 0) ? 
(int)(*buff) : -1; } static int et61x251_i2c_wait(struct et61x251_device* cam, const struct et61x251_sensor* sensor) { int i, r; for (i = 1; i <= 8; i++) { if (sensor->interface == ET61X251_I2C_3WIRES) { r = et61x251_read_reg(cam, 0x8e); if (!(r & 0x02) && (r >= 0)) return 0; } else { r = et61x251_read_reg(cam, 0x8b); if (!(r & 0x01) && (r >= 0)) return 0; } if (r < 0) return -EIO; udelay(8*8); /* minimum for sensors at 400kHz */ } return -EBUSY; } int et61x251_i2c_raw_write(struct et61x251_device* cam, u8 n, u8 data1, u8 data2, u8 data3, u8 data4, u8 data5, u8 data6, u8 data7, u8 data8, u8 address) { struct usb_device* udev = cam->usbdev; u8* data = cam->control_buffer; int err = 0, res; data[0] = data2; data[1] = data3; data[2] = data4; data[3] = data5; data[4] = data6; data[5] = data7; data[6] = data8; res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41, 0, 0x81, data, n-1, ET61X251_CTRL_TIMEOUT); if (res < 0) err += res; data[0] = address; data[1] = cam->sensor.i2c_slave_id; data[2] = cam->sensor.rsta | 0x02 | (n << 4); res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41, 0, 0x88, data, 3, ET61X251_CTRL_TIMEOUT); if (res < 0) err += res; /* Start writing through the serial interface */ data[0] = data1; res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41, 0, 0x80, data, 1, ET61X251_CTRL_TIMEOUT); if (res < 0) err += res; err += et61x251_i2c_wait(cam, &cam->sensor); if (err) DBG(3, "I2C raw write failed for %s image sensor", cam->sensor.name); PDBGG("I2C raw write: %u bytes, address = 0x%02X, data1 = 0x%02X, " "data2 = 0x%02X, data3 = 0x%02X, data4 = 0x%02X, data5 = 0x%02X," " data6 = 0x%02X, data7 = 0x%02X, data8 = 0x%02X", n, address, data1, data2, data3, data4, data5, data6, data7, data8); return err ? 
-1 : 0; } /*****************************************************************************/ static void et61x251_urb_complete(struct urb *urb) { struct et61x251_device* cam = urb->context; struct et61x251_frame_t** f; size_t imagesize; u8 i; int err = 0; if (urb->status == -ENOENT) return; f = &cam->frame_current; if (cam->stream == STREAM_INTERRUPT) { cam->stream = STREAM_OFF; if ((*f)) (*f)->state = F_QUEUED; DBG(3, "Stream interrupted"); wake_up(&cam->wait_stream); } if (cam->state & DEV_DISCONNECTED) return; if (cam->state & DEV_MISCONFIGURED) { wake_up_interruptible(&cam->wait_frame); return; } if (cam->stream == STREAM_OFF || list_empty(&cam->inqueue)) goto resubmit_urb; if (!(*f)) (*f) = list_entry(cam->inqueue.next, struct et61x251_frame_t, frame); imagesize = (cam->sensor.pix_format.width * cam->sensor.pix_format.height * cam->sensor.pix_format.priv) / 8; for (i = 0; i < urb->number_of_packets; i++) { unsigned int len, status; void *pos; u8* b1, * b2, sof; const u8 VOID_BYTES = 6; size_t imglen; len = urb->iso_frame_desc[i].actual_length; status = urb->iso_frame_desc[i].status; pos = urb->iso_frame_desc[i].offset + urb->transfer_buffer; if (status) { DBG(3, "Error in isochronous frame"); (*f)->state = F_ERROR; continue; } b1 = pos++; b2 = pos++; sof = ((*b1 & 0x3f) == 63); imglen = ((*b1 & 0xc0) << 2) | *b2; PDBGG("Isochrnous frame: length %u, #%u i, image length %zu", len, i, imglen); if ((*f)->state == F_QUEUED || (*f)->state == F_ERROR) start_of_frame: if (sof) { (*f)->state = F_GRABBING; (*f)->buf.bytesused = 0; do_gettimeofday(&(*f)->buf.timestamp); pos += 22; DBG(3, "SOF detected: new video frame"); } if ((*f)->state == F_GRABBING) { if (sof && (*f)->buf.bytesused) { if (cam->sensor.pix_format.pixelformat == V4L2_PIX_FMT_ET61X251) goto end_of_frame; else { DBG(3, "Not expected SOF detected " "after %lu bytes", (unsigned long)(*f)->buf.bytesused); (*f)->state = F_ERROR; continue; } } if ((*f)->buf.bytesused + imglen > imagesize) { DBG(3, "Video frame 
size exceeded"); (*f)->state = F_ERROR; continue; } pos += VOID_BYTES; memcpy((*f)->bufmem+(*f)->buf.bytesused, pos, imglen); (*f)->buf.bytesused += imglen; if ((*f)->buf.bytesused == imagesize) { u32 b; end_of_frame: b = (*f)->buf.bytesused; (*f)->state = F_DONE; (*f)->buf.sequence= ++cam->frame_count; spin_lock(&cam->queue_lock); list_move_tail(&(*f)->frame, &cam->outqueue); if (!list_empty(&cam->inqueue)) (*f) = list_entry(cam->inqueue.next, struct et61x251_frame_t, frame); else (*f) = NULL; spin_unlock(&cam->queue_lock); DBG(3, "Video frame captured: : %lu bytes", (unsigned long)(b)); if (!(*f)) goto resubmit_urb; if (sof && cam->sensor.pix_format.pixelformat == V4L2_PIX_FMT_ET61X251) goto start_of_frame; } } } resubmit_urb: urb->dev = cam->usbdev; err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0 && err != -EPERM) { cam->state |= DEV_MISCONFIGURED; DBG(1, "usb_submit_urb() failed"); } wake_up_interruptible(&cam->wait_frame); } static int et61x251_start_transfer(struct et61x251_device* cam) { struct usb_device *udev = cam->usbdev; struct urb* urb; struct usb_host_interface* altsetting = usb_altnum_to_altsetting( usb_ifnum_to_if(udev, 0), ET61X251_ALTERNATE_SETTING); const unsigned int psz = le16_to_cpu(altsetting-> endpoint[0].desc.wMaxPacketSize); s8 i, j; int err = 0; for (i = 0; i < ET61X251_URBS; i++) { cam->transfer_buffer[i] = kzalloc(ET61X251_ISO_PACKETS * psz, GFP_KERNEL); if (!cam->transfer_buffer[i]) { err = -ENOMEM; DBG(1, "Not enough memory"); goto free_buffers; } } for (i = 0; i < ET61X251_URBS; i++) { urb = usb_alloc_urb(ET61X251_ISO_PACKETS, GFP_KERNEL); cam->urb[i] = urb; if (!urb) { err = -ENOMEM; DBG(1, "usb_alloc_urb() failed"); goto free_urbs; } urb->dev = udev; urb->context = cam; urb->pipe = usb_rcvisocpipe(udev, 1); urb->transfer_flags = URB_ISO_ASAP; urb->number_of_packets = ET61X251_ISO_PACKETS; urb->complete = et61x251_urb_complete; urb->transfer_buffer = cam->transfer_buffer[i]; urb->transfer_buffer_length = psz * 
ET61X251_ISO_PACKETS; urb->interval = 1; for (j = 0; j < ET61X251_ISO_PACKETS; j++) { urb->iso_frame_desc[j].offset = psz * j; urb->iso_frame_desc[j].length = psz; } } err = et61x251_write_reg(cam, 0x01, 0x03); err = et61x251_write_reg(cam, 0x00, 0x03); err = et61x251_write_reg(cam, 0x08, 0x03); if (err) { err = -EIO; DBG(1, "I/O hardware error"); goto free_urbs; } err = usb_set_interface(udev, 0, ET61X251_ALTERNATE_SETTING); if (err) { DBG(1, "usb_set_interface() failed"); goto free_urbs; } cam->frame_current = NULL; for (i = 0; i < ET61X251_URBS; i++) { err = usb_submit_urb(cam->urb[i], GFP_KERNEL); if (err) { for (j = i-1; j >= 0; j--) usb_kill_urb(cam->urb[j]); DBG(1, "usb_submit_urb() failed, error %d", err); goto free_urbs; } } return 0; free_urbs: for (i = 0; (i < ET61X251_URBS) && cam->urb[i]; i++) usb_free_urb(cam->urb[i]); free_buffers: for (i = 0; (i < ET61X251_URBS) && cam->transfer_buffer[i]; i++) kfree(cam->transfer_buffer[i]); return err; } static int et61x251_stop_transfer(struct et61x251_device* cam) { struct usb_device *udev = cam->usbdev; s8 i; int err = 0; if (cam->state & DEV_DISCONNECTED) return 0; for (i = ET61X251_URBS-1; i >= 0; i--) { usb_kill_urb(cam->urb[i]); usb_free_urb(cam->urb[i]); kfree(cam->transfer_buffer[i]); } err = usb_set_interface(udev, 0, 0); /* 0 Mb/s */ if (err) DBG(3, "usb_set_interface() failed"); return err; } static int et61x251_stream_interrupt(struct et61x251_device* cam) { long timeout; cam->stream = STREAM_INTERRUPT; timeout = wait_event_timeout(cam->wait_stream, (cam->stream == STREAM_OFF) || (cam->state & DEV_DISCONNECTED), ET61X251_URB_TIMEOUT); if (cam->state & DEV_DISCONNECTED) return -ENODEV; else if (cam->stream != STREAM_OFF) { cam->state |= DEV_MISCONFIGURED; DBG(1, "URB timeout reached. The camera is misconfigured. 
To " "use it, close and open %s again.", video_device_node_name(cam->v4ldev)); return -EIO; } return 0; } /*****************************************************************************/ #ifdef CONFIG_VIDEO_ADV_DEBUG static int et61x251_i2c_try_read(struct et61x251_device* cam, const struct et61x251_sensor* sensor, u8 address) { struct usb_device* udev = cam->usbdev; u8* data = cam->control_buffer; int err = 0, res; data[0] = address; data[1] = cam->sensor.i2c_slave_id; data[2] = cam->sensor.rsta | 0x10; data[3] = !(et61x251_read_reg(cam, 0x8b) & 0x02); res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41, 0, 0x88, data, 4, ET61X251_CTRL_TIMEOUT); if (res < 0) err += res; err += et61x251_i2c_wait(cam, sensor); res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00, 0xc1, 0, 0x80, data, 8, ET61X251_CTRL_TIMEOUT); if (res < 0) err += res; if (err) DBG(3, "I2C read failed for %s image sensor", sensor->name); PDBGG("I2C read: address 0x%02X, value: 0x%02X", address, data[0]); return err ? -1 : (int)data[0]; } static int et61x251_i2c_try_write(struct et61x251_device* cam, const struct et61x251_sensor* sensor, u8 address, u8 value) { struct usb_device* udev = cam->usbdev; u8* data = cam->control_buffer; int err = 0, res; data[0] = address; data[1] = cam->sensor.i2c_slave_id; data[2] = cam->sensor.rsta | 0x12; res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41, 0, 0x88, data, 3, ET61X251_CTRL_TIMEOUT); if (res < 0) err += res; data[0] = value; res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41, 0, 0x80, data, 1, ET61X251_CTRL_TIMEOUT); if (res < 0) err += res; err += et61x251_i2c_wait(cam, sensor); if (err) DBG(3, "I2C write failed for %s image sensor", sensor->name); PDBGG("I2C write: address 0x%02X, value: 0x%02X", address, value); return err ? 
-1 : 0; } static int et61x251_i2c_read(struct et61x251_device* cam, u8 address) { return et61x251_i2c_try_read(cam, &cam->sensor, address); } static int et61x251_i2c_write(struct et61x251_device* cam, u8 address, u8 value) { return et61x251_i2c_try_write(cam, &cam->sensor, address, value); } static u8 et61x251_strtou8(const char* buff, size_t len, ssize_t* count) { char str[5]; char* endp; unsigned long val; if (len < 4) { strncpy(str, buff, len); str[len] = '\0'; } else { strncpy(str, buff, 4); str[4] = '\0'; } val = simple_strtoul(str, &endp, 0); *count = 0; if (val <= 0xff) *count = (ssize_t)(endp - str); if ((*count) && (len == *count+1) && (buff[*count] == '\n')) *count += 1; return (u8)val; } /* NOTE 1: being inside one of the following methods implies that the v4l device exists for sure (see kobjects and reference counters) NOTE 2: buffers are PAGE_SIZE long */ static ssize_t et61x251_show_reg(struct device* cd, struct device_attribute *attr, char* buf) { struct et61x251_device* cam; ssize_t count; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } count = sprintf(buf, "%u\n", cam->sysfs.reg); mutex_unlock(&et61x251_sysfs_lock); return count; } static ssize_t et61x251_store_reg(struct device* cd, struct device_attribute *attr, const char* buf, size_t len) { struct et61x251_device* cam; u8 index; ssize_t count; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } index = et61x251_strtou8(buf, len, &count); if (index > 0x8e || !count) { mutex_unlock(&et61x251_sysfs_lock); return -EINVAL; } cam->sysfs.reg = index; DBG(2, "Moved ET61X[12]51 register index to 0x%02X", cam->sysfs.reg); DBG(3, "Written bytes: %zd", count); mutex_unlock(&et61x251_sysfs_lock); return count; } static ssize_t 
et61x251_show_val(struct device* cd, struct device_attribute *attr, char* buf) { struct et61x251_device* cam; ssize_t count; int val; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } if ((val = et61x251_read_reg(cam, cam->sysfs.reg)) < 0) { mutex_unlock(&et61x251_sysfs_lock); return -EIO; } count = sprintf(buf, "%d\n", val); DBG(3, "Read bytes: %zd", count); mutex_unlock(&et61x251_sysfs_lock); return count; } static ssize_t et61x251_store_val(struct device* cd, struct device_attribute *attr, const char* buf, size_t len) { struct et61x251_device* cam; u8 value; ssize_t count; int err; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } value = et61x251_strtou8(buf, len, &count); if (!count) { mutex_unlock(&et61x251_sysfs_lock); return -EINVAL; } err = et61x251_write_reg(cam, value, cam->sysfs.reg); if (err) { mutex_unlock(&et61x251_sysfs_lock); return -EIO; } DBG(2, "Written ET61X[12]51 reg. 0x%02X, val. 
0x%02X", cam->sysfs.reg, value); DBG(3, "Written bytes: %zd", count); mutex_unlock(&et61x251_sysfs_lock); return count; } static ssize_t et61x251_show_i2c_reg(struct device* cd, struct device_attribute *attr, char* buf) { struct et61x251_device* cam; ssize_t count; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } count = sprintf(buf, "%u\n", cam->sysfs.i2c_reg); DBG(3, "Read bytes: %zd", count); mutex_unlock(&et61x251_sysfs_lock); return count; } static ssize_t et61x251_store_i2c_reg(struct device* cd, struct device_attribute *attr, const char* buf, size_t len) { struct et61x251_device* cam; u8 index; ssize_t count; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } index = et61x251_strtou8(buf, len, &count); if (!count) { mutex_unlock(&et61x251_sysfs_lock); return -EINVAL; } cam->sysfs.i2c_reg = index; DBG(2, "Moved sensor register index to 0x%02X", cam->sysfs.i2c_reg); DBG(3, "Written bytes: %zd", count); mutex_unlock(&et61x251_sysfs_lock); return count; } static ssize_t et61x251_show_i2c_val(struct device* cd, struct device_attribute *attr, char* buf) { struct et61x251_device* cam; ssize_t count; int val; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } if (!(cam->sensor.sysfs_ops & ET61X251_I2C_READ)) { mutex_unlock(&et61x251_sysfs_lock); return -ENOSYS; } if ((val = et61x251_i2c_read(cam, cam->sysfs.i2c_reg)) < 0) { mutex_unlock(&et61x251_sysfs_lock); return -EIO; } count = sprintf(buf, "%d\n", val); DBG(3, "Read bytes: %zd", count); mutex_unlock(&et61x251_sysfs_lock); return count; } static ssize_t et61x251_store_i2c_val(struct device* cd, struct 
device_attribute *attr, const char* buf, size_t len) { struct et61x251_device* cam; u8 value; ssize_t count; int err; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } if (!(cam->sensor.sysfs_ops & ET61X251_I2C_READ)) { mutex_unlock(&et61x251_sysfs_lock); return -ENOSYS; } value = et61x251_strtou8(buf, len, &count); if (!count) { mutex_unlock(&et61x251_sysfs_lock); return -EINVAL; } err = et61x251_i2c_write(cam, cam->sysfs.i2c_reg, value); if (err) { mutex_unlock(&et61x251_sysfs_lock); return -EIO; } DBG(2, "Written sensor reg. 0x%02X, val. 0x%02X", cam->sysfs.i2c_reg, value); DBG(3, "Written bytes: %zd", count); mutex_unlock(&et61x251_sysfs_lock); return count; } static DEVICE_ATTR(reg, S_IRUGO | S_IWUSR, et61x251_show_reg, et61x251_store_reg); static DEVICE_ATTR(val, S_IRUGO | S_IWUSR, et61x251_show_val, et61x251_store_val); static DEVICE_ATTR(i2c_reg, S_IRUGO | S_IWUSR, et61x251_show_i2c_reg, et61x251_store_i2c_reg); static DEVICE_ATTR(i2c_val, S_IRUGO | S_IWUSR, et61x251_show_i2c_val, et61x251_store_i2c_val); static int et61x251_create_sysfs(struct et61x251_device* cam) { struct device *classdev = &(cam->v4ldev->dev); int err = 0; if ((err = device_create_file(classdev, &dev_attr_reg))) goto err_out; if ((err = device_create_file(classdev, &dev_attr_val))) goto err_reg; if (cam->sensor.sysfs_ops) { if ((err = device_create_file(classdev, &dev_attr_i2c_reg))) goto err_val; if ((err = device_create_file(classdev, &dev_attr_i2c_val))) goto err_i2c_reg; } err_i2c_reg: if (cam->sensor.sysfs_ops) device_remove_file(classdev, &dev_attr_i2c_reg); err_val: device_remove_file(classdev, &dev_attr_val); err_reg: device_remove_file(classdev, &dev_attr_reg); err_out: return err; } #endif /* CONFIG_VIDEO_ADV_DEBUG */ /*****************************************************************************/ static int et61x251_set_pix_format(struct 
et61x251_device* cam, struct v4l2_pix_format* pix) { int r, err = 0; if ((r = et61x251_read_reg(cam, 0x12)) < 0) err += r; if (pix->pixelformat == V4L2_PIX_FMT_ET61X251) err += et61x251_write_reg(cam, r & 0xfd, 0x12); else err += et61x251_write_reg(cam, r | 0x02, 0x12); return err ? -EIO : 0; } static int et61x251_set_compression(struct et61x251_device* cam, struct v4l2_jpegcompression* compression) { int r, err = 0; if ((r = et61x251_read_reg(cam, 0x12)) < 0) err += r; if (compression->quality == 0) err += et61x251_write_reg(cam, r & 0xfb, 0x12); else err += et61x251_write_reg(cam, r | 0x04, 0x12); return err ? -EIO : 0; } static int et61x251_set_scale(struct et61x251_device* cam, u8 scale) { int r = 0, err = 0; r = et61x251_read_reg(cam, 0x12); if (r < 0) err += r; if (scale == 1) err += et61x251_write_reg(cam, r & ~0x01, 0x12); else if (scale == 2) err += et61x251_write_reg(cam, r | 0x01, 0x12); if (err) return -EIO; PDBGG("Scaling factor: %u", scale); return 0; } static int et61x251_set_crop(struct et61x251_device* cam, struct v4l2_rect* rect) { struct et61x251_sensor* s = &cam->sensor; u16 fmw_sx = (u16)(rect->left - s->cropcap.bounds.left + s->active_pixel.left), fmw_sy = (u16)(rect->top - s->cropcap.bounds.top + s->active_pixel.top), fmw_length = (u16)(rect->width), fmw_height = (u16)(rect->height); int err = 0; err += et61x251_write_reg(cam, fmw_sx & 0xff, 0x69); err += et61x251_write_reg(cam, fmw_sy & 0xff, 0x6a); err += et61x251_write_reg(cam, fmw_length & 0xff, 0x6b); err += et61x251_write_reg(cam, fmw_height & 0xff, 0x6c); err += et61x251_write_reg(cam, (fmw_sx >> 8) | ((fmw_sy & 0x300) >> 6) | ((fmw_length & 0x300) >> 4) | ((fmw_height & 0x300) >> 2), 0x6d); if (err) return -EIO; PDBGG("fmw_sx, fmw_sy, fmw_length, fmw_height: %u %u %u %u", fmw_sx, fmw_sy, fmw_length, fmw_height); return 0; } static int et61x251_init(struct et61x251_device* cam) { struct et61x251_sensor* s = &cam->sensor; struct v4l2_control ctrl; struct v4l2_queryctrl *qctrl; struct 
v4l2_rect* rect; u8 i = 0; int err = 0; if (!(cam->state & DEV_INITIALIZED)) { mutex_init(&cam->open_mutex); init_waitqueue_head(&cam->wait_open); qctrl = s->qctrl; rect = &(s->cropcap.defrect); cam->compression.quality = ET61X251_COMPRESSION_QUALITY; } else { /* use current values */ qctrl = s->_qctrl; rect = &(s->_rect); } err += et61x251_set_scale(cam, rect->width / s->pix_format.width); err += et61x251_set_crop(cam, rect); if (err) return err; if (s->init) { err = s->init(cam); if (err) { DBG(3, "Sensor initialization failed"); return err; } } err += et61x251_set_compression(cam, &cam->compression); err += et61x251_set_pix_format(cam, &s->pix_format); if (s->set_pix_format) err += s->set_pix_format(cam, &s->pix_format); if (err) return err; if (s->pix_format.pixelformat == V4L2_PIX_FMT_ET61X251) DBG(3, "Compressed video format is active, quality %d", cam->compression.quality); else DBG(3, "Uncompressed video format is active"); if (s->set_crop) if ((err = s->set_crop(cam, rect))) { DBG(3, "set_crop() failed"); return err; } if (s->set_ctrl) { for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) if (s->qctrl[i].id != 0 && !(s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)) { ctrl.id = s->qctrl[i].id; ctrl.value = qctrl[i].default_value; err = s->set_ctrl(cam, &ctrl); if (err) { DBG(3, "Set %s control failed", s->qctrl[i].name); return err; } DBG(3, "Image sensor supports '%s' control", s->qctrl[i].name); } } if (!(cam->state & DEV_INITIALIZED)) { mutex_init(&cam->fileop_mutex); spin_lock_init(&cam->queue_lock); init_waitqueue_head(&cam->wait_frame); init_waitqueue_head(&cam->wait_stream); cam->nreadbuffers = 2; memcpy(s->_qctrl, s->qctrl, sizeof(s->qctrl)); memcpy(&(s->_rect), &(s->cropcap.defrect), sizeof(struct v4l2_rect)); cam->state |= DEV_INITIALIZED; } DBG(2, "Initialization succeeded"); return 0; } /*****************************************************************************/ static void et61x251_release_resources(struct kref *kref) { struct et61x251_device *cam; 
mutex_lock(&et61x251_sysfs_lock); cam = container_of(kref, struct et61x251_device, kref); DBG(2, "V4L2 device %s deregistered", video_device_node_name(cam->v4ldev)); video_set_drvdata(cam->v4ldev, NULL); video_unregister_device(cam->v4ldev); usb_put_dev(cam->usbdev); kfree(cam->control_buffer); kfree(cam); mutex_unlock(&et61x251_sysfs_lock); } static int et61x251_open(struct file *filp) { struct et61x251_device* cam; int err = 0; if (!down_read_trylock(&et61x251_dev_lock)) return -ERESTARTSYS; cam = video_drvdata(filp); if (wait_for_completion_interruptible(&cam->probe)) { up_read(&et61x251_dev_lock); return -ERESTARTSYS; } kref_get(&cam->kref); if (mutex_lock_interruptible(&cam->open_mutex)) { kref_put(&cam->kref, et61x251_release_resources); up_read(&et61x251_dev_lock); return -ERESTARTSYS; } if (cam->state & DEV_DISCONNECTED) { DBG(1, "Device not present"); err = -ENODEV; goto out; } if (cam->users) { DBG(2, "Device %s is already in use", video_device_node_name(cam->v4ldev)); DBG(3, "Simultaneous opens are not supported"); if ((filp->f_flags & O_NONBLOCK) || (filp->f_flags & O_NDELAY)) { err = -EWOULDBLOCK; goto out; } DBG(2, "A blocking open() has been requested. Wait for the " "device to be released..."); up_read(&et61x251_dev_lock); err = wait_event_interruptible_exclusive(cam->wait_open, (cam->state & DEV_DISCONNECTED) || !cam->users); down_read(&et61x251_dev_lock); if (err) goto out; if (cam->state & DEV_DISCONNECTED) { err = -ENODEV; goto out; } } if (cam->state & DEV_MISCONFIGURED) { err = et61x251_init(cam); if (err) { DBG(1, "Initialization failed again. 
" "I will retry on next open()."); goto out; } cam->state &= ~DEV_MISCONFIGURED; } if ((err = et61x251_start_transfer(cam))) goto out; filp->private_data = cam; cam->users++; cam->io = IO_NONE; cam->stream = STREAM_OFF; cam->nbuffers = 0; cam->frame_count = 0; et61x251_empty_framequeues(cam); DBG(3, "Video device %s is open", video_device_node_name(cam->v4ldev)); out: mutex_unlock(&cam->open_mutex); if (err) kref_put(&cam->kref, et61x251_release_resources); up_read(&et61x251_dev_lock); return err; } static int et61x251_release(struct file *filp) { struct et61x251_device* cam; down_write(&et61x251_dev_lock); cam = video_drvdata(filp); et61x251_stop_transfer(cam); et61x251_release_buffers(cam); cam->users--; wake_up_interruptible_nr(&cam->wait_open, 1); DBG(3, "Video device %s closed", video_device_node_name(cam->v4ldev)); kref_put(&cam->kref, et61x251_release_resources); up_write(&et61x251_dev_lock); return 0; } static ssize_t et61x251_read(struct file* filp, char __user * buf, size_t count, loff_t* f_pos) { struct et61x251_device *cam = video_drvdata(filp); struct et61x251_frame_t* f, * i; unsigned long lock_flags; long timeout; int err = 0; if (mutex_lock_interruptible(&cam->fileop_mutex)) return -ERESTARTSYS; if (cam->state & DEV_DISCONNECTED) { DBG(1, "Device not present"); mutex_unlock(&cam->fileop_mutex); return -ENODEV; } if (cam->state & DEV_MISCONFIGURED) { DBG(1, "The camera is misconfigured. 
Close and open it " "again."); mutex_unlock(&cam->fileop_mutex); return -EIO; } if (cam->io == IO_MMAP) { DBG(3, "Close and open the device again to choose the read " "method"); mutex_unlock(&cam->fileop_mutex); return -EBUSY; } if (cam->io == IO_NONE) { if (!et61x251_request_buffers(cam, cam->nreadbuffers, IO_READ)) { DBG(1, "read() failed, not enough memory"); mutex_unlock(&cam->fileop_mutex); return -ENOMEM; } cam->io = IO_READ; cam->stream = STREAM_ON; } if (list_empty(&cam->inqueue)) { if (!list_empty(&cam->outqueue)) et61x251_empty_framequeues(cam); et61x251_queue_unusedframes(cam); } if (!count) { mutex_unlock(&cam->fileop_mutex); return 0; } if (list_empty(&cam->outqueue)) { if (filp->f_flags & O_NONBLOCK) { mutex_unlock(&cam->fileop_mutex); return -EAGAIN; } timeout = wait_event_interruptible_timeout ( cam->wait_frame, (!list_empty(&cam->outqueue)) || (cam->state & DEV_DISCONNECTED) || (cam->state & DEV_MISCONFIGURED), msecs_to_jiffies( cam->module_param.frame_timeout * 1000 ) ); if (timeout < 0) { mutex_unlock(&cam->fileop_mutex); return timeout; } if (cam->state & DEV_DISCONNECTED) { mutex_unlock(&cam->fileop_mutex); return -ENODEV; } if (!timeout || (cam->state & DEV_MISCONFIGURED)) { mutex_unlock(&cam->fileop_mutex); return -EIO; } } f = list_entry(cam->outqueue.prev, struct et61x251_frame_t, frame); if (count > f->buf.bytesused) count = f->buf.bytesused; if (copy_to_user(buf, f->bufmem, count)) { err = -EFAULT; goto exit; } *f_pos += count; exit: spin_lock_irqsave(&cam->queue_lock, lock_flags); list_for_each_entry(i, &cam->outqueue, frame) i->state = F_UNUSED; INIT_LIST_HEAD(&cam->outqueue); spin_unlock_irqrestore(&cam->queue_lock, lock_flags); et61x251_queue_unusedframes(cam); PDBGG("Frame #%lu, bytes read: %zu", (unsigned long)f->buf.index, count); mutex_unlock(&cam->fileop_mutex); return err ? 
err : count; } static unsigned int et61x251_poll(struct file *filp, poll_table *wait) { struct et61x251_device *cam = video_drvdata(filp); struct et61x251_frame_t* f; unsigned long lock_flags; unsigned int mask = 0; if (mutex_lock_interruptible(&cam->fileop_mutex)) return POLLERR; if (cam->state & DEV_DISCONNECTED) { DBG(1, "Device not present"); goto error; } if (cam->state & DEV_MISCONFIGURED) { DBG(1, "The camera is misconfigured. Close and open it " "again."); goto error; } if (cam->io == IO_NONE) { if (!et61x251_request_buffers(cam, cam->nreadbuffers, IO_READ)) { DBG(1, "poll() failed, not enough memory"); goto error; } cam->io = IO_READ; cam->stream = STREAM_ON; } if (cam->io == IO_READ) { spin_lock_irqsave(&cam->queue_lock, lock_flags); list_for_each_entry(f, &cam->outqueue, frame) f->state = F_UNUSED; INIT_LIST_HEAD(&cam->outqueue); spin_unlock_irqrestore(&cam->queue_lock, lock_flags); et61x251_queue_unusedframes(cam); } poll_wait(filp, &cam->wait_frame, wait); if (!list_empty(&cam->outqueue)) mask |= POLLIN | POLLRDNORM; mutex_unlock(&cam->fileop_mutex); return mask; error: mutex_unlock(&cam->fileop_mutex); return POLLERR; } static void et61x251_vm_open(struct vm_area_struct* vma) { struct et61x251_frame_t* f = vma->vm_private_data; f->vma_use_count++; } static void et61x251_vm_close(struct vm_area_struct* vma) { /* NOTE: buffers are not freed here */ struct et61x251_frame_t* f = vma->vm_private_data; f->vma_use_count--; } static const struct vm_operations_struct et61x251_vm_ops = { .open = et61x251_vm_open, .close = et61x251_vm_close, }; static int et61x251_mmap(struct file* filp, struct vm_area_struct *vma) { struct et61x251_device *cam = video_drvdata(filp); unsigned long size = vma->vm_end - vma->vm_start, start = vma->vm_start; void *pos; u32 i; if (mutex_lock_interruptible(&cam->fileop_mutex)) return -ERESTARTSYS; if (cam->state & DEV_DISCONNECTED) { DBG(1, "Device not present"); mutex_unlock(&cam->fileop_mutex); return -ENODEV; } if (cam->state & 
DEV_MISCONFIGURED) { DBG(1, "The camera is misconfigured. Close and open it " "again."); mutex_unlock(&cam->fileop_mutex); return -EIO; } if (!(vma->vm_flags & (VM_WRITE | VM_READ))) { mutex_unlock(&cam->fileop_mutex); return -EACCES; } if (cam->io != IO_MMAP || size != PAGE_ALIGN(cam->frame[0].buf.length)) { mutex_unlock(&cam->fileop_mutex); return -EINVAL; } for (i = 0; i < cam->nbuffers; i++) { if ((cam->frame[i].buf.m.offset>>PAGE_SHIFT) == vma->vm_pgoff) break; } if (i == cam->nbuffers) { mutex_unlock(&cam->fileop_mutex); return -EINVAL; } vma->vm_flags |= VM_IO; vma->vm_flags |= VM_RESERVED; pos = cam->frame[i].bufmem; while (size > 0) { /* size is page-aligned */ if (vm_insert_page(vma, start, vmalloc_to_page(pos))) { mutex_unlock(&cam->fileop_mutex); return -EAGAIN; } start += PAGE_SIZE; pos += PAGE_SIZE; size -= PAGE_SIZE; } vma->vm_ops = &et61x251_vm_ops; vma->vm_private_data = &cam->frame[i]; et61x251_vm_open(vma); mutex_unlock(&cam->fileop_mutex); return 0; } /*****************************************************************************/ static int et61x251_vidioc_querycap(struct et61x251_device* cam, void __user * arg) { struct v4l2_capability cap = { .driver = "et61x251", .version = ET61X251_MODULE_VERSION_CODE, .capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING, }; strlcpy(cap.card, cam->v4ldev->name, sizeof(cap.card)); if (usb_make_path(cam->usbdev, cap.bus_info, sizeof(cap.bus_info)) < 0) strlcpy(cap.bus_info, dev_name(&cam->usbdev->dev), sizeof(cap.bus_info)); if (copy_to_user(arg, &cap, sizeof(cap))) return -EFAULT; return 0; } static int et61x251_vidioc_enuminput(struct et61x251_device* cam, void __user * arg) { struct v4l2_input i; if (copy_from_user(&i, arg, sizeof(i))) return -EFAULT; if (i.index) return -EINVAL; memset(&i, 0, sizeof(i)); strcpy(i.name, "Camera"); i.type = V4L2_INPUT_TYPE_CAMERA; i.capabilities = V4L2_IN_CAP_STD; if (copy_to_user(arg, &i, sizeof(i))) return -EFAULT; return 0; } static int 
et61x251_vidioc_g_input(struct et61x251_device* cam, void __user * arg) { int index = 0; if (copy_to_user(arg, &index, sizeof(index))) return -EFAULT; return 0; } static int et61x251_vidioc_s_input(struct et61x251_device* cam, void __user * arg) { int index; if (copy_from_user(&index, arg, sizeof(index))) return -EFAULT; if (index != 0) return -EINVAL; return 0; } static int et61x251_vidioc_query_ctrl(struct et61x251_device* cam, void __user * arg) { struct et61x251_sensor* s = &cam->sensor; struct v4l2_queryctrl qc; u8 i; if (copy_from_user(&qc, arg, sizeof(qc))) return -EFAULT; for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) if (qc.id && qc.id == s->qctrl[i].id) { memcpy(&qc, &(s->qctrl[i]), sizeof(qc)); if (copy_to_user(arg, &qc, sizeof(qc))) return -EFAULT; return 0; } return -EINVAL; } static int et61x251_vidioc_g_ctrl(struct et61x251_device* cam, void __user * arg) { struct et61x251_sensor* s = &cam->sensor; struct v4l2_control ctrl; int err = 0; u8 i; if (!s->get_ctrl && !s->set_ctrl) return -EINVAL; if (copy_from_user(&ctrl, arg, sizeof(ctrl))) return -EFAULT; if (!s->get_ctrl) { for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) if (ctrl.id == s->qctrl[i].id) { ctrl.value = s->_qctrl[i].default_value; goto exit; } return -EINVAL; } else err = s->get_ctrl(cam, &ctrl); exit: if (copy_to_user(arg, &ctrl, sizeof(ctrl))) return -EFAULT; return err; } static int et61x251_vidioc_s_ctrl(struct et61x251_device* cam, void __user * arg) { struct et61x251_sensor* s = &cam->sensor; struct v4l2_control ctrl; u8 i; int err = 0; if (!s->set_ctrl) return -EINVAL; if (copy_from_user(&ctrl, arg, sizeof(ctrl))) return -EFAULT; for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) { if (ctrl.id == s->qctrl[i].id) { if (s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED) return -EINVAL; if (ctrl.value < s->qctrl[i].minimum || ctrl.value > s->qctrl[i].maximum) return -ERANGE; ctrl.value -= ctrl.value % s->qctrl[i].step; break; } } if (i == ARRAY_SIZE(s->qctrl)) return -EINVAL; if ((err = s->set_ctrl(cam, 
&ctrl))) return err; s->_qctrl[i].default_value = ctrl.value; return 0; } static int et61x251_vidioc_cropcap(struct et61x251_device* cam, void __user * arg) { struct v4l2_cropcap* cc = &(cam->sensor.cropcap); cc->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; cc->pixelaspect.numerator = 1; cc->pixelaspect.denominator = 1; if (copy_to_user(arg, cc, sizeof(*cc))) return -EFAULT; return 0; } static int et61x251_vidioc_g_crop(struct et61x251_device* cam, void __user * arg) { struct et61x251_sensor* s = &cam->sensor; struct v4l2_crop crop = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, }; memcpy(&(crop.c), &(s->_rect), sizeof(struct v4l2_rect)); if (copy_to_user(arg, &crop, sizeof(crop))) return -EFAULT; return 0; } static int et61x251_vidioc_s_crop(struct et61x251_device* cam, void __user * arg) { struct et61x251_sensor* s = &cam->sensor; struct v4l2_crop crop; struct v4l2_rect* rect; struct v4l2_rect* bounds = &(s->cropcap.bounds); struct v4l2_pix_format* pix_format = &(s->pix_format); u8 scale; const enum et61x251_stream_state stream = cam->stream; const u32 nbuffers = cam->nbuffers; u32 i; int err = 0; if (copy_from_user(&crop, arg, sizeof(crop))) return -EFAULT; rect = &(crop.c); if (crop.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (cam->module_param.force_munmap) for (i = 0; i < cam->nbuffers; i++) if (cam->frame[i].vma_use_count) { DBG(3, "VIDIOC_S_CROP failed. " "Unmap the buffers first."); return -EBUSY; } /* Preserve R,G or B origin */ rect->left = (s->_rect.left & 1L) ? rect->left | 1L : rect->left & ~1L; rect->top = (s->_rect.top & 1L) ? 
rect->top | 1L : rect->top & ~1L; if (rect->width < 16) rect->width = 16; if (rect->height < 16) rect->height = 16; if (rect->width > bounds->width) rect->width = bounds->width; if (rect->height > bounds->height) rect->height = bounds->height; if (rect->left < bounds->left) rect->left = bounds->left; if (rect->top < bounds->top) rect->top = bounds->top; if (rect->left + rect->width > bounds->left + bounds->width) rect->left = bounds->left+bounds->width - rect->width; if (rect->top + rect->height > bounds->top + bounds->height) rect->top = bounds->top+bounds->height - rect->height; rect->width &= ~15L; rect->height &= ~15L; if (ET61X251_PRESERVE_IMGSCALE) { /* Calculate the actual scaling factor */ u32 a, b; a = rect->width * rect->height; b = pix_format->width * pix_format->height; scale = b ? (u8)((a / b) < 4 ? 1 : 2) : 1; } else scale = 1; if (cam->stream == STREAM_ON) if ((err = et61x251_stream_interrupt(cam))) return err; if (copy_to_user(arg, &crop, sizeof(crop))) { cam->stream = stream; return -EFAULT; } if (cam->module_param.force_munmap || cam->io == IO_READ) et61x251_release_buffers(cam); err = et61x251_set_crop(cam, rect); if (s->set_crop) err += s->set_crop(cam, rect); err += et61x251_set_scale(cam, scale); if (err) { /* atomic, no rollback in ioctl() */ cam->state |= DEV_MISCONFIGURED; DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To " "use the camera, close and open %s again.", video_device_node_name(cam->v4ldev)); return -EIO; } s->pix_format.width = rect->width/scale; s->pix_format.height = rect->height/scale; memcpy(&(s->_rect), rect, sizeof(*rect)); if ((cam->module_param.force_munmap || cam->io == IO_READ) && nbuffers != et61x251_request_buffers(cam, nbuffers, cam->io)) { cam->state |= DEV_MISCONFIGURED; DBG(1, "VIDIOC_S_CROP failed because of not enough memory. 
To " "use the camera, close and open %s again.", video_device_node_name(cam->v4ldev)); return -ENOMEM; } if (cam->io == IO_READ) et61x251_empty_framequeues(cam); else if (cam->module_param.force_munmap) et61x251_requeue_outqueue(cam); cam->stream = stream; return 0; } static int et61x251_vidioc_enum_framesizes(struct et61x251_device* cam, void __user * arg) { struct v4l2_frmsizeenum frmsize; if (copy_from_user(&frmsize, arg, sizeof(frmsize))) return -EFAULT; if (frmsize.index != 0) return -EINVAL; if (frmsize.pixel_format != V4L2_PIX_FMT_ET61X251 && frmsize.pixel_format != V4L2_PIX_FMT_SBGGR8) return -EINVAL; frmsize.type = V4L2_FRMSIZE_TYPE_STEPWISE; frmsize.stepwise.min_width = frmsize.stepwise.step_width = 16; frmsize.stepwise.min_height = frmsize.stepwise.step_height = 16; frmsize.stepwise.max_width = cam->sensor.cropcap.bounds.width; frmsize.stepwise.max_height = cam->sensor.cropcap.bounds.height; memset(&frmsize.reserved, 0, sizeof(frmsize.reserved)); if (copy_to_user(arg, &frmsize, sizeof(frmsize))) return -EFAULT; return 0; } static int et61x251_vidioc_enum_fmt(struct et61x251_device* cam, void __user * arg) { struct v4l2_fmtdesc fmtd; if (copy_from_user(&fmtd, arg, sizeof(fmtd))) return -EFAULT; if (fmtd.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (fmtd.index == 0) { strcpy(fmtd.description, "bayer rgb"); fmtd.pixelformat = V4L2_PIX_FMT_SBGGR8; } else if (fmtd.index == 1) { strcpy(fmtd.description, "compressed"); fmtd.pixelformat = V4L2_PIX_FMT_ET61X251; fmtd.flags = V4L2_FMT_FLAG_COMPRESSED; } else return -EINVAL; fmtd.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; memset(&fmtd.reserved, 0, sizeof(fmtd.reserved)); if (copy_to_user(arg, &fmtd, sizeof(fmtd))) return -EFAULT; return 0; } static int et61x251_vidioc_g_fmt(struct et61x251_device* cam, void __user * arg) { struct v4l2_format format; struct v4l2_pix_format* pfmt = &(cam->sensor.pix_format); if (copy_from_user(&format, arg, sizeof(format))) return -EFAULT; if (format.type != 
V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; pfmt->colorspace = (pfmt->pixelformat == V4L2_PIX_FMT_ET61X251) ? 0 : V4L2_COLORSPACE_SRGB; pfmt->bytesperline = (pfmt->pixelformat==V4L2_PIX_FMT_ET61X251) ? 0 : (pfmt->width * pfmt->priv) / 8; pfmt->sizeimage = pfmt->height * ((pfmt->width*pfmt->priv)/8); pfmt->field = V4L2_FIELD_NONE; memcpy(&(format.fmt.pix), pfmt, sizeof(*pfmt)); if (copy_to_user(arg, &format, sizeof(format))) return -EFAULT; return 0; } static int et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd, void __user * arg) { struct et61x251_sensor* s = &cam->sensor; struct v4l2_format format; struct v4l2_pix_format* pix; struct v4l2_pix_format* pfmt = &(s->pix_format); struct v4l2_rect* bounds = &(s->cropcap.bounds); struct v4l2_rect rect; u8 scale; const enum et61x251_stream_state stream = cam->stream; const u32 nbuffers = cam->nbuffers; u32 i; int err = 0; if (copy_from_user(&format, arg, sizeof(format))) return -EFAULT; pix = &(format.fmt.pix); if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; memcpy(&rect, &(s->_rect), sizeof(rect)); { /* calculate the actual scaling factor */ u32 a, b; a = rect.width * rect.height; b = pix->width * pix->height; scale = b ? (u8)((a / b) < 4 ? 1 : 2) : 1; } rect.width = scale * pix->width; rect.height = scale * pix->height; if (rect.width < 16) rect.width = 16; if (rect.height < 16) rect.height = 16; if (rect.width > bounds->left + bounds->width - rect.left) rect.width = bounds->left + bounds->width - rect.left; if (rect.height > bounds->top + bounds->height - rect.top) rect.height = bounds->top + bounds->height - rect.top; rect.width &= ~15L; rect.height &= ~15L; { /* adjust the scaling factor */ u32 a, b; a = rect.width * rect.height; b = pix->width * pix->height; scale = b ? (u8)((a / b) < 4 ? 
1 : 2) : 1; } pix->width = rect.width / scale; pix->height = rect.height / scale; if (pix->pixelformat != V4L2_PIX_FMT_ET61X251 && pix->pixelformat != V4L2_PIX_FMT_SBGGR8) pix->pixelformat = pfmt->pixelformat; pix->priv = pfmt->priv; /* bpp */ pix->colorspace = (pix->pixelformat == V4L2_PIX_FMT_ET61X251) ? 0 : V4L2_COLORSPACE_SRGB; pix->colorspace = pfmt->colorspace; pix->bytesperline = (pix->pixelformat == V4L2_PIX_FMT_ET61X251) ? 0 : (pix->width * pix->priv) / 8; pix->sizeimage = pix->height * ((pix->width * pix->priv) / 8); pix->field = V4L2_FIELD_NONE; if (cmd == VIDIOC_TRY_FMT) { if (copy_to_user(arg, &format, sizeof(format))) return -EFAULT; return 0; } if (cam->module_param.force_munmap) for (i = 0; i < cam->nbuffers; i++) if (cam->frame[i].vma_use_count) { DBG(3, "VIDIOC_S_FMT failed. " "Unmap the buffers first."); return -EBUSY; } if (cam->stream == STREAM_ON) if ((err = et61x251_stream_interrupt(cam))) return err; if (copy_to_user(arg, &format, sizeof(format))) { cam->stream = stream; return -EFAULT; } if (cam->module_param.force_munmap || cam->io == IO_READ) et61x251_release_buffers(cam); err += et61x251_set_pix_format(cam, pix); err += et61x251_set_crop(cam, &rect); if (s->set_pix_format) err += s->set_pix_format(cam, pix); if (s->set_crop) err += s->set_crop(cam, &rect); err += et61x251_set_scale(cam, scale); if (err) { /* atomic, no rollback in ioctl() */ cam->state |= DEV_MISCONFIGURED; DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To " "use the camera, close and open %s again.", video_device_node_name(cam->v4ldev)); return -EIO; } memcpy(pfmt, pix, sizeof(*pix)); memcpy(&(s->_rect), &rect, sizeof(rect)); if ((cam->module_param.force_munmap || cam->io == IO_READ) && nbuffers != et61x251_request_buffers(cam, nbuffers, cam->io)) { cam->state |= DEV_MISCONFIGURED; DBG(1, "VIDIOC_S_FMT failed because of not enough memory. 
To " "use the camera, close and open %s again.", video_device_node_name(cam->v4ldev)); return -ENOMEM; } if (cam->io == IO_READ) et61x251_empty_framequeues(cam); else if (cam->module_param.force_munmap) et61x251_requeue_outqueue(cam); cam->stream = stream; return 0; } static int et61x251_vidioc_g_jpegcomp(struct et61x251_device* cam, void __user * arg) { if (copy_to_user(arg, &cam->compression, sizeof(cam->compression))) return -EFAULT; return 0; } static int et61x251_vidioc_s_jpegcomp(struct et61x251_device* cam, void __user * arg) { struct v4l2_jpegcompression jc; const enum et61x251_stream_state stream = cam->stream; int err = 0; if (copy_from_user(&jc, arg, sizeof(jc))) return -EFAULT; if (jc.quality != 0 && jc.quality != 1) return -EINVAL; if (cam->stream == STREAM_ON) if ((err = et61x251_stream_interrupt(cam))) return err; err += et61x251_set_compression(cam, &jc); if (err) { /* atomic, no rollback in ioctl() */ cam->state |= DEV_MISCONFIGURED; DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware " "problems. To use the camera, close and open " "%s again.", video_device_node_name(cam->v4ldev)); return -EIO; } cam->compression.quality = jc.quality; cam->stream = stream; return 0; } static int et61x251_vidioc_reqbufs(struct et61x251_device* cam, void __user * arg) { struct v4l2_requestbuffers rb; u32 i; int err; if (copy_from_user(&rb, arg, sizeof(rb))) return -EFAULT; if (rb.type != V4L2_BUF_TYPE_VIDEO_CAPTURE || rb.memory != V4L2_MEMORY_MMAP) return -EINVAL; if (cam->io == IO_READ) { DBG(3, "Close and open the device again to choose the mmap " "I/O method"); return -EBUSY; } for (i = 0; i < cam->nbuffers; i++) if (cam->frame[i].vma_use_count) { DBG(3, "VIDIOC_REQBUFS failed. 
" "Previous buffers are still mapped."); return -EBUSY; } if (cam->stream == STREAM_ON) if ((err = et61x251_stream_interrupt(cam))) return err; et61x251_empty_framequeues(cam); et61x251_release_buffers(cam); if (rb.count) rb.count = et61x251_request_buffers(cam, rb.count, IO_MMAP); if (copy_to_user(arg, &rb, sizeof(rb))) { et61x251_release_buffers(cam); cam->io = IO_NONE; return -EFAULT; } cam->io = rb.count ? IO_MMAP : IO_NONE; return 0; } static int et61x251_vidioc_querybuf(struct et61x251_device* cam, void __user * arg) { struct v4l2_buffer b; if (copy_from_user(&b, arg, sizeof(b))) return -EFAULT; if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE || b.index >= cam->nbuffers || cam->io != IO_MMAP) return -EINVAL; memcpy(&b, &cam->frame[b.index].buf, sizeof(b)); if (cam->frame[b.index].vma_use_count) b.flags |= V4L2_BUF_FLAG_MAPPED; if (cam->frame[b.index].state == F_DONE) b.flags |= V4L2_BUF_FLAG_DONE; else if (cam->frame[b.index].state != F_UNUSED) b.flags |= V4L2_BUF_FLAG_QUEUED; if (copy_to_user(arg, &b, sizeof(b))) return -EFAULT; return 0; } static int et61x251_vidioc_qbuf(struct et61x251_device* cam, void __user * arg) { struct v4l2_buffer b; unsigned long lock_flags; if (copy_from_user(&b, arg, sizeof(b))) return -EFAULT; if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE || b.index >= cam->nbuffers || cam->io != IO_MMAP) return -EINVAL; if (cam->frame[b.index].state != F_UNUSED) return -EINVAL; cam->frame[b.index].state = F_QUEUED; spin_lock_irqsave(&cam->queue_lock, lock_flags); list_add_tail(&cam->frame[b.index].frame, &cam->inqueue); spin_unlock_irqrestore(&cam->queue_lock, lock_flags); PDBGG("Frame #%lu queued", (unsigned long)b.index); return 0; } static int et61x251_vidioc_dqbuf(struct et61x251_device* cam, struct file* filp, void __user * arg) { struct v4l2_buffer b; struct et61x251_frame_t *f; unsigned long lock_flags; long timeout; if (copy_from_user(&b, arg, sizeof(b))) return -EFAULT; if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io!= IO_MMAP) return 
-EINVAL; if (list_empty(&cam->outqueue)) { if (cam->stream == STREAM_OFF) return -EINVAL; if (filp->f_flags & O_NONBLOCK) return -EAGAIN; timeout = wait_event_interruptible_timeout ( cam->wait_frame, (!list_empty(&cam->outqueue)) || (cam->state & DEV_DISCONNECTED) || (cam->state & DEV_MISCONFIGURED), cam->module_param.frame_timeout * 1000 * msecs_to_jiffies(1) ); if (timeout < 0) return timeout; if (cam->state & DEV_DISCONNECTED) return -ENODEV; if (!timeout || (cam->state & DEV_MISCONFIGURED)) return -EIO; } spin_lock_irqsave(&cam->queue_lock, lock_flags); f = list_entry(cam->outqueue.next, struct et61x251_frame_t, frame); list_del(cam->outqueue.next); spin_unlock_irqrestore(&cam->queue_lock, lock_flags); f->state = F_UNUSED; memcpy(&b, &f->buf, sizeof(b)); if (f->vma_use_count) b.flags |= V4L2_BUF_FLAG_MAPPED; if (copy_to_user(arg, &b, sizeof(b))) return -EFAULT; PDBGG("Frame #%lu dequeued", (unsigned long)f->buf.index); return 0; } static int et61x251_vidioc_streamon(struct et61x251_device* cam, void __user * arg) { int type; if (copy_from_user(&type, arg, sizeof(type))) return -EFAULT; if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP) return -EINVAL; cam->stream = STREAM_ON; DBG(3, "Stream on"); return 0; } static int et61x251_vidioc_streamoff(struct et61x251_device* cam, void __user * arg) { int type, err; if (copy_from_user(&type, arg, sizeof(type))) return -EFAULT; if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP) return -EINVAL; if (cam->stream == STREAM_ON) if ((err = et61x251_stream_interrupt(cam))) return err; et61x251_empty_framequeues(cam); DBG(3, "Stream off"); return 0; } static int et61x251_vidioc_g_parm(struct et61x251_device* cam, void __user * arg) { struct v4l2_streamparm sp; if (copy_from_user(&sp, arg, sizeof(sp))) return -EFAULT; if (sp.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; sp.parm.capture.extendedmode = 0; sp.parm.capture.readbuffers = cam->nreadbuffers; if (copy_to_user(arg, &sp, sizeof(sp))) return 
-EFAULT; return 0; } static int et61x251_vidioc_s_parm(struct et61x251_device* cam, void __user * arg) { struct v4l2_streamparm sp; if (copy_from_user(&sp, arg, sizeof(sp))) return -EFAULT; if (sp.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; sp.parm.capture.extendedmode = 0; if (sp.parm.capture.readbuffers == 0) sp.parm.capture.readbuffers = cam->nreadbuffers; if (sp.parm.capture.readbuffers > ET61X251_MAX_FRAMES) sp.parm.capture.readbuffers = ET61X251_MAX_FRAMES; if (copy_to_user(arg, &sp, sizeof(sp))) return -EFAULT; cam->nreadbuffers = sp.parm.capture.readbuffers; return 0; } static long et61x251_ioctl_v4l2(struct file *filp, unsigned int cmd, void __user *arg) { struct et61x251_device *cam = video_drvdata(filp); switch (cmd) { case VIDIOC_QUERYCAP: return et61x251_vidioc_querycap(cam, arg); case VIDIOC_ENUMINPUT: return et61x251_vidioc_enuminput(cam, arg); case VIDIOC_G_INPUT: return et61x251_vidioc_g_input(cam, arg); case VIDIOC_S_INPUT: return et61x251_vidioc_s_input(cam, arg); case VIDIOC_QUERYCTRL: return et61x251_vidioc_query_ctrl(cam, arg); case VIDIOC_G_CTRL: return et61x251_vidioc_g_ctrl(cam, arg); case VIDIOC_S_CTRL: return et61x251_vidioc_s_ctrl(cam, arg); case VIDIOC_CROPCAP: return et61x251_vidioc_cropcap(cam, arg); case VIDIOC_G_CROP: return et61x251_vidioc_g_crop(cam, arg); case VIDIOC_S_CROP: return et61x251_vidioc_s_crop(cam, arg); case VIDIOC_ENUM_FMT: return et61x251_vidioc_enum_fmt(cam, arg); case VIDIOC_G_FMT: return et61x251_vidioc_g_fmt(cam, arg); case VIDIOC_TRY_FMT: case VIDIOC_S_FMT: return et61x251_vidioc_try_s_fmt(cam, cmd, arg); case VIDIOC_ENUM_FRAMESIZES: return et61x251_vidioc_enum_framesizes(cam, arg); case VIDIOC_G_JPEGCOMP: return et61x251_vidioc_g_jpegcomp(cam, arg); case VIDIOC_S_JPEGCOMP: return et61x251_vidioc_s_jpegcomp(cam, arg); case VIDIOC_REQBUFS: return et61x251_vidioc_reqbufs(cam, arg); case VIDIOC_QUERYBUF: return et61x251_vidioc_querybuf(cam, arg); case VIDIOC_QBUF: return et61x251_vidioc_qbuf(cam, arg); 
case VIDIOC_DQBUF: return et61x251_vidioc_dqbuf(cam, filp, arg); case VIDIOC_STREAMON: return et61x251_vidioc_streamon(cam, arg); case VIDIOC_STREAMOFF: return et61x251_vidioc_streamoff(cam, arg); case VIDIOC_G_PARM: return et61x251_vidioc_g_parm(cam, arg); case VIDIOC_S_PARM: return et61x251_vidioc_s_parm(cam, arg); case VIDIOC_G_STD: case VIDIOC_S_STD: case VIDIOC_QUERYSTD: case VIDIOC_ENUMSTD: case VIDIOC_QUERYMENU: case VIDIOC_ENUM_FRAMEINTERVALS: return -EINVAL; default: return -EINVAL; } } static long et61x251_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct et61x251_device *cam = video_drvdata(filp); long err = 0; if (mutex_lock_interruptible(&cam->fileop_mutex)) return -ERESTARTSYS; if (cam->state & DEV_DISCONNECTED) { DBG(1, "Device not present"); mutex_unlock(&cam->fileop_mutex); return -ENODEV; } if (cam->state & DEV_MISCONFIGURED) { DBG(1, "The camera is misconfigured. Close and open it " "again."); mutex_unlock(&cam->fileop_mutex); return -EIO; } V4LDBG(3, "et61x251", cmd); err = et61x251_ioctl_v4l2(filp, cmd, (void __user *)arg); mutex_unlock(&cam->fileop_mutex); return err; } static const struct v4l2_file_operations et61x251_fops = { .owner = THIS_MODULE, .open = et61x251_open, .release = et61x251_release, .unlocked_ioctl = et61x251_ioctl, .read = et61x251_read, .poll = et61x251_poll, .mmap = et61x251_mmap, }; /*****************************************************************************/ /* It exists a single interface only. We do not need to validate anything. 
 */
/*
 * USB probe: allocate the device state, detect the image sensor, initialise
 * the hardware and register the V4L2 device node.  On any failure everything
 * allocated so far is released via the 'fail' label.  'dev_nr' is static so
 * successive probes consume successive per-device module parameters.
 */
static int et61x251_usb_probe(struct usb_interface* intf,
			      const struct usb_device_id* id)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct et61x251_device* cam;
	static unsigned int dev_nr;
	unsigned int i;
	int err = 0;

	if (!(cam = kzalloc(sizeof(struct et61x251_device), GFP_KERNEL)))
		return -ENOMEM;

	cam->usbdev = udev;

	if (!(cam->control_buffer = kzalloc(8, GFP_KERNEL))) {
		DBG(1, "kmalloc() failed");
		err = -ENOMEM;
		goto fail;
	}

	if (!(cam->v4ldev = video_device_alloc())) {
		DBG(1, "video_device_alloc() failed");
		err = -ENOMEM;
		goto fail;
	}

	DBG(2, "ET61X[12]51 PC Camera Controller detected "
	       "(vid/pid 0x%04X:0x%04X)",id->idVendor, id->idProduct);

	/* Try each known sensor-attach routine until one succeeds. */
	for (i = 0; et61x251_sensor_table[i]; i++) {
		err = et61x251_sensor_table[i](cam);
		if (!err)
			break;
	}

	if (!err)
		DBG(2, "%s image sensor detected", cam->sensor.name);
	else {
		DBG(1, "No supported image sensor detected");
		err = -ENODEV;
		goto fail;
	}

	/* Init failure is not fatal here: mark misconfigured and retry on open(). */
	if (et61x251_init(cam)) {
		DBG(1, "Initialization failed. I will retry on open().");
		cam->state |= DEV_MISCONFIGURED;
	}

	strcpy(cam->v4ldev->name, "ET61X[12]51 PC Camera");
	cam->v4ldev->fops = &et61x251_fops;
	cam->v4ldev->release = video_device_release;
	cam->v4ldev->parent = &udev->dev;

	video_set_drvdata(cam->v4ldev, cam);
	init_completion(&cam->probe);

	err = video_register_device(cam->v4ldev, VFL_TYPE_GRABBER,
				    video_nr[dev_nr]);
	if (err) {
		DBG(1, "V4L2 device registration failed");
		if (err == -ENFILE && video_nr[dev_nr] == -1)
			DBG(1, "Free /dev/videoX node not found");
		video_nr[dev_nr] = -1;
		dev_nr = (dev_nr < ET61X251_MAX_DEVICES-1) ? dev_nr+1 : 0;
		/* Unblock any opener waiting on the probe completion. */
		complete_all(&cam->probe);
		goto fail;
	}

	DBG(2, "V4L2 device registered as %s",
	    video_device_node_name(cam->v4ldev));

	cam->module_param.force_munmap = force_munmap[dev_nr];
	cam->module_param.frame_timeout = frame_timeout[dev_nr];

	dev_nr = (dev_nr < ET61X251_MAX_DEVICES-1) ? dev_nr+1 : 0;

#ifdef CONFIG_VIDEO_ADV_DEBUG
	err = et61x251_create_sysfs(cam);
	if (!err)
		DBG(2, "Optional device control through 'sysfs' "
		       "interface ready");
	else
		DBG(2, "Failed to create 'sysfs' interface for optional "
		       "device controlling. Error #%d", err);
#else
	DBG(2, "Optional device control through 'sysfs' interface disabled");
	DBG(3, "Compile the kernel with the 'CONFIG_VIDEO_ADV_DEBUG' "
	       "configuration option to enable it.");
#endif

	usb_set_intfdata(intf, cam);
	kref_init(&cam->kref);
	usb_get_dev(cam->usbdev);

	complete_all(&cam->probe);

	return 0;

fail:
	if (cam) {
		kfree(cam->control_buffer);
		if (cam->v4ldev)
			video_device_release(cam->v4ldev);
		kfree(cam);
	}
	return err;
}


/*
 * USB disconnect: if the device node is still open, defer deregistration and
 * memory release to the last close (the kref drop only finalises then);
 * otherwise just mark disconnected.  Waiters are woken in both cases.
 */
static void et61x251_usb_disconnect(struct usb_interface* intf)
{
	struct et61x251_device* cam;

	down_write(&et61x251_dev_lock);

	cam = usb_get_intfdata(intf);

	DBG(2, "Disconnecting %s...", cam->v4ldev->name);

	if (cam->users) {
		DBG(2, "Device %s is open! Deregistration and memory "
		       "deallocation are deferred.",
		    video_device_node_name(cam->v4ldev));
		cam->state |= DEV_MISCONFIGURED;
		et61x251_stop_transfer(cam);
		cam->state |= DEV_DISCONNECTED;
		wake_up_interruptible(&cam->wait_frame);
		wake_up(&cam->wait_stream);
	} else
		cam->state |= DEV_DISCONNECTED;

	wake_up_interruptible_all(&cam->wait_open);

	kref_put(&cam->kref, et61x251_release_resources);

	up_write(&et61x251_dev_lock);
}


static struct usb_driver et61x251_usb_driver = {
	.name = "et61x251",
	.id_table = et61x251_id_table,
	.probe = et61x251_usb_probe,
	.disconnect = et61x251_usb_disconnect,
};

/*****************************************************************************/

/* Module entry point: register the USB driver with the core. */
static int __init et61x251_module_init(void)
{
	int err = 0;

	KDBG(2, ET61X251_MODULE_NAME " v" ET61X251_MODULE_VERSION);
	KDBG(3, ET61X251_MODULE_AUTHOR);

	if ((err = usb_register(&et61x251_usb_driver)))
		KDBG(1, "usb_register() failed");

	return err;
}


static void __exit et61x251_module_exit(void)
{
	usb_deregister(&et61x251_usb_driver);
}


module_init(et61x251_module_init);
module_exit(et61x251_module_exit);
gpl-2.0
dineshram/linux-media-si4713USBDriver
drivers/staging/bcm/hostmibs.c
2625
6057
/*
 * File Name: hostmibs.c
 *
 * Author: Beceem Communications Pvt. Ltd
 *
 * Abstract: This file contains the routines to copy the statistics used by
 * the driver to the Host MIBS structure and giving the same to Application.
 */
#include "headers.h"

/*
 * Snapshot the driver's classifier, service-flow, PHS-rule and host
 * statistics into the caller-supplied MIBS structure.
 * Returns STATUS_SUCCESS, or STATUS_FAILURE if the PHS context pointer
 * is NULL (it is the address of an embedded member, so this check can
 * never actually fire; kept for historical reasons).
 */
INT ProcessGetHostMibs(struct bcm_mini_adapter *Adapter, struct bcm_host_stats_mibs *pstHostMibs)
{
	struct bcm_phs_entry *pstServiceFlowEntry = NULL;
	struct bcm_phs_rule *pstPhsRule = NULL;
	struct bcm_phs_classifier_table *pstClassifierTable = NULL;
	struct bcm_phs_classifier_entry *pstClassifierRule = NULL;
	struct bcm_phs_extension *pDeviceExtension = (struct bcm_phs_extension *) &Adapter->stBCMPhsContext;
	UINT nClassifierIndex = 0, nPhsTableIndex = 0, nSfIndex = 0, uiIndex = 0;

	if (pDeviceExtension == NULL) {
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, HOST_MIBS, DBG_LVL_ALL,
				"Invalid Device Extension\n");
		return STATUS_FAILURE;
	}

	/* Copy the classifier Table (only slots marked in-use). */
	for (nClassifierIndex = 0; nClassifierIndex < MAX_CLASSIFIERS; nClassifierIndex++) {
		if (Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE)
			memcpy((PVOID) & pstHostMibs->astClassifierTable[nClassifierIndex],
			       (PVOID) & Adapter->astClassifierTable[nClassifierIndex],
			       sizeof(struct bcm_mibs_classifier_rule));
	}

	/* Copy the SF Table */
	for (nSfIndex = 0; nSfIndex < NO_OF_QUEUES; nSfIndex++) {
		if (Adapter->PackInfo[nSfIndex].bValid) {
			memcpy((PVOID) & pstHostMibs->astSFtable[nSfIndex],
			       (PVOID) & Adapter->PackInfo[nSfIndex],
			       sizeof(struct bcm_mibs_table));
		} else {
			/* If index in not valid,
			 * don't process this for the PHS table.
			 * Go For the next entry.
			 */
			continue;
		}

		/* Retrieve the SFID Entry Index for requested Service Flow */
		if (PHS_INVALID_TABLE_INDEX ==
		    GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
					Adapter->PackInfo[nSfIndex].usVCID_Value,
					&pstServiceFlowEntry))
			continue;

		pstClassifierTable = pstServiceFlowEntry->pstClassifierTable;
		for (uiIndex = 0; uiIndex < MAX_PHSRULE_PER_SF; uiIndex++) {
			pstClassifierRule = &pstClassifierTable->stActivePhsRulesList[uiIndex];
			if (pstClassifierRule->bUsed) {
				pstPhsRule = pstClassifierRule->pstPhsRule;
				pstHostMibs->astPhsRulesTable[nPhsTableIndex].ulSFID =
					Adapter->PackInfo[nSfIndex].ulSFID;
				/*
				 * NOTE(review): copies sizeof(struct bcm_phs_rule)
				 * bytes starting at the u8PHSI member of the
				 * destination entry — relies on u8PHSI being laid
				 * out first in bcm_phs_rule and on the MIBS entry
				 * mirroring that layout after ulSFID; confirm
				 * against the struct definitions.
				 * NOTE(review): nPhsTableIndex is incremented
				 * across all service flows with no visible bound
				 * check against the astPhsRulesTable capacity —
				 * verify it cannot overflow.
				 */
				memcpy(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI,
				       &pstPhsRule->u8PHSI,
				       sizeof(struct bcm_phs_rule));
				nPhsTableIndex++;
			}
		}
	}

	/* Copy other Host Statistics parameters */
	pstHostMibs->stHostInfo.GoodTransmits = Adapter->dev->stats.tx_packets;
	pstHostMibs->stHostInfo.GoodReceives = Adapter->dev->stats.rx_packets;
	pstHostMibs->stHostInfo.CurrNumFreeDesc = atomic_read(&Adapter->CurrNumFreeTxDesc);
	pstHostMibs->stHostInfo.BEBucketSize = Adapter->BEBucketSize;
	pstHostMibs->stHostInfo.rtPSBucketSize = Adapter->rtPSBucketSize;
	pstHostMibs->stHostInfo.TimerActive = Adapter->TimerActive;
	pstHostMibs->stHostInfo.u32TotalDSD = Adapter->u32TotalDSD;

	memcpy(pstHostMibs->stHostInfo.aTxPktSizeHist, Adapter->aTxPktSizeHist,
	       sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES);
	memcpy(pstHostMibs->stHostInfo.aRxPktSizeHist, Adapter->aRxPktSizeHist,
	       sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES);

	return STATUS_SUCCESS;
}

/* Copy the per-application dropped-control-message counters into the MIBS. */
VOID GetDroppedAppCntrlPktMibs(struct bcm_host_stats_mibs *pstHostMibs,
			       struct bcm_tarang_data *pTarang)
{
	memcpy(&(pstHostMibs->stDroppedAppCntrlMsgs),
	       &(pTarang->stDroppedAppCntrlMsgs),
	       sizeof(struct bcm_mibs_dropped_cntrl_msg));
}

/*
 * Mirror the negotiated service-flow parameters into the extended MIBS
 * table for the given queue index.  Most fields are stored and then
 * byte-swapped in place with ntohl()/ntohs(); the double-swap of the
 * 16-bit fields (ntohs then ntohl) is preserved from the original code —
 * NOTE(review): confirm this is the intended wire format and not a bug.
 */
VOID CopyMIBSExtendedSFParameters(struct bcm_mini_adapter *Adapter,
				  struct bcm_connect_mgr_params *psfLocalSet, UINT uiSearchRuleIndex)
{
	struct bcm_mibs_parameters *t =
		&Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable;

	t->wmanIfSfid = psfLocalSet->u32SFID;
	t->wmanIfCmnCpsMaxSustainedRate = psfLocalSet->u32MaxSustainedTrafficRate;
	t->wmanIfCmnCpsMaxTrafficBurst = psfLocalSet->u32MaxTrafficBurst;
	t->wmanIfCmnCpsMinReservedRate = psfLocalSet->u32MinReservedTrafficRate;
	t->wmanIfCmnCpsToleratedJitter = psfLocalSet->u32ToleratedJitter;
	t->wmanIfCmnCpsMaxLatency = psfLocalSet->u32MaximumLatency;
	t->wmanIfCmnCpsFixedVsVariableSduInd = psfLocalSet->u8FixedLengthVSVariableLengthSDUIndicator;
	t->wmanIfCmnCpsFixedVsVariableSduInd = ntohl(t->wmanIfCmnCpsFixedVsVariableSduInd);
	t->wmanIfCmnCpsSduSize = psfLocalSet->u8SDUSize;
	t->wmanIfCmnCpsSduSize = ntohl(t->wmanIfCmnCpsSduSize);
	t->wmanIfCmnCpsSfSchedulingType = psfLocalSet->u8ServiceFlowSchedulingType;
	t->wmanIfCmnCpsSfSchedulingType = ntohl(t->wmanIfCmnCpsSfSchedulingType);
	t->wmanIfCmnCpsArqEnable = psfLocalSet->u8ARQEnable;
	t->wmanIfCmnCpsArqEnable = ntohl(t->wmanIfCmnCpsArqEnable);
	t->wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize);
	t->wmanIfCmnCpsArqWindowSize = ntohl(t->wmanIfCmnCpsArqWindowSize);
	t->wmanIfCmnCpsArqBlockLifetime = ntohs(psfLocalSet->u16ARQBlockLifeTime);
	t->wmanIfCmnCpsArqBlockLifetime = ntohl(t->wmanIfCmnCpsArqBlockLifetime);
	t->wmanIfCmnCpsArqSyncLossTimeout = ntohs(psfLocalSet->u16ARQSyncLossTimeOut);
	t->wmanIfCmnCpsArqSyncLossTimeout = ntohl(t->wmanIfCmnCpsArqSyncLossTimeout);
	t->wmanIfCmnCpsArqDeliverInOrder = psfLocalSet->u8ARQDeliverInOrder;
	t->wmanIfCmnCpsArqDeliverInOrder = ntohl(t->wmanIfCmnCpsArqDeliverInOrder);
	t->wmanIfCmnCpsArqRxPurgeTimeout = ntohs(psfLocalSet->u16ARQRxPurgeTimeOut);
	t->wmanIfCmnCpsArqRxPurgeTimeout = ntohl(t->wmanIfCmnCpsArqRxPurgeTimeout);
	t->wmanIfCmnCpsArqBlockSize = ntohs(psfLocalSet->u16ARQBlockSize);
	t->wmanIfCmnCpsArqBlockSize = ntohl(t->wmanIfCmnCpsArqBlockSize);
	t->wmanIfCmnCpsReqTxPolicy = psfLocalSet->u8RequesttransmissionPolicy;
	t->wmanIfCmnCpsReqTxPolicy = ntohl(t->wmanIfCmnCpsReqTxPolicy);
	t->wmanIfCmnSfCsSpecification = psfLocalSet->u8CSSpecification;
	t->wmanIfCmnSfCsSpecification = ntohl(t->wmanIfCmnSfCsSpecification);
	t->wmanIfCmnCpsTargetSaid = ntohs(psfLocalSet->u16TargetSAID);
	t->wmanIfCmnCpsTargetSaid = ntohl(t->wmanIfCmnCpsTargetSaid);
}
gpl-2.0
CyanogenMod/android_kernel_google_steelhead
arch/frv/mb93090-mb00/pci-vdk.c
3905
12741
/* pci-vdk.c: MB93090-MB00 (VDK) PCI support * * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/delay.h> #include <asm/segment.h> #include <asm/io.h> #include <asm/mb-regs.h> #include <asm/mb86943a.h> #include "pci-frv.h" unsigned int __nongpreldata pci_probe = 1; int __nongpreldata pcibios_last_bus = -1; struct pci_bus *__nongpreldata pci_root_bus; struct pci_ops *__nongpreldata pci_root_ops; /* * The accessible PCI window does not cover the entire CPU address space, but * there are devices we want to access outside of that window, so we need to * insert specific PCI bus resources instead of using the platform-level bus * resources directly for the PCI root bus. * * These are configured and inserted by pcibios_init() and are attached to the * root bus by pcibios_fixup_bus(). 
*/ static struct resource pci_ioport_resource = { .name = "PCI IO", .start = 0, .end = IO_SPACE_LIMIT, .flags = IORESOURCE_IO, }; static struct resource pci_iomem_resource = { .name = "PCI mem", .start = 0, .end = -1, .flags = IORESOURCE_MEM, }; /* * Functions for accessing PCI configuration space */ #define CONFIG_CMD(bus, dev, where) \ (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3)) #define __set_PciCfgAddr(A) writel((A), (volatile void __iomem *) __region_CS1 + 0x80) #define __get_PciCfgDataB(A) readb((volatile void __iomem *) __region_CS1 + 0x88 + ((A) & 3)) #define __get_PciCfgDataW(A) readw((volatile void __iomem *) __region_CS1 + 0x88 + ((A) & 2)) #define __get_PciCfgDataL(A) readl((volatile void __iomem *) __region_CS1 + 0x88) #define __set_PciCfgDataB(A,V) \ writeb((V), (volatile void __iomem *) __region_CS1 + 0x88 + (3 - ((A) & 3))) #define __set_PciCfgDataW(A,V) \ writew((V), (volatile void __iomem *) __region_CS1 + 0x88 + (2 - ((A) & 2))) #define __set_PciCfgDataL(A,V) \ writel((V), (volatile void __iomem *) __region_CS1 + 0x88) #define __get_PciBridgeDataB(A) readb((volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __get_PciBridgeDataW(A) readw((volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __get_PciBridgeDataL(A) readl((volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __set_PciBridgeDataB(A,V) writeb((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __set_PciBridgeDataW(A,V) writew((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __set_PciBridgeDataL(A,V) writel((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A)) static inline int __query(const struct pci_dev *dev) { // return dev->bus->number==0 && (dev->devfn==PCI_DEVFN(0,0)); // return dev->bus->number==1; // return dev->bus->number==0 && // (dev->devfn==PCI_DEVFN(2,0) || dev->devfn==PCI_DEVFN(3,0)); return 0; } /*****************************************************************************/ /* * */ 
static int pci_frv_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { u32 _value; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { _value = __get_PciBridgeDataL(where & ~3); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); _value = __get_PciCfgDataL(where & ~3); } switch (size) { case 1: _value = _value >> ((where & 3) * 8); break; case 2: _value = _value >> ((where & 2) * 8); break; case 4: break; default: BUG(); } *val = _value; return PCIBIOS_SUCCESSFUL; } static int pci_frv_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { switch (size) { case 1: if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __set_PciBridgeDataB(where, value); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); __set_PciCfgDataB(where, value); } break; case 2: if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __set_PciBridgeDataW(where, value); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); __set_PciCfgDataW(where, value); } break; case 4: if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __set_PciBridgeDataL(where, value); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); __set_PciCfgDataL(where, value); } break; default: BUG(); } return PCIBIOS_SUCCESSFUL; } static struct pci_ops pci_direct_frv = { pci_frv_read_config, pci_frv_write_config, }; /* * Before we decide to use direct hardware access mechanisms, we try to do some * trivial checks to ensure it at least _seems_ to be working -- we just test * whether bus 00 contains a host bridge (this is similar to checking * techniques used in XFree86, but ours should be more reliable since we * attempt to make use of direct access hints provided by the PCI BIOS). * * This should be close to trivial, but it isn't, because there are buggy * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID. 
*/ static int __init pci_sanity_check(struct pci_ops *o) { struct pci_bus bus; /* Fake bus and device */ u32 id; bus.number = 0; if (o->read(&bus, 0, PCI_VENDOR_ID, 4, &id) == PCIBIOS_SUCCESSFUL) { printk("PCI: VDK Bridge device:vendor: %08x\n", id); if (id == 0x200e10cf) return 1; } printk("PCI: VDK Bridge: Sanity check failed\n"); return 0; } static struct pci_ops * __init pci_check_direct(void) { unsigned long flags; local_irq_save(flags); /* check if access works */ if (pci_sanity_check(&pci_direct_frv)) { local_irq_restore(flags); printk("PCI: Using configuration frv\n"); // request_mem_region(0xBE040000, 256, "FRV bridge"); // request_mem_region(0xBFFFFFF4, 12, "PCI frv"); return &pci_direct_frv; } local_irq_restore(flags); return NULL; } /* * Discover remaining PCI buses in case there are peer host bridges. * We use the number of last PCI bus provided by the PCI BIOS. */ static void __init pcibios_fixup_peer_bridges(void) { struct pci_bus bus; struct pci_dev dev; int n; u16 l; if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff) return; printk("PCI: Peer bridge fixup\n"); for (n=0; n <= pcibios_last_bus; n++) { if (pci_find_bus(0, n)) continue; bus.number = n; bus.ops = pci_root_ops; dev.bus = &bus; for(dev.devfn=0; dev.devfn<256; dev.devfn += 8) if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) && l != 0x0000 && l != 0xffff) { printk("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l); printk("PCI: Discovered peer bus %02x\n", n); pci_scan_bus(n, pci_root_ops, NULL); break; } } } /* * Exceptions for specific devices. Usually work-arounds for fatal design flaws. */ static void __init pci_fixup_umc_ide(struct pci_dev *d) { /* * UM8886BF IDE controller sets region type bits incorrectly, * therefore they look like memory despite of them being I/O. 
*/ int i; printk("PCI: Fixing base address flags for device %s\n", pci_name(d)); for(i=0; i<4; i++) d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO; } static void __init pci_fixup_ide_bases(struct pci_dev *d) { int i; /* * PCI IDE controllers use non-standard I/O port decoding, respect it. */ if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE) return; printk("PCI: IDE base address fixup for %s\n", pci_name(d)); for(i=0; i<4; i++) { struct resource *r = &d->resource[i]; if ((r->start & ~0x80) == 0x374) { r->start |= 2; r->end = r->start; } } } static void __init pci_fixup_ide_trash(struct pci_dev *d) { int i; /* * There exist PCI IDE controllers which have utter garbage * in first four base registers. Ignore that. */ printk("PCI: IDE base address trash cleared for %s\n", pci_name(d)); for(i=0; i<4; i++) d->resource[i].start = d->resource[i].end = d->resource[i].flags = 0; } static void __devinit pci_fixup_latency(struct pci_dev *d) { /* * SiS 5597 and 5598 chipsets require latency timer set to * at most 32 to avoid lockups. */ DBG("PCI: Setting max latency to 32\n"); pcibios_max_latency = 32; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, pci_fixup_ide_trash); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency); DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases); /* * Called after each bus is probed, but before its children * are examined. 
*/ void __init pcibios_fixup_bus(struct pci_bus *bus) { #if 0 printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); #endif if (bus->number == 0) { bus->resource[0] = &pci_ioport_resource; bus->resource[1] = &pci_iomem_resource; } pci_read_bridge_bases(bus); if (bus->number == 0) { struct list_head *ln; struct pci_dev *dev; for (ln=bus->devices.next; ln != &bus->devices; ln=ln->next) { dev = pci_dev_b(ln); if (dev->devfn == 0) { dev->resource[0].start = 0; dev->resource[0].end = 0; } } } } /* * Initialization. Try all known PCI access methods. Note that we support * using both PCI BIOS and direct access: in such cases, we use I/O ports * to access config space, but we still keep BIOS order of cards to be * compatible with 2.0.X. This should go away some day. */ int __init pcibios_init(void) { struct pci_ops *dir = NULL; if (!mb93090_mb00_detected) return -ENXIO; __reg_MB86943_sl_ctl |= MB86943_SL_CTL_DRCT_MASTER_SWAP | MB86943_SL_CTL_DRCT_SLAVE_SWAP; __reg_MB86943_ecs_base(1) = ((__region_CS2 + 0x01000000) >> 9) | 0x08000000; __reg_MB86943_ecs_base(2) = ((__region_CS2 + 0x00000000) >> 9) | 0x08000000; *(volatile uint32_t *) (__region_CS1 + 0x848) = 0xe0000000; *(volatile uint32_t *) (__region_CS1 + 0x8b8) = 0x00000000; __reg_MB86943_sl_pci_io_base = (__region_CS2 + 0x04000000) >> 9; __reg_MB86943_sl_pci_mem_base = (__region_CS2 + 0x08000000) >> 9; __reg_MB86943_pci_sl_io_base = __region_CS2 + 0x04000000; __reg_MB86943_pci_sl_mem_base = __region_CS2 + 0x08000000; mb(); /* enable PCI arbitration */ __reg_MB86943_pci_arbiter = MB86943_PCIARB_EN; pci_ioport_resource.start = (__reg_MB86943_sl_pci_io_base << 9) & 0xfffffc00; pci_ioport_resource.end = (__reg_MB86943_sl_pci_io_range << 9) | 0x3ff; pci_ioport_resource.end += pci_ioport_resource.start; printk("PCI IO window: %08llx-%08llx\n", (unsigned long long) pci_ioport_resource.start, (unsigned long long) pci_ioport_resource.end); pci_iomem_resource.start = (__reg_MB86943_sl_pci_mem_base << 9) & 0xfffffc00; 
pci_iomem_resource.end = (__reg_MB86943_sl_pci_mem_range << 9) | 0x3ff; pci_iomem_resource.end += pci_iomem_resource.start; /* Reserve somewhere to write to flush posted writes. This is used by * __flush_PCI_writes() from asm/io.h to force the write FIFO in the * CPU-PCI bridge to flush as this doesn't happen automatically when a * read is performed on the MB93090 development kit motherboard. */ pci_iomem_resource.start += 0x400; printk("PCI MEM window: %08llx-%08llx\n", (unsigned long long) pci_iomem_resource.start, (unsigned long long) pci_iomem_resource.end); printk("PCI DMA memory: %08lx-%08lx\n", dma_coherent_mem_start, dma_coherent_mem_end); if (insert_resource(&iomem_resource, &pci_iomem_resource) < 0) panic("Unable to insert PCI IOMEM resource\n"); if (insert_resource(&ioport_resource, &pci_ioport_resource) < 0) panic("Unable to insert PCI IOPORT resource\n"); if (!pci_probe) return -ENXIO; dir = pci_check_direct(); if (dir) pci_root_ops = dir; else { printk("PCI: No PCI bus detected\n"); return -ENXIO; } printk("PCI: Probing PCI hardware\n"); pci_root_bus = pci_scan_bus(0, pci_root_ops, NULL); pcibios_irq_init(); pcibios_fixup_peer_bridges(); pcibios_fixup_irqs(); pcibios_resource_survey(); return 0; } arch_initcall(pcibios_init); char * __init pcibios_setup(char *str) { if (!strcmp(str, "off")) { pci_probe = 0; return NULL; } else if (!strncmp(str, "lastbus=", 8)) { pcibios_last_bus = simple_strtol(str+8, NULL, 0); return NULL; } return str; } int pcibios_enable_device(struct pci_dev *dev, int mask) { int err; if ((err = pci_enable_resources(dev, mask)) < 0) return err; if (!dev->msi_enabled) pcibios_enable_irq(dev); return 0; }
gpl-2.0
snuk182/android_kernel_asus_A66
drivers/hwmon/emc1403.c
4161
10966
/* * emc1403.c - SMSC Thermal Driver * * Copyright (C) 2008 Intel Corp * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * TODO * - cache alarm and critical limit registers * - add emc1404 support */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/sysfs.h> #include <linux/mutex.h> #define THERMAL_PID_REG 0xfd #define THERMAL_SMSC_ID_REG 0xfe #define THERMAL_REVISION_REG 0xff struct thermal_data { struct device *hwmon_dev; struct mutex mutex; /* * Cache the hyst value so we don't keep re-reading it. In theory * we could cache it forever as nobody else should be writing it. 
*/ u8 cached_hyst; unsigned long hyst_valid; }; static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); int retval = i2c_smbus_read_byte_data(client, sda->index); if (retval < 0) return retval; return sprintf(buf, "%d000\n", retval); } static ssize_t show_bit(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr); int retval = i2c_smbus_read_byte_data(client, sda->nr); if (retval < 0) return retval; retval &= sda->index; return sprintf(buf, "%d\n", retval ? 1 : 0); } static ssize_t store_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); struct i2c_client *client = to_i2c_client(dev); unsigned long val; int retval; if (kstrtoul(buf, 10, &val)) return -EINVAL; retval = i2c_smbus_write_byte_data(client, sda->index, DIV_ROUND_CLOSEST(val, 1000)); if (retval < 0) return retval; return count; } static ssize_t store_bit(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct thermal_data *data = i2c_get_clientdata(client); struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr); unsigned long val; int retval; if (kstrtoul(buf, 10, &val)) return -EINVAL; mutex_lock(&data->mutex); retval = i2c_smbus_read_byte_data(client, sda->nr); if (retval < 0) goto fail; retval &= ~sda->index; if (val) retval |= sda->index; retval = i2c_smbus_write_byte_data(client, sda->index, retval); if (retval == 0) retval = count; fail: mutex_unlock(&data->mutex); return retval; } static ssize_t show_hyst(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct thermal_data *data 
= i2c_get_clientdata(client); struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); int retval; int hyst; retval = i2c_smbus_read_byte_data(client, sda->index); if (retval < 0) return retval; if (time_after(jiffies, data->hyst_valid)) { hyst = i2c_smbus_read_byte_data(client, 0x21); if (hyst < 0) return retval; data->cached_hyst = hyst; data->hyst_valid = jiffies + HZ; } return sprintf(buf, "%d000\n", retval - data->cached_hyst); } static ssize_t store_hyst(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct thermal_data *data = i2c_get_clientdata(client); struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); int retval; int hyst; unsigned long val; if (kstrtoul(buf, 10, &val)) return -EINVAL; mutex_lock(&data->mutex); retval = i2c_smbus_read_byte_data(client, sda->index); if (retval < 0) goto fail; hyst = val - retval * 1000; hyst = DIV_ROUND_CLOSEST(hyst, 1000); if (hyst < 0 || hyst > 255) { retval = -ERANGE; goto fail; } retval = i2c_smbus_write_byte_data(client, 0x21, hyst); if (retval == 0) { retval = count; data->cached_hyst = hyst; data->hyst_valid = jiffies + HZ; } fail: mutex_unlock(&data->mutex); return retval; } /* * Sensors. We pass the actual i2c register to the methods. 
*/ static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x06); static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x05); static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x20); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0x00); static SENSOR_DEVICE_ATTR_2(temp1_min_alarm, S_IRUGO, show_bit, NULL, 0x36, 0x01); static SENSOR_DEVICE_ATTR_2(temp1_max_alarm, S_IRUGO, show_bit, NULL, 0x35, 0x01); static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO, show_bit, NULL, 0x37, 0x01); static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR, show_hyst, store_hyst, 0x20); static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x08); static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x07); static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x19); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0x01); static SENSOR_DEVICE_ATTR_2(temp2_min_alarm, S_IRUGO, show_bit, NULL, 0x36, 0x02); static SENSOR_DEVICE_ATTR_2(temp2_max_alarm, S_IRUGO, show_bit, NULL, 0x35, 0x02); static SENSOR_DEVICE_ATTR_2(temp2_crit_alarm, S_IRUGO, show_bit, NULL, 0x37, 0x02); static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO | S_IWUSR, show_hyst, store_hyst, 0x19); static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x16); static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x15); static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x1A); static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 0x23); static SENSOR_DEVICE_ATTR_2(temp3_min_alarm, S_IRUGO, show_bit, NULL, 0x36, 0x04); static SENSOR_DEVICE_ATTR_2(temp3_max_alarm, S_IRUGO, show_bit, NULL, 0x35, 0x04); static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO, show_bit, NULL, 0x37, 0x04); static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR, 
show_hyst, store_hyst, 0x1A); static SENSOR_DEVICE_ATTR_2(power_state, S_IRUGO | S_IWUSR, show_bit, store_bit, 0x03, 0x40); static struct attribute *mid_att_thermal[] = { &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_crit.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_crit.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp3_min_alarm.dev_attr.attr, &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, &sensor_dev_attr_power_state.dev_attr.attr, NULL }; static const struct attribute_group m_thermal_gr = { .attrs = mid_att_thermal }; static int emc1403_detect(struct i2c_client *client, struct i2c_board_info *info) { int id; /* Check if thermal chip is SMSC and EMC1403 or EMC1423 */ id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG); if (id != 0x5d) return -ENODEV; id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG); switch (id) { case 0x21: strlcpy(info->type, "emc1403", I2C_NAME_SIZE); break; case 0x23: strlcpy(info->type, "emc1423", I2C_NAME_SIZE); break; /* * Note: 0x25 is the 1404 which is very similar and this * driver could be extended */ default: return -ENODEV; } id = i2c_smbus_read_byte_data(client, 
THERMAL_REVISION_REG); if (id != 0x01) return -ENODEV; return 0; } static int emc1403_probe(struct i2c_client *client, const struct i2c_device_id *id) { int res; struct thermal_data *data; data = kzalloc(sizeof(struct thermal_data), GFP_KERNEL); if (data == NULL) { dev_warn(&client->dev, "out of memory"); return -ENOMEM; } i2c_set_clientdata(client, data); mutex_init(&data->mutex); data->hyst_valid = jiffies - 1; /* Expired */ res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr); if (res) { dev_warn(&client->dev, "create group failed\n"); goto thermal_error1; } data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { res = PTR_ERR(data->hwmon_dev); dev_warn(&client->dev, "register hwmon dev failed\n"); goto thermal_error2; } dev_info(&client->dev, "EMC1403 Thermal chip found\n"); return res; thermal_error2: sysfs_remove_group(&client->dev.kobj, &m_thermal_gr); thermal_error1: kfree(data); return res; } static int emc1403_remove(struct i2c_client *client) { struct thermal_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &m_thermal_gr); kfree(data); return 0; } static const unsigned short emc1403_address_list[] = { 0x18, 0x29, 0x4c, 0x4d, I2C_CLIENT_END }; static const struct i2c_device_id emc1403_idtable[] = { { "emc1403", 0 }, { "emc1423", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, emc1403_idtable); static struct i2c_driver sensor_emc1403 = { .class = I2C_CLASS_HWMON, .driver = { .name = "emc1403", }, .detect = emc1403_detect, .probe = emc1403_probe, .remove = emc1403_remove, .id_table = emc1403_idtable, .address_list = emc1403_address_list, }; module_i2c_driver(sensor_emc1403); MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com"); MODULE_DESCRIPTION("emc1403 Thermal Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
NookSimpleTouchTeam/android_kernel_bn_nst
arch/powerpc/platforms/embedded6xx/linkstation.c
4673
4199
/* * Board setup routines for the Buffalo Linkstation / Kurobox Platform. * * Copyright (C) 2006 G. Liakhovetski (g.liakhovetski@gmx.de) * * Based on sandpoint.c by Mark A. Greer * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of * any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/initrd.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/prom.h> #include <asm/mpic.h> #include <asm/pci-bridge.h> #include "mpc10x.h" static __initdata struct of_device_id of_bus_ids[] = { { .type = "soc", }, { .compatible = "simple-bus", }, {}, }; static int __init declare_of_platform_devices(void) { of_platform_bus_probe(NULL, of_bus_ids, NULL); return 0; } machine_device_initcall(linkstation, declare_of_platform_devices); static int __init linkstation_add_bridge(struct device_node *dev) { #ifdef CONFIG_PCI int len; struct pci_controller *hose; const int *bus_range; printk("Adding PCI host bridge %s\n", dev->full_name); bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) printk(KERN_WARNING "Can't get bus-range for %s, assume" " bus 0\n", dev->full_name); hose = pcibios_alloc_controller(dev); if (hose == NULL) return -ENOMEM; hose->first_busno = bus_range ? bus_range[0] : 0; hose->last_busno = bus_range ? bus_range[1] : 0xff; setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0); /* Interpret the "ranges" property */ /* This also maps the I/O region and sets isa_io/mem_base */ pci_process_bridge_OF_ranges(hose, dev, 1); #endif return 0; } static void __init linkstation_setup_arch(void) { struct device_node *np; /* Lookup PCI host bridges */ for_each_compatible_node(np, "pci", "mpc10x-pci") linkstation_add_bridge(np); printk(KERN_INFO "BUFFALO Network Attached Storage Series\n"); printk(KERN_INFO "(C) 2002-2005 BUFFALO INC.\n"); } /* * Interrupt setup and service. 
Interrupts on the linkstation come * from the four PCI slots plus onboard 8241 devices: I2C, DUART. */ static void __init linkstation_init_IRQ(void) { struct mpic *mpic; struct device_node *dnp; const u32 *prop; int size; phys_addr_t paddr; dnp = of_find_node_by_type(NULL, "open-pic"); if (dnp == NULL) return; prop = of_get_property(dnp, "reg", &size); paddr = (phys_addr_t)of_translate_address(dnp, prop); mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET, 4, 32, " EPIC "); BUG_ON(mpic == NULL); /* PCI IRQs */ mpic_assign_isu(mpic, 0, paddr + 0x10200); /* I2C */ mpic_assign_isu(mpic, 1, paddr + 0x11000); /* ttyS0, ttyS1 */ mpic_assign_isu(mpic, 2, paddr + 0x11100); mpic_init(mpic); } extern void avr_uart_configure(void); extern void avr_uart_send(const char); static void linkstation_restart(char *cmd) { local_irq_disable(); /* Reset system via AVR */ avr_uart_configure(); /* Send reboot command */ avr_uart_send('C'); for(;;) /* Spin until reset happens */ avr_uart_send('G'); /* "kick" */ } static void linkstation_power_off(void) { local_irq_disable(); /* Power down system via AVR */ avr_uart_configure(); /* send shutdown command */ avr_uart_send('E'); for(;;) /* Spin until power-off happens */ avr_uart_send('G'); /* "kick" */ /* NOTREACHED */ } static void linkstation_halt(void) { linkstation_power_off(); /* NOTREACHED */ } static void linkstation_show_cpuinfo(struct seq_file *m) { seq_printf(m, "vendor\t\t: Buffalo Technology\n"); seq_printf(m, "machine\t\t: Linkstation I/Kurobox(HG)\n"); } static int __init linkstation_probe(void) { unsigned long root; root = of_get_flat_dt_root(); if (!of_flat_dt_is_compatible(root, "linkstation")) return 0; return 1; } define_machine(linkstation){ .name = "Buffalo Linkstation", .probe = linkstation_probe, .setup_arch = linkstation_setup_arch, .init_IRQ = linkstation_init_IRQ, .show_cpuinfo = linkstation_show_cpuinfo, .get_irq = mpic_get_irq, .restart = linkstation_restart, .power_off = linkstation_power_off, .halt = 
linkstation_halt, .calibrate_decr = generic_calibrate_decr, };
gpl-2.0
wangsai008/NewWorld-F160-JB-Kernel
arch/mips/txx9/generic/spi_eeprom.c
8769
2614
/*
 * spi_eeprom.c
 * Copyright (C) 2000-2001 Toshiba Corporation
 *
 * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
 * terms of the GNU General Public License version 2. This program is
 * licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
#include <asm/txx9/spi.h>

#define AT250X0_PAGE_SIZE	8

/*
 * Describe an AT250x0 serial EEPROM to the "at25" driver: allocate the
 * platform data and hand the board info to the SPI core.
 */
int __init spi_eeprom_register(int busid, int chipid, int size)
{
	struct spi_eeprom *eeprom;
	struct spi_board_info info = {
		.modalias = "at25",
		.max_speed_hz = 1500000,	/* 1.5Mbps */
		.bus_num = busid,
		.chip_select = chipid,
		/* Mode 0: High-Active, Sample-Then-Shift */
	};

	eeprom = kzalloc(sizeof(*eeprom), GFP_KERNEL);
	if (!eeprom)
		return -ENOMEM;
	strcpy(eeprom->name, "at250x0");
	eeprom->byte_len = size;
	eeprom->page_size = AT250X0_PAGE_SIZE;
	eeprom->flags = EE_ADDR1;	/* single address byte */
	info.platform_data = eeprom;

	return spi_register_board_info(&info, 1);
}

/* simple temporary spi driver to provide early access to seeprom. */

static struct read_param {
	int busid;
	int chipid;
	int address;
	unsigned char *buf;
	int len;
} *read_param;

/*
 * Probe callback of the temporary driver: when bound to the device named
 * by read_param, stream the requested bytes out of the EEPROM into the
 * caller's buffer, one page-sized chunk at a time.
 */
static int __init early_seeprom_probe(struct spi_device *spi)
{
	int stat = 0;
	u8 cmd[2];
	int remaining = read_param->len;
	char *dst = read_param->buf;
	int addr = read_param->address;

	dev_info(&spi->dev, "spiclk %u KHz.\n",
		 (spi->max_speed_hz + 500) / 1000);
	if (read_param->busid != spi->master->bus_num ||
	    read_param->chipid != spi->chip_select)
		return -ENODEV;

	while (remaining > 0) {
		/* spi_write_then_read can only work with small chunk */
		int chunk = remaining;

		if (chunk > AT250X0_PAGE_SIZE)
			chunk = AT250X0_PAGE_SIZE;
		cmd[0] = 0x03;	/* AT25_READ */
		cmd[1] = addr;
		stat = spi_write_then_read(spi, cmd, sizeof(cmd), dst, chunk);
		dst += chunk;
		remaining -= chunk;
		addr += chunk;
	}
	return stat;
}

static struct spi_driver early_seeprom_driver __initdata = {
	.driver = {
		.name	= "at25",
		.owner	= THIS_MODULE,
	},
	.probe	= early_seeprom_probe,
};

/*
 * Early EEPROM read helper: point the temporary driver at the requested
 * device, register it (which triggers the probe above synchronously for
 * an already-registered device), then unregister it again.
 */
int __init spi_eeprom_read(int busid, int chipid, int address,
			   unsigned char *buf, int len)
{
	int ret;
	struct read_param param = {
		.busid = busid,
		.chipid = chipid,
		.address = address,
		.buf = buf,
		.len = len,
	};

	read_param = &param;
	ret = spi_register_driver(&early_seeprom_driver);
	if (!ret)
		spi_unregister_driver(&early_seeprom_driver);
	return ret;
}
gpl-2.0
brinlyaus/um-test-thing
arch/mips/txx9/generic/spi_eeprom.c
8769
2614
/*
 * spi_eeprom.c
 * Copyright (C) 2000-2001 Toshiba Corporation
 *
 * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
 * terms of the GNU General Public License version 2. This program is
 * licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
#include <asm/txx9/spi.h>

#define AT250X0_PAGE_SIZE	8

/* register board information for at25 driver */
int __init spi_eeprom_register(int busid, int chipid, int size)
{
	struct spi_board_info info = {
		.modalias = "at25",
		.max_speed_hz = 1500000,	/* 1.5Mbps */
		.bus_num = busid,
		.chip_select = chipid,
		/* Mode 0: High-Active, Sample-Then-Shift */
	};
	struct spi_eeprom *ee = kzalloc(sizeof(*ee), GFP_KERNEL);

	if (!ee)
		return -ENOMEM;
	strcpy(ee->name, "at250x0");
	ee->byte_len = size;
	ee->page_size = AT250X0_PAGE_SIZE;
	ee->flags = EE_ADDR1;		/* one-byte addressing */
	info.platform_data = ee;

	return spi_register_board_info(&info, 1);
}

/* simple temporary spi driver to provide early access to seeprom. */

static struct read_param {
	int busid;
	int chipid;
	int address;
	unsigned char *buf;
	int len;
} *read_param;

/*
 * Temporary probe: reject any device other than the one described by
 * read_param, otherwise copy "len" bytes starting at "address" into the
 * caller's buffer in page-sized transfers.
 */
static int __init early_seeprom_probe(struct spi_device *spi)
{
	int status = 0;
	u8 command[2];
	int todo = read_param->len;
	char *p = read_param->buf;
	int a = read_param->address;

	dev_info(&spi->dev, "spiclk %u KHz.\n",
		 (spi->max_speed_hz + 500) / 1000);

	if (spi->master->bus_num != read_param->busid ||
	    spi->chip_select != read_param->chipid)
		return -ENODEV;

	for (; todo > 0; todo -= AT250X0_PAGE_SIZE) {
		/* spi_write_then_read can only work with small chunk */
		int n = (todo < AT250X0_PAGE_SIZE) ? todo : AT250X0_PAGE_SIZE;

		command[0] = 0x03;	/* AT25_READ */
		command[1] = a;
		status = spi_write_then_read(spi, command, sizeof(command),
					     p, n);
		p += n;
		a += n;
	}
	return status;
}

static struct spi_driver early_seeprom_driver __initdata = {
	.driver = {
		.name	= "at25",
		.owner	= THIS_MODULE,
	},
	.probe	= early_seeprom_probe,
};

/*
 * Read from the serial EEPROM before the real at25 driver is available:
 * stash the request in read_param, register the throw-away driver so its
 * probe runs against the matching device, then drop the driver again.
 */
int __init spi_eeprom_read(int busid, int chipid, int address,
			   unsigned char *buf, int len)
{
	int ret;
	struct read_param param = {
		.busid = busid,
		.chipid = chipid,
		.address = address,
		.buf = buf,
		.len = len,
	};

	read_param = &param;
	ret = spi_register_driver(&early_seeprom_driver);
	if (!ret)
		spi_unregister_driver(&early_seeprom_driver);
	return ret;
}
gpl-2.0
selva-simple/galaxyr_cm10_kernel
sound/pci/ice1712/quartet.c
9281
30730
/* * ALSA driver for ICEnsemble VT1724 (Envy24HT) * * Lowlevel functions for Infrasonic Quartet * * Copyright (c) 2009 Pavel Hofman <pavel.hofman@ivitera.com> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/tlv.h> #include <sound/info.h> #include "ice1712.h" #include "envy24ht.h" #include <sound/ak4113.h> #include "quartet.h" struct qtet_spec { struct ak4113 *ak4113; unsigned int scr; /* system control register */ unsigned int mcr; /* monitoring control register */ unsigned int cpld; /* cpld register */ }; struct qtet_kcontrol_private { unsigned int bit; void (*set_register)(struct snd_ice1712 *ice, unsigned int val); unsigned int (*get_register)(struct snd_ice1712 *ice); unsigned char *texts[2]; }; enum { IN12_SEL = 0, IN34_SEL, AIN34_SEL, COAX_OUT, IN12_MON12, IN12_MON34, IN34_MON12, IN34_MON34, OUT12_MON34, OUT34_MON12, }; static char *ext_clock_names[3] = {"IEC958 In", "Word Clock 1xFS", "Word Clock 256xFS"}; /* chip address on I2C bus */ #define AK4113_ADDR 0x26 /* S/PDIF receiver */ /* chip address on SPI bus */ #define AK4620_ADDR 0x02 /* ADC/DAC */ /* * GPIO pins */ /* GPIO0 - O - DATA0, def. 
0 */ #define GPIO_D0 (1<<0) /* GPIO1 - I/O - DATA1, Jack Detect Input0 (0:present, 1:missing), def. 1 */ #define GPIO_D1_JACKDTC0 (1<<1) /* GPIO2 - I/O - DATA2, Jack Detect Input1 (0:present, 1:missing), def. 1 */ #define GPIO_D2_JACKDTC1 (1<<2) /* GPIO3 - I/O - DATA3, def. 1 */ #define GPIO_D3 (1<<3) /* GPIO4 - I/O - DATA4, SPI CDTO, def. 1 */ #define GPIO_D4_SPI_CDTO (1<<4) /* GPIO5 - I/O - DATA5, SPI CCLK, def. 1 */ #define GPIO_D5_SPI_CCLK (1<<5) /* GPIO6 - I/O - DATA6, Cable Detect Input (0:detected, 1:not detected */ #define GPIO_D6_CD (1<<6) /* GPIO7 - I/O - DATA7, Device Detect Input (0:detected, 1:not detected */ #define GPIO_D7_DD (1<<7) /* GPIO8 - O - CPLD Chip Select, def. 1 */ #define GPIO_CPLD_CSN (1<<8) /* GPIO9 - O - CPLD register read/write (0:write, 1:read), def. 0 */ #define GPIO_CPLD_RW (1<<9) /* GPIO10 - O - SPI Chip Select for CODEC#0, def. 1 */ #define GPIO_SPI_CSN0 (1<<10) /* GPIO11 - O - SPI Chip Select for CODEC#1, def. 1 */ #define GPIO_SPI_CSN1 (1<<11) /* GPIO12 - O - Ex. Register Output Enable (0:enable, 1:disable), def. 1, * init 0 */ #define GPIO_EX_GPIOE (1<<12) /* GPIO13 - O - Ex. Register0 Chip Select for System Control Register, * def. 1 */ #define GPIO_SCR (1<<13) /* GPIO14 - O - Ex. Register1 Chip Select for Monitor Control Register, * def. 
1 */ #define GPIO_MCR (1<<14) #define GPIO_SPI_ALL (GPIO_D4_SPI_CDTO | GPIO_D5_SPI_CCLK |\ GPIO_SPI_CSN0 | GPIO_SPI_CSN1) #define GPIO_DATA_MASK (GPIO_D0 | GPIO_D1_JACKDTC0 | \ GPIO_D2_JACKDTC1 | GPIO_D3 | \ GPIO_D4_SPI_CDTO | GPIO_D5_SPI_CCLK | \ GPIO_D6_CD | GPIO_D7_DD) /* System Control Register GPIO_SCR data bits */ /* Mic/Line select relay (0:line, 1:mic) */ #define SCR_RELAY GPIO_D0 /* Phantom power drive control (0:5V, 1:48V) */ #define SCR_PHP_V GPIO_D1_JACKDTC0 /* H/W mute control (0:Normal, 1:Mute) */ #define SCR_MUTE GPIO_D2_JACKDTC1 /* Phantom power control (0:Phantom on, 1:off) */ #define SCR_PHP GPIO_D3 /* Analog input 1/2 Source Select */ #define SCR_AIN12_SEL0 GPIO_D4_SPI_CDTO #define SCR_AIN12_SEL1 GPIO_D5_SPI_CCLK /* Analog input 3/4 Source Select (0:line, 1:hi-z) */ #define SCR_AIN34_SEL GPIO_D6_CD /* Codec Power Down (0:power down, 1:normal) */ #define SCR_CODEC_PDN GPIO_D7_DD #define SCR_AIN12_LINE (0) #define SCR_AIN12_MIC (SCR_AIN12_SEL0) #define SCR_AIN12_LOWCUT (SCR_AIN12_SEL1 | SCR_AIN12_SEL0) /* Monitor Control Register GPIO_MCR data bits */ /* Input 1/2 to Monitor 1/2 (0:off, 1:on) */ #define MCR_IN12_MON12 GPIO_D0 /* Input 1/2 to Monitor 3/4 (0:off, 1:on) */ #define MCR_IN12_MON34 GPIO_D1_JACKDTC0 /* Input 3/4 to Monitor 1/2 (0:off, 1:on) */ #define MCR_IN34_MON12 GPIO_D2_JACKDTC1 /* Input 3/4 to Monitor 3/4 (0:off, 1:on) */ #define MCR_IN34_MON34 GPIO_D3 /* Output to Monitor 1/2 (0:off, 1:on) */ #define MCR_OUT34_MON12 GPIO_D4_SPI_CDTO /* Output to Monitor 3/4 (0:off, 1:on) */ #define MCR_OUT12_MON34 GPIO_D5_SPI_CCLK /* CPLD Register DATA bits */ /* Clock Rate Select */ #define CPLD_CKS0 GPIO_D0 #define CPLD_CKS1 GPIO_D1_JACKDTC0 #define CPLD_CKS2 GPIO_D2_JACKDTC1 /* Sync Source Select (0:Internal, 1:External) */ #define CPLD_SYNC_SEL GPIO_D3 /* Word Clock FS Select (0:FS, 1:256FS) */ #define CPLD_WORD_SEL GPIO_D4_SPI_CDTO /* Coaxial Output Source (IS-Link) (0:SPDIF, 1:I2S) */ #define CPLD_COAX_OUT GPIO_D5_SPI_CCLK /* Input 1/2 Source 
Select (0:Analog12, 1:An34) */ #define CPLD_IN12_SEL GPIO_D6_CD /* Input 3/4 Source Select (0:Analog34, 1:Digital In) */ #define CPLD_IN34_SEL GPIO_D7_DD /* internal clock (CPLD_SYNC_SEL = 0) options */ #define CPLD_CKS_44100HZ (0) #define CPLD_CKS_48000HZ (CPLD_CKS0) #define CPLD_CKS_88200HZ (CPLD_CKS1) #define CPLD_CKS_96000HZ (CPLD_CKS1 | CPLD_CKS0) #define CPLD_CKS_176400HZ (CPLD_CKS2) #define CPLD_CKS_192000HZ (CPLD_CKS2 | CPLD_CKS0) #define CPLD_CKS_MASK (CPLD_CKS0 | CPLD_CKS1 | CPLD_CKS2) /* external clock (CPLD_SYNC_SEL = 1) options */ /* external clock - SPDIF */ #define CPLD_EXT_SPDIF (0 | CPLD_SYNC_SEL) /* external clock - WordClock 1xfs */ #define CPLD_EXT_WORDCLOCK_1FS (CPLD_CKS1 | CPLD_SYNC_SEL) /* external clock - WordClock 256xfs */ #define CPLD_EXT_WORDCLOCK_256FS (CPLD_CKS1 | CPLD_WORD_SEL |\ CPLD_SYNC_SEL) #define EXT_SPDIF_TYPE 0 #define EXT_WORDCLOCK_1FS_TYPE 1 #define EXT_WORDCLOCK_256FS_TYPE 2 #define AK4620_DFS0 (1<<0) #define AK4620_DFS1 (1<<1) #define AK4620_CKS0 (1<<2) #define AK4620_CKS1 (1<<3) /* Clock and Format Control register */ #define AK4620_DFS_REG 0x02 /* Deem and Volume Control register */ #define AK4620_DEEMVOL_REG 0x03 #define AK4620_SMUTE (1<<7) /* * Conversion from int value to its binary form. Used for debugging. * The output buffer must be allocated prior to calling the function. 
*/ static char *get_binary(char *buffer, int value) { int i, j, pos; pos = 0; for (i = 0; i < 4; ++i) { for (j = 0; j < 8; ++j) { if (value & (1 << (31-(i*8 + j)))) buffer[pos] = '1'; else buffer[pos] = '0'; pos++; } if (i < 3) { buffer[pos] = ' '; pos++; } } buffer[pos] = '\0'; return buffer; } /* * Initial setup of the conversion array GPIO <-> rate */ static unsigned int qtet_rates[] = { 44100, 48000, 88200, 96000, 176400, 192000, }; static unsigned int cks_vals[] = { CPLD_CKS_44100HZ, CPLD_CKS_48000HZ, CPLD_CKS_88200HZ, CPLD_CKS_96000HZ, CPLD_CKS_176400HZ, CPLD_CKS_192000HZ, }; static struct snd_pcm_hw_constraint_list qtet_rates_info = { .count = ARRAY_SIZE(qtet_rates), .list = qtet_rates, .mask = 0, }; static void qtet_ak4113_write(void *private_data, unsigned char reg, unsigned char val) { snd_vt1724_write_i2c((struct snd_ice1712 *)private_data, AK4113_ADDR, reg, val); } static unsigned char qtet_ak4113_read(void *private_data, unsigned char reg) { return snd_vt1724_read_i2c((struct snd_ice1712 *)private_data, AK4113_ADDR, reg); } /* * AK4620 section */ /* * Write data to addr register of ak4620 */ static void qtet_akm_write(struct snd_akm4xxx *ak, int chip, unsigned char addr, unsigned char data) { unsigned int tmp, orig_dir; int idx; unsigned int addrdata; struct snd_ice1712 *ice = ak->private_data[0]; if (snd_BUG_ON(chip < 0 || chip >= 4)) return; /*printk(KERN_DEBUG "Writing to AK4620: chip=%d, addr=0x%x, data=0x%x\n", chip, addr, data);*/ orig_dir = ice->gpio.get_dir(ice); ice->gpio.set_dir(ice, orig_dir | GPIO_SPI_ALL); /* set mask - only SPI bits */ ice->gpio.set_mask(ice, ~GPIO_SPI_ALL); tmp = ice->gpio.get_data(ice); /* high all */ tmp |= GPIO_SPI_ALL; ice->gpio.set_data(ice, tmp); udelay(100); /* drop chip select */ if (chip) /* CODEC 1 */ tmp &= ~GPIO_SPI_CSN1; else tmp &= ~GPIO_SPI_CSN0; ice->gpio.set_data(ice, tmp); udelay(100); /* build I2C address + data byte */ addrdata = (AK4620_ADDR << 6) | 0x20 | (addr & 0x1f); addrdata = (addrdata << 8) | 
data; for (idx = 15; idx >= 0; idx--) { /* drop clock */ tmp &= ~GPIO_D5_SPI_CCLK; ice->gpio.set_data(ice, tmp); udelay(100); /* set data */ if (addrdata & (1 << idx)) tmp |= GPIO_D4_SPI_CDTO; else tmp &= ~GPIO_D4_SPI_CDTO; ice->gpio.set_data(ice, tmp); udelay(100); /* raise clock */ tmp |= GPIO_D5_SPI_CCLK; ice->gpio.set_data(ice, tmp); udelay(100); } /* all back to 1 */ tmp |= GPIO_SPI_ALL; ice->gpio.set_data(ice, tmp); udelay(100); /* return all gpios to non-writable */ ice->gpio.set_mask(ice, 0xffffff); /* restore GPIOs direction */ ice->gpio.set_dir(ice, orig_dir); } static void qtet_akm_set_regs(struct snd_akm4xxx *ak, unsigned char addr, unsigned char mask, unsigned char value) { unsigned char tmp; int chip; for (chip = 0; chip < ak->num_chips; chip++) { tmp = snd_akm4xxx_get(ak, chip, addr); /* clear the bits */ tmp &= ~mask; /* set the new bits */ tmp |= value; snd_akm4xxx_write(ak, chip, addr, tmp); } } /* * change the rate of AK4620 */ static void qtet_akm_set_rate_val(struct snd_akm4xxx *ak, unsigned int rate) { unsigned char ak4620_dfs; if (rate == 0) /* no hint - S/PDIF input is master or the new spdif input rate undetected, simply return */ return; /* adjust DFS on codecs - see datasheet */ if (rate > 108000) ak4620_dfs = AK4620_DFS1 | AK4620_CKS1; else if (rate > 54000) ak4620_dfs = AK4620_DFS0 | AK4620_CKS0; else ak4620_dfs = 0; /* set new value */ qtet_akm_set_regs(ak, AK4620_DFS_REG, AK4620_DFS0 | AK4620_DFS1 | AK4620_CKS0 | AK4620_CKS1, ak4620_dfs); } #define AK_CONTROL(xname, xch) { .name = xname, .num_channels = xch } #define PCM_12_PLAYBACK_VOLUME "PCM 1/2 Playback Volume" #define PCM_34_PLAYBACK_VOLUME "PCM 3/4 Playback Volume" #define PCM_12_CAPTURE_VOLUME "PCM 1/2 Capture Volume" #define PCM_34_CAPTURE_VOLUME "PCM 3/4 Capture Volume" static const struct snd_akm4xxx_dac_channel qtet_dac[] = { AK_CONTROL(PCM_12_PLAYBACK_VOLUME, 2), AK_CONTROL(PCM_34_PLAYBACK_VOLUME, 2), }; static const struct snd_akm4xxx_adc_channel qtet_adc[] = { 
AK_CONTROL(PCM_12_CAPTURE_VOLUME, 2), AK_CONTROL(PCM_34_CAPTURE_VOLUME, 2), }; static struct snd_akm4xxx akm_qtet_dac __devinitdata = { .type = SND_AK4620, .num_dacs = 4, /* DAC1 - Output 12 */ .num_adcs = 4, /* ADC1 - Input 12 */ .ops = { .write = qtet_akm_write, .set_rate_val = qtet_akm_set_rate_val, }, .dac_info = qtet_dac, .adc_info = qtet_adc, }; /* Communication routines with the CPLD */ /* Writes data to external register reg, both reg and data are * GPIO representations */ static void reg_write(struct snd_ice1712 *ice, unsigned int reg, unsigned int data) { unsigned int tmp; mutex_lock(&ice->gpio_mutex); /* set direction of used GPIOs*/ /* all outputs */ tmp = 0x00ffff; ice->gpio.set_dir(ice, tmp); /* mask - writable bits */ ice->gpio.set_mask(ice, ~(tmp)); /* write the data */ tmp = ice->gpio.get_data(ice); tmp &= ~GPIO_DATA_MASK; tmp |= data; ice->gpio.set_data(ice, tmp); udelay(100); /* drop output enable */ tmp &= ~GPIO_EX_GPIOE; ice->gpio.set_data(ice, tmp); udelay(100); /* drop the register gpio */ tmp &= ~reg; ice->gpio.set_data(ice, tmp); udelay(100); /* raise the register GPIO */ tmp |= reg; ice->gpio.set_data(ice, tmp); udelay(100); /* raise all data gpios */ tmp |= GPIO_DATA_MASK; ice->gpio.set_data(ice, tmp); /* mask - immutable bits */ ice->gpio.set_mask(ice, 0xffffff); /* outputs only 8-15 */ ice->gpio.set_dir(ice, 0x00ff00); mutex_unlock(&ice->gpio_mutex); } static unsigned int get_scr(struct snd_ice1712 *ice) { struct qtet_spec *spec = ice->spec; return spec->scr; } static unsigned int get_mcr(struct snd_ice1712 *ice) { struct qtet_spec *spec = ice->spec; return spec->mcr; } static unsigned int get_cpld(struct snd_ice1712 *ice) { struct qtet_spec *spec = ice->spec; return spec->cpld; } static void set_scr(struct snd_ice1712 *ice, unsigned int val) { struct qtet_spec *spec = ice->spec; reg_write(ice, GPIO_SCR, val); spec->scr = val; } static void set_mcr(struct snd_ice1712 *ice, unsigned int val) { struct qtet_spec *spec = ice->spec; 
reg_write(ice, GPIO_MCR, val); spec->mcr = val; } static void set_cpld(struct snd_ice1712 *ice, unsigned int val) { struct qtet_spec *spec = ice->spec; reg_write(ice, GPIO_CPLD_CSN, val); spec->cpld = val; } #ifdef CONFIG_PROC_FS static void proc_regs_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ice1712 *ice = entry->private_data; char bin_buffer[36]; snd_iprintf(buffer, "SCR: %s\n", get_binary(bin_buffer, get_scr(ice))); snd_iprintf(buffer, "MCR: %s\n", get_binary(bin_buffer, get_mcr(ice))); snd_iprintf(buffer, "CPLD: %s\n", get_binary(bin_buffer, get_cpld(ice))); } static void proc_init(struct snd_ice1712 *ice) { struct snd_info_entry *entry; if (!snd_card_proc_new(ice->card, "quartet", &entry)) snd_info_set_text_ops(entry, ice, proc_regs_read); } #else /* !CONFIG_PROC_FS */ static void proc_init(struct snd_ice1712 *ice) {} #endif static int qtet_mute_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int val; val = get_scr(ice) & SCR_MUTE; ucontrol->value.integer.value[0] = (val) ? 
0 : 1; return 0; } static int qtet_mute_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int old, new, smute; old = get_scr(ice) & SCR_MUTE; if (ucontrol->value.integer.value[0]) { /* unmute */ new = 0; /* un-smuting DAC */ smute = 0; } else { /* mute */ new = SCR_MUTE; /* smuting DAC */ smute = AK4620_SMUTE; } if (old != new) { struct snd_akm4xxx *ak = ice->akm; set_scr(ice, (get_scr(ice) & ~SCR_MUTE) | new); /* set smute */ qtet_akm_set_regs(ak, AK4620_DEEMVOL_REG, AK4620_SMUTE, smute); return 1; } /* no change */ return 0; } static int qtet_ain12_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[3] = {"Line In 1/2", "Mic", "Mic + Low-cut"}; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = ARRAY_SIZE(texts); if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int qtet_ain12_sw_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int val, result; val = get_scr(ice) & (SCR_AIN12_SEL1 | SCR_AIN12_SEL0); switch (val) { case SCR_AIN12_LINE: result = 0; break; case SCR_AIN12_MIC: result = 1; break; case SCR_AIN12_LOWCUT: result = 2; break; default: /* BUG - no other combinations allowed */ snd_BUG(); result = 0; } ucontrol->value.integer.value[0] = result; return 0; } static int qtet_ain12_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int old, new, tmp, masked_old; old = new = get_scr(ice); masked_old = old & (SCR_AIN12_SEL1 | SCR_AIN12_SEL0); tmp = ucontrol->value.integer.value[0]; if (tmp == 2) tmp = 3; /* binary 10 is not supported */ tmp 
<<= 4; /* shifting to SCR_AIN12_SEL0 */ if (tmp != masked_old) { /* change requested */ switch (tmp) { case SCR_AIN12_LINE: new = old & ~(SCR_AIN12_SEL1 | SCR_AIN12_SEL0); set_scr(ice, new); /* turn off relay */ new &= ~SCR_RELAY; set_scr(ice, new); break; case SCR_AIN12_MIC: /* turn on relay */ new = old | SCR_RELAY; set_scr(ice, new); new = (new & ~SCR_AIN12_SEL1) | SCR_AIN12_SEL0; set_scr(ice, new); break; case SCR_AIN12_LOWCUT: /* turn on relay */ new = old | SCR_RELAY; set_scr(ice, new); new |= SCR_AIN12_SEL1 | SCR_AIN12_SEL0; set_scr(ice, new); break; default: snd_BUG(); } return 1; } /* no change */ return 0; } static int qtet_php_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int val; /* if phantom voltage =48V, phantom on */ val = get_scr(ice) & SCR_PHP_V; ucontrol->value.integer.value[0] = val ? 1 : 0; return 0; } static int qtet_php_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int old, new; old = new = get_scr(ice); if (ucontrol->value.integer.value[0] /* phantom on requested */ && (~old & SCR_PHP_V)) /* 0 = voltage 5V */ { /* is off, turn on */ /* turn voltage on first, = 1 */ new = old | SCR_PHP_V; set_scr(ice, new); /* turn phantom on, = 0 */ new &= ~SCR_PHP; set_scr(ice, new); } else if (!ucontrol->value.integer.value[0] && (old & SCR_PHP_V)) { /* phantom off requested and 1 = voltage 48V */ /* is on, turn off */ /* turn voltage off first, = 0 */ new = old & ~SCR_PHP_V; set_scr(ice, new); /* turn phantom off, = 1 */ new |= SCR_PHP; set_scr(ice, new); } if (old != new) return 1; /* no change */ return 0; } #define PRIV_SW(xid, xbit, xreg) [xid] = {.bit = xbit,\ .set_register = set_##xreg,\ .get_register = get_##xreg, } #define PRIV_ENUM2(xid, xbit, xreg, xtext1, xtext2) [xid] = {.bit = xbit,\ .set_register = set_##xreg,\ .get_register = get_##xreg,\ .texts = {xtext1, 
xtext2} } static struct qtet_kcontrol_private qtet_privates[] = { PRIV_ENUM2(IN12_SEL, CPLD_IN12_SEL, cpld, "An In 1/2", "An In 3/4"), PRIV_ENUM2(IN34_SEL, CPLD_IN34_SEL, cpld, "An In 3/4", "IEC958 In"), PRIV_ENUM2(AIN34_SEL, SCR_AIN34_SEL, scr, "Line In 3/4", "Hi-Z"), PRIV_ENUM2(COAX_OUT, CPLD_COAX_OUT, cpld, "IEC958", "I2S"), PRIV_SW(IN12_MON12, MCR_IN12_MON12, mcr), PRIV_SW(IN12_MON34, MCR_IN12_MON34, mcr), PRIV_SW(IN34_MON12, MCR_IN34_MON12, mcr), PRIV_SW(IN34_MON34, MCR_IN34_MON34, mcr), PRIV_SW(OUT12_MON34, MCR_OUT12_MON34, mcr), PRIV_SW(OUT34_MON12, MCR_OUT34_MON12, mcr), }; static int qtet_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct qtet_kcontrol_private private = qtet_privates[kcontrol->private_value]; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = ARRAY_SIZE(private.texts); if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1; strcpy(uinfo->value.enumerated.name, private.texts[uinfo->value.enumerated.item]); return 0; } static int qtet_sw_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct qtet_kcontrol_private private = qtet_privates[kcontrol->private_value]; struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = (private.get_register(ice) & private.bit) ? 
1 : 0; return 0; } static int qtet_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct qtet_kcontrol_private private = qtet_privates[kcontrol->private_value]; struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int old, new; old = private.get_register(ice); if (ucontrol->value.integer.value[0]) new = old | private.bit; else new = old & ~private.bit; if (old != new) { private.set_register(ice, new); return 1; } /* no change */ return 0; } #define qtet_sw_info snd_ctl_boolean_mono_info #define QTET_CONTROL(xname, xtype, xpriv) \ {.iface = SNDRV_CTL_ELEM_IFACE_MIXER,\ .name = xname,\ .info = qtet_##xtype##_info,\ .get = qtet_sw_get,\ .put = qtet_sw_put,\ .private_value = xpriv } static struct snd_kcontrol_new qtet_controls[] __devinitdata = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Master Playback Switch", .info = qtet_sw_info, .get = qtet_mute_get, .put = qtet_mute_put, .private_value = 0 }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Phantom Power", .info = qtet_sw_info, .get = qtet_php_get, .put = qtet_php_put, .private_value = 0 }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog In 1/2 Capture Switch", .info = qtet_ain12_enum_info, .get = qtet_ain12_sw_get, .put = qtet_ain12_sw_put, .private_value = 0 }, QTET_CONTROL("Analog In 3/4 Capture Switch", enum, AIN34_SEL), QTET_CONTROL("PCM In 1/2 Capture Switch", enum, IN12_SEL), QTET_CONTROL("PCM In 3/4 Capture Switch", enum, IN34_SEL), QTET_CONTROL("Coax Output Source", enum, COAX_OUT), QTET_CONTROL("Analog In 1/2 to Monitor 1/2", sw, IN12_MON12), QTET_CONTROL("Analog In 1/2 to Monitor 3/4", sw, IN12_MON34), QTET_CONTROL("Analog In 3/4 to Monitor 1/2", sw, IN34_MON12), QTET_CONTROL("Analog In 3/4 to Monitor 3/4", sw, IN34_MON34), QTET_CONTROL("Output 1/2 to Monitor 3/4", sw, OUT12_MON34), QTET_CONTROL("Output 3/4 to Monitor 1/2", sw, OUT34_MON12), }; static char *slave_vols[] __devinitdata = { PCM_12_PLAYBACK_VOLUME, PCM_34_PLAYBACK_VOLUME, NULL }; static 
__devinitdata DECLARE_TLV_DB_SCALE(qtet_master_db_scale, -6350, 50, 1); static struct snd_kcontrol __devinit *ctl_find(struct snd_card *card, const char *name) { struct snd_ctl_elem_id sid; memset(&sid, 0, sizeof(sid)); /* FIXME: strcpy is bad. */ strcpy(sid.name, name); sid.iface = SNDRV_CTL_ELEM_IFACE_MIXER; return snd_ctl_find_id(card, &sid); } static void __devinit add_slaves(struct snd_card *card, struct snd_kcontrol *master, char **list) { for (; *list; list++) { struct snd_kcontrol *slave = ctl_find(card, *list); if (slave) snd_ctl_add_slave(master, slave); } } static int __devinit qtet_add_controls(struct snd_ice1712 *ice) { struct qtet_spec *spec = ice->spec; int err, i; struct snd_kcontrol *vmaster; err = snd_ice1712_akm4xxx_build_controls(ice); if (err < 0) return err; for (i = 0; i < ARRAY_SIZE(qtet_controls); i++) { err = snd_ctl_add(ice->card, snd_ctl_new1(&qtet_controls[i], ice)); if (err < 0) return err; } /* Create virtual master control */ vmaster = snd_ctl_make_virtual_master("Master Playback Volume", qtet_master_db_scale); if (!vmaster) return -ENOMEM; add_slaves(ice->card, vmaster, slave_vols); err = snd_ctl_add(ice->card, vmaster); if (err < 0) return err; /* only capture SPDIF over AK4113 */ err = snd_ak4113_build(spec->ak4113, ice->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream); if (err < 0) return err; return 0; } static inline int qtet_is_spdif_master(struct snd_ice1712 *ice) { /* CPLD_SYNC_SEL: 0 = internal, 1 = external (i.e. spdif master) */ return (get_cpld(ice) & CPLD_SYNC_SEL) ? 
1 : 0; } static unsigned int qtet_get_rate(struct snd_ice1712 *ice) { int i; unsigned char result; result = get_cpld(ice) & CPLD_CKS_MASK; for (i = 0; i < ARRAY_SIZE(cks_vals); i++) if (cks_vals[i] == result) return qtet_rates[i]; return 0; } static int get_cks_val(int rate) { int i; for (i = 0; i < ARRAY_SIZE(qtet_rates); i++) if (qtet_rates[i] == rate) return cks_vals[i]; return 0; } /* setting new rate */ static void qtet_set_rate(struct snd_ice1712 *ice, unsigned int rate) { unsigned int new; unsigned char val; /* switching ice1724 to external clock - supplied by ext. circuits */ val = inb(ICEMT1724(ice, RATE)); outb(val | VT1724_SPDIF_MASTER, ICEMT1724(ice, RATE)); new = (get_cpld(ice) & ~CPLD_CKS_MASK) | get_cks_val(rate); /* switch to internal clock, drop CPLD_SYNC_SEL */ new &= ~CPLD_SYNC_SEL; /* printk(KERN_DEBUG "QT - set_rate: old %x, new %x\n", get_cpld(ice), new); */ set_cpld(ice, new); } static inline unsigned char qtet_set_mclk(struct snd_ice1712 *ice, unsigned int rate) { /* no change in master clock */ return 0; } /* setting clock to external - SPDIF */ static int qtet_set_spdif_clock(struct snd_ice1712 *ice, int type) { unsigned int old, new; old = new = get_cpld(ice); new &= ~(CPLD_CKS_MASK | CPLD_WORD_SEL); switch (type) { case EXT_SPDIF_TYPE: new |= CPLD_EXT_SPDIF; break; case EXT_WORDCLOCK_1FS_TYPE: new |= CPLD_EXT_WORDCLOCK_1FS; break; case EXT_WORDCLOCK_256FS_TYPE: new |= CPLD_EXT_WORDCLOCK_256FS; break; default: snd_BUG(); } if (old != new) { set_cpld(ice, new); /* changed */ return 1; } return 0; } static int qtet_get_spdif_master_type(struct snd_ice1712 *ice) { unsigned int val; int result; val = get_cpld(ice); /* checking only rate/clock-related bits */ val &= (CPLD_CKS_MASK | CPLD_WORD_SEL | CPLD_SYNC_SEL); if (!(val & CPLD_SYNC_SEL)) { /* switched to internal clock, is not any external type */ result = -1; } else { switch (val) { case (CPLD_EXT_SPDIF): result = EXT_SPDIF_TYPE; break; case (CPLD_EXT_WORDCLOCK_1FS): result = 
EXT_WORDCLOCK_1FS_TYPE; break; case (CPLD_EXT_WORDCLOCK_256FS): result = EXT_WORDCLOCK_256FS_TYPE; break; default: /* undefined combination of external clock setup */ snd_BUG(); result = 0; } } return result; } /* Called when ak4113 detects change in the input SPDIF stream */ static void qtet_ak4113_change(struct ak4113 *ak4113, unsigned char c0, unsigned char c1) { struct snd_ice1712 *ice = ak4113->change_callback_private; int rate; if ((qtet_get_spdif_master_type(ice) == EXT_SPDIF_TYPE) && c1) { /* only for SPDIF master mode, rate was changed */ rate = snd_ak4113_external_rate(ak4113); /* printk(KERN_DEBUG "ak4113 - input rate changed to %d\n", rate); */ qtet_akm_set_rate_val(ice->akm, rate); } } /* * If clock slaved to SPDIF-IN, setting runtime rate * to the detected external rate */ static void qtet_spdif_in_open(struct snd_ice1712 *ice, struct snd_pcm_substream *substream) { struct qtet_spec *spec = ice->spec; struct snd_pcm_runtime *runtime = substream->runtime; int rate; if (qtet_get_spdif_master_type(ice) != EXT_SPDIF_TYPE) /* not external SPDIF, no rate limitation */ return; /* only external SPDIF can detect incoming sample rate */ rate = snd_ak4113_external_rate(spec->ak4113); if (rate >= runtime->hw.rate_min && rate <= runtime->hw.rate_max) { runtime->hw.rate_min = rate; runtime->hw.rate_max = rate; } } /* * initialize the chip */ static int __devinit qtet_init(struct snd_ice1712 *ice) { static const unsigned char ak4113_init_vals[] = { /* AK4113_REG_PWRDN */ AK4113_RST | AK4113_PWN | AK4113_OCKS0 | AK4113_OCKS1, /* AK4113_REQ_FORMAT */ AK4113_DIF_I24I2S | AK4113_VTX | AK4113_DEM_OFF | AK4113_DEAU, /* AK4113_REG_IO0 */ AK4113_OPS2 | AK4113_TXE | AK4113_XTL_24_576M, /* AK4113_REG_IO1 */ AK4113_EFH_1024LRCLK | AK4113_IPS(0), /* AK4113_REG_INT0_MASK */ 0, /* AK4113_REG_INT1_MASK */ 0, /* AK4113_REG_DATDTS */ 0, }; int err; struct qtet_spec *spec; struct snd_akm4xxx *ak; unsigned char val; /* switching ice1724 to external clock - supplied by ext. 
circuits */ val = inb(ICEMT1724(ice, RATE)); outb(val | VT1724_SPDIF_MASTER, ICEMT1724(ice, RATE)); spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; /* qtet is clocked by Xilinx array */ ice->hw_rates = &qtet_rates_info; ice->is_spdif_master = qtet_is_spdif_master; ice->get_rate = qtet_get_rate; ice->set_rate = qtet_set_rate; ice->set_mclk = qtet_set_mclk; ice->set_spdif_clock = qtet_set_spdif_clock; ice->get_spdif_master_type = qtet_get_spdif_master_type; ice->ext_clock_names = ext_clock_names; ice->ext_clock_count = ARRAY_SIZE(ext_clock_names); /* since Qtet can detect correct SPDIF-in rate, all streams can be * limited to this specific rate */ ice->spdif.ops.open = ice->pro_open = qtet_spdif_in_open; ice->spec = spec; /* Mute Off */ /* SCR Initialize*/ /* keep codec power down first */ set_scr(ice, SCR_PHP); udelay(1); /* codec power up */ set_scr(ice, SCR_PHP | SCR_CODEC_PDN); /* MCR Initialize */ set_mcr(ice, 0); /* CPLD Initialize */ set_cpld(ice, 0); ice->num_total_dacs = 2; ice->num_total_adcs = 2; ice->akm = kcalloc(2, sizeof(struct snd_akm4xxx), GFP_KERNEL); ak = ice->akm; if (!ak) return -ENOMEM; /* only one codec with two chips */ ice->akm_codecs = 1; err = snd_ice1712_akm4xxx_init(ak, &akm_qtet_dac, NULL, ice); if (err < 0) return err; err = snd_ak4113_create(ice->card, qtet_ak4113_read, qtet_ak4113_write, ak4113_init_vals, ice, &spec->ak4113); if (err < 0) return err; /* callback for codecs rate setting */ spec->ak4113->change_callback = qtet_ak4113_change; spec->ak4113->change_callback_private = ice; /* AK41143 in Quartet can detect external rate correctly * (i.e. 
check_flags = 0) */ spec->ak4113->check_flags = 0; proc_init(ice); qtet_set_rate(ice, 44100); return 0; } static unsigned char qtet_eeprom[] __devinitdata = { [ICE_EEP2_SYSCONF] = 0x28, /* clock 256(24MHz), mpu401, 1xADC, 1xDACs, SPDIF in */ [ICE_EEP2_ACLINK] = 0x80, /* I2S */ [ICE_EEP2_I2S] = 0x78, /* 96k, 24bit, 192k */ [ICE_EEP2_SPDIF] = 0xc3, /* out-en, out-int, in, out-ext */ [ICE_EEP2_GPIO_DIR] = 0x00, /* 0-7 inputs, switched to output only during output operations */ [ICE_EEP2_GPIO_DIR1] = 0xff, /* 8-15 outputs */ [ICE_EEP2_GPIO_DIR2] = 0x00, [ICE_EEP2_GPIO_MASK] = 0xff, /* changed only for OUT operations */ [ICE_EEP2_GPIO_MASK1] = 0x00, [ICE_EEP2_GPIO_MASK2] = 0xff, [ICE_EEP2_GPIO_STATE] = 0x00, /* inputs */ [ICE_EEP2_GPIO_STATE1] = 0x7d, /* all 1, but GPIO_CPLD_RW and GPIO15 always zero */ [ICE_EEP2_GPIO_STATE2] = 0x00, /* inputs */ }; /* entry point */ struct snd_ice1712_card_info snd_vt1724_qtet_cards[] __devinitdata = { { .subvendor = VT1724_SUBDEVICE_QTET, .name = "Infrasonic Quartet", .model = "quartet", .chip_init = qtet_init, .build_controls = qtet_add_controls, .eeprom_size = sizeof(qtet_eeprom), .eeprom_data = qtet_eeprom, }, { } /* terminator */ };
gpl-2.0
retailnext/linux
drivers/media/dvb/frontends/tda665x.c
9281
6350
/* TDA665x tuner driver Copyright (C) Manu Abraham (abraham.manu@gmail.com) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "tda665x.h" struct tda665x_state { struct dvb_frontend *fe; struct i2c_adapter *i2c; const struct tda665x_config *config; u32 frequency; u32 bandwidth; }; static int tda665x_read(struct tda665x_state *state, u8 *buf) { const struct tda665x_config *config = state->config; int err = 0; struct i2c_msg msg = { .addr = config->addr, .flags = I2C_M_RD, .buf = buf, .len = 2 }; err = i2c_transfer(state->i2c, &msg, 1); if (err != 1) goto exit; return err; exit: printk(KERN_ERR "%s: I/O Error err=<%d>\n", __func__, err); return err; } static int tda665x_write(struct tda665x_state *state, u8 *buf, u8 length) { const struct tda665x_config *config = state->config; int err = 0; struct i2c_msg msg = { .addr = config->addr, .flags = 0, .buf = buf, .len = length }; err = i2c_transfer(state->i2c, &msg, 1); if (err != 1) goto exit; return err; exit: printk(KERN_ERR "%s: I/O Error err=<%d>\n", __func__, err); return err; } static int tda665x_get_state(struct dvb_frontend *fe, enum tuner_param param, struct tuner_state *tstate) { struct tda665x_state *state = fe->tuner_priv; int err = 0; switch (param) { case DVBFE_TUNER_FREQUENCY: 
tstate->frequency = state->frequency; break; case DVBFE_TUNER_BANDWIDTH: break; default: printk(KERN_ERR "%s: Unknown parameter (param=%d)\n", __func__, param); err = -EINVAL; break; } return err; } static int tda665x_get_status(struct dvb_frontend *fe, u32 *status) { struct tda665x_state *state = fe->tuner_priv; u8 result = 0; int err = 0; *status = 0; err = tda665x_read(state, &result); if (err < 0) goto exit; if ((result >> 6) & 0x01) { printk(KERN_DEBUG "%s: Tuner Phase Locked\n", __func__); *status = 1; } return err; exit: printk(KERN_ERR "%s: I/O Error\n", __func__); return err; } static int tda665x_set_state(struct dvb_frontend *fe, enum tuner_param param, struct tuner_state *tstate) { struct tda665x_state *state = fe->tuner_priv; const struct tda665x_config *config = state->config; u32 frequency, status = 0; u8 buf[4]; int err = 0; if (param & DVBFE_TUNER_FREQUENCY) { frequency = tstate->frequency; if ((frequency < config->frequency_max) || (frequency > config->frequency_min)) { printk(KERN_ERR "%s: Frequency beyond limits, frequency=%d\n", __func__, frequency); return -EINVAL; } frequency += config->frequency_offst; frequency *= config->ref_multiplier; frequency += config->ref_divider >> 1; frequency /= config->ref_divider; buf[0] = (u8) ((frequency & 0x7f00) >> 8); buf[1] = (u8) (frequency & 0x00ff) >> 0; buf[2] = 0x80 | 0x40 | 0x02; buf[3] = 0x00; /* restore frequency */ frequency = tstate->frequency; if (frequency < 153000000) { /* VHF-L */ buf[3] |= 0x01; /* fc, Low Band, 47 - 153 MHz */ if (frequency < 68000000) buf[3] |= 0x40; /* 83uA */ if (frequency < 1040000000) buf[3] |= 0x60; /* 122uA */ if (frequency < 1250000000) buf[3] |= 0x80; /* 163uA */ else buf[3] |= 0xa0; /* 254uA */ } else if (frequency < 438000000) { /* VHF-H */ buf[3] |= 0x02; /* fc, Mid Band, 153 - 438 MHz */ if (frequency < 230000000) buf[3] |= 0x40; if (frequency < 300000000) buf[3] |= 0x60; else buf[3] |= 0x80; } else { /* UHF */ buf[3] |= 0x04; /* fc, High Band, 438 - 862 MHz */ 
if (frequency < 470000000) buf[3] |= 0x60; if (frequency < 526000000) buf[3] |= 0x80; else buf[3] |= 0xa0; } /* Set params */ err = tda665x_write(state, buf, 5); if (err < 0) goto exit; /* sleep for some time */ printk(KERN_DEBUG "%s: Waiting to Phase LOCK\n", __func__); msleep(20); /* check status */ err = tda665x_get_status(fe, &status); if (err < 0) goto exit; if (status == 1) { printk(KERN_DEBUG "%s: Tuner Phase locked: status=%d\n", __func__, status); state->frequency = frequency; /* cache successful state */ } else { printk(KERN_ERR "%s: No Phase lock: status=%d\n", __func__, status); } } else { printk(KERN_ERR "%s: Unknown parameter (param=%d)\n", __func__, param); return -EINVAL; } return 0; exit: printk(KERN_ERR "%s: I/O Error\n", __func__); return err; } static int tda665x_release(struct dvb_frontend *fe) { struct tda665x_state *state = fe->tuner_priv; fe->tuner_priv = NULL; kfree(state); return 0; } static struct dvb_tuner_ops tda665x_ops = { .set_state = tda665x_set_state, .get_state = tda665x_get_state, .get_status = tda665x_get_status, .release = tda665x_release }; struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe, const struct tda665x_config *config, struct i2c_adapter *i2c) { struct tda665x_state *state = NULL; struct dvb_tuner_info *info; state = kzalloc(sizeof(struct tda665x_state), GFP_KERNEL); if (state == NULL) goto exit; state->config = config; state->i2c = i2c; state->fe = fe; fe->tuner_priv = state; fe->ops.tuner_ops = tda665x_ops; info = &fe->ops.tuner_ops.info; memcpy(info->name, config->name, sizeof(config->name)); info->frequency_min = config->frequency_min; info->frequency_max = config->frequency_max; info->frequency_step = config->frequency_offst; printk(KERN_DEBUG "%s: Attaching TDA665x (%s) tuner\n", __func__, info->name); return fe; exit: kfree(state); return NULL; } EXPORT_SYMBOL(tda665x_attach); MODULE_DESCRIPTION("TDA665x driver"); MODULE_AUTHOR("Manu Abraham"); MODULE_LICENSE("GPL");
gpl-2.0
ztemt/A465_5.1_kernel
drivers/input/touchscreen/mediatek/mms128/mms128_driver.c
66
26754
/**************************************************************************** * Include Files ****************************************************************************/ #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/gpio.h> #include <linux/sched.h> #include <linux/kthread.h> #include <linux/bitops.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/byteorder/generic.h> #ifdef CONFIG_HAS_EARLYSUSPEND #include <linux/earlysuspend.h> #endif #include <linux/interrupt.h> #include <linux/time.h> #include <linux/rtpm_prio.h> #include <mach/eint.h> #include <linux/proc_fs.h> #include <asm/uaccess.h> #include <cust_eint.h> #include <linux/jiffies.h> #include "tpd.h" #include <linux/spinlock.h> #include <mach/mt_wdt.h> #include <mach/mt_gpt.h> #include <mach/mt_reg_base.h> //#include <wd_kicker.h> #include <mach/wd_api.h> #include <mach/mt_pm_ldo.h> #include <mach/mt_typedefs.h> #include <mach/mt_boot.h> #include <cust_eint.h> #include <linux/wakelock.h> /**************************************************************************** * Constants / Definitions ****************************************************************************/ #define TP_DEV_NAME "mms128" #define I2C_RETRY_CNT 5 //Fixed value #define DOWNLOAD_RETRY_CNT 5 //Fixed value #define MELFAS_DOWNLOAD 1 //Fixed value #define PRESS_KEY 1 //Fixed value #define RELEASE_KEY 0 //Fixed value #define TS_READ_LEN_ADDR 0x0F //Fixed value #define TS_READ_START_ADDR 0x10 //Fixed value #define TS_READ_REGS_LEN 66 //Fixed value #define TS_WRITE_REGS_LEN 16 //Fixed value #define TS_MAX_TOUCH 5 //Model Dependent #define TS_READ_HW_VER_ADDR 0xF1 //Model Dependent #define TS_READ_SW_VER_ADDR 0xF0 //Model Dependent #define MELFAS_FW_VERSION 0xF5 //Model Dependent #define MELFAS_CURRENT_FW_VERSION 122 #define MELFAS_HW_REVISON 0x01 //Model Dependent #define MELFAS_MAX_TRANSACTION_LENGTH 66 
#define MELFAS_MAX_I2C_TRANSFER_SIZE 7 #define MELFAS_I2C_DEVICE_ADDRESS_LEN 1 //#define I2C_MASTER_CLOCK 400 #define MELFAS_I2C_MASTER_CLOCK 100 #define MELFAS_I2C_ADDRESS 0x48 #define VAR_CHAR_NUM_MAX 20 #define TPD_HAVE_BUTTON #ifdef TPD_HAVE_BUTTON #ifdef LGE_USE_DOME_KEY #define TPD_KEY_COUNT 2 static int tpd_keys_local[TPD_KEY_COUNT] = {KEY_BACK , KEY_MENU}; #else #define TPD_KEY_COUNT 4 static int tpd_keys_local[TPD_KEY_COUNT] = {KEY_BACK ,KEY_HOMEPAGE, KEY_MENU}; #endif #endif enum { None = 0, TOUCH_SCREEN, TOUCH_KEY }; /**************************************************************************** * Macros ****************************************************************************/ #define TPD_TAG "[Melfas] " #define TPD_FUN(f) printk(KERN_ERR TPD_TAG"%s\n", __FUNCTION__) #define TPD_ERR(fmt, args...) printk(KERN_ERR TPD_TAG"%s %d : "fmt, __FUNCTION__, __LINE__, ##args) #define TPD_LOG(fmt, args...) printk(KERN_ERR TPD_TAG fmt, ##args) /**************************************************************************** * Variables ****************************************************************************/ struct muti_touch_info { int strength; // int width int area; int posX; int posY; int status; int pressure; }; extern struct tpd_device *tpd; struct i2c_client *melfas_i2c_client = NULL; static struct muti_touch_info g_Mtouch_info[TS_MAX_TOUCH]; static int melfas_tpd_flag = 0; unsigned char touch_fw_version = 0; /* ghost finger pattern detection */ struct delayed_work ghost_monitor_work; static int ghost_touch_cnt = 0; static int ghost_x = 1000; static int ghost_y = 1000; /* Ignore Key event during touch event actioned */ static int before_touch_time = 0; static int current_key_time = 0; static int is_touch_pressed = 0; static int is_key_pressed = 0; static int pressed_keycode = 0; #define CANCEL_KEY 0xff static DEFINE_MUTEX(i2c_access); static DECLARE_WAIT_QUEUE_HEAD(melfas_waiter); /**************************************************************************** 
* Extern Function Prototypes ****************************************************************************/ extern int mms100_ISP_download_binary_data ( int dl_mode ); extern int mtk_wdt_enable(enum wk_wdt_en en); /**************************************************************************** * Local Function Prototypes ****************************************************************************/ static void mms128_eint_interrupt_handler ( void ); /**************************************************************************** * Platform(AP) dependent functions ****************************************************************************/ static void mms128_setup_eint ( void ) { TPD_FUN (); /* Configure GPIO settings for external interrupt pin */ mt_set_gpio_dir ( GPIO_CTP_EINT_PIN, GPIO_DIR_IN ); mt_set_gpio_mode ( GPIO_CTP_EINT_PIN, GPIO_CTP_EINT_PIN_M_EINT ); mt_set_gpio_pull_enable ( GPIO_CTP_EINT_PIN, GPIO_PULL_ENABLE ); mt_set_gpio_pull_select ( GPIO_CTP_EINT_PIN, GPIO_PULL_UP ); msleep(50); /* Configure external interrupt settings for external interrupt pin */ mt65xx_eint_set_sens ( CUST_EINT_TOUCH_PANEL_NUM, CUST_EINT_TOUCH_PANEL_SENSITIVE ); mt65xx_eint_set_hw_debounce ( CUST_EINT_TOUCH_PANEL_NUM, CUST_EINT_TOUCH_PANEL_DEBOUNCE_CN ); mt65xx_eint_registration ( CUST_EINT_TOUCH_PANEL_NUM, CUST_EINT_TOUCH_PANEL_DEBOUNCE_EN, CUST_EINT_POLARITY_LOW, mms128_eint_interrupt_handler, 1 ); /* unmask external interrupt */ mt65xx_eint_unmask(CUST_EINT_TOUCH_PANEL_NUM); } void mms128_power ( unsigned int on ) { TPD_FUN (); if ( on ) { hwPowerOn ( MT6323_POWER_LDO_VGP2, VOL_3000, "TP" ); TPD_LOG ( "turned on the power ( VGP2 )\n" ); msleep(10); } else { hwPowerDown ( MT6323_POWER_LDO_VGP2, "TP" ); TPD_LOG ( "turned off the power ( VGP2 )\n" ); } } /**************************************************************************** * MMS128 I2C Read / Write Funtions ****************************************************************************/ int mms128_i2c_write_bytes ( struct 
i2c_client *client, u16 addr, int len, u8 *txbuf ) { u8 buffer[MELFAS_MAX_TRANSACTION_LENGTH] = { 0 }; u16 left = len; u8 offset = 0; u8 retry = 0; struct i2c_msg msg = { .addr = ( ( client->addr & I2C_MASK_FLAG ) | ( I2C_ENEXT_FLAG ) ), .flags = 0, .buf = buffer, .timing = MELFAS_I2C_MASTER_CLOCK, }; if ( txbuf == NULL ) { return -1; } TPD_DEBUG ( "i2c_write_bytes to device %02X address %04X len %d\n", client->addr, addr, len ); while ( left > 0 ) { retry = 0; buffer[0] = ( u8 ) addr + offset; if ( left > MELFAS_MAX_I2C_TRANSFER_SIZE ) { memcpy ( &buffer[MELFAS_I2C_DEVICE_ADDRESS_LEN], &txbuf[offset], MELFAS_MAX_I2C_TRANSFER_SIZE ); msg.len = MELFAS_MAX_TRANSACTION_LENGTH; left -= MELFAS_MAX_I2C_TRANSFER_SIZE; offset += MELFAS_MAX_I2C_TRANSFER_SIZE; } else { memcpy ( &buffer[MELFAS_I2C_DEVICE_ADDRESS_LEN], &txbuf[offset], left ); msg.len = left + MELFAS_I2C_DEVICE_ADDRESS_LEN; left = 0; } TPD_DEBUG ( "byte left %d offset %d\n", left, offset ); while ( i2c_transfer ( client->adapter, &msg, 1 ) != 1 ) { retry++; if ( retry == I2C_RETRY_CNT ) { TPD_ERR ( "I2C write 0x%X%X length=%d failed\n", buffer[0], buffer[1], len ); return -1; } else { TPD_ERR ( "I2C write retry %d addr 0x%X%X\n", retry, buffer[0], buffer[1] ); } } } return 0; } int mms128_i2c_write_data ( struct i2c_client *client, int len, u8 *txbuf ) { u16 ret = 0; client->addr = client->addr & I2C_MASK_FLAG; if ( txbuf == NULL ) { return -1; } ret = i2c_master_send ( client, txbuf, len ); if ( ret != len ) { return -1; } return 0; } int mms128_i2c_read_data ( struct i2c_client *client, int len, u8 *rxbuf ) { u16 ret = 0; client->addr = client->addr & I2C_MASK_FLAG; if ( rxbuf == NULL ) { return -1; } ret = i2c_master_recv ( client, rxbuf, len ); if ( ret != len ) { return -1; } return 0; } int mms128_i2c_read ( struct i2c_client *client, u16 addr, u16 len, u8 *rxbuf ) { u8 buffer[MELFAS_I2C_DEVICE_ADDRESS_LEN]= { 0 }; u8 retry; u16 left = len; u8 offset = 0; struct i2c_msg msg[2] = { { .addr = ( ( 
client->addr & I2C_MASK_FLAG ) | ( I2C_ENEXT_FLAG ) ), .flags = 0, .buf = buffer, .len = MELFAS_I2C_DEVICE_ADDRESS_LEN, .timing = MELFAS_I2C_MASTER_CLOCK }, { .addr = ( ( client->addr & I2C_MASK_FLAG ) | ( I2C_ENEXT_FLAG ) ), .flags = I2C_M_RD, .timing = MELFAS_I2C_MASTER_CLOCK }, }; if ( rxbuf == NULL ) { return -1; } TPD_DEBUG ( "i2c_read_bytes to device %02X address %04X len %d\n", client->addr, addr, len ); while ( left > 0 ) { buffer[0] = ( u8 ) addr + offset; msg[1].buf = &rxbuf[offset]; if ( left > MELFAS_MAX_TRANSACTION_LENGTH ) { msg[1].len = MELFAS_MAX_TRANSACTION_LENGTH; left -= MELFAS_MAX_TRANSACTION_LENGTH; offset += MELFAS_MAX_TRANSACTION_LENGTH; } else { msg[1].len = left; left = 0; } retry = 0; while ( i2c_transfer ( client->adapter, &msg[0], 2 ) != 2 ) { retry++; if ( retry == I2C_RETRY_CNT ) { TPD_ERR ( "I2C read 0x%X length=%d failed\n", addr + offset, len ); return -1; } } } return 0; } /**************************************************************************** * Touch malfunction Prevention Function ****************************************************************************/ static void mms128_release_all_finger ( void ) { int i; TPD_FUN (); for ( i = 0 ; i < TS_MAX_TOUCH ; i++ ) { g_Mtouch_info[i].pressure = -1; } input_mt_sync ( tpd->dev ); input_sync ( tpd->dev ); } static void mms128_touch_reset ( void ) { TPD_FUN (); mms128_release_all_finger (); mt65xx_eint_mask ( CUST_EINT_TOUCH_PANEL_NUM ); mms128_power ( 0 ); msleep ( 100 ); mms128_power ( 1 ); msleep ( 100 ); mt65xx_eint_unmask ( CUST_EINT_TOUCH_PANEL_NUM ); } static void mms128_read_dummy_interrupt ( void ) { uint8_t buf[TS_READ_REGS_LEN] = { 0, }; int read_num = 0; int ret_val = 0; ret_val = mms128_i2c_read ( melfas_i2c_client, TS_READ_LEN_ADDR, 1, buf ); if ( ret_val ) { return; } read_num = buf[0]; if ( read_num ) { mms128_i2c_read ( melfas_i2c_client, TS_READ_START_ADDR, read_num, buf ); if ( 0x0F == buf[0] ) { mms128_touch_reset(); } } return; } static void 
mms128_monitor_ghost_finger ( struct work_struct *work ) { if ( ghost_touch_cnt >= 45 ) { TPD_LOG("ghost finger pattern DETECTED! : %d\n", ghost_touch_cnt ); mt65xx_eint_mask ( CUST_EINT_TOUCH_PANEL_NUM ); wait_event_interruptible ( melfas_waiter, melfas_tpd_flag == 0 ); mms128_power ( 0 ); msleep ( 100 ); mms128_release_all_finger (); input_mt_sync ( tpd->dev ); input_sync ( tpd->dev ); mms128_power ( 1 ); msleep(200); msleep(100); mt65xx_eint_unmask(CUST_EINT_TOUCH_PANEL_NUM); } schedule_delayed_work ( &ghost_monitor_work, msecs_to_jiffies ( HZ * 50 ) ); ghost_touch_cnt = 0; ghost_x = 1000; ghost_y = 1000; return; } /**************************************************************************** * MMS128 Interrupt Service Routines ****************************************************************************/ static void mms128_eint_interrupt_handler ( void ) { TPD_DEBUG_PRINT_INT; melfas_tpd_flag = 1; wake_up_interruptible ( &melfas_waiter ); } static int mms128_event_handler ( void *unused ) { uint8_t buf[TS_READ_REGS_LEN] = { 0 }; int i, read_num, fingerID, Touch_Type = 0, touchState = 0; int keyID = 0, reportID = 0; int ret; int press_count = 0; int is_touch_mix = 0; struct sched_param param = { .sched_priority = RTPM_PRIO_TPD }; sched_setscheduler ( current, SCHED_RR, &param ); do { set_current_state ( TASK_INTERRUPTIBLE ); wait_event_interruptible ( melfas_waiter, melfas_tpd_flag != 0 ); melfas_tpd_flag = 0; set_current_state ( TASK_RUNNING ); mutex_lock ( &i2c_access ); mms128_i2c_read ( melfas_i2c_client, TS_READ_LEN_ADDR, 1, buf ); read_num = buf[0]; if ( read_num ) { ret = mms128_i2c_read ( melfas_i2c_client, TS_READ_START_ADDR, read_num, buf ); if ( ret < 0 ) { TPD_ERR ( "melfas:i2c read error\n" ); mms128_touch_reset (); goto exit_work_func; } if ( 0x0F == buf[0] ) { TPD_LOG ( "ESD Detected!!\n" ); mms128_touch_reset (); goto exit_work_func; } for ( i = 0 ; i < read_num ; i = i + 6 ) { Touch_Type = ( buf[i] >> 5 ) & 0x03; fingerID = ( buf[i] & 0x0F ) - 1; 
touchState = ( ( buf[i] & 0x80 ) == 0x80 ); reportID = ( buf[i] & 0x0F ); keyID = reportID; /* touch type is panel */ if ( Touch_Type == TOUCH_SCREEN ) { g_Mtouch_info[fingerID].posX = ( uint16_t ) ( buf[i + 1] & 0x0F ) << 8 | buf[i + 2]; g_Mtouch_info[fingerID].posY = ( uint16_t ) ( buf[i + 1] & 0xF0 ) << 4 | buf[i + 3]; g_Mtouch_info[fingerID].area = buf[i + 4]; g_Mtouch_info[fingerID].status = touchState; if ( touchState ) { g_Mtouch_info[fingerID].pressure = buf[i + 5]; //g_Mtouch_info[fingerID].pressure = 10; //g_Mtouch_info[fingerID].pressure = buf[i + 5]; } else { g_Mtouch_info[fingerID].pressure = 0; /* ghost finger pattern detection */ if(ghost_touch_cnt == 0) { ghost_x = g_Mtouch_info[fingerID].posX; ghost_y = g_Mtouch_info[fingerID].posY; ghost_touch_cnt++; } else { if ( ghost_x + 40 >= g_Mtouch_info[fingerID].posX && ghost_x - 40 <= g_Mtouch_info[fingerID].posX ) { if ( ghost_y + 40 >= g_Mtouch_info[fingerID].posY && ghost_y - 40 <= g_Mtouch_info[fingerID].posY ) { ghost_touch_cnt++; } } } } if ( is_key_pressed == PRESS_KEY ) { //TPD_LOG ( " ++++++++ KEY_CANCEL!!!!!!!!\n\n" ); input_report_key ( tpd->dev, pressed_keycode, CANCEL_KEY ); input_sync ( tpd->dev ); is_key_pressed = CANCEL_KEY; } is_touch_mix = 1; } else if ( Touch_Type == TOUCH_KEY ) { current_key_time = jiffies_to_msecs ( jiffies ); if ( before_touch_time > 0 ) { if ( current_key_time - before_touch_time > 150) // 100 { is_touch_pressed = 0; } else { continue; } } before_touch_time = 0; current_key_time = 0; // Ignore Key event during touch event actioned if ( is_touch_mix || is_touch_pressed ) { continue; } if ( keyID == 0x1 ) { input_report_key ( tpd->dev, tpd_keys_local[keyID-1], touchState ? PRESS_KEY : RELEASE_KEY ); } else if ( keyID == 0x2 ) { input_report_key ( tpd->dev, tpd_keys_local[keyID-1], touchState ? PRESS_KEY : RELEASE_KEY ); } else if ( keyID == 0x3 ) { input_report_key ( tpd->dev, tpd_keys_local[keyID-1], touchState ? 
PRESS_KEY : RELEASE_KEY ); } else if ( keyID == 0x4 ) { input_report_key ( tpd->dev, tpd_keys_local[keyID-1], touchState ? PRESS_KEY : RELEASE_KEY ); } else { TPD_ERR ( " KeyID is incorrect!! (0x%x)\n", keyID ); keyID = 0x00; } if ( keyID != 0 ) { pressed_keycode = tpd_keys_local[keyID-1]; if ( touchState ) { is_key_pressed = PRESS_KEY; TPD_LOG ( "Touch key press (keyID = 0x%x)\n", keyID ); } else { is_key_pressed = RELEASE_KEY; TPD_LOG ( "Touch key release (keyID = 0x%x)\n", keyID ); } } } } press_count = 0; if ( is_touch_mix ) { for ( i = 0 ; i < TS_MAX_TOUCH ; i++ ) { if ( g_Mtouch_info[i].pressure == -1 ) { continue; } if ( g_Mtouch_info[i].status == 0 ) { is_touch_pressed = 0; g_Mtouch_info[i].status = -1; continue; } if ( g_Mtouch_info[i].status == 1 ) { input_report_key ( tpd->dev, BTN_TOUCH, 1 ); input_report_abs ( tpd->dev, ABS_MT_TRACKING_ID, i ); input_report_abs ( tpd->dev, ABS_MT_POSITION_X, g_Mtouch_info[i].posX ); input_report_abs ( tpd->dev, ABS_MT_POSITION_Y, g_Mtouch_info[i].posY ); input_report_abs ( tpd->dev, ABS_MT_TOUCH_MAJOR, g_Mtouch_info[i].area ); //input_report_abs ( tpd->dev, ABS_MT_WIDTH_MAJOR, g_Mtouch_info[i].pressure ); input_report_abs ( tpd->dev, ABS_MT_PRESSURE, g_Mtouch_info[i].pressure ); is_touch_pressed = 1; input_mt_sync ( tpd->dev ); press_count++; } if ( g_Mtouch_info[i].pressure == 0 ) { g_Mtouch_info[i].pressure = -1; } } if ( press_count == 0 ) { input_report_key ( tpd->dev, BTN_TOUCH, 0 ); input_mt_sync ( tpd->dev ); } before_touch_time = jiffies_to_msecs ( jiffies ); } is_touch_mix = 0; input_sync ( tpd->dev ); } exit_work_func: mutex_unlock ( &i2c_access ); } while ( !kthread_should_stop () ); return 0; } /**************************************************************************** * MMS128 Firmware Update Function ****************************************************************************/ static int mms128_fw_load ( struct i2c_client *client, int hw_ver ) { int ret = 0; TPD_FUN (); mt65xx_eint_mask ( 
CUST_EINT_TOUCH_PANEL_NUM ); //mtk_wdt_enable ( WK_WDT_DIS ); mutex_lock ( &i2c_access ); ret = mms100_ISP_download_binary_data ( hw_ver ); if ( ret ) { TPD_LOG ( "SET Download ISP Fail\n" ); } mutex_unlock ( &i2c_access ); //mtk_wdt_enable ( WK_WDT_EN ); mms128_power ( 0 ); msleep ( 100 ); mms128_power ( 1 ); msleep ( 200 ); mt65xx_eint_unmask ( CUST_EINT_TOUCH_PANEL_NUM ); return ret; } #if 0 static int mms128_check_firmware ( struct i2c_client *client, u8 *val ) { int ret = 0; ret = mms128_i2c_read ( client, TS_READ_HW_VER_ADDR, 1, &val[0] ); if ( ret != 0 ) { return ret; } ret = mms128_i2c_read ( client, MELFAS_FW_VERSION, 1, &val[1] ); TPD_LOG ( "Touch IC ==> H/W Ver[0x%x], F/W Ver[0x%x]\n", val[0], val[1] ); return ret; } static int mms128_firmware_update ( struct i2c_client *client ) { int ret = 0; uint8_t fw_ver[2] = { 0, }; TPD_FUN (); ret = mms128_check_firmware ( client, fw_ver ); if ( ret < 0 ) { TPD_LOG ( "check_firmware fail! [%d]", ret ); mms128_power ( 0 ); msleep ( 100 ); mms128_power ( 1 ); msleep ( 100 ); return -1; } else { if ( fw_ver[1] != MELFAS_CURRENT_FW_VERSION ) { TPD_LOG ( "0x%x version Firmware Update\n", MELFAS_CURRENT_FW_VERSION ); ret = mms128_fw_load ( client, 1 ); } else { TPD_LOG ( "Touch Firmware is the latest version [Ver: 0x%x]\n", fw_ver[1] ); } } return ret; } #endif /**************************************************************************** * MMS128 ADB Shell command function ****************************************************************************/ static ssize_t mms128_show_update (struct device *dev,struct device_attribute *attr, char *buf) { // char ver[20]; // snprintf(buf, VAR_CHAR_NUM_MAX, "%s", ver); return 0; } static ssize_t mms128_store_update (struct device *dev,struct device_attribute *attr, const char *buf, size_t count) { int hw_ver; sscanf ( buf, "%d", &hw_ver ); cancel_delayed_work ( &ghost_monitor_work ); if ( hw_ver == 0 ) { mms128_fw_load ( melfas_i2c_client, 0 ); } else { mms128_fw_load ( 
melfas_i2c_client, 1 ); } schedule_delayed_work ( &ghost_monitor_work, msecs_to_jiffies ( HZ * 50 ) ); return count; } static DEVICE_ATTR ( update, 0664, mms128_show_update, mms128_store_update ); static ssize_t mms128_show_firmware ( struct device *dev, struct device_attribute *attr, char *buf ) { int r; u8 product_id; u8 product_id2; r = snprintf ( buf, PAGE_SIZE, "%d\n", touch_fw_version ); mms128_i2c_read ( melfas_i2c_client, MELFAS_FW_VERSION, sizeof ( product_id ), &product_id ); mms128_i2c_read ( melfas_i2c_client, TS_READ_HW_VER_ADDR, sizeof ( product_id2 ), &product_id2 ); return sprintf ( buf, "H/W ver: 0x%x, F/W ver: 0x%x\n", product_id2, product_id ); } static DEVICE_ATTR ( fw, 0664, mms128_show_firmware, NULL ); static ssize_t mms128_show_reset ( struct device *dev, struct device_attribute *attr, char *buf ) { mms128_power ( 0 ); msleep(100); mms128_power ( 1 ); msleep(100); return 0; } static ssize_t mms128_store_reset ( struct device *dev, struct device_attribute *attr, const char *buffer100, size_t count ) { uint8_t buf[TS_READ_REGS_LEN] = { 0, }; int read_num = 0; mms128_i2c_read ( melfas_i2c_client, TS_READ_LEN_ADDR, 1, buf ); read_num = buf[0]; if ( read_num ) { mms128_i2c_read ( melfas_i2c_client, TS_READ_START_ADDR, read_num, buf ); if ( 0x0F == buf[0] ) { mms128_touch_reset (); } } return count; } static DEVICE_ATTR ( reset, 0664, mms128_show_reset, mms128_store_reset ); /**************************************************************************** * I2C BUS Related Functions ****************************************************************************/ static int mms128_i2c_probe ( struct i2c_client *client, const struct i2c_device_id *id ) { int i, err = 0, ret = 0; struct task_struct *thread = NULL; int gpio_touch_id = 0; TPD_FUN(); /* Turn on the power for MMS128 */ mms128_power ( 1 ); msleep(200); msleep(100); melfas_i2c_client = client; err = device_create_file ( &client->dev, &dev_attr_update ); if ( err ) { TPD_ERR ( "Touchscreen : 
update_touch device_create_file: Fail\n" ); device_remove_file ( &client->dev, &dev_attr_update ); return err; } err = device_create_file ( &client->dev, &dev_attr_fw ); if ( err ) { TPD_ERR ( "Touchscreen : fw_touch device_create_file: Fail\n" ); device_remove_file ( &client->dev, &dev_attr_fw ); return err; } err = device_create_file ( &client->dev, &dev_attr_reset ); if ( err ) { TPD_ERR ( "Touchscreen : reset_touch device_create_file: Fail\n" ); device_remove_file ( &client->dev, &dev_attr_reset ); return err; } // gpio_touch_id = mt_get_gpio_in ( GPIO_TOUCH_MAKER_ID ); TPD_LOG ( "TOUCH_ID[%d]\n", gpio_touch_id ); #ifdef TPD_HAVE_BUTTON for ( i = 0 ; i < TPD_KEY_COUNT ; i++ ) { input_set_capability ( tpd->dev, EV_KEY, tpd_keys_local[i] ); } #endif /* Touch Firmware Update */ //ret = mms128_firmware_update ( client ); thread = kthread_run ( mms128_event_handler, 0, TPD_DEVICE ); if ( IS_ERR ( thread ) ) { err = PTR_ERR ( thread ); TPD_ERR ( "failed to create kernel thread: %d\n", err ); } /* Configure external ( GPIO ) interrupt */ mms128_setup_eint (); tpd_load_status = 1; /* ghost finger pattern detection */ INIT_DELAYED_WORK ( &ghost_monitor_work, mms128_monitor_ghost_finger ); schedule_delayed_work ( &ghost_monitor_work, msecs_to_jiffies ( HZ * 50 ) ); if ( ret == 0 ) { mms128_read_dummy_interrupt (); } return 0; } static int mms128_i2c_remove ( struct i2c_client *client ) { return 0; } static int mms128_i2c_detect ( struct i2c_client *client, struct i2c_board_info *info ) { strcpy ( info->type, "mtk-tpd" ); TPD_FUN (); return 0; } static const struct i2c_device_id mms128_i2c_id[] = { { TP_DEV_NAME, 0 }, {} }; static struct i2c_driver mms128_i2c_driver = { .driver.name = "mtk-tpd", .probe = mms128_i2c_probe, .remove = __devexit_p(mms128_i2c_remove), .detect = mms128_i2c_detect, .id_table = mms128_i2c_id, }; /**************************************************************************** * Linux Device Driver Related Functions 
****************************************************************************/ static int mms128_local_init ( void ) { TPD_FUN (); if ( i2c_add_driver ( &mms128_i2c_driver ) != 0 ) { TPD_ERR ( "unable to add i2c driver.\n" ); return -1; } if ( tpd_load_status == 0 ) { TPD_ERR ( "add error touch panel driver.\n" ); i2c_del_driver ( &mms128_i2c_driver ); return -1; } tpd_type_cap = 1; return 0; } static void mms128_suspend ( struct early_suspend *h ) { TPD_FUN (); mms128_release_all_finger (); /* mask external interrupt */ mt65xx_eint_mask ( CUST_EINT_TOUCH_PANEL_NUM ); /* Turn off the power for MMS128 */ mms128_power ( 0 ); /* ghost finger pattern detection */ cancel_delayed_work ( &ghost_monitor_work ); } static void mms128_resume ( struct early_suspend *h ) { TPD_FUN (); /* Turn on the power for MMS128 */ mms128_power ( 1 ); mms128_release_all_finger (); msleep ( 100 ); mms128_read_dummy_interrupt (); /* unmask external interrupt */ mt65xx_eint_unmask ( CUST_EINT_TOUCH_PANEL_NUM ); /* ghost finger pattern detection */ ghost_touch_cnt = 0; schedule_delayed_work ( &ghost_monitor_work, msecs_to_jiffies ( HZ * 50 ) ); } static struct i2c_board_info __initdata i2c_MMS128={ I2C_BOARD_INFO ( TP_DEV_NAME, MELFAS_I2C_ADDRESS ) }; static struct tpd_driver_t mms128_device_driver = { .tpd_device_name = "mms128", .tpd_local_init = mms128_local_init, .suspend = mms128_suspend, .resume = mms128_resume, #ifdef TPD_HAVE_BUTTON .tpd_have_button = 1, #else .tpd_have_button = 0, #endif }; static int __init mms128_driver_init ( void ) { TPD_FUN (); i2c_register_board_info ( 1, &i2c_MMS128, 1 ); if ( tpd_driver_add ( &mms128_device_driver ) < 0 ) { TPD_ERR ( "melfas driver add failed\n" ); } return 0; } static void __exit mms128_driver_exit ( void ) { TPD_FUN (); tpd_driver_remove ( &mms128_device_driver ); } module_init ( mms128_driver_init ); module_exit ( mms128_driver_exit ); MODULE_AUTHOR ( "Kang Jun Mo" ); MODULE_DESCRIPTION ( "mms128 driver" ); MODULE_LICENSE ( "GPL" ); /* End Of 
File */
gpl-2.0
beattie/uclinux
drivers/media/video/m52790.c
578
5916
/* * m52790 i2c ivtv driver. * Copyright (C) 2007 Hans Verkuil * * A/V source switching Mitsubishi M52790SP/FP * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/types.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/i2c-id.h> #include <linux/videodev2.h> #include <media/m52790.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-i2c-drv.h> MODULE_DESCRIPTION("i2c device driver for m52790 A/V switch"); MODULE_AUTHOR("Hans Verkuil"); MODULE_LICENSE("GPL"); struct m52790_state { struct v4l2_subdev sd; u16 input; u16 output; }; static inline struct m52790_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct m52790_state, sd); } /* ----------------------------------------------------------------------- */ static int m52790_write(struct v4l2_subdev *sd) { struct m52790_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); u8 sw1 = (state->input | state->output) & 0xff; u8 sw2 = (state->input | state->output) >> 8; return i2c_smbus_write_byte_data(client, sw1, sw2); } /* Note: audio and video are linked and cannot be switched separately. So audio and video routing commands are identical for this chip. 
In theory the video amplifier and audio modes could be handled separately for the output, but that seems to be overkill right now. The same holds for implementing an audio mute control, this is now part of the audio output routing. The normal case is that another chip takes care of the actual muting so making it part of the output routing seems to be the right thing to do for now. */ static int m52790_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct m52790_state *state = to_state(sd); state->input = input; state->output = output; m52790_write(sd); return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int m52790_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct m52790_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (reg->reg != 0) return -EINVAL; reg->size = 1; reg->val = state->input | state->output; return 0; } static int m52790_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct m52790_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (reg->reg != 0) return -EINVAL; state->input = reg->val & 0x0303; state->output = reg->val & ~0x0303; m52790_write(sd); return 0; } #endif static int m52790_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_M52790, 0); } static int m52790_log_status(struct v4l2_subdev *sd) { struct m52790_state *state = to_state(sd); v4l2_info(sd, "Switch 1: %02x\n", (state->input | state->output) & 0xff); v4l2_info(sd, "Switch 2: %02x\n", (state->input | state->output) >> 8); return 0; } /* 
----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops m52790_core_ops = { .log_status = m52790_log_status, .g_chip_ident = m52790_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = m52790_g_register, .s_register = m52790_s_register, #endif }; static const struct v4l2_subdev_audio_ops m52790_audio_ops = { .s_routing = m52790_s_routing, }; static const struct v4l2_subdev_video_ops m52790_video_ops = { .s_routing = m52790_s_routing, }; static const struct v4l2_subdev_ops m52790_ops = { .core = &m52790_core_ops, .audio = &m52790_audio_ops, .video = &m52790_video_ops, }; /* ----------------------------------------------------------------------- */ /* i2c implementation */ static int m52790_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct m52790_state *state; struct v4l2_subdev *sd; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); state = kmalloc(sizeof(struct m52790_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &m52790_ops); state->input = M52790_IN_TUNER; state->output = M52790_OUT_STEREO; m52790_write(sd); return 0; } static int m52790_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(to_state(sd)); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id m52790_id[] = { { "m52790", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, m52790_id); static struct v4l2_i2c_driver_data v4l2_i2c_data = { .name = "m52790", .probe = m52790_probe, .remove = m52790_remove, .id_table = m52790_id, };
gpl-2.0
kraml/desire-sense-kernel
drivers/media/video/wm8739.c
578
8688
/* * wm8739 * * Copyright (C) 2005 T. Adachi <tadachi@tadachi-net.com> * * Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl> * - Cleanup * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/types.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/i2c-id.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-i2c-drv.h> MODULE_DESCRIPTION("wm8739 driver"); MODULE_AUTHOR("T. 
Adachi, Hans Verkuil"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /* ------------------------------------------------------------------------ */ enum { R0 = 0, R1, R5 = 5, R6, R7, R8, R9, R15 = 15, TOT_REGS }; struct wm8739_state { struct v4l2_subdev sd; u32 clock_freq; u8 muted; u16 volume; u16 balance; u8 vol_l; /* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */ u8 vol_r; /* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */ }; static inline struct wm8739_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct wm8739_state, sd); } /* ------------------------------------------------------------------------ */ static int wm8739_write(struct v4l2_subdev *sd, int reg, u16 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); int i; if (reg < 0 || reg >= TOT_REGS) { v4l2_err(sd, "Invalid register R%d\n", reg); return -1; } v4l2_dbg(1, debug, sd, "write: %02x %02x\n", reg, val); for (i = 0; i < 3; i++) if (i2c_smbus_write_byte_data(client, (reg << 1) | (val >> 8), val & 0xff) == 0) return 0; v4l2_err(sd, "I2C: cannot write %03x to register R%d\n", val, reg); return -1; } /* write regs to set audio volume etc */ static void wm8739_set_audio(struct v4l2_subdev *sd) { struct wm8739_state *state = to_state(sd); u16 mute = state->muted ? 
0x80 : 0; /* Volume setting: bits 0-4, 0x1f = 12 dB, 0x00 = -34.5 dB * Default setting: 0x17 = 0 dB */ wm8739_write(sd, R0, (state->vol_l & 0x1f) | mute); wm8739_write(sd, R1, (state->vol_r & 0x1f) | mute); } static int wm8739_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct wm8739_state *state = to_state(sd); switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: ctrl->value = state->muted; break; case V4L2_CID_AUDIO_VOLUME: ctrl->value = state->volume; break; case V4L2_CID_AUDIO_BALANCE: ctrl->value = state->balance; break; default: return -EINVAL; } return 0; } static int wm8739_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct wm8739_state *state = to_state(sd); unsigned int work_l, work_r; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: state->muted = ctrl->value; break; case V4L2_CID_AUDIO_VOLUME: state->volume = ctrl->value; break; case V4L2_CID_AUDIO_BALANCE: state->balance = ctrl->value; break; default: return -EINVAL; } /* normalize ( 65535 to 0 -> 31 to 0 (12dB to -34.5dB) ) */ work_l = (min(65536 - state->balance, 32768) * state->volume) / 32768; work_r = (min(state->balance, (u16)32768) * state->volume) / 32768; state->vol_l = (long)work_l * 31 / 65535; state->vol_r = (long)work_r * 31 / 65535; /* set audio volume etc. 
*/ wm8739_set_audio(sd); return 0; } /* ------------------------------------------------------------------------ */ static struct v4l2_queryctrl wm8739_qctrl[] = { { .id = V4L2_CID_AUDIO_VOLUME, .name = "Volume", .minimum = 0, .maximum = 65535, .step = 65535/100, .default_value = 58880, .flags = 0, .type = V4L2_CTRL_TYPE_INTEGER, }, { .id = V4L2_CID_AUDIO_MUTE, .name = "Mute", .minimum = 0, .maximum = 1, .step = 1, .default_value = 1, .flags = 0, .type = V4L2_CTRL_TYPE_BOOLEAN, }, { .id = V4L2_CID_AUDIO_BALANCE, .name = "Balance", .minimum = 0, .maximum = 65535, .step = 65535/100, .default_value = 32768, .flags = 0, .type = V4L2_CTRL_TYPE_INTEGER, } }; /* ------------------------------------------------------------------------ */ static int wm8739_s_clock_freq(struct v4l2_subdev *sd, u32 audiofreq) { struct wm8739_state *state = to_state(sd); state->clock_freq = audiofreq; /* de-activate */ wm8739_write(sd, R9, 0x000); switch (audiofreq) { case 44100: /* 256fps, fs=44.1k */ wm8739_write(sd, R8, 0x020); break; case 48000: /* 256fps, fs=48k */ wm8739_write(sd, R8, 0x000); break; case 32000: /* 256fps, fs=32k */ wm8739_write(sd, R8, 0x018); break; default: break; } /* activate */ wm8739_write(sd, R9, 0x001); return 0; } static int wm8739_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc) { int i; for (i = 0; i < ARRAY_SIZE(wm8739_qctrl); i++) if (qc->id && qc->id == wm8739_qctrl[i].id) { memcpy(qc, &wm8739_qctrl[i], sizeof(*qc)); return 0; } return -EINVAL; } static int wm8739_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_WM8739, 0); } static int wm8739_log_status(struct v4l2_subdev *sd) { struct wm8739_state *state = to_state(sd); v4l2_info(sd, "Frequency: %u Hz\n", state->clock_freq); v4l2_info(sd, "Volume L: %02x%s\n", state->vol_l & 0x1f, state->muted ? 
" (muted)" : ""); v4l2_info(sd, "Volume R: %02x%s\n", state->vol_r & 0x1f, state->muted ? " (muted)" : ""); return 0; } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops wm8739_core_ops = { .log_status = wm8739_log_status, .g_chip_ident = wm8739_g_chip_ident, .queryctrl = wm8739_queryctrl, .g_ctrl = wm8739_g_ctrl, .s_ctrl = wm8739_s_ctrl, }; static const struct v4l2_subdev_audio_ops wm8739_audio_ops = { .s_clock_freq = wm8739_s_clock_freq, }; static const struct v4l2_subdev_ops wm8739_ops = { .core = &wm8739_core_ops, .audio = &wm8739_audio_ops, }; /* ------------------------------------------------------------------------ */ /* i2c implementation */ static int wm8739_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct wm8739_state *state; struct v4l2_subdev *sd; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); state = kmalloc(sizeof(struct wm8739_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &wm8739_ops); state->vol_l = 0x17; /* 0dB */ state->vol_r = 0x17; /* 0dB */ state->muted = 0; state->balance = 32768; /* normalize (12dB(31) to -34.5dB(0) [0dB(23)] -> 65535 to 0) */ state->volume = ((long)state->vol_l + 1) * 65535 / 31; state->clock_freq = 48000; /* Initialize wm8739 */ /* reset */ wm8739_write(sd, R15, 0x00); /* filter setting, high path, offet clear */ wm8739_write(sd, R5, 0x000); /* ADC, OSC, Power Off mode Disable */ wm8739_write(sd, R6, 0x000); /* Digital Audio interface format: Enable Master mode, 24 bit, MSB first/left justified */ wm8739_write(sd, R7, 0x049); /* sampling control: normal, 256fs, 48KHz sampling rate */ wm8739_write(sd, R8, 0x000); /* activate */ wm8739_write(sd, R9, 0x001); /* set volume/mute */ 
wm8739_set_audio(sd); return 0; } static int wm8739_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(to_state(sd)); return 0; } static const struct i2c_device_id wm8739_id[] = { { "wm8739", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8739_id); static struct v4l2_i2c_driver_data v4l2_i2c_data = { .name = "wm8739", .probe = wm8739_probe, .remove = wm8739_remove, .id_table = wm8739_id, };
gpl-2.0
LGBean/arm_kernel_2.6.32
drivers/video/backlight/locomolcd.c
834
6342
/* * Backlight control code for Sharp Zaurus SL-5500 * * Copyright 2005 John Lenz <lenz@cs.wisc.edu> * Maintainer: Pavel Machek <pavel@suse.cz> (unless John wants to :-) * GPL v2 * * This driver assumes single CPU. That's okay, because collie is * slightly old hardware, and noone is going to retrofit second CPU to * old PDA. */ /* LCD power functions */ #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/backlight.h> #include <asm/hardware/locomo.h> #include <asm/irq.h> #include <asm/mach/sharpsl_param.h> #include <asm/mach-types.h> #include "../../../arch/arm/mach-sa1100/generic.h" static struct backlight_device *locomolcd_bl_device; static struct locomo_dev *locomolcd_dev; static unsigned long locomolcd_flags; #define LOCOMOLCD_SUSPENDED 0x01 static void locomolcd_on(int comadj) { locomo_gpio_set_dir(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VSHA_ON, 0); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VSHA_ON, 1); mdelay(2); locomo_gpio_set_dir(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VSHD_ON, 0); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VSHD_ON, 1); mdelay(2); locomo_m62332_senddata(locomolcd_dev, comadj, 0); mdelay(5); locomo_gpio_set_dir(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VEE_ON, 0); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VEE_ON, 1); mdelay(10); /* TFTCRST | CPSOUT=0 | CPSEN */ locomo_writel(0x01, locomolcd_dev->mapbase + LOCOMO_TC); /* Set CPSD */ locomo_writel(6, locomolcd_dev->mapbase + LOCOMO_CPSD); /* TFTCRST | CPSOUT=0 | CPSEN */ locomo_writel((0x04 | 0x01), locomolcd_dev->mapbase + LOCOMO_TC); mdelay(10); locomo_gpio_set_dir(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_MOD, 0); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_MOD, 1); } static void locomolcd_off(int comadj) { /* TFTCRST=1 | CPSOUT=1 | CPSEN = 0 */ locomo_writel(0x06, locomolcd_dev->mapbase + 
LOCOMO_TC); mdelay(1); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VSHA_ON, 0); mdelay(110); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VEE_ON, 0); mdelay(700); /* TFTCRST=0 | CPSOUT=0 | CPSEN = 0 */ locomo_writel(0, locomolcd_dev->mapbase + LOCOMO_TC); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_MOD, 0); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VSHD_ON, 0); } void locomolcd_power(int on) { int comadj = sharpsl_param.comadj; unsigned long flags; local_irq_save(flags); if (!locomolcd_dev) { local_irq_restore(flags); return; } /* read comadj */ if (comadj == -1 && machine_is_collie()) comadj = 128; if (comadj == -1 && machine_is_poodle()) comadj = 118; if (on) locomolcd_on(comadj); else locomolcd_off(comadj); local_irq_restore(flags); } EXPORT_SYMBOL(locomolcd_power); static int current_intensity; static int locomolcd_set_intensity(struct backlight_device *bd) { int intensity = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.fb_blank != FB_BLANK_UNBLANK) intensity = 0; if (locomolcd_flags & LOCOMOLCD_SUSPENDED) intensity = 0; switch (intensity) { /* AC and non-AC are handled differently, but produce same results in sharp code? 
*/ case 0: locomo_frontlight_set(locomolcd_dev, 0, 0, 161); break; case 1: locomo_frontlight_set(locomolcd_dev, 117, 0, 161); break; case 2: locomo_frontlight_set(locomolcd_dev, 163, 0, 148); break; case 3: locomo_frontlight_set(locomolcd_dev, 194, 0, 161); break; case 4: locomo_frontlight_set(locomolcd_dev, 194, 1, 161); break; default: return -ENODEV; } current_intensity = intensity; return 0; } static int locomolcd_get_intensity(struct backlight_device *bd) { return current_intensity; } static struct backlight_ops locomobl_data = { .get_brightness = locomolcd_get_intensity, .update_status = locomolcd_set_intensity, }; #ifdef CONFIG_PM static int locomolcd_suspend(struct locomo_dev *dev, pm_message_t state) { locomolcd_flags |= LOCOMOLCD_SUSPENDED; locomolcd_set_intensity(locomolcd_bl_device); return 0; } static int locomolcd_resume(struct locomo_dev *dev) { locomolcd_flags &= ~LOCOMOLCD_SUSPENDED; locomolcd_set_intensity(locomolcd_bl_device); return 0; } #else #define locomolcd_suspend NULL #define locomolcd_resume NULL #endif static int locomolcd_probe(struct locomo_dev *ldev) { unsigned long flags; local_irq_save(flags); locomolcd_dev = ldev; locomo_gpio_set_dir(ldev->dev.parent, LOCOMO_GPIO_FL_VR, 0); /* the poodle_lcd_power function is called for the first time * from fs_initcall, which is before locomo is activated. 
* We need to recall poodle_lcd_power here*/ if (machine_is_poodle()) locomolcd_power(1); local_irq_restore(flags); locomolcd_bl_device = backlight_device_register("locomo-bl", &ldev->dev, NULL, &locomobl_data); if (IS_ERR (locomolcd_bl_device)) return PTR_ERR (locomolcd_bl_device); /* Set up frontlight so that screen is readable */ locomolcd_bl_device->props.max_brightness = 4, locomolcd_bl_device->props.brightness = 2; locomolcd_set_intensity(locomolcd_bl_device); return 0; } static int locomolcd_remove(struct locomo_dev *dev) { unsigned long flags; locomolcd_bl_device->props.brightness = 0; locomolcd_bl_device->props.power = 0; locomolcd_set_intensity(locomolcd_bl_device); backlight_device_unregister(locomolcd_bl_device); local_irq_save(flags); locomolcd_dev = NULL; local_irq_restore(flags); return 0; } static struct locomo_driver poodle_lcd_driver = { .drv = { .name = "locomo-backlight", }, .devid = LOCOMO_DEVID_BACKLIGHT, .probe = locomolcd_probe, .remove = locomolcd_remove, .suspend = locomolcd_suspend, .resume = locomolcd_resume, }; static int __init locomolcd_init(void) { int ret = locomo_driver_register(&poodle_lcd_driver); if (ret) return ret; #ifdef CONFIG_SA1100_COLLIE sa1100fb_lcd_power = locomolcd_power; #endif return 0; } static void __exit locomolcd_exit(void) { locomo_driver_unregister(&poodle_lcd_driver); } module_init(locomolcd_init); module_exit(locomolcd_exit); MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>, Pavel Machek <pavel@suse.cz>"); MODULE_DESCRIPTION("Collie LCD driver"); MODULE_LICENSE("GPL");
gpl-2.0
lx324310/linux
drivers/parisc/power.c
1602
7425
/*
 * linux/drivers/parisc/power.c
 * HP PARISC soft power switch support driver
 *
 * Copyright (c) 2001-2007 Helge Deller <deller@gmx.de>
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions, and the following disclaimer,
 *      without modification.
 *   2. The name of the author may not be used to endorse or promote products
 *      derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 *
 *
 * HINT:
 * Support of the soft power switch button may be enabled or disabled at
 * runtime through the "/proc/sys/kernel/power" procfs entry.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/pm.h>

#include <asm/pdc.h>
#include <asm/io.h>
#include <asm/led.h>

#define DRIVER_NAME  "powersw"
#define KTHREAD_NAME "kpowerswd"

/* how often should the power button be polled ? */
#define POWERSWITCH_POLL_PER_SEC 2

/* how long does the power button needs to be down until we react ? */
#define POWERSWITCH_DOWN_SEC 2

/*
 * Assembly helpers to access the CPU diagnose registers, which hold the
 * power-button state on Gecko-style machines.
 * Instruction encodings taken from the PCXL ERS, page 82.
 */
#define DIAG_CODE(code)		(0x14000000 + ((code)<<5))

#define MFCPU_X(rDiagReg, t_ch, t_th, code) \
	(DIAG_CODE(code) + ((rDiagReg)<<21) + ((t_ch)<<16) + ((t_th)<<0) )

#define MTCPU(dr, gr)		MFCPU_X(dr, gr, 0, 0x12)	/* move value of gr to dr[dr] */
#define MFCPU_C(dr, gr)		MFCPU_X(dr, gr, 0, 0x30)	/* for dr0 and dr8 only ! */
#define MFCPU_T(dr, gr)		MFCPU_X(dr, 0, gr, 0xa0)	/* all dr except dr0 and dr8 */

/*
 * Read CPU diagnose register 'dr' into r28 via a hand-encoded MFCPU_T
 * instruction ('dr' must be a compile-time constant, hence the "i"
 * constraint and the macro instead of a function).
 */
#define __getDIAG(dr) ( { 			\
	register unsigned long __res asm("r28");\
	__asm__ __volatile__ (			\
		".word %1" : "=&r" (__res) : "i" (MFCPU_T(dr,28) ) \
	);					\
	__res;					\
} )

/*
 * Local shutdown counter: number of consecutive polls during which the
 * power button has been observed held down. Only touched by kpowerswd().
 */
static int shutdown_timer __read_mostly;

/*
 * Called once per poll while the button is held down. Announces the
 * request on the first poll, and once the button has been down for
 * POWERSWITCH_DOWN_SEC seconds sends SIGINT to the init process
 * (falling back to pm_power_off() if that fails).
 */
static void process_shutdown(void)
{
	if (shutdown_timer == 0)
		printk(KERN_ALERT KTHREAD_NAME ": Shutdown requested...\n");

	shutdown_timer++;

	/* wait until the button was pressed for 1 second */
	if (shutdown_timer == (POWERSWITCH_DOWN_SEC*POWERSWITCH_POLL_PER_SEC)) {
		static const char msg[] = "Shutting down...";
		printk(KERN_INFO KTHREAD_NAME ": %s\n", msg);
		lcd_print(msg);

		/* send kill signal */
		if (kill_cad_pid(SIGINT, 1)) {
			/* just in case killing init process failed */
			if (pm_power_off)
				pm_power_off();
		}
	}
}

/* main power switch task struct (created in power_init(), stopped in power_exit()) */
static struct task_struct *power_task;

/* filename in /proc which can be used to enable/disable the power switch */
#define SYSCTL_FILENAME	"sys/kernel/power"

/* soft power switch enabled/disabled (toggled at runtime via procfs, see HINT above) */
int pwrsw_enabled __read_mostly = 1;

/*
 * Main kernel thread worker. It polls the power button state.
 *
 * @param: the soft-power register address obtained from PDC firmware,
 *         cast to a pointer; zero means a Gecko-style machine whose
 *         button state lives in CPU diagnose register 25 instead.
 *
 * Loops until kthread_stop(): sleeps, reads the button state from the
 * appropriate source, and either resets an in-progress (but incomplete)
 * shutdown countdown or advances it via process_shutdown().
 */
static int kpowerswd(void *param)
{
	__set_current_state(TASK_RUNNING);

	do {
		int button_not_pressed;
		unsigned long soft_power_reg = (unsigned long) param;

		schedule_timeout_interruptible(pwrsw_enabled ? HZ : HZ/POWERSWITCH_POLL_PER_SEC);

		if (unlikely(!pwrsw_enabled))
			continue;

		if (soft_power_reg) {
			/*
			 * Non-Gecko-style machines:
			 * Check the power switch status which is read from the
			 * real I/O location at soft_power_reg.
			 * Bit 31 ("the lowest bit) is the status of the power switch.
			 * This bit is "1" if the button is NOT pressed.
			 */
			button_not_pressed = (gsc_readl(soft_power_reg) & 0x1);
		} else {
			/*
			 * On gecko style machines (e.g. 712/xx and 715/xx)
			 * the power switch status is stored in Bit 0 ("the highest bit")
			 * of CPU diagnose register 25.
			 * Warning: Some machines never reset the DIAG flag, even if
			 * the button has been released again.
			 */
			button_not_pressed = (__getDIAG(25) & 0x80000000);
		}

		if (likely(button_not_pressed)) {
			if (unlikely(shutdown_timer && /* avoid writing if not necessary */
				shutdown_timer < (POWERSWITCH_DOWN_SEC*POWERSWITCH_POLL_PER_SEC))) {
				/* button released before the countdown completed */
				shutdown_timer = 0;
				printk(KERN_INFO KTHREAD_NAME ": Shutdown request aborted.\n");
			}
		} else
			process_shutdown();

	} while (!kthread_should_stop());

	return 0;
}

/*
 * powerfail interruption handler (irq IRQ_FROM_REGION(CPU_IRQ_REGION)+2)
 */
#if 0
static void powerfail_interrupt(int code, void *x)
{
	printk(KERN_CRIT "POWERFAIL INTERRUPTION !\n");
	poweroff();
}
#endif

/*
 * parisc_panic_event() is called by the panic handler.
 * As soon as a panic occurs, our tasklets above will not be
 * executed any longer. This function then re-enables the
 * soft-power switch and allows the user to switch off the system
 */
static int parisc_panic_event(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	/* re-enable the soft-power switch */
	pdc_soft_power_button(0);
	return NOTIFY_DONE;
}

static struct notifier_block parisc_panic_block = {
	.notifier_call	= parisc_panic_event,
	.priority	= INT_MAX,	/* run before other panic notifiers */
};

/*
 * Driver init: ask PDC firmware for the soft-power register, enable the
 * soft power switch, start the polling thread, and hook the panic
 * notifier. Returns -ENODEV when the hardware has no soft power switch,
 * -EIO when the polling thread could not be created.
 */
static int __init power_init(void)
{
	unsigned long ret;
	unsigned long soft_power_reg;

#if 0
	request_irq( IRQ_FROM_REGION(CPU_IRQ_REGION)+2, &powerfail_interrupt,
		0, "powerfail", NULL);
#endif

	/* enable the soft power switch if possible */
	ret = pdc_soft_power_info(&soft_power_reg);
	if (ret == PDC_OK)
		ret = pdc_soft_power_button(1);
	if (ret != PDC_OK)
		soft_power_reg = -1UL;

	switch (soft_power_reg) {
	case 0:
		printk(KERN_INFO DRIVER_NAME ": Gecko-style soft power switch enabled.\n");
		break;
	case -1UL:
		printk(KERN_INFO DRIVER_NAME ": Soft power switch support not available.\n");
		return -ENODEV;
	default:
		printk(KERN_INFO DRIVER_NAME ": Soft power switch at 0x%08lx enabled.\n",
			soft_power_reg);
	}

	power_task = kthread_run(kpowerswd, (void*)soft_power_reg, KTHREAD_NAME);
	if (IS_ERR(power_task)) {
		printk(KERN_ERR DRIVER_NAME ": thread creation failed. Driver not loaded.\n");
		/* undo the pdc_soft_power_button(1) above */
		pdc_soft_power_button(0);
		return -EIO;
	}

	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list,
			&parisc_panic_block);

	return 0;
}

/* Driver teardown: stop the polling thread and disable the soft power switch. */
static void __exit power_exit(void)
{
	kthread_stop(power_task);

	atomic_notifier_chain_unregister(&panic_notifier_list,
			&parisc_panic_block);

	pdc_soft_power_button(0);
}

arch_initcall(power_init);
module_exit(power_exit);

MODULE_AUTHOR("Helge Deller <deller@gmx.de>");
MODULE_DESCRIPTION("Soft power switch driver");
MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
TeamWin/android_kernel_samsung_noblelte
drivers/gpu/drm/drm_agpsupport.c
2370
12408
/**
 * \file drm_agpsupport.c
 * DRM support for AGP/GART backend
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <linux/module.h>
#include <linux/slab.h>

#if __OS_HAS_AGP

#include <asm/agp.h>

/**
 * Get AGP information.
 *
 * \param dev DRM device.
 * \param info (output) drm_agp_info structure to fill in.
 * \return zero on success or a negative number on failure.
 *
 * Verifies the AGP device has been initialized and acquired and fills in the
 * drm_agp_info structure with the information in drm_agp_head::agp_info.
 */
int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
{
	DRM_AGP_KERN *kern;

	if (!dev->agp || !dev->agp->acquired)
		return -EINVAL;

	kern = &dev->agp->agp_info;
	info->agp_version_major = kern->version.major;
	info->agp_version_minor = kern->version.minor;
	info->mode = kern->mode;
	info->aperture_base = kern->aper_base;
	info->aperture_size = kern->aper_size * 1024 * 1024;
	info->memory_allowed = kern->max_memory << PAGE_SHIFT;
	info->memory_used = kern->current_memory << PAGE_SHIFT;
	info->id_vendor = kern->device->vendor;
	info->id_device = kern->device->device;

	return 0;
}
EXPORT_SYMBOL(drm_agp_info);

int drm_agp_info_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_agp_info *info = data;

	/* No post-processing needed; propagate drm_agp_info()'s result. */
	return drm_agp_info(dev, info);
}

/**
 * Acquire the AGP device.
 *
 * \param dev DRM device that is to acquire AGP.
 * \return zero on success or a negative number on failure.
 *
 * Verifies the AGP device hasn't been acquired before and calls
 * \c agp_backend_acquire.
 */
int drm_agp_acquire(struct drm_device * dev)
{
	if (!dev->agp)
		return -ENODEV;
	if (dev->agp->acquired)
		return -EBUSY;
	dev->agp->bridge = agp_backend_acquire(dev->pdev);
	if (!dev->agp->bridge)
		return -ENODEV;
	dev->agp->acquired = 1;
	return 0;
}
EXPORT_SYMBOL(drm_agp_acquire);

/**
 * Acquire the AGP device (ioctl).
 *
 * \param dev DRM device (unused; the device is taken from \p file_priv).
 * \param data ioctl argument (unused).
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Verifies the AGP device hasn't been acquired before and calls
 * \c agp_backend_acquire.
 */
int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	/* file_priv->minor->dev already is a struct drm_device *; no cast needed */
	return drm_agp_acquire(file_priv->minor->dev);
}

/**
 * Release the AGP device.
 *
 * \param dev DRM device that is to release AGP.
 * \return zero on success or a negative number on failure.
 *
 * Verifies the AGP device has been acquired and calls \c agp_backend_release.
 */
int drm_agp_release(struct drm_device * dev)
{
	if (!dev->agp || !dev->agp->acquired)
		return -EINVAL;
	agp_backend_release(dev->agp->bridge);
	dev->agp->acquired = 0;
	return 0;
}
EXPORT_SYMBOL(drm_agp_release);

int drm_agp_release_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	return drm_agp_release(dev);
}

/**
 * Enable the AGP bus.
 *
 * \param dev DRM device that has previously acquired AGP.
 * \param mode Requested AGP mode.
 * \return zero on success or a negative number on failure.
 *
 * Verifies the AGP device has been acquired but not enabled, and calls
 * \c agp_enable.
 */
int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
{
	if (!dev->agp || !dev->agp->acquired)
		return -EINVAL;

	dev->agp->mode = mode.mode;
	agp_enable(dev->agp->bridge, mode.mode);
	dev->agp->enabled = 1;
	return 0;
}
EXPORT_SYMBOL(drm_agp_enable);

int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_agp_mode *mode = data;

	return drm_agp_enable(dev, *mode);
}

/**
 * Allocate AGP memory.
 *
 * \param dev DRM device.
 * \param request allocation request (size and type in; handle and
 *        physical address out).
 * \return zero on success or a negative number on failure.
 *
 * Verifies the AGP device is present and has been acquired, allocates the
 * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
 * The entry is linked into drm_agp_head::memory and owned by the caller
 * until drm_agp_free() is called on its handle.
 */
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
	struct drm_agp_mem *entry;
	DRM_AGP_MEM *memory;
	unsigned long pages;
	u32 type;

	if (!dev->agp || !dev->agp->acquired)
		return -EINVAL;
	/* kzalloc replaces the old kmalloc+memset pair */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
	type = (u32) request->type;
	memory = agp_allocate_memory(dev->agp->bridge, pages, type);
	if (!memory) {
		kfree(entry);
		return -ENOMEM;
	}

	/* agpgart keys start at 0; handle 0 is reserved, so offset by one */
	entry->handle = (unsigned long)memory->key + 1;
	entry->memory = memory;
	entry->bound = 0;
	entry->pages = pages;
	list_add(&entry->head, &dev->agp->memory);

	request->handle = entry->handle;
	request->physical = memory->physical;

	return 0;
}
EXPORT_SYMBOL(drm_agp_alloc);

int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_agp_buffer *request = data;

	return drm_agp_alloc(dev, request);
}

/**
 * Search for the AGP memory entry associated with a handle.
 *
 * \param dev DRM device structure.
 * \param handle AGP memory handle.
 * \return pointer to the drm_agp_mem structure associated with \p handle,
 *         or NULL if not found.
 *
 * Walks through drm_agp_head::memory until finding a matching handle.
 */
static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
					   unsigned long handle)
{
	struct drm_agp_mem *entry;

	list_for_each_entry(entry, &dev->agp->memory, head) {
		if (entry->handle == handle)
			return entry;
	}
	return NULL;
}

/**
 * Unbind AGP memory from the GATT.
 *
 * \param dev DRM device.
 * \param request binding request (handle identifies the entry).
 * \return zero on success or a negative number on failure.
 *
 * Verifies the AGP device is present and acquired, looks-up the AGP memory
 * entry and passes it to the unbind_agp() function.
 */
int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
{
	struct drm_agp_mem *entry;
	int ret;

	if (!dev->agp || !dev->agp->acquired)
		return -EINVAL;
	entry = drm_agp_lookup_entry(dev, request->handle);
	if (!entry)
		return -EINVAL;
	if (!entry->bound)
		return -EINVAL;
	ret = drm_unbind_agp(entry->memory);
	if (ret == 0)
		entry->bound = 0;
	return ret;
}
EXPORT_SYMBOL(drm_agp_unbind);

int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_agp_binding *request = data;

	return drm_agp_unbind(dev, request);
}

/**
 * Bind AGP memory into the GATT.
 *
 * \param dev DRM device.
 * \param request binding request (handle and GATT byte offset).
 * \return zero on success or a negative number on failure.
 *
 * Verifies the AGP device is present and has been acquired and that no memory
 * is currently bound into the GATT. Looks-up the AGP memory entry and passes
 * it to bind_agp() function.
 */
int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
{
	struct drm_agp_mem *entry;
	int retcode;
	int page;

	if (!dev->agp || !dev->agp->acquired)
		return -EINVAL;
	entry = drm_agp_lookup_entry(dev, request->handle);
	if (!entry)
		return -EINVAL;
	if (entry->bound)
		return -EINVAL;
	page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
	retcode = drm_bind_agp(entry->memory, page);
	if (retcode)
		return retcode;
	entry->bound = dev->agp->base + (page << PAGE_SHIFT);
	DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
		  dev->agp->base, entry->bound);
	return 0;
}
EXPORT_SYMBOL(drm_agp_bind);

int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_agp_binding *request = data;

	return drm_agp_bind(dev, request);
}

/**
 * Free AGP memory.
 *
 * \param dev DRM device.
 * \param request buffer request (handle identifies the entry).
 * \return zero on success or a negative number on failure.
 *
 * Verifies the AGP device is present and has been acquired and looks up the
 * AGP memory entry. If the memory is currently bound, unbind it via
 * unbind_agp(). Frees it via free_agp() as well as the entry itself
 * and unlinks from the doubly linked list it's inserted in.
 */
int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
{
	struct drm_agp_mem *entry;

	if (!dev->agp || !dev->agp->acquired)
		return -EINVAL;
	entry = drm_agp_lookup_entry(dev, request->handle);
	if (!entry)
		return -EINVAL;
	if (entry->bound)
		drm_unbind_agp(entry->memory);

	list_del(&entry->head);

	drm_free_agp(entry->memory, entry->pages);
	kfree(entry);
	return 0;
}
EXPORT_SYMBOL(drm_agp_free);

int drm_agp_free_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_agp_buffer *request = data;

	return drm_agp_free(dev, request);
}

/**
 * Initialize the AGP resources.
 *
 * \return pointer to a drm_agp_head structure, or NULL on failure
 *         (no bridge, unsupported chipset, or allocation failure).
 *
 * Gets the drm_agp_t structure which is made available by the agpgart module
 * via the inter_module_* functions. Creates and initializes a drm_agp_head
 * structure. The caller owns the returned structure.
 */
struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
	struct drm_agp_head *head;

	/* kzalloc replaces the old kmalloc+memset pair */
	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return NULL;
	head->bridge = agp_find_bridge(dev->pdev);
	if (!head->bridge) {
		/* no cached bridge: acquire one just long enough to copy its info */
		head->bridge = agp_backend_acquire(dev->pdev);
		if (!head->bridge) {
			kfree(head);
			return NULL;
		}
		agp_copy_info(head->bridge, &head->agp_info);
		agp_backend_release(head->bridge);
	} else {
		agp_copy_info(head->bridge, &head->agp_info);
	}
	if (head->agp_info.chipset == NOT_SUPPORTED) {
		kfree(head);
		return NULL;
	}
	INIT_LIST_HEAD(&head->memory);
	head->cant_use_aperture = head->agp_info.cant_use_aperture;
	head->page_mask = head->agp_info.page_mask;
	head->base = head->agp_info.aper_base;
	return head;
}

/**
 * Binds a collection of pages into AGP memory at the given offset, returning
 * the AGP memory structure containing them.
 *
 * No reference is held on the pages during this time -- it is up to the
 * caller to handle that.
 */
DRM_AGP_MEM *
drm_agp_bind_pages(struct drm_device *dev,
		   struct page **pages,
		   unsigned long num_pages,
		   uint32_t gtt_offset,
		   u32 type)
{
	DRM_AGP_MEM *mem;
	int ret, i;

	DRM_DEBUG("\n");

	mem = agp_allocate_memory(dev->agp->bridge, num_pages,
				  type);
	if (mem == NULL) {
		DRM_ERROR("Failed to allocate memory for %ld pages\n",
			  num_pages);
		return NULL;
	}

	for (i = 0; i < num_pages; i++)
		mem->pages[i] = pages[i];
	mem->page_count = num_pages;

	mem->is_flushed = true;
	ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
	if (ret != 0) {
		DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
		agp_free_memory(mem);
		return NULL;
	}

	return mem;
}
EXPORT_SYMBOL(drm_agp_bind_pages);

#endif /* __OS_HAS_AGP */
gpl-2.0
eagleeyetom/android_kernel_mediatek
arch/arm/mach-tegra/pcie.c
4674
24401
/* * arch/arm/mach-tegra/pci.c * * PCIe host controller driver for TEGRA(2) SOCs * * Copyright (c) 2010, CompuLab, Ltd. * Author: Mike Rapoport <mike@compulab.co.il> * * Based on NVIDIA PCIe driver * Copyright (c) 2008-2009, NVIDIA Corporation. * * Bits taken from arch/arm/mach-dove/pcie.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/export.h> #include <asm/sizes.h> #include <asm/mach/pci.h> #include <mach/iomap.h> #include <mach/clk.h> #include <mach/powergate.h> #include "board.h" /* register definitions */ #define AFI_OFFSET 0x3800 #define PADS_OFFSET 0x3000 #define RP0_OFFSET 0x0000 #define RP1_OFFSET 0x1000 #define AFI_AXI_BAR0_SZ 0x00 #define AFI_AXI_BAR1_SZ 0x04 #define AFI_AXI_BAR2_SZ 0x08 #define AFI_AXI_BAR3_SZ 0x0c #define AFI_AXI_BAR4_SZ 0x10 #define AFI_AXI_BAR5_SZ 0x14 #define AFI_AXI_BAR0_START 0x18 #define AFI_AXI_BAR1_START 0x1c #define AFI_AXI_BAR2_START 0x20 #define AFI_AXI_BAR3_START 0x24 #define AFI_AXI_BAR4_START 0x28 #define AFI_AXI_BAR5_START 0x2c #define AFI_FPCI_BAR0 0x30 #define AFI_FPCI_BAR1 0x34 #define AFI_FPCI_BAR2 0x38 #define AFI_FPCI_BAR3 0x3c #define AFI_FPCI_BAR4 0x40 #define AFI_FPCI_BAR5 0x44 #define 
AFI_CACHE_BAR0_SZ 0x48 #define AFI_CACHE_BAR0_ST 0x4c #define AFI_CACHE_BAR1_SZ 0x50 #define AFI_CACHE_BAR1_ST 0x54 #define AFI_MSI_BAR_SZ 0x60 #define AFI_MSI_FPCI_BAR_ST 0x64 #define AFI_MSI_AXI_BAR_ST 0x68 #define AFI_CONFIGURATION 0xac #define AFI_CONFIGURATION_EN_FPCI (1 << 0) #define AFI_FPCI_ERROR_MASKS 0xb0 #define AFI_INTR_MASK 0xb4 #define AFI_INTR_MASK_INT_MASK (1 << 0) #define AFI_INTR_MASK_MSI_MASK (1 << 8) #define AFI_INTR_CODE 0xb8 #define AFI_INTR_CODE_MASK 0xf #define AFI_INTR_MASTER_ABORT 4 #define AFI_INTR_LEGACY 6 #define AFI_INTR_SIGNATURE 0xbc #define AFI_SM_INTR_ENABLE 0xc4 #define AFI_AFI_INTR_ENABLE 0xc8 #define AFI_INTR_EN_INI_SLVERR (1 << 0) #define AFI_INTR_EN_INI_DECERR (1 << 1) #define AFI_INTR_EN_TGT_SLVERR (1 << 2) #define AFI_INTR_EN_TGT_DECERR (1 << 3) #define AFI_INTR_EN_TGT_WRERR (1 << 4) #define AFI_INTR_EN_DFPCI_DECERR (1 << 5) #define AFI_INTR_EN_AXI_DECERR (1 << 6) #define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7) #define AFI_PCIE_CONFIG 0x0f8 #define AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE (1 << 1) #define AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE (1 << 2) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20) #define AFI_FUSE 0x104 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2) #define AFI_PEX0_CTRL 0x110 #define AFI_PEX1_CTRL 0x118 #define AFI_PEX_CTRL_RST (1 << 0) #define AFI_PEX_CTRL_REFCLK_EN (1 << 3) #define RP_VEND_XP 0x00000F00 #define RP_VEND_XP_DL_UP (1 << 30) #define RP_LINK_CONTROL_STATUS 0x00000090 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000 #define PADS_CTL_SEL 0x0000009C #define PADS_CTL 0x000000A0 #define PADS_CTL_IDDQ_1L (1 << 0) #define PADS_CTL_TX_DATA_EN_1L (1 << 6) #define PADS_CTL_RX_DATA_EN_1L (1 << 10) #define PADS_PLL_CTL 0x000000B8 #define PADS_PLL_CTL_RST_B4SM (1 << 1) #define PADS_PLL_CTL_LOCKDET (1 << 8) #define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16) #define 
PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16) #define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16) #define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16) #define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20) #define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20) #define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20) /* PMC access is required for PCIE xclk (un)clamping */ #define PMC_SCRATCH42 0x144 #define PMC_SCRATCH42_PCX_CLAMP (1 << 0) static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE); #define pmc_writel(value, reg) \ __raw_writel(value, reg_pmc_base + (reg)) #define pmc_readl(reg) \ __raw_readl(reg_pmc_base + (reg)) /* * Tegra2 defines 1GB in the AXI address map for PCIe. * * That address space is split into different regions, with sizes and * offsets as follows: * * 0x80000000 - 0x80003fff - PCI controller registers * 0x80004000 - 0x80103fff - PCI configuration space * 0x80104000 - 0x80203fff - PCI extended configuration space * 0x80203fff - 0x803fffff - unused * 0x80400000 - 0x8040ffff - downstream IO * 0x80410000 - 0x8fffffff - unused * 0x90000000 - 0x9fffffff - non-prefetchable memory * 0xa0000000 - 0xbfffffff - prefetchable memory */ #define TEGRA_PCIE_BASE 0x80000000 #define PCIE_REGS_SZ SZ_16K #define PCIE_CFG_OFF PCIE_REGS_SZ #define PCIE_CFG_SZ SZ_1M #define PCIE_EXT_CFG_OFF (PCIE_CFG_SZ + PCIE_CFG_OFF) #define PCIE_EXT_CFG_SZ SZ_1M #define PCIE_IOMAP_SZ (PCIE_REGS_SZ + PCIE_CFG_SZ + PCIE_EXT_CFG_SZ) #define MMIO_BASE (TEGRA_PCIE_BASE + SZ_4M) #define MMIO_SIZE SZ_64K #define MEM_BASE_0 (TEGRA_PCIE_BASE + SZ_256M) #define MEM_SIZE_0 SZ_128M #define MEM_BASE_1 (MEM_BASE_0 + MEM_SIZE_0) #define MEM_SIZE_1 SZ_128M #define PREFETCH_MEM_BASE_0 (MEM_BASE_1 + MEM_SIZE_1) #define PREFETCH_MEM_SIZE_0 SZ_128M #define PREFETCH_MEM_BASE_1 (PREFETCH_MEM_BASE_0 + PREFETCH_MEM_SIZE_0) #define PREFETCH_MEM_SIZE_1 SZ_128M #define PCIE_CONF_BUS(b) ((b) << 16) #define PCIE_CONF_DEV(d) ((d) << 11) #define PCIE_CONF_FUNC(f) ((f) << 8) #define PCIE_CONF_REG(r) \ (((r) & ~0x3) | (((r) < 256) ? 
PCIE_CFG_OFF : PCIE_EXT_CFG_OFF)) struct tegra_pcie_port { int index; u8 root_bus_nr; void __iomem *base; bool link_up; char io_space_name[16]; char mem_space_name[16]; char prefetch_space_name[20]; struct resource res[3]; }; struct tegra_pcie_info { struct tegra_pcie_port port[2]; int num_ports; void __iomem *regs; struct resource res_mmio; struct clk *pex_clk; struct clk *afi_clk; struct clk *pcie_xclk; struct clk *pll_e; }; static struct tegra_pcie_info tegra_pcie = { .res_mmio = { .name = "PCI IO", .start = MMIO_BASE, .end = MMIO_BASE + MMIO_SIZE - 1, .flags = IORESOURCE_MEM, }, }; void __iomem *tegra_pcie_io_base; EXPORT_SYMBOL(tegra_pcie_io_base); static inline void afi_writel(u32 value, unsigned long offset) { writel(value, offset + AFI_OFFSET + tegra_pcie.regs); } static inline u32 afi_readl(unsigned long offset) { return readl(offset + AFI_OFFSET + tegra_pcie.regs); } static inline void pads_writel(u32 value, unsigned long offset) { writel(value, offset + PADS_OFFSET + tegra_pcie.regs); } static inline u32 pads_readl(unsigned long offset) { return readl(offset + PADS_OFFSET + tegra_pcie.regs); } static struct tegra_pcie_port *bus_to_port(int bus) { int i; for (i = tegra_pcie.num_ports - 1; i >= 0; i--) { int rbus = tegra_pcie.port[i].root_bus_nr; if (rbus != -1 && rbus == bus) break; } return i >= 0 ? 
tegra_pcie.port + i : NULL; } static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct tegra_pcie_port *pp = bus_to_port(bus->number); void __iomem *addr; if (pp) { if (devfn != 0) { *val = 0xffffffff; return PCIBIOS_DEVICE_NOT_FOUND; } addr = pp->base + (where & ~0x3); } else { addr = tegra_pcie.regs + (PCIE_CONF_BUS(bus->number) + PCIE_CONF_DEV(PCI_SLOT(devfn)) + PCIE_CONF_FUNC(PCI_FUNC(devfn)) + PCIE_CONF_REG(where)); } *val = readl(addr); if (size == 1) *val = (*val >> (8 * (where & 3))) & 0xff; else if (size == 2) *val = (*val >> (8 * (where & 3))) & 0xffff; return PCIBIOS_SUCCESSFUL; } static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct tegra_pcie_port *pp = bus_to_port(bus->number); void __iomem *addr; u32 mask; u32 tmp; if (pp) { if (devfn != 0) return PCIBIOS_DEVICE_NOT_FOUND; addr = pp->base + (where & ~0x3); } else { addr = tegra_pcie.regs + (PCIE_CONF_BUS(bus->number) + PCIE_CONF_DEV(PCI_SLOT(devfn)) + PCIE_CONF_FUNC(PCI_FUNC(devfn)) + PCIE_CONF_REG(where)); } if (size == 4) { writel(val, addr); return PCIBIOS_SUCCESSFUL; } if (size == 2) mask = ~(0xffff << ((where & 0x3) * 8)); else if (size == 1) mask = ~(0xff << ((where & 0x3) * 8)); else return PCIBIOS_BAD_REGISTER_NUMBER; tmp = readl(addr) & mask; tmp |= val << ((where & 0x3) * 8); writel(tmp, addr); return PCIBIOS_SUCCESSFUL; } static struct pci_ops tegra_pcie_ops = { .read = tegra_pcie_read_conf, .write = tegra_pcie_write_conf, }; static void __devinit tegra_pcie_fixup_bridge(struct pci_dev *dev) { u16 reg; if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) { pci_read_config_word(dev, PCI_COMMAND, &reg); reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_SERR); pci_write_config_word(dev, PCI_COMMAND, reg); } } DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge); /* Tegra PCIE root complex wrongly reports device class */ static void 
__devinit tegra_pcie_fixup_class(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI << 8; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class); /* Tegra PCIE requires relaxed ordering */ static void __devinit tegra_pcie_relax_enable(struct pci_dev *dev) { u16 val16; int pos = pci_find_capability(dev, PCI_CAP_ID_EXP); if (pos <= 0) { dev_err(&dev->dev, "skipping relaxed ordering fixup\n"); return; } pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &val16); val16 |= PCI_EXP_DEVCTL_RELAX_EN; pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, val16); } DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable); static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) { struct tegra_pcie_port *pp; if (nr >= tegra_pcie.num_ports) return 0; pp = tegra_pcie.port + nr; pp->root_bus_nr = sys->busnr; /* * IORESOURCE_IO */ snprintf(pp->io_space_name, sizeof(pp->io_space_name), "PCIe %d I/O", pp->index); pp->io_space_name[sizeof(pp->io_space_name) - 1] = 0; pp->res[0].name = pp->io_space_name; if (pp->index == 0) { pp->res[0].start = PCIBIOS_MIN_IO; pp->res[0].end = pp->res[0].start + SZ_32K - 1; } else { pp->res[0].start = PCIBIOS_MIN_IO + SZ_32K; pp->res[0].end = IO_SPACE_LIMIT; } pp->res[0].flags = IORESOURCE_IO; if (request_resource(&ioport_resource, &pp->res[0])) panic("Request PCIe IO resource failed\n"); pci_add_resource_offset(&sys->resources, &pp->res[0], sys->io_offset); /* * IORESOURCE_MEM */ snprintf(pp->mem_space_name, sizeof(pp->mem_space_name), "PCIe %d MEM", pp->index); pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0; pp->res[1].name = pp->mem_space_name; if (pp->index == 0) { pp->res[1].start = MEM_BASE_0; pp->res[1].end = pp->res[1].start + MEM_SIZE_0 - 1; } else { pp->res[1].start = MEM_BASE_1; pp->res[1].end = pp->res[1].start + MEM_SIZE_1 - 1; } pp->res[1].flags = IORESOURCE_MEM; if (request_resource(&iomem_resource, 
&pp->res[1])) panic("Request PCIe Memory resource failed\n"); pci_add_resource_offset(&sys->resources, &pp->res[1], sys->mem_offset); /* * IORESOURCE_MEM | IORESOURCE_PREFETCH */ snprintf(pp->prefetch_space_name, sizeof(pp->prefetch_space_name), "PCIe %d PREFETCH MEM", pp->index); pp->prefetch_space_name[sizeof(pp->prefetch_space_name) - 1] = 0; pp->res[2].name = pp->prefetch_space_name; if (pp->index == 0) { pp->res[2].start = PREFETCH_MEM_BASE_0; pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_0 - 1; } else { pp->res[2].start = PREFETCH_MEM_BASE_1; pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_1 - 1; } pp->res[2].flags = IORESOURCE_MEM | IORESOURCE_PREFETCH; if (request_resource(&iomem_resource, &pp->res[2])) panic("Request PCIe Prefetch Memory resource failed\n"); pci_add_resource_offset(&sys->resources, &pp->res[2], sys->mem_offset); return 1; } static int tegra_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return INT_PCIE_INTR; } static struct pci_bus __init *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys) { struct tegra_pcie_port *pp; if (nr >= tegra_pcie.num_ports) return NULL; pp = tegra_pcie.port + nr; pp->root_bus_nr = sys->busnr; return pci_scan_root_bus(NULL, sys->busnr, &tegra_pcie_ops, sys, &sys->resources); } static struct hw_pci tegra_pcie_hw __initdata = { .nr_controllers = 2, .setup = tegra_pcie_setup, .scan = tegra_pcie_scan_bus, .swizzle = pci_std_swizzle, .map_irq = tegra_pcie_map_irq, }; static irqreturn_t tegra_pcie_isr(int irq, void *arg) { const char *err_msg[] = { "Unknown", "AXI slave error", "AXI decode error", "Target abort", "Master abort", "Invalid write", "Response decoding error", "AXI response decoding error", "Transcation timeout", }; u32 code, signature; code = afi_readl(AFI_INTR_CODE) & AFI_INTR_CODE_MASK; signature = afi_readl(AFI_INTR_SIGNATURE); afi_writel(0, AFI_INTR_CODE); if (code == AFI_INTR_LEGACY) return IRQ_NONE; if (code >= ARRAY_SIZE(err_msg)) code = 0; /* * do not pollute kernel log 
with master abort reports since they * happen a lot during enumeration */ if (code == AFI_INTR_MASTER_ABORT) pr_debug("PCIE: %s, signature: %08x\n", err_msg[code], signature); else pr_err("PCIE: %s, signature: %08x\n", err_msg[code], signature); return IRQ_HANDLED; } static void tegra_pcie_setup_translations(void) { u32 fpci_bar; u32 size; u32 axi_address; /* Bar 0: config Bar */ fpci_bar = ((u32)0xfdff << 16); size = PCIE_CFG_SZ; axi_address = TEGRA_PCIE_BASE + PCIE_CFG_OFF; afi_writel(axi_address, AFI_AXI_BAR0_START); afi_writel(size >> 12, AFI_AXI_BAR0_SZ); afi_writel(fpci_bar, AFI_FPCI_BAR0); /* Bar 1: extended config Bar */ fpci_bar = ((u32)0xfe1 << 20); size = PCIE_EXT_CFG_SZ; axi_address = TEGRA_PCIE_BASE + PCIE_EXT_CFG_OFF; afi_writel(axi_address, AFI_AXI_BAR1_START); afi_writel(size >> 12, AFI_AXI_BAR1_SZ); afi_writel(fpci_bar, AFI_FPCI_BAR1); /* Bar 2: downstream IO bar */ fpci_bar = ((__u32)0xfdfc << 16); size = MMIO_SIZE; axi_address = MMIO_BASE; afi_writel(axi_address, AFI_AXI_BAR2_START); afi_writel(size >> 12, AFI_AXI_BAR2_SZ); afi_writel(fpci_bar, AFI_FPCI_BAR2); /* Bar 3: prefetchable memory BAR */ fpci_bar = (((PREFETCH_MEM_BASE_0 >> 12) & 0x0fffffff) << 4) | 0x1; size = PREFETCH_MEM_SIZE_0 + PREFETCH_MEM_SIZE_1; axi_address = PREFETCH_MEM_BASE_0; afi_writel(axi_address, AFI_AXI_BAR3_START); afi_writel(size >> 12, AFI_AXI_BAR3_SZ); afi_writel(fpci_bar, AFI_FPCI_BAR3); /* Bar 4: non prefetchable memory BAR */ fpci_bar = (((MEM_BASE_0 >> 12) & 0x0FFFFFFF) << 4) | 0x1; size = MEM_SIZE_0 + MEM_SIZE_1; axi_address = MEM_BASE_0; afi_writel(axi_address, AFI_AXI_BAR4_START); afi_writel(size >> 12, AFI_AXI_BAR4_SZ); afi_writel(fpci_bar, AFI_FPCI_BAR4); /* Bar 5: NULL out the remaining BAR as it is not used */ fpci_bar = 0; size = 0; axi_address = 0; afi_writel(axi_address, AFI_AXI_BAR5_START); afi_writel(size >> 12, AFI_AXI_BAR5_SZ); afi_writel(fpci_bar, AFI_FPCI_BAR5); /* map all upstream transactions as uncached */ afi_writel(PHYS_OFFSET, 
AFI_CACHE_BAR0_ST); afi_writel(0, AFI_CACHE_BAR0_SZ); afi_writel(0, AFI_CACHE_BAR1_ST); afi_writel(0, AFI_CACHE_BAR1_SZ); /* No MSI */ afi_writel(0, AFI_MSI_FPCI_BAR_ST); afi_writel(0, AFI_MSI_BAR_SZ); afi_writel(0, AFI_MSI_AXI_BAR_ST); afi_writel(0, AFI_MSI_BAR_SZ); } static int tegra_pcie_enable_controller(void) { u32 val, reg; int i, timeout; /* Enable slot clock and pulse the reset signals */ for (i = 0, reg = AFI_PEX0_CTRL; i < 2; i++, reg += 0x8) { val = afi_readl(reg) | AFI_PEX_CTRL_REFCLK_EN; afi_writel(val, reg); val &= ~AFI_PEX_CTRL_RST; afi_writel(val, reg); val = afi_readl(reg) | AFI_PEX_CTRL_RST; afi_writel(val, reg); } /* Enable dual controller and both ports */ val = afi_readl(AFI_PCIE_CONFIG); val &= ~(AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE | AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE | AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK); val |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL; afi_writel(val, AFI_PCIE_CONFIG); val = afi_readl(AFI_FUSE) & ~AFI_FUSE_PCIE_T0_GEN2_DIS; afi_writel(val, AFI_FUSE); /* Initialze internal PHY, enable up to 16 PCIE lanes */ pads_writel(0x0, PADS_CTL_SEL); /* override IDDQ to 1 on all 4 lanes */ val = pads_readl(PADS_CTL) | PADS_CTL_IDDQ_1L; pads_writel(val, PADS_CTL); /* * set up PHY PLL inputs select PLLE output as refclock, * set TX ref sel to div10 (not div5) */ val = pads_readl(PADS_PLL_CTL); val &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK); val |= (PADS_PLL_CTL_REFCLK_INTERNAL_CML | PADS_PLL_CTL_TXCLKREF_DIV10); pads_writel(val, PADS_PLL_CTL); /* take PLL out of reset */ val = pads_readl(PADS_PLL_CTL) | PADS_PLL_CTL_RST_B4SM; pads_writel(val, PADS_PLL_CTL); /* * Hack, set the clock voltage to the DEFAULT provided by hw folks. 
* This doesn't exist in the documentation */ pads_writel(0xfa5cfa5c, 0xc8); /* Wait for the PLL to lock */ timeout = 300; do { val = pads_readl(PADS_PLL_CTL); usleep_range(1000, 1000); if (--timeout == 0) { pr_err("Tegra PCIe error: timeout waiting for PLL\n"); return -EBUSY; } } while (!(val & PADS_PLL_CTL_LOCKDET)); /* turn off IDDQ override */ val = pads_readl(PADS_CTL) & ~PADS_CTL_IDDQ_1L; pads_writel(val, PADS_CTL); /* enable TX/RX data */ val = pads_readl(PADS_CTL); val |= (PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L); pads_writel(val, PADS_CTL); /* Take the PCIe interface module out of reset */ tegra_periph_reset_deassert(tegra_pcie.pcie_xclk); /* Finally enable PCIe */ val = afi_readl(AFI_CONFIGURATION) | AFI_CONFIGURATION_EN_FPCI; afi_writel(val, AFI_CONFIGURATION); val = (AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR | AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR | AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR); afi_writel(val, AFI_AFI_INTR_ENABLE); afi_writel(0xffffffff, AFI_SM_INTR_ENABLE); /* FIXME: No MSI for now, only INT */ afi_writel(AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK); /* Disable all execptions */ afi_writel(0, AFI_FPCI_ERROR_MASKS); return 0; } static void tegra_pcie_xclk_clamp(bool clamp) { u32 reg; reg = pmc_readl(PMC_SCRATCH42) & ~PMC_SCRATCH42_PCX_CLAMP; if (clamp) reg |= PMC_SCRATCH42_PCX_CLAMP; pmc_writel(reg, PMC_SCRATCH42); } static void tegra_pcie_power_off(void) { tegra_periph_reset_assert(tegra_pcie.pcie_xclk); tegra_periph_reset_assert(tegra_pcie.afi_clk); tegra_periph_reset_assert(tegra_pcie.pex_clk); tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); tegra_pcie_xclk_clamp(true); } static int tegra_pcie_power_regate(void) { int err; tegra_pcie_power_off(); tegra_pcie_xclk_clamp(true); tegra_periph_reset_assert(tegra_pcie.pcie_xclk); tegra_periph_reset_assert(tegra_pcie.afi_clk); err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE, tegra_pcie.pex_clk); if (err) { pr_err("PCIE: powerup sequence failed: %d\n", 
err); return err; } tegra_periph_reset_deassert(tegra_pcie.afi_clk); tegra_pcie_xclk_clamp(false); clk_enable(tegra_pcie.afi_clk); clk_enable(tegra_pcie.pex_clk); return clk_enable(tegra_pcie.pll_e); } static int tegra_pcie_clocks_get(void) { int err; tegra_pcie.pex_clk = clk_get(NULL, "pex"); if (IS_ERR(tegra_pcie.pex_clk)) return PTR_ERR(tegra_pcie.pex_clk); tegra_pcie.afi_clk = clk_get(NULL, "afi"); if (IS_ERR(tegra_pcie.afi_clk)) { err = PTR_ERR(tegra_pcie.afi_clk); goto err_afi_clk; } tegra_pcie.pcie_xclk = clk_get(NULL, "pcie_xclk"); if (IS_ERR(tegra_pcie.pcie_xclk)) { err = PTR_ERR(tegra_pcie.pcie_xclk); goto err_pcie_xclk; } tegra_pcie.pll_e = clk_get_sys(NULL, "pll_e"); if (IS_ERR(tegra_pcie.pll_e)) { err = PTR_ERR(tegra_pcie.pll_e); goto err_pll_e; } return 0; err_pll_e: clk_put(tegra_pcie.pcie_xclk); err_pcie_xclk: clk_put(tegra_pcie.afi_clk); err_afi_clk: clk_put(tegra_pcie.pex_clk); return err; } static void tegra_pcie_clocks_put(void) { clk_put(tegra_pcie.pll_e); clk_put(tegra_pcie.pcie_xclk); clk_put(tegra_pcie.afi_clk); clk_put(tegra_pcie.pex_clk); } static int __init tegra_pcie_get_resources(void) { struct resource *res_mmio = &tegra_pcie.res_mmio; int err; err = tegra_pcie_clocks_get(); if (err) { pr_err("PCIE: failed to get clocks: %d\n", err); return err; } err = tegra_pcie_power_regate(); if (err) { pr_err("PCIE: failed to power up: %d\n", err); goto err_pwr_on; } tegra_pcie.regs = ioremap_nocache(TEGRA_PCIE_BASE, PCIE_IOMAP_SZ); if (tegra_pcie.regs == NULL) { pr_err("PCIE: Failed to map PCI/AFI registers\n"); err = -ENOMEM; goto err_map_reg; } err = request_resource(&iomem_resource, res_mmio); if (err) { pr_err("PCIE: Failed to request resources: %d\n", err); goto err_req_io; } tegra_pcie_io_base = ioremap_nocache(res_mmio->start, resource_size(res_mmio)); if (tegra_pcie_io_base == NULL) { pr_err("PCIE: Failed to map IO\n"); err = -ENOMEM; goto err_map_io; } err = request_irq(INT_PCIE_INTR, tegra_pcie_isr, IRQF_SHARED, "PCIE", &tegra_pcie); if 
(err) { pr_err("PCIE: Failed to register IRQ: %d\n", err); goto err_irq; } set_irq_flags(INT_PCIE_INTR, IRQF_VALID); return 0; err_irq: iounmap(tegra_pcie_io_base); err_map_io: release_resource(&tegra_pcie.res_mmio); err_req_io: iounmap(tegra_pcie.regs); err_map_reg: tegra_pcie_power_off(); err_pwr_on: tegra_pcie_clocks_put(); return err; } /* * FIXME: If there are no PCIe cards attached, then calling this function * can result in the increase of the bootup time as there are big timeout * loops. */ #define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */ static bool tegra_pcie_check_link(struct tegra_pcie_port *pp, int idx, u32 reset_reg) { u32 reg; int retries = 3; int timeout; do { timeout = TEGRA_PCIE_LINKUP_TIMEOUT; while (timeout) { reg = readl(pp->base + RP_VEND_XP); if (reg & RP_VEND_XP_DL_UP) break; mdelay(1); timeout--; } if (!timeout) { pr_err("PCIE: port %d: link down, retrying\n", idx); goto retry; } timeout = TEGRA_PCIE_LINKUP_TIMEOUT; while (timeout) { reg = readl(pp->base + RP_LINK_CONTROL_STATUS); if (reg & 0x20000000) return true; mdelay(1); timeout--; } retry: /* Pulse the PEX reset */ reg = afi_readl(reset_reg) | AFI_PEX_CTRL_RST; afi_writel(reg, reset_reg); mdelay(1); reg = afi_readl(reset_reg) & ~AFI_PEX_CTRL_RST; afi_writel(reg, reset_reg); retries--; } while (retries); return false; } static void __init tegra_pcie_add_port(int index, u32 offset, u32 reset_reg) { struct tegra_pcie_port *pp; pp = tegra_pcie.port + tegra_pcie.num_ports; pp->index = -1; pp->base = tegra_pcie.regs + offset; pp->link_up = tegra_pcie_check_link(pp, index, reset_reg); if (!pp->link_up) { pp->base = NULL; printk(KERN_INFO "PCIE: port %d: link down, ignoring\n", index); return; } tegra_pcie.num_ports++; pp->index = index; pp->root_bus_nr = -1; memset(pp->res, 0, sizeof(pp->res)); } int __init tegra_pcie_init(bool init_port0, bool init_port1) { int err; if (!(init_port0 || init_port1)) return -ENODEV; pcibios_min_mem = 0; err = tegra_pcie_get_resources(); if (err) 
return err; err = tegra_pcie_enable_controller(); if (err) return err; /* setup the AFI address translations */ tegra_pcie_setup_translations(); if (init_port0) tegra_pcie_add_port(0, RP0_OFFSET, AFI_PEX0_CTRL); if (init_port1) tegra_pcie_add_port(1, RP1_OFFSET, AFI_PEX1_CTRL); pci_common_init(&tegra_pcie_hw); return 0; }
gpl-2.0
NoelMacwan/SXDNickiLolly
drivers/usb/gadget/multi.c
4930
7761
/* * multi.c -- Multifunction Composite driver * * Copyright (C) 2008 David Brownell * Copyright (C) 2008 Nokia Corporation * Copyright (C) 2009 Samsung Electronics * Author: Michal Nazarewicz (mina86@mina86.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/utsname.h> #include <linux/module.h> #if defined USB_ETH_RNDIS # undef USB_ETH_RNDIS #endif #ifdef CONFIG_USB_G_MULTI_RNDIS # define USB_ETH_RNDIS y #endif #define DRIVER_DESC "Multifunction Composite Gadget" MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Michal Nazarewicz"); MODULE_LICENSE("GPL"); /***************************** All the files... *****************************/ /* * kbuild is not very cooperative with respect to linking separately * compiled library objects into one module. So for now we won't use * separate compilation ... ensuring init/exit sections work to shrink * the runtime footprint, and giving us at least some parts of what * a "gcc --combine ... part1.c part2.c part3.c ... " build would. 
*/ #include "composite.c" #include "usbstring.c" #include "config.c" #include "epautoconf.c" #include "f_mass_storage.c" #include "u_serial.c" #include "f_acm.c" #include "f_ecm.c" #include "f_subset.c" #ifdef USB_ETH_RNDIS # include "f_rndis.c" # include "rndis.c" #endif #include "u_ether.c" /***************************** Device Descriptor ****************************/ #define MULTI_VENDOR_NUM 0x1d6b /* Linux Foundation */ #define MULTI_PRODUCT_NUM 0x0104 /* Multifunction Composite Gadget */ enum { __MULTI_NO_CONFIG, #ifdef CONFIG_USB_G_MULTI_RNDIS MULTI_RNDIS_CONFIG_NUM, #endif #ifdef CONFIG_USB_G_MULTI_CDC MULTI_CDC_CONFIG_NUM, #endif }; static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = cpu_to_le16(0x0200), .bDeviceClass = USB_CLASS_MISC /* 0xEF */, .bDeviceSubClass = 2, .bDeviceProtocol = 1, /* Vendor and product id can be overridden by module parameters. */ .idVendor = cpu_to_le16(MULTI_VENDOR_NUM), .idProduct = cpu_to_le16(MULTI_PRODUCT_NUM), }; static const struct usb_descriptor_header *otg_desc[] = { (struct usb_descriptor_header *) &(struct usb_otg_descriptor){ .bLength = sizeof(struct usb_otg_descriptor), .bDescriptorType = USB_DT_OTG, /* * REVISIT SRP-only hardware is possible, although * it would not be called "OTG" ... 
*/ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP, }, NULL, }; enum { #ifdef CONFIG_USB_G_MULTI_RNDIS MULTI_STRING_RNDIS_CONFIG_IDX, #endif #ifdef CONFIG_USB_G_MULTI_CDC MULTI_STRING_CDC_CONFIG_IDX, #endif }; static struct usb_string strings_dev[] = { #ifdef CONFIG_USB_G_MULTI_RNDIS [MULTI_STRING_RNDIS_CONFIG_IDX].s = "Multifunction with RNDIS", #endif #ifdef CONFIG_USB_G_MULTI_CDC [MULTI_STRING_CDC_CONFIG_IDX].s = "Multifunction with CDC ECM", #endif { } /* end of list */ }; static struct usb_gadget_strings *dev_strings[] = { &(struct usb_gadget_strings){ .language = 0x0409, /* en-us */ .strings = strings_dev, }, NULL, }; /****************************** Configurations ******************************/ static struct fsg_module_parameters fsg_mod_data = { .stall = 1 }; FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data); static struct fsg_common fsg_common; static u8 hostaddr[ETH_ALEN]; /********** RNDIS **********/ #ifdef USB_ETH_RNDIS static __init int rndis_do_config(struct usb_configuration *c) { int ret; if (gadget_is_otg(c->cdev->gadget)) { c->descriptors = otg_desc; c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; } ret = rndis_bind_config(c, hostaddr); if (ret < 0) return ret; ret = acm_bind_config(c, 0); if (ret < 0) return ret; ret = fsg_bind_config(c->cdev, c, &fsg_common); if (ret < 0) return ret; return 0; } static int rndis_config_register(struct usb_composite_dev *cdev) { static struct usb_configuration config = { .bConfigurationValue = MULTI_RNDIS_CONFIG_NUM, .bmAttributes = USB_CONFIG_ATT_SELFPOWER, }; config.label = strings_dev[MULTI_STRING_RNDIS_CONFIG_IDX].s; config.iConfiguration = strings_dev[MULTI_STRING_RNDIS_CONFIG_IDX].id; return usb_add_config(cdev, &config, rndis_do_config); } #else static int rndis_config_register(struct usb_composite_dev *cdev) { return 0; } #endif /********** CDC ECM **********/ #ifdef CONFIG_USB_G_MULTI_CDC static __init int cdc_do_config(struct usb_configuration *c) { int ret; if (gadget_is_otg(c->cdev->gadget)) { c->descriptors 
= otg_desc; c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; } ret = ecm_bind_config(c, hostaddr); if (ret < 0) return ret; ret = acm_bind_config(c, 0); if (ret < 0) return ret; ret = fsg_bind_config(c->cdev, c, &fsg_common); if (ret < 0) return ret; return 0; } static int cdc_config_register(struct usb_composite_dev *cdev) { static struct usb_configuration config = { .bConfigurationValue = MULTI_CDC_CONFIG_NUM, .bmAttributes = USB_CONFIG_ATT_SELFPOWER, }; config.label = strings_dev[MULTI_STRING_CDC_CONFIG_IDX].s; config.iConfiguration = strings_dev[MULTI_STRING_CDC_CONFIG_IDX].id; return usb_add_config(cdev, &config, cdc_do_config); } #else static int cdc_config_register(struct usb_composite_dev *cdev) { return 0; } #endif /****************************** Gadget Bind ******************************/ static int __ref multi_bind(struct usb_composite_dev *cdev) { struct usb_gadget *gadget = cdev->gadget; int status, gcnum; if (!can_support_ecm(cdev->gadget)) { dev_err(&gadget->dev, "controller '%s' not usable\n", gadget->name); return -EINVAL; } /* set up network link layer */ status = gether_setup(cdev->gadget, hostaddr); if (status < 0) return status; /* set up serial link layer */ status = gserial_setup(cdev->gadget, 1); if (status < 0) goto fail0; /* set up mass storage function */ { void *retp; retp = fsg_common_from_params(&fsg_common, cdev, &fsg_mod_data); if (IS_ERR(retp)) { status = PTR_ERR(retp); goto fail1; } } /* set bcdDevice */ gcnum = usb_gadget_controller_number(gadget); if (gcnum >= 0) { device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum); } else { WARNING(cdev, "controller '%s' not recognized\n", gadget->name); device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099); } /* allocate string IDs */ status = usb_string_ids_tab(cdev, strings_dev); if (unlikely(status < 0)) goto fail2; /* register configurations */ status = rndis_config_register(cdev); if (unlikely(status < 0)) goto fail2; status = cdc_config_register(cdev); if (unlikely(status < 0)) goto fail2; /* 
we're done */ dev_info(&gadget->dev, DRIVER_DESC "\n"); fsg_common_put(&fsg_common); return 0; /* error recovery */ fail2: fsg_common_put(&fsg_common); fail1: gserial_cleanup(); fail0: gether_cleanup(); return status; } static int __exit multi_unbind(struct usb_composite_dev *cdev) { gserial_cleanup(); gether_cleanup(); return 0; } /****************************** Some noise ******************************/ static struct usb_composite_driver multi_driver = { .name = "g_multi", .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_HIGH, .unbind = __exit_p(multi_unbind), .iProduct = DRIVER_DESC, .needs_serial = 1, }; static int __init multi_init(void) { return usb_composite_probe(&multi_driver, multi_bind); } module_init(multi_init); static void __exit multi_exit(void) { usb_composite_unregister(&multi_driver); } module_exit(multi_exit);
gpl-2.0
RadonX-ROM/paradox_kernel_oneplus_msm8974
drivers/regulator/dummy.c
4930
2132
/* * dummy.c * * Copyright 2010 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This is useful for systems with mixed controllable and * non-controllable regulators, as well as for allowing testing on * systems with no controllable regulators. */ #include <linux/err.h> #include <linux/export.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include "dummy.h" struct regulator_dev *dummy_regulator_rdev; static struct regulator_init_data dummy_initdata; static struct regulator_ops dummy_ops; static struct regulator_desc dummy_desc = { .name = "dummy", .id = -1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .ops = &dummy_ops, }; static int __devinit dummy_regulator_probe(struct platform_device *pdev) { int ret; dummy_regulator_rdev = regulator_register(&dummy_desc, NULL, &dummy_initdata, NULL, NULL); if (IS_ERR(dummy_regulator_rdev)) { ret = PTR_ERR(dummy_regulator_rdev); pr_err("Failed to register regulator: %d\n", ret); return ret; } return 0; } static struct platform_driver dummy_regulator_driver = { .probe = dummy_regulator_probe, .driver = { .name = "reg-dummy", .owner = THIS_MODULE, }, }; static struct platform_device *dummy_pdev; void __init regulator_dummy_init(void) { int ret; dummy_pdev = platform_device_alloc("reg-dummy", -1); if (!dummy_pdev) { pr_err("Failed to allocate dummy regulator device\n"); return; } ret = platform_device_add(dummy_pdev); if (ret != 0) { pr_err("Failed to register dummy regulator device: %d\n", ret); platform_device_put(dummy_pdev); return; } ret = platform_driver_register(&dummy_regulator_driver); if (ret != 0) { pr_err("Failed to register dummy regulator driver: %d\n", ret); 
platform_device_unregister(dummy_pdev); } }
gpl-2.0
Chong-Li/VATC-3.3.7
arch/xtensa/platforms/iss/network.c
4930
17784
/* * * arch/xtensa/platforms/iss/network.c * * Platform specific initialization. * * Authors: Chris Zankel <chris@zankel.net> * Based on work form the UML team. * * Copyright 2005 Tensilica Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/list.h> #include <linux/irq.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/timer.h> #include <linux/if_ether.h> #include <linux/inetdevice.h> #include <linux/init.h> #include <linux/if_tun.h> #include <linux/etherdevice.h> #include <linux/interrupt.h> #include <linux/ioctl.h> #include <linux/bootmem.h> #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/platform_device.h> #include <platform/simcall.h> #define DRIVER_NAME "iss-netdev" #define ETH_MAX_PACKET 1500 #define ETH_HEADER_OTHER 14 #define ISS_NET_TIMER_VALUE (2 * HZ) static DEFINE_SPINLOCK(opened_lock); static LIST_HEAD(opened); static DEFINE_SPINLOCK(devices_lock); static LIST_HEAD(devices); /* ------------------------------------------------------------------------- */ /* We currently only support the TUNTAP transport protocol. */ #define TRANSPORT_TUNTAP_NAME "tuntap" #define TRANSPORT_TUNTAP_MTU ETH_MAX_PACKET struct tuntap_info { char dev_name[IFNAMSIZ]; int fixed_config; unsigned char gw[ETH_ALEN]; int fd; }; /* ------------------------------------------------------------------------- */ /* This structure contains out private information for the driver. 
*/ struct iss_net_private { struct list_head device_list; struct list_head opened_list; spinlock_t lock; struct net_device *dev; struct platform_device pdev; struct timer_list tl; struct net_device_stats stats; struct timer_list timer; unsigned int timer_val; int index; int mtu; unsigned char mac[ETH_ALEN]; int have_mac; struct { union { struct tuntap_info tuntap; } info; int (*open)(struct iss_net_private *lp); void (*close)(struct iss_net_private *lp); int (*read)(struct iss_net_private *lp, struct sk_buff **skb); int (*write)(struct iss_net_private *lp, struct sk_buff **skb); unsigned short (*protocol)(struct sk_buff *skb); int (*poll)(struct iss_net_private *lp); } tp; }; /* ======================= ISS SIMCALL INTERFACE =========================== */ /* Note: __simc must _not_ be declared inline! */ static int errno; static int __simc (int a, int b, int c, int d, int e, int f) __attribute__((__noinline__)); static int __simc (int a, int b, int c, int d, int e, int f) { int ret; __asm__ __volatile__ ("simcall\n" "mov %0, a2\n" "mov %1, a3\n" : "=a" (ret), "=a" (errno) : : "a2", "a3"); return ret; } static int inline simc_open(char *file, int flags, int mode) { return __simc(SYS_open, (int) file, flags, mode, 0, 0); } static int inline simc_close(int fd) { return __simc(SYS_close, fd, 0, 0, 0, 0); } static int inline simc_ioctl(int fd, int request, void *arg) { return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0); } static int inline simc_read(int fd, void *buf, size_t count) { return __simc(SYS_read, fd, (int) buf, count, 0, 0); } static int inline simc_write(int fd, void *buf, size_t count) { return __simc(SYS_write, fd, (int) buf, count, 0, 0); } static int inline simc_poll(int fd) { struct timeval tv = { .tv_sec = 0, .tv_usec = 0 }; return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv,0,0); } /* ================================ HELPERS ================================ */ static char *split_if_spec(char *str, ...) 
{ char **arg, *end; va_list ap; va_start(ap, str); while ((arg = va_arg(ap, char**)) != NULL) { if (*str == '\0') return NULL; end = strchr(str, ','); if (end != str) *arg = str; if (end == NULL) return NULL; *end ++ = '\0'; str = end; } va_end(ap); return str; } #if 0 /* Adjust SKB. */ struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra) { if ((skb != NULL) && (skb_tailroom(skb) < extra)) { struct sk_buff *skb2; skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC); dev_kfree_skb(skb); skb = skb2; } if (skb != NULL) skb_put(skb, extra); return skb; } #endif /* Return the IP address as a string for a given device. */ static void dev_ip_addr(void *d, char *buf, char *bin_buf) { struct net_device *dev = d; struct in_device *ip = dev->ip_ptr; struct in_ifaddr *in; __be32 addr; if ((ip == NULL) || ((in = ip->ifa_list) == NULL)) { printk(KERN_WARNING "Device not assigned an IP address!\n"); return; } addr = in->ifa_address; sprintf(buf, "%d.%d.%d.%d", addr & 0xff, (addr >> 8) & 0xff, (addr >> 16) & 0xff, addr >> 24); if (bin_buf) { bin_buf[0] = addr & 0xff; bin_buf[1] = (addr >> 8) & 0xff; bin_buf[2] = (addr >> 16) & 0xff; bin_buf[3] = addr >> 24; } } /* Set Ethernet address of the specified device. */ static void inline set_ether_mac(void *d, unsigned char *addr) { struct net_device *dev = d; memcpy(dev->dev_addr, addr, ETH_ALEN); } /* ======================= TUNTAP TRANSPORT INTERFACE ====================== */ static int tuntap_open(struct iss_net_private *lp) { struct ifreq ifr; char *dev_name = lp->tp.info.tuntap.dev_name; int err = -EINVAL; int fd; /* We currently only support a fixed configuration. 
*/ if (!lp->tp.info.tuntap.fixed_config) return -EINVAL; if ((fd = simc_open("/dev/net/tun", 02, 0)) < 0) { /* O_RDWR */ printk("Failed to open /dev/net/tun, returned %d " "(errno = %d)\n", fd, errno); return fd; } memset(&ifr, 0, sizeof ifr); ifr.ifr_flags = IFF_TAP | IFF_NO_PI; strlcpy(ifr.ifr_name, dev_name, sizeof ifr.ifr_name); if ((err = simc_ioctl(fd, TUNSETIFF, (void*) &ifr)) < 0) { printk("Failed to set interface, returned %d " "(errno = %d)\n", err, errno); simc_close(fd); return err; } lp->tp.info.tuntap.fd = fd; return err; } static void tuntap_close(struct iss_net_private *lp) { #if 0 if (lp->tp.info.tuntap.fixed_config) iter_addresses(lp->tp.info.tuntap.dev, close_addr, lp->host.dev_name); #endif simc_close(lp->tp.info.tuntap.fd); lp->tp.info.tuntap.fd = -1; } static int tuntap_read (struct iss_net_private *lp, struct sk_buff **skb) { #if 0 *skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER); if (*skb == NULL) return -ENOMEM; #endif return simc_read(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->dev->mtu + ETH_HEADER_OTHER); } static int tuntap_write (struct iss_net_private *lp, struct sk_buff **skb) { return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len); } unsigned short tuntap_protocol(struct sk_buff *skb) { return eth_type_trans(skb, skb->dev); } static int tuntap_poll(struct iss_net_private *lp) { return simc_poll(lp->tp.info.tuntap.fd); } /* * Currently only a device name is supported. 
* ethX=tuntap[,[mac address][,[device name]]] */ static int tuntap_probe(struct iss_net_private *lp, int index, char *init) { const int len = strlen(TRANSPORT_TUNTAP_NAME); char *dev_name = NULL, *mac_str = NULL, *rem = NULL; /* Transport should be 'tuntap': ethX=tuntap,mac,dev_name */ if (strncmp(init, TRANSPORT_TUNTAP_NAME, len)) return 0; if (*(init += strlen(TRANSPORT_TUNTAP_NAME)) == ',') { if ((rem=split_if_spec(init+1, &mac_str, &dev_name)) != NULL) { printk("Extra garbage on specification : '%s'\n", rem); return 0; } } else if (*init != '\0') { printk("Invalid argument: %s. Skipping device!\n", init); return 0; } if (dev_name) { strncpy(lp->tp.info.tuntap.dev_name, dev_name, sizeof lp->tp.info.tuntap.dev_name); lp->tp.info.tuntap.fixed_config = 1; } else strcpy(lp->tp.info.tuntap.dev_name, TRANSPORT_TUNTAP_NAME); #if 0 if (setup_etheraddr(mac_str, lp->mac)) lp->have_mac = 1; #endif lp->mtu = TRANSPORT_TUNTAP_MTU; //lp->info.tuntap.gate_addr = gate_addr; lp->tp.info.tuntap.fd = -1; lp->tp.open = tuntap_open; lp->tp.close = tuntap_close; lp->tp.read = tuntap_read; lp->tp.write = tuntap_write; lp->tp.protocol = tuntap_protocol; lp->tp.poll = tuntap_poll; printk("TUN/TAP backend - "); #if 0 if (lp->host.gate_addr != NULL) printk("IP = %s", lp->host.gate_addr); #endif printk("\n"); return 1; } /* ================================ ISS NET ================================ */ static int iss_net_rx(struct net_device *dev) { struct iss_net_private *lp = netdev_priv(dev); int pkt_len; struct sk_buff *skb; /* Check if there is any new data. */ if (lp->tp.poll(lp) == 0) return 0; /* Try to allocate memory, if it fails, try again next round. 
*/ if ((skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER)) == NULL) { lp->stats.rx_dropped++; return 0; } skb_reserve(skb, 2); /* Setup skb */ skb->dev = dev; skb_reset_mac_header(skb); pkt_len = lp->tp.read(lp, &skb); skb_put(skb, pkt_len); if (pkt_len > 0) { skb_trim(skb, pkt_len); skb->protocol = lp->tp.protocol(skb); lp->stats.rx_bytes += skb->len; lp->stats.rx_packets++; // netif_rx(skb); netif_rx_ni(skb); return pkt_len; } kfree_skb(skb); return pkt_len; } static int iss_net_poll(void) { struct list_head *ele; int err, ret = 0; spin_lock(&opened_lock); list_for_each(ele, &opened) { struct iss_net_private *lp; lp = list_entry(ele, struct iss_net_private, opened_list); if (!netif_running(lp->dev)) break; spin_lock(&lp->lock); while ((err = iss_net_rx(lp->dev)) > 0) ret++; spin_unlock(&lp->lock); if (err < 0) { printk(KERN_ERR "Device '%s' read returned %d, " "shutting it down\n", lp->dev->name, err); dev_close(lp->dev); } else { // FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ); } } spin_unlock(&opened_lock); return ret; } static void iss_net_timer(unsigned long priv) { struct iss_net_private* lp = (struct iss_net_private*) priv; spin_lock(&lp->lock); iss_net_poll(); mod_timer(&lp->timer, jiffies + lp->timer_val); spin_unlock(&lp->lock); } static int iss_net_open(struct net_device *dev) { struct iss_net_private *lp = netdev_priv(dev); char addr[sizeof "255.255.255.255\0"]; int err; spin_lock(&lp->lock); if ((err = lp->tp.open(lp)) < 0) goto out; if (!lp->have_mac) { dev_ip_addr(dev, addr, &lp->mac[2]); set_ether_mac(dev, lp->mac); } netif_start_queue(dev); /* clear buffer - it can happen that the host side of the interface * is full when we get here. In this case, new data is never queued, * SIGIOs never arrive, and the net never works. 
*/ while ((err = iss_net_rx(dev)) > 0) ; spin_lock(&opened_lock); list_add(&lp->opened_list, &opened); spin_unlock(&opened_lock); init_timer(&lp->timer); lp->timer_val = ISS_NET_TIMER_VALUE; lp->timer.data = (unsigned long) lp; lp->timer.function = iss_net_timer; mod_timer(&lp->timer, jiffies + lp->timer_val); out: spin_unlock(&lp->lock); return err; } static int iss_net_close(struct net_device *dev) { struct iss_net_private *lp = netdev_priv(dev); printk("iss_net_close!\n"); netif_stop_queue(dev); spin_lock(&lp->lock); spin_lock(&opened_lock); list_del(&opened); spin_unlock(&opened_lock); del_timer_sync(&lp->timer); lp->tp.close(lp); spin_unlock(&lp->lock); return 0; } static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct iss_net_private *lp = netdev_priv(dev); unsigned long flags; int len; netif_stop_queue(dev); spin_lock_irqsave(&lp->lock, flags); len = lp->tp.write(lp, &skb); if (len == skb->len) { lp->stats.tx_packets++; lp->stats.tx_bytes += skb->len; dev->trans_start = jiffies; netif_start_queue(dev); /* this is normally done in the interrupt when tx finishes */ netif_wake_queue(dev); } else if (len == 0) { netif_start_queue(dev); lp->stats.tx_dropped++; } else { netif_start_queue(dev); printk(KERN_ERR "iss_net_start_xmit: failed(%d)\n", len); } spin_unlock_irqrestore(&lp->lock, flags); dev_kfree_skb(skb); return NETDEV_TX_OK; } static struct net_device_stats *iss_net_get_stats(struct net_device *dev) { struct iss_net_private *lp = netdev_priv(dev); return &lp->stats; } static void iss_net_set_multicast_list(struct net_device *dev) { #if 0 if (dev->flags & IFF_PROMISC) return; else if (!netdev_mc_empty(dev)) dev->flags |= IFF_ALLMULTI; else dev->flags &= ~IFF_ALLMULTI; #endif } static void iss_net_tx_timeout(struct net_device *dev) { #if 0 dev->trans_start = jiffies; netif_wake_queue(dev); #endif } static int iss_net_set_mac(struct net_device *dev, void *addr) { #if 0 struct iss_net_private *lp = netdev_priv(dev); struct 
sockaddr *hwaddr = addr; spin_lock(&lp->lock); memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN); spin_unlock(&lp->lock); #endif return 0; } static int iss_net_change_mtu(struct net_device *dev, int new_mtu) { #if 0 struct iss_net_private *lp = netdev_priv(dev); int err = 0; spin_lock(&lp->lock); // FIXME not needed new_mtu = transport_set_mtu(new_mtu, &lp->user); if (new_mtu < 0) err = new_mtu; else dev->mtu = new_mtu; spin_unlock(&lp->lock); return err; #endif return -EINVAL; } void iss_net_user_timer_expire(unsigned long _conn) { } static struct platform_driver iss_net_driver = { .driver = { .name = DRIVER_NAME, }, }; static int driver_registered; static const struct net_device_ops iss_netdev_ops = { .ndo_open = iss_net_open, .ndo_stop = iss_net_close, .ndo_get_stats = iss_net_get_stats, .ndo_start_xmit = iss_net_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = iss_net_change_mtu, .ndo_set_mac_address = iss_net_set_mac, //.ndo_do_ioctl = iss_net_ioctl, .ndo_tx_timeout = iss_net_tx_timeout, .ndo_set_rx_mode = iss_net_set_multicast_list, }; static int iss_net_configure(int index, char *init) { struct net_device *dev; struct iss_net_private *lp; int err; if ((dev = alloc_etherdev(sizeof *lp)) == NULL) { printk(KERN_ERR "eth_configure: failed to allocate device\n"); return 1; } /* Initialize private element. */ lp = netdev_priv(dev); *lp = ((struct iss_net_private) { .device_list = LIST_HEAD_INIT(lp->device_list), .opened_list = LIST_HEAD_INIT(lp->opened_list), .lock = __SPIN_LOCK_UNLOCKED(lp.lock), .dev = dev, .index = index, //.fd = -1, .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0 }, .have_mac = 0, }); /* * Try all transport protocols. * Note: more protocols can be added by adding '&& !X_init(lp, eth)'. */ if (!tuntap_probe(lp, index, init)) { printk("Invalid arguments. 
Skipping device!\n"); goto errout; } printk(KERN_INFO "Netdevice %d ", index); if (lp->have_mac) printk("(%pM) ", lp->mac); printk(": "); /* sysfs register */ if (!driver_registered) { platform_driver_register(&iss_net_driver); driver_registered = 1; } spin_lock(&devices_lock); list_add(&lp->device_list, &devices); spin_unlock(&devices_lock); lp->pdev.id = index; lp->pdev.name = DRIVER_NAME; platform_device_register(&lp->pdev); SET_NETDEV_DEV(dev,&lp->pdev.dev); /* * If this name ends up conflicting with an existing registered * netdevice, that is OK, register_netdev{,ice}() will notice this * and fail. */ snprintf(dev->name, sizeof dev->name, "eth%d", index); dev->netdev_ops = &iss_netdev_ops; dev->mtu = lp->mtu; dev->watchdog_timeo = (HZ >> 1); dev->irq = -1; rtnl_lock(); err = register_netdevice(dev); rtnl_unlock(); if (err) { printk("Error registering net device!\n"); /* XXX: should we call ->remove() here? */ free_netdev(dev); return 1; } init_timer(&lp->tl); lp->tl.function = iss_net_user_timer_expire; #if 0 if (lp->have_mac) set_ether_mac(dev, lp->mac); #endif return 0; errout: // FIXME: unregister; free, etc.. return -EIO; } /* ------------------------------------------------------------------------- */ /* Filled in during early boot */ struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line); struct iss_net_init { struct list_head list; char *init; /* init string */ int index; }; /* * Parse the command line and look for 'ethX=...' fields, and register all * those fields. They will be later initialized in iss_net_init. 
*/ #define ERR KERN_ERR "iss_net_setup: " static int iss_net_setup(char *str) { struct iss_net_private *device = NULL; struct iss_net_init *new; struct list_head *ele; char *end; int n; n = simple_strtoul(str, &end, 0); if (end == str) { printk(ERR "Failed to parse '%s'\n", str); return 1; } if (n < 0) { printk(ERR "Device %d is negative\n", n); return 1; } if (*(str = end) != '=') { printk(ERR "Expected '=' after device number\n"); return 1; } spin_lock(&devices_lock); list_for_each(ele, &devices) { device = list_entry(ele, struct iss_net_private, device_list); if (device->index == n) break; } spin_unlock(&devices_lock); if (device && device->index == n) { printk(ERR "Device %d already configured\n", n); return 1; } if ((new = alloc_bootmem(sizeof new)) == NULL) { printk("Alloc_bootmem failed\n"); return 1; } INIT_LIST_HEAD(&new->list); new->index = n; new->init = str + 1; list_add_tail(&new->list, &eth_cmd_line); return 1; } #undef ERR __setup("eth=", iss_net_setup); /* * Initialize all ISS Ethernet devices previously registered in iss_net_setup. */ static int iss_net_init(void) { struct list_head *ele, *next; /* Walk through all Ethernet devices specified in the command line. */ list_for_each_safe(ele, next, &eth_cmd_line) { struct iss_net_init *eth; eth = list_entry(ele, struct iss_net_init, list); iss_net_configure(eth->index, eth->init); } return 1; } module_init(iss_net_init);
gpl-2.0
djvoleur/S6-UniBase
drivers/tty/n_tracerouter.c
12098
7242
/*
 * n_tracerouter.c - Trace data router through tty space
 *
 * Copyright (C) Intel 2011
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This trace router uses the Linux line discipline framework to route
 * trace data coming from a HW Modem to a PTI (Parallel Trace Module) port.
 * The solution is not specific to a HW modem and this line discipline can
 * be used to route any stream of data in kernel space.
 * This is part of a solution for the P1149.7, compact JTAG, standard.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/tty.h>
#include <linux/tty_ldisc.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm-generic/bug.h>
#include "n_tracesink.h"

/*
 * Advertise a 64k receive room, the value other ldisc drivers use to
 * mean "always ready, no flow control".
 */
#define RECEIVE_ROOM	65536
#define DRIVERNAME	"n_tracerouter"

/*
 * Per-module state for this ldisc.  There is exactly one instance:
 * opencalled records whether the ldisc is currently attached, and
 * kref_tty holds the reference to the tty it is attached to.
 */
struct tracerouter_data {
	u8 opencalled;
	struct tty_struct *kref_tty;
};

static struct tracerouter_data *tr_data;

/* Serialises attach/detach and use of the tty reference. */
static DEFINE_MUTEX(routelock);

/**
 * n_tracerouter_open() - Called when a tty is opened by a SW entity.
 * @tty: terminal device to the ldisc.
 *
 * Only one attachment is allowed at a time; a second open fails
 * with -EEXIST.
 *
 * Return: 0 on success, -EEXIST if already open, -EFAULT if the tty
 *         reference could not be taken.
 */
static int n_tracerouter_open(struct tty_struct *tty)
{
	int status = -EEXIST;

	mutex_lock(&routelock);
	if (!tr_data->opencalled) {
		tr_data->kref_tty = tty_kref_get(tty);
		if (!tr_data->kref_tty) {
			status = -EFAULT;
		} else {
			tr_data->opencalled = 1;
			tty->disc_data = tr_data;
			tty->receive_room = RECEIVE_ROOM;
			tty_driver_flush_buffer(tty);
			status = 0;
		}
	}
	mutex_unlock(&routelock);
	return status;
}

/**
 * n_tracerouter_close() - close connection
 * @tty: terminal device to the ldisc.
 *
 * Drops the tty reference taken in open() and marks the ldisc free
 * for a new attachment.
 */
static void n_tracerouter_close(struct tty_struct *tty)
{
	struct tracerouter_data *td = tty->disc_data;

	mutex_lock(&routelock);
	/* disc_data should always be our singleton instance. */
	WARN_ON(td->kref_tty != tr_data->kref_tty);
	tty_driver_flush_buffer(tty);
	tty_kref_put(tr_data->kref_tty);
	tr_data->kref_tty = NULL;
	tr_data->opencalled = 0;
	tty->disc_data = NULL;
	mutex_unlock(&routelock);
}

/**
 * n_tracerouter_read() - read request from user space
 * @tty:  terminal device passed into the ldisc.
 * @file: pointer to open file object.
 * @buf:  pointer to the data buffer that gets eventually returned.
 * @nr:   number of bytes of the data buffer that is returned.
 *
 * This ldisc only routes data via n_tracerouter_receivebuf(), so
 * userspace read() is deliberately unsupported.  -EINVAL (rather than
 * the default -EIO) signals that this is an intentional decision.
 *
 * Return: -EINVAL
 */
static ssize_t n_tracerouter_read(struct tty_struct *tty, struct file *file,
				  unsigned char __user *buf, size_t nr)
{
	return -EINVAL;
}

/**
 * n_tracerouter_write() - Function that allows write() in userspace.
 * @tty:  terminal device passed into the ldisc.
 * @file: pointer to open file object.
 * @buf:  pointer to the data buffer that gets eventually returned.
 * @nr:   number of bytes of the data buffer that is returned.
 *
 * Writes never pass through this router to hardware, so userspace
 * write() is deliberately unsupported; -EINVAL marks the intent
 * (see n_tracerouter_read()).
 *
 * Return: -EINVAL
 */
static ssize_t n_tracerouter_write(struct tty_struct *tty, struct file *file,
				   const unsigned char *buf, size_t nr)
{
	return -EINVAL;
}

/**
 * n_tracerouter_receivebuf() - Routing function for driver.
 * @tty:   terminal device passed into the ldisc; assumed non-NULL.
 * @cp:    block of characters to be routed onward.
 * @fp:    flag buffer.
 * @count: number of characters (aka, bytes) in cp.
 *
 * Hands the incoming buffer to the n_tracesink side for processing.
 */
static void n_tracerouter_receivebuf(struct tty_struct *tty,
				     const unsigned char *cp,
				     char *fp, int count)
{
	mutex_lock(&routelock);
	n_tracesink_datadrain((u8 *) cp, count);
	mutex_unlock(&routelock);
}

/*
 * No flush_buffer hook: the ldisc keeps no internal buffering, so
 * tty_driver_flush_buffer() already does everything required.
 */
static struct tty_ldisc_ops tty_ptirouter_ldisc = {
	.owner		= THIS_MODULE,
	.magic		= TTY_LDISC_MAGIC,
	.name		= DRIVERNAME,
	.open		= n_tracerouter_open,
	.close		= n_tracerouter_close,
	.read		= n_tracerouter_read,
	.write		= n_tracerouter_write,
	.receive_buf	= n_tracerouter_receivebuf
};

/**
 * n_tracerouter_init - module initialisation
 *
 * Allocates the singleton state and registers this module as a line
 * discipline driver.
 *
 * Return: 0 for success, any other value error.
 */
static int __init n_tracerouter_init(void)
{
	int retval;

	tr_data = kzalloc(sizeof(struct tracerouter_data), GFP_KERNEL);
	if (tr_data == NULL)
		return -ENOMEM;

	/* Note N_TRACEROUTER is defined in linux/tty.h */
	retval = tty_register_ldisc(N_TRACEROUTER, &tty_ptirouter_ldisc);
	if (retval < 0) {
		pr_err("%s: Registration failed: %d\n", __func__, retval);
		kfree(tr_data);
	}
	return retval;
}

/**
 * n_tracerouter_exit - module unload
 *
 * Removes this module as a line discipline driver.  The state is only
 * freed when unregistration succeeds.
 */
static void __exit n_tracerouter_exit(void)
{
	int retval = tty_unregister_ldisc(N_TRACEROUTER);

	if (retval < 0)
		pr_err("%s: Unregistration failed: %d\n", __func__, retval);
	else
		kfree(tr_data);
}

module_init(n_tracerouter_init);
module_exit(n_tracerouter_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jay Freyensee");
MODULE_ALIAS_LDISC(N_TRACEROUTER);
MODULE_DESCRIPTION("Trace router ldisc driver");
gpl-2.0
TeamExodus/legacy_kernel_htc_flounder
arch/mips/bcm63xx/timer.c
12610
4510
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <bcm63xx_cpu.h> #include <bcm63xx_io.h> #include <bcm63xx_timer.h> #include <bcm63xx_regs.h> static DEFINE_RAW_SPINLOCK(timer_reg_lock); static DEFINE_RAW_SPINLOCK(timer_data_lock); static struct clk *periph_clk; static struct timer_data { void (*cb)(void *); void *data; } timer_data[BCM63XX_TIMER_COUNT]; static irqreturn_t timer_interrupt(int irq, void *dev_id) { u32 stat; int i; raw_spin_lock(&timer_reg_lock); stat = bcm_timer_readl(TIMER_IRQSTAT_REG); bcm_timer_writel(stat, TIMER_IRQSTAT_REG); raw_spin_unlock(&timer_reg_lock); for (i = 0; i < BCM63XX_TIMER_COUNT; i++) { if (!(stat & TIMER_IRQSTAT_TIMER_CAUSE(i))) continue; raw_spin_lock(&timer_data_lock); if (!timer_data[i].cb) { raw_spin_unlock(&timer_data_lock); continue; } timer_data[i].cb(timer_data[i].data); raw_spin_unlock(&timer_data_lock); } return IRQ_HANDLED; } int bcm63xx_timer_enable(int id) { u32 reg; unsigned long flags; if (id >= BCM63XX_TIMER_COUNT) return -EINVAL; raw_spin_lock_irqsave(&timer_reg_lock, flags); reg = bcm_timer_readl(TIMER_CTLx_REG(id)); reg |= TIMER_CTL_ENABLE_MASK; bcm_timer_writel(reg, TIMER_CTLx_REG(id)); reg = bcm_timer_readl(TIMER_IRQSTAT_REG); reg |= TIMER_IRQSTAT_TIMER_IR_EN(id); bcm_timer_writel(reg, TIMER_IRQSTAT_REG); raw_spin_unlock_irqrestore(&timer_reg_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_timer_enable); int bcm63xx_timer_disable(int id) { u32 reg; unsigned long flags; if (id >= BCM63XX_TIMER_COUNT) return -EINVAL; raw_spin_lock_irqsave(&timer_reg_lock, flags); reg = bcm_timer_readl(TIMER_CTLx_REG(id)); reg &= ~TIMER_CTL_ENABLE_MASK; bcm_timer_writel(reg, 
TIMER_CTLx_REG(id)); reg = bcm_timer_readl(TIMER_IRQSTAT_REG); reg &= ~TIMER_IRQSTAT_TIMER_IR_EN(id); bcm_timer_writel(reg, TIMER_IRQSTAT_REG); raw_spin_unlock_irqrestore(&timer_reg_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_timer_disable); int bcm63xx_timer_register(int id, void (*callback)(void *data), void *data) { unsigned long flags; int ret; if (id >= BCM63XX_TIMER_COUNT || !callback) return -EINVAL; ret = 0; raw_spin_lock_irqsave(&timer_data_lock, flags); if (timer_data[id].cb) { ret = -EBUSY; goto out; } timer_data[id].cb = callback; timer_data[id].data = data; out: raw_spin_unlock_irqrestore(&timer_data_lock, flags); return ret; } EXPORT_SYMBOL(bcm63xx_timer_register); void bcm63xx_timer_unregister(int id) { unsigned long flags; if (id >= BCM63XX_TIMER_COUNT) return; raw_spin_lock_irqsave(&timer_data_lock, flags); timer_data[id].cb = NULL; raw_spin_unlock_irqrestore(&timer_data_lock, flags); } EXPORT_SYMBOL(bcm63xx_timer_unregister); unsigned int bcm63xx_timer_countdown(unsigned int countdown_us) { return (clk_get_rate(periph_clk) / (1000 * 1000)) * countdown_us; } EXPORT_SYMBOL(bcm63xx_timer_countdown); int bcm63xx_timer_set(int id, int monotonic, unsigned int countdown_us) { u32 reg, countdown; unsigned long flags; if (id >= BCM63XX_TIMER_COUNT) return -EINVAL; countdown = bcm63xx_timer_countdown(countdown_us); if (countdown & ~TIMER_CTL_COUNTDOWN_MASK) return -EINVAL; raw_spin_lock_irqsave(&timer_reg_lock, flags); reg = bcm_timer_readl(TIMER_CTLx_REG(id)); if (monotonic) reg &= ~TIMER_CTL_MONOTONIC_MASK; else reg |= TIMER_CTL_MONOTONIC_MASK; reg &= ~TIMER_CTL_COUNTDOWN_MASK; reg |= countdown; bcm_timer_writel(reg, TIMER_CTLx_REG(id)); raw_spin_unlock_irqrestore(&timer_reg_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_timer_set); int bcm63xx_timer_init(void) { int ret, irq; u32 reg; reg = bcm_timer_readl(TIMER_IRQSTAT_REG); reg &= ~TIMER_IRQSTAT_TIMER0_IR_EN; reg &= ~TIMER_IRQSTAT_TIMER1_IR_EN; reg &= ~TIMER_IRQSTAT_TIMER2_IR_EN; 
bcm_timer_writel(reg, TIMER_IRQSTAT_REG); periph_clk = clk_get(NULL, "periph"); if (IS_ERR(periph_clk)) return -ENODEV; irq = bcm63xx_get_irq_number(IRQ_TIMER); ret = request_irq(irq, timer_interrupt, 0, "bcm63xx_timer", NULL); if (ret) { printk(KERN_ERR "bcm63xx_timer: failed to register irq\n"); return ret; } return 0; } arch_initcall(bcm63xx_timer_init);
gpl-2.0
Evisceration/linux-kernel
drivers/gpu/drm/amd/amdgpu/tonga_ih.c
67
13814
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "vid.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_1_d.h"
#include "bif/bif_5_1_sh_mask.h"

/*
 * Interrupts
 * Starting with r6xx, interrupts are handled via a ring buffer.
 * Ring buffers are areas of GPU accessible memory that the GPU
 * writes interrupt vectors into and the host reads vectors out of.
 * There is a rptr (read pointer) that determines where the
 * host is currently reading, and a wptr (write pointer)
 * which determines where the GPU has written.  When the
 * pointers are equal, the ring is idle.  When the GPU
 * writes vectors to the ring buffer, it increments the
 * wptr.  When there is an interrupt, the host then starts
 * fetching commands and processing them until the pointers are
 * equal again at which point it updates the rptr.
 */

static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev);

/**
 * tonga_ih_enable_interrupts - Enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt ring buffer (VI).
 */
static void tonga_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

/**
 * tonga_ih_disable_interrupts - Disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt ring buffer (VI) and reset both ring
 * pointers to zero, so the software copy of rptr stays in sync.
 */
static void tonga_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
	/* set rptr, wptr to 0 */
	WREG32(mmIH_RB_RPTR, 0);
	WREG32(mmIH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

/**
 * tonga_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate a ring buffer for the interrupt controller,
 * enable the RLC, disable interrupts, enable the IH
 * ring buffer and enable it (VI).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int tonga_ih_irq_init(struct amdgpu_device *adev)
{
	int rb_bufsz;
	u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr;
	u64 wptr_off;

	/* disable irqs while the ring is reprogrammed */
	tonga_ih_disable_interrupts(adev);

	/* setup interrupt control */
	WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
	interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32(mmINTERRUPT_CNTL, interrupt_cntl);

	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
	if (adev->irq.ih.use_bus_addr)
		WREG32(mmIH_RB_BASE, adev->irq.ih.rb_dma_addr >> 8);
	else
		WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);

	/* ring size field is log2 of the size in dwords */
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
	ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	if (adev->irq.msi_enabled)
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1);

	WREG32(mmIH_RB_CNTL, ih_rb_cntl);

	/* set the writeback address whether it's enabled or not */
	if (adev->irq.ih.use_bus_addr)
		wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4);
	else
		wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);

	/* set rptr, wptr to 0 */
	WREG32(mmIH_RB_RPTR, 0);
	WREG32(mmIH_RB_WPTR, 0);

	ih_doorbell_rtpr = RREG32(mmIH_DOORBELL_RPTR);
	if (adev->irq.ih.use_doorbell) {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR, OFFSET,
						 adev->irq.ih.doorbell_index);
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 1);
	} else {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 0);
	}
	WREG32(mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);

	pci_set_master(adev->pdev);

	/* enable interrupts */
	tonga_ih_enable_interrupts(adev);

	return 0;
}

/**
 * tonga_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (VI).
 */
static void tonga_ih_irq_disable(struct amdgpu_device *adev)
{
	tonga_ih_disable_interrupts(adev);

	/* Wait and acknowledge irq */
	mdelay(1);
}

/**
 * tonga_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (VI).  Also check for
 * ring buffer overflow and deal with it.
 * Used by cz_irq_process(VI).
 * Returns the value of the wptr.
 */
static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
{
	u32 wptr, tmp;

	if (adev->irq.ih.use_bus_addr)
		wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]);
	else
		wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);

	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
		/* When a ring buffer overflow happen start parsing interrupt
		 * from the last not overwritten vector (wptr + 16).  Hopefully
		 * this should allow us to catchup.
		 */
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
		tmp = RREG32(mmIH_RB_CNTL);
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
		WREG32(mmIH_RB_CNTL, tmp);
	}
	return (wptr & adev->irq.ih.ptr_mask);
}

/**
 * tonga_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @entry: decoded IV entry filled in from the ring
 *
 * Decodes the interrupt vector at the current rptr
 * position and also advance the position (each vector
 * is 16 bytes / 4 dwords on this generation).
 */
static void tonga_ih_decode_iv(struct amdgpu_device *adev,
			       struct amdgpu_iv_entry *entry)
{
	/* wptr/rptr are in bytes! */
	u32 ring_index = adev->irq.ih.rptr >> 2;
	uint32_t dw[4];

	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);

	entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
	entry->src_id = dw[0] & 0xff;
	entry->src_data[0] = dw[1] & 0xfffffff;
	entry->ring_id = dw[2] & 0xff;
	entry->vm_id = (dw[2] >> 8) & 0xff;
	entry->pas_id = (dw[2] >> 16) & 0xffff;

	/* wptr/rptr are in bytes! */
	adev->irq.ih.rptr += 16;
}

/**
 * tonga_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 *
 * Set the IH ring buffer rptr, via doorbell when enabled,
 * otherwise by a direct register write.
 */
static void tonga_ih_set_rptr(struct amdgpu_device *adev)
{
	if (adev->irq.ih.use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		if (adev->irq.ih.use_bus_addr)
			adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
		else
			adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
		WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr);
	} else {
		WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
	}
}

/* IP-block early init: register the IRQ domain and install the IH
 * callbacks.  @handle is the amdgpu_device, per the amd_ip_funcs
 * convention used throughout this file. */
static int tonga_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = amdgpu_irq_add_domain(adev);
	if (ret)
		return ret;

	tonga_ih_set_interrupt_funcs(adev);

	return 0;
}

/* SW init: allocate a 64KB IH ring (writeback enabled) and set up
 * doorbell-based rptr updates, then initialize the IRQ support code. */
static int tonga_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, 64 * 1024, true);
	if (r)
		return r;

	adev->irq.ih.use_doorbell = true;
	adev->irq.ih.doorbell_index = AMDGPU_DOORBELL_IH;

	r = amdgpu_irq_init(adev);

	return r;
}

/* SW fini: tear down in reverse order of sw_init. */
static int tonga_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev);
	amdgpu_irq_remove_domain(adev);

	return 0;
}

/* HW init: program and enable the IH ring. */
static int tonga_ih_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = tonga_ih_irq_init(adev);
	if (r)
		return r;

	return 0;
}

/* HW fini: quiesce the IH ring. */
static int tonga_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	tonga_ih_irq_disable(adev);

	return 0;
}

/* Suspend is just a HW fini; the ring memory is preserved. */
static int tonga_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return tonga_ih_hw_fini(adev);
}

/* Resume re-runs the HW init sequence. */
static int tonga_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return tonga_ih_hw_init(adev);
}

/* Idle when SRBM_STATUS reports the IH block not busy. */
static bool tonga_ih_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
		return false;

	return true;
}

/* Poll SRBM_STATUS (1us granularity) until IH is idle or the device
 * timeout expires. */
static int tonga_ih_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read SRBM_STATUS and check the IH busy bit */
		tmp = RREG32(mmSRBM_STATUS);
		if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

/* Record whether IH needs a soft reset; the computed SRBM mask is
 * stashed in adev->irq.srbm_soft_reset for the soft-reset callbacks. */
static bool tonga_ih_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
						SOFT_RESET_IH, 1);

	if (srbm_soft_reset) {
		adev->irq.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->irq.srbm_soft_reset = 0;
		return false;
	}
}

/* Before soft reset: stop the IH block (only if a reset is pending). */
static int tonga_ih_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->irq.srbm_soft_reset)
		return 0;

	return tonga_ih_hw_fini(adev);
}

/* After soft reset: bring the IH block back up (only if one happened). */
static int tonga_ih_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->irq.srbm_soft_reset)
		return 0;

	return tonga_ih_hw_init(adev);
}

/* Pulse the SRBM soft-reset bits computed by check_soft_reset: set,
 * settle, clear, settle — the readbacks after each write flush the
 * register before the delay. */
static int tonga_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->irq.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->irq.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

/* No clockgating control for the IH block on this ASIC. */
static int tonga_ih_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

/* No powergating control for the IH block on this ASIC. */
static int tonga_ih_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs tonga_ih_ip_funcs = {
	.name = "tonga_ih",
	.early_init = tonga_ih_early_init,
	.late_init = NULL,
	.sw_init = tonga_ih_sw_init,
	.sw_fini = tonga_ih_sw_fini,
	.hw_init = tonga_ih_hw_init,
	.hw_fini = tonga_ih_hw_fini,
	.suspend = tonga_ih_suspend,
	.resume = tonga_ih_resume,
	.is_idle = tonga_ih_is_idle,
	.wait_for_idle = tonga_ih_wait_for_idle,
	.check_soft_reset = tonga_ih_check_soft_reset,
	.pre_soft_reset = tonga_ih_pre_soft_reset,
	.soft_reset = tonga_ih_soft_reset,
	.post_soft_reset = tonga_ih_post_soft_reset,
	.set_clockgating_state = tonga_ih_set_clockgating_state,
	.set_powergating_state = tonga_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs tonga_ih_funcs = {
	.get_wptr = tonga_ih_get_wptr,
	.decode_iv = tonga_ih_decode_iv,
	.set_rptr = tonga_ih_set_rptr
};

/* Install the IH callbacks unless another set was already chosen. */
static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	if (adev->irq.ih_funcs == NULL)
		adev->irq.ih_funcs = &tonga_ih_funcs;
}

const struct amdgpu_ip_block_version tonga_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &tonga_ih_ip_funcs,
};
gpl-2.0
pexip/os-iproute
ip/ipntable.c
67
16569
/* * Copyright (C)2006 USAGI/WIDE Project * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * based on ipneigh.c */ /* * Authors: * Masahide NAKAMURA @USAGI */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include "utils.h" #include "ip_common.h" static struct { int family; int index; #define NONE_DEV (-1) char name[1024]; } filter; static void usage(void) __attribute__((noreturn)); static void usage(void) { fprintf(stderr, "Usage: ip ntable change name NAME [ dev DEV ]\n" " [ thresh1 VAL ] [ thresh2 VAL ] [ thresh3 VAL ] [ gc_int MSEC ]\n" " [ PARMS ]\n" "Usage: ip ntable show [ dev DEV ] [ name NAME ]\n" "PARMS := [ base_reachable MSEC ] [ retrans MSEC ] [ gc_stale MSEC ]\n" " [ delay_probe MSEC ] [ queue LEN ]\n" " [ app_probs VAL ] [ ucast_probes VAL ] [ mcast_probes VAL ]\n" " [ anycast_delay MSEC ] [ proxy_delay MSEC ] [ proxy_queue LEN ]\n" " [ locktime MSEC ]\n" ); exit(-1); } static int ipntable_modify(int cmd, int flags, int argc, char **argv) { struct { struct nlmsghdr n; struct ndtmsg ndtm; char buf[1024]; } req; char *namep = NULL; char *threshsp = NULL; char *gc_intp = NULL; char parms_buf[1024]; struct rtattr *parms_rta = (struct rtattr *)parms_buf; int parms_change = 0; memset(&req, 0, sizeof(req)); req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndtmsg)); 
req.n.nlmsg_flags = NLM_F_REQUEST|flags; req.n.nlmsg_type = cmd; req.ndtm.ndtm_family = preferred_family; req.ndtm.ndtm_pad1 = 0; req.ndtm.ndtm_pad2 = 0; memset(&parms_buf, 0, sizeof(parms_buf)); parms_rta->rta_type = NDTA_PARMS; parms_rta->rta_len = RTA_LENGTH(0); while (argc > 0) { if (strcmp(*argv, "name") == 0) { int len; NEXT_ARG(); if (namep) duparg("NAME", *argv); namep = *argv; len = strlen(namep) + 1; addattr_l(&req.n, sizeof(req), NDTA_NAME, namep, len); } else if (strcmp(*argv, "thresh1") == 0) { __u32 thresh1; NEXT_ARG(); threshsp = *argv; if (get_u32(&thresh1, *argv, 0)) invarg("\"thresh1\" value is invalid", *argv); addattr32(&req.n, sizeof(req), NDTA_THRESH1, thresh1); } else if (strcmp(*argv, "thresh2") == 0) { __u32 thresh2; NEXT_ARG(); threshsp = *argv; if (get_u32(&thresh2, *argv, 0)) invarg("\"thresh2\" value is invalid", *argv); addattr32(&req.n, sizeof(req), NDTA_THRESH2, thresh2); } else if (strcmp(*argv, "thresh3") == 0) { __u32 thresh3; NEXT_ARG(); threshsp = *argv; if (get_u32(&thresh3, *argv, 0)) invarg("\"thresh3\" value is invalid", *argv); addattr32(&req.n, sizeof(req), NDTA_THRESH3, thresh3); } else if (strcmp(*argv, "gc_int") == 0) { __u64 gc_int; NEXT_ARG(); gc_intp = *argv; if (get_u64(&gc_int, *argv, 0)) invarg("\"gc_int\" value is invalid", *argv); addattr_l(&req.n, sizeof(req), NDTA_GC_INTERVAL, &gc_int, sizeof(gc_int)); } else if (strcmp(*argv, "dev") == 0) { __u32 ifindex; NEXT_ARG(); ifindex = ll_name_to_index(*argv); if (ifindex == 0) { fprintf(stderr, "Cannot find device \"%s\"\n", *argv); return -1; } rta_addattr32(parms_rta, sizeof(parms_buf), NDTPA_IFINDEX, ifindex); } else if (strcmp(*argv, "base_reachable") == 0) { __u64 breachable; NEXT_ARG(); if (get_u64(&breachable, *argv, 0)) invarg("\"base_reachable\" value is invalid", *argv); rta_addattr_l(parms_rta, sizeof(parms_buf), NDTPA_BASE_REACHABLE_TIME, &breachable, sizeof(breachable)); parms_change = 1; } else if (strcmp(*argv, "retrans") == 0) { __u64 retrans; 
NEXT_ARG(); if (get_u64(&retrans, *argv, 0)) invarg("\"retrans\" value is invalid", *argv); rta_addattr_l(parms_rta, sizeof(parms_buf), NDTPA_RETRANS_TIME, &retrans, sizeof(retrans)); parms_change = 1; } else if (strcmp(*argv, "gc_stale") == 0) { __u64 gc_stale; NEXT_ARG(); if (get_u64(&gc_stale, *argv, 0)) invarg("\"gc_stale\" value is invalid", *argv); rta_addattr_l(parms_rta, sizeof(parms_buf), NDTPA_GC_STALETIME, &gc_stale, sizeof(gc_stale)); parms_change = 1; } else if (strcmp(*argv, "delay_probe") == 0) { __u64 delay_probe; NEXT_ARG(); if (get_u64(&delay_probe, *argv, 0)) invarg("\"delay_probe\" value is invalid", *argv); rta_addattr_l(parms_rta, sizeof(parms_buf), NDTPA_DELAY_PROBE_TIME, &delay_probe, sizeof(delay_probe)); parms_change = 1; } else if (strcmp(*argv, "queue") == 0) { __u32 queue; NEXT_ARG(); if (get_u32(&queue, *argv, 0)) invarg("\"queue\" value is invalid", *argv); if (!parms_rta) parms_rta = (struct rtattr *)&parms_buf; rta_addattr32(parms_rta, sizeof(parms_buf), NDTPA_QUEUE_LEN, queue); parms_change = 1; } else if (strcmp(*argv, "app_probes") == 0) { __u32 aprobe; NEXT_ARG(); if (get_u32(&aprobe, *argv, 0)) invarg("\"app_probes\" value is invalid", *argv); rta_addattr32(parms_rta, sizeof(parms_buf), NDTPA_APP_PROBES, aprobe); parms_change = 1; } else if (strcmp(*argv, "ucast_probes") == 0) { __u32 uprobe; NEXT_ARG(); if (get_u32(&uprobe, *argv, 0)) invarg("\"ucast_probes\" value is invalid", *argv); rta_addattr32(parms_rta, sizeof(parms_buf), NDTPA_UCAST_PROBES, uprobe); parms_change = 1; } else if (strcmp(*argv, "mcast_probes") == 0) { __u32 mprobe; NEXT_ARG(); if (get_u32(&mprobe, *argv, 0)) invarg("\"mcast_probes\" value is invalid", *argv); rta_addattr32(parms_rta, sizeof(parms_buf), NDTPA_MCAST_PROBES, mprobe); parms_change = 1; } else if (strcmp(*argv, "anycast_delay") == 0) { __u64 anycast_delay; NEXT_ARG(); if (get_u64(&anycast_delay, *argv, 0)) invarg("\"anycast_delay\" value is invalid", *argv); rta_addattr_l(parms_rta, 
sizeof(parms_buf), NDTPA_ANYCAST_DELAY, &anycast_delay, sizeof(anycast_delay)); parms_change = 1; } else if (strcmp(*argv, "proxy_delay") == 0) { __u64 proxy_delay; NEXT_ARG(); if (get_u64(&proxy_delay, *argv, 0)) invarg("\"proxy_delay\" value is invalid", *argv); rta_addattr_l(parms_rta, sizeof(parms_buf), NDTPA_PROXY_DELAY, &proxy_delay, sizeof(proxy_delay)); parms_change = 1; } else if (strcmp(*argv, "proxy_queue") == 0) { __u32 pqueue; NEXT_ARG(); if (get_u32(&pqueue, *argv, 0)) invarg("\"proxy_queue\" value is invalid", *argv); rta_addattr32(parms_rta, sizeof(parms_buf), NDTPA_PROXY_QLEN, pqueue); parms_change = 1; } else if (strcmp(*argv, "locktime") == 0) { __u64 locktime; NEXT_ARG(); if (get_u64(&locktime, *argv, 0)) invarg("\"locktime\" value is invalid", *argv); rta_addattr_l(parms_rta, sizeof(parms_buf), NDTPA_LOCKTIME, &locktime, sizeof(locktime)); parms_change = 1; } else { invarg("unknown", *argv); } argc--; argv++; } if (!namep) missarg("NAME"); if (!threshsp && !gc_intp && !parms_change) { fprintf(stderr, "Not enough information: changable attributes required.\n"); exit(-1); } if (parms_rta->rta_len > RTA_LENGTH(0)) { addattr_l(&req.n, sizeof(req), NDTA_PARMS, RTA_DATA(parms_rta), RTA_PAYLOAD(parms_rta)); } if (rtnl_talk(&rth, &req.n, 0, 0, NULL, NULL, NULL) < 0) exit(2); return 0; } static const char *ntable_strtime_delta(__u32 msec) { static char str[32]; struct timeval now; time_t t; struct tm *tp; if (msec == 0) goto error; memset(&now, 0, sizeof(now)); if (gettimeofday(&now, NULL) < 0) { perror("gettimeofday"); goto error; } t = now.tv_sec - (msec / 1000); tp = localtime(&t); if (!tp) goto error; strftime(str, sizeof(str), "%Y-%m-%d %T", tp); return str; error: strcpy(str, "(error)"); return str; } int print_ntable(const struct sockaddr_nl *who, struct nlmsghdr *n, void *arg) { FILE *fp = (FILE*)arg; struct ndtmsg *ndtm = NLMSG_DATA(n); int len = n->nlmsg_len; struct rtattr *tb[NDTA_MAX+1]; struct rtattr *tpb[NDTPA_MAX+1]; int ret; if 
(n->nlmsg_type != RTM_NEWNEIGHTBL) { fprintf(stderr, "Not NEIGHTBL: %08x %08x %08x\n", n->nlmsg_len, n->nlmsg_type, n->nlmsg_flags); return 0; } len -= NLMSG_LENGTH(sizeof(*ndtm)); if (len < 0) { fprintf(stderr, "BUG: wrong nlmsg len %d\n", len); return -1; } if (preferred_family && preferred_family != ndtm->ndtm_family) return 0; parse_rtattr(tb, NDTA_MAX, NDTA_RTA(ndtm), n->nlmsg_len - NLMSG_LENGTH(sizeof(*ndtm))); if (tb[NDTA_NAME]) { char *name = RTA_DATA(tb[NDTA_NAME]); if (strlen(filter.name) > 0 && strcmp(filter.name, name)) return 0; } if (tb[NDTA_PARMS]) { parse_rtattr(tpb, NDTPA_MAX, RTA_DATA(tb[NDTA_PARMS]), RTA_PAYLOAD(tb[NDTA_PARMS])); if (tpb[NDTPA_IFINDEX]) { __u32 ifindex = *(__u32 *)RTA_DATA(tpb[NDTPA_IFINDEX]); if (filter.index && filter.index != ifindex) return 0; } else { if (filter.index && filter.index != NONE_DEV) return 0; } } if (ndtm->ndtm_family == AF_INET) fprintf(fp, "inet "); else if (ndtm->ndtm_family == AF_INET6) fprintf(fp, "inet6 "); else if (ndtm->ndtm_family == AF_DECnet) fprintf(fp, "dnet "); else fprintf(fp, "(%d) ", ndtm->ndtm_family); if (tb[NDTA_NAME]) { char *name = RTA_DATA(tb[NDTA_NAME]); fprintf(fp, "%s ", name); } fprintf(fp, "%s", _SL_); ret = (tb[NDTA_THRESH1] || tb[NDTA_THRESH2] || tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]); if (ret) fprintf(fp, " "); if (tb[NDTA_THRESH1]) { __u32 thresh1 = *(__u32 *)RTA_DATA(tb[NDTA_THRESH1]); fprintf(fp, "thresh1 %u ", thresh1); } if (tb[NDTA_THRESH2]) { __u32 thresh2 = *(__u32 *)RTA_DATA(tb[NDTA_THRESH2]); fprintf(fp, "thresh2 %u ", thresh2); } if (tb[NDTA_THRESH3]) { __u32 thresh3 = *(__u32 *)RTA_DATA(tb[NDTA_THRESH3]); fprintf(fp, "thresh3 %u ", thresh3); } if (tb[NDTA_GC_INTERVAL]) { __u64 gc_int = *(__u64 *)RTA_DATA(tb[NDTA_GC_INTERVAL]); fprintf(fp, "gc_int %llu ", gc_int); } if (ret) fprintf(fp, "%s", _SL_); if (tb[NDTA_CONFIG] && show_stats) { struct ndt_config *ndtc = RTA_DATA(tb[NDTA_CONFIG]); fprintf(fp, " "); fprintf(fp, "config "); fprintf(fp, "key_len %u ", 
ndtc->ndtc_key_len); fprintf(fp, "entry_size %u ", ndtc->ndtc_entry_size); fprintf(fp, "entries %u ", ndtc->ndtc_entries); fprintf(fp, "%s", _SL_); fprintf(fp, " "); fprintf(fp, "last_flush %s ", ntable_strtime_delta(ndtc->ndtc_last_flush)); fprintf(fp, "last_rand %s ", ntable_strtime_delta(ndtc->ndtc_last_rand)); fprintf(fp, "%s", _SL_); fprintf(fp, " "); fprintf(fp, "hash_rnd %u ", ndtc->ndtc_hash_rnd); fprintf(fp, "hash_mask %08x ", ndtc->ndtc_hash_mask); fprintf(fp, "hash_chain_gc %u ", ndtc->ndtc_hash_chain_gc); fprintf(fp, "proxy_qlen %u ", ndtc->ndtc_proxy_qlen); fprintf(fp, "%s", _SL_); } if (tb[NDTA_PARMS]) { if (tpb[NDTPA_IFINDEX]) { __u32 ifindex = *(__u32 *)RTA_DATA(tpb[NDTPA_IFINDEX]); fprintf(fp, " "); fprintf(fp, "dev %s ", ll_index_to_name(ifindex)); fprintf(fp, "%s", _SL_); } fprintf(fp, " "); if (tpb[NDTPA_REFCNT]) { __u32 refcnt = *(__u32 *)RTA_DATA(tpb[NDTPA_REFCNT]); fprintf(fp, "refcnt %u ", refcnt); } if (tpb[NDTPA_REACHABLE_TIME]) { __u64 reachable = *(__u64 *)RTA_DATA(tpb[NDTPA_REACHABLE_TIME]); fprintf(fp, "reachable %llu ", reachable); } if (tpb[NDTPA_BASE_REACHABLE_TIME]) { __u64 breachable = *(__u64 *)RTA_DATA(tpb[NDTPA_BASE_REACHABLE_TIME]); fprintf(fp, "base_reachable %llu ", breachable); } if (tpb[NDTPA_RETRANS_TIME]) { __u64 retrans = *(__u64 *)RTA_DATA(tpb[NDTPA_RETRANS_TIME]); fprintf(fp, "retrans %llu ", retrans); } fprintf(fp, "%s", _SL_); fprintf(fp, " "); if (tpb[NDTPA_GC_STALETIME]) { __u64 gc_stale = *(__u64 *)RTA_DATA(tpb[NDTPA_GC_STALETIME]); fprintf(fp, "gc_stale %llu ", gc_stale); } if (tpb[NDTPA_DELAY_PROBE_TIME]) { __u64 delay_probe = *(__u64 *)RTA_DATA(tpb[NDTPA_DELAY_PROBE_TIME]); fprintf(fp, "delay_probe %llu ", delay_probe); } if (tpb[NDTPA_QUEUE_LEN]) { __u32 queue = *(__u32 *)RTA_DATA(tpb[NDTPA_QUEUE_LEN]); fprintf(fp, "queue %u ", queue); } fprintf(fp, "%s", _SL_); fprintf(fp, " "); if (tpb[NDTPA_APP_PROBES]) { __u32 aprobe = *(__u32 *)RTA_DATA(tpb[NDTPA_APP_PROBES]); fprintf(fp, "app_probes %u ", aprobe); } if 
(tpb[NDTPA_UCAST_PROBES]) { __u32 uprobe = *(__u32 *)RTA_DATA(tpb[NDTPA_UCAST_PROBES]); fprintf(fp, "ucast_probes %u ", uprobe); } if (tpb[NDTPA_MCAST_PROBES]) { __u32 mprobe = *(__u32 *)RTA_DATA(tpb[NDTPA_MCAST_PROBES]); fprintf(fp, "mcast_probes %u ", mprobe); } fprintf(fp, "%s", _SL_); fprintf(fp, " "); if (tpb[NDTPA_ANYCAST_DELAY]) { __u64 anycast_delay = *(__u64 *)RTA_DATA(tpb[NDTPA_ANYCAST_DELAY]); fprintf(fp, "anycast_delay %llu ", anycast_delay); } if (tpb[NDTPA_PROXY_DELAY]) { __u64 proxy_delay = *(__u64 *)RTA_DATA(tpb[NDTPA_PROXY_DELAY]); fprintf(fp, "proxy_delay %llu ", proxy_delay); } if (tpb[NDTPA_PROXY_QLEN]) { __u32 pqueue = *(__u32 *)RTA_DATA(tpb[NDTPA_PROXY_QLEN]); fprintf(fp, "proxy_queue %u ", pqueue); } if (tpb[NDTPA_LOCKTIME]) { __u64 locktime = *(__u64 *)RTA_DATA(tpb[NDTPA_LOCKTIME]); fprintf(fp, "locktime %llu ", locktime); } fprintf(fp, "%s", _SL_); } if (tb[NDTA_STATS] && show_stats) { struct ndt_stats *ndts = RTA_DATA(tb[NDTA_STATS]); fprintf(fp, " "); fprintf(fp, "stats "); fprintf(fp, "allocs %llu ", ndts->ndts_allocs); fprintf(fp, "destroys %llu ", ndts->ndts_destroys); fprintf(fp, "hash_grows %llu ", ndts->ndts_hash_grows); fprintf(fp, "%s", _SL_); fprintf(fp, " "); fprintf(fp, "res_failed %llu ", ndts->ndts_res_failed); fprintf(fp, "lookups %llu ", ndts->ndts_lookups); fprintf(fp, "hits %llu ", ndts->ndts_hits); fprintf(fp, "%s", _SL_); fprintf(fp, " "); fprintf(fp, "rcv_probes_mcast %llu ", ndts->ndts_rcv_probes_mcast); fprintf(fp, "rcv_probes_ucast %llu ", ndts->ndts_rcv_probes_ucast); fprintf(fp, "%s", _SL_); fprintf(fp, " "); fprintf(fp, "periodic_gc_runs %llu ", ndts->ndts_periodic_gc_runs); fprintf(fp, "forced_gc_runs %llu ", ndts->ndts_forced_gc_runs); fprintf(fp, "%s", _SL_); } fprintf(fp, "\n"); fflush(fp); return 0; } void ipntable_reset_filter(void) { memset(&filter, 0, sizeof(filter)); } static int ipntable_show(int argc, char **argv) { ipntable_reset_filter(); filter.family = preferred_family; while (argc > 0) { if 
(strcmp(*argv, "dev") == 0) { NEXT_ARG(); if (strcmp("none", *argv) == 0) filter.index = NONE_DEV; else if ((filter.index = ll_name_to_index(*argv)) == 0) invarg("\"DEV\" is invalid", *argv); } else if (strcmp(*argv, "name") == 0) { NEXT_ARG(); strncpy(filter.name, *argv, sizeof(filter.name)); } else invarg("unknown", *argv); argc--; argv++; } if (rtnl_wilddump_request(&rth, preferred_family, RTM_GETNEIGHTBL) < 0) { perror("Cannot send dump request"); exit(1); } if (rtnl_dump_filter(&rth, print_ntable, stdout, NULL, NULL) < 0) { fprintf(stderr, "Dump terminated\n"); exit(1); } return 0; } int do_ipntable(int argc, char **argv) { ll_init_map(&rth); if (argc > 0) { if (matches(*argv, "change") == 0 || matches(*argv, "chg") == 0) return ipntable_modify(RTM_SETNEIGHTBL, NLM_F_REPLACE, argc-1, argv+1); if (matches(*argv, "show") == 0 || matches(*argv, "lst") == 0 || matches(*argv, "list") == 0) return ipntable_show(argc-1, argv+1); if (matches(*argv, "help") == 0) usage(); } else return ipntable_show(0, NULL); fprintf(stderr, "Command \"%s\" is unknown, try \"ip ntable help\".\n", *argv); exit(-1); }
gpl-2.0
irmus/HHC_Engraver
Libraries/CMSIS/DSP_Lib/Source/FilteringFunctions/arm_fir_decimate_init_f32.c
67
4241
/*----------------------------------------------------------------------------- * Copyright (C) 2010-2013 ARM Limited. All rights reserved. * * $Date: 17. January 2013 * $Revision: V1.4.1 * * Project: CMSIS DSP Library * Title: arm_fir_decimate_init_f32.c * * Description: Floating-point FIR Decimator initialization function. * * Target Processor: Cortex-M4/Cortex-M3/Cortex-M0 * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * - Neither the name of ARM LIMITED nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* ---------------------------------------------------------------------------*/ #include "arm_math.h" /** * @ingroup groupFilters */ /** * @addtogroup FIR_decimate * @{ */ /** * @brief Initialization function for the floating-point FIR decimator. * @param[in,out] *S points to an instance of the floating-point FIR decimator structure. * @param[in] numTaps number of coefficients in the filter. * @param[in] M decimation factor. * @param[in] *pCoeffs points to the filter coefficients. * @param[in] *pState points to the state buffer. * @param[in] blockSize number of input samples to process per call. * @return The function returns ARM_MATH_SUCCESS if initialization was successful or ARM_MATH_LENGTH_ERROR if * <code>blockSize</code> is not a multiple of <code>M</code>. * * <b>Description:</b> * \par * <code>pCoeffs</code> points to the array of filter coefficients stored in time reversed order: * <pre> * {b[numTaps-1], b[numTaps-2], b[N-2], ..., b[1], b[0]} * </pre> * \par * <code>pState</code> points to the array of state variables. * <code>pState</code> is of length <code>numTaps+blockSize-1</code> words where <code>blockSize</code> is the number of input samples passed to <code>arm_fir_decimate_f32()</code>. * <code>M</code> is the decimation factor. 
*/ arm_status arm_fir_decimate_init_f32( arm_fir_decimate_instance_f32 * S, uint16_t numTaps, uint8_t M, float32_t * pCoeffs, float32_t * pState, uint32_t blockSize) { arm_status status; /* The size of the input block must be a multiple of the decimation factor */ if((blockSize % M) != 0u) { /* Set status as ARM_MATH_LENGTH_ERROR */ status = ARM_MATH_LENGTH_ERROR; } else { /* Assign filter taps */ S->numTaps = numTaps; /* Assign coefficient pointer */ S->pCoeffs = pCoeffs; /* Clear state buffer and size is always (blockSize + numTaps - 1) */ memset(pState, 0, (numTaps + (blockSize - 1u)) * sizeof(float32_t)); /* Assign state pointer */ S->pState = pState; /* Assign Decimation Factor */ S->M = M; status = ARM_MATH_SUCCESS; } return (status); } /** * @} end of FIR_decimate group */
gpl-2.0
HongjianWang/rau-platform
components/external/freetype/src/tools/test_afm.c
323
3970
/*
 * gcc -DFT2_BUILD_LIBRARY -I../../include -o test_afm test_afm.c \
 * -L../../objs/.libs -lfreetype -lz -static
 */

#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_INTERNAL_STREAM_H
#include FT_INTERNAL_POSTSCRIPT_AUX_H


/* Print a human-readable summary of a parsed AFM file: font bounding
 * box, vertical metrics, track kernings and kerning pairs.  All 16.16
 * fixed-point values are divided by 65536 to recover the AFM numbers. */
void dump_fontinfo( AFM_FontInfo  fi )
{
  FT_Int  i;

  printf( "This AFM is for %sCID font.\n\n",
          ( fi->IsCIDFont ) ? "" : "non-" );

  printf( "FontBBox: %.2f %.2f %.2f %.2f\n",
          fi->FontBBox.xMin / 65536.,
          fi->FontBBox.yMin / 65536.,
          fi->FontBBox.xMax / 65536.,
          fi->FontBBox.yMax / 65536. );
  printf( "Ascender: %.2f\n", fi->Ascender / 65536. );
  printf( "Descender: %.2f\n\n", fi->Descender / 65536. );

  if ( fi->NumTrackKern )
    printf( "There are %d sets of track kernings:\n",
            fi->NumTrackKern );
  else
    printf( "There is no track kerning.\n" );

  for ( i = 0; i < fi->NumTrackKern; i++ )
  {
    AFM_TrackKern  tk = fi->TrackKerns + i;

    printf( "\t%2d: %5.2f %5.2f %5.2f %5.2f\n",
            tk->degree,
            tk->min_ptsize / 65536.,
            tk->min_kern / 65536.,
            tk->max_ptsize / 65536.,
            tk->max_kern / 65536. );
  }

  printf( "\n" );

  if ( fi->NumKernPair )
    printf( "There are %d kerning pairs:\n",
            fi->NumKernPair );
  else
    printf( "There is no kerning pair.\n" );

  for ( i = 0; i < fi->NumKernPair; i++ )
  {
    AFM_KernPair  kp = fi->KernPairs + i;

    printf( "\t%3d + %3d => (%4d, %4d)\n",
            kp->index1, kp->index2, kp->x, kp->y );
  }
}


/* Stand-in for a real glyph-name -> glyph-index callback: returns the
 * first character of the name, so kerning pairs can be resolved without
 * loading an actual font. */
int dummy_get_index( const char*  name,
                     FT_Offset    len,
                     void*        user_data )
{
  if ( len )
    return name[0];
  else
    return 0;
}


/* Parse the AFM data in `stream' into `fi' using the psaux module's AFM
 * parser.  Returns an FT_Error, or -1 if the psaux service is missing.
 * NOTE(review): the frame entered with FT_Stream_EnterFrame is never
 * released with FT_Stream_ExitFrame -- tolerable in a one-shot test
 * program, but confirm before reusing this function elsewhere. */
FT_Error parse_afm( FT_Library    library,
                    FT_Stream     stream,
                    AFM_FontInfo  fi )
{
  PSAux_Service  psaux;
  AFM_ParserRec  parser;
  FT_Error       error = FT_Err_Ok;

  psaux = (PSAux_Service)FT_Get_Module_Interface( library, "psaux" );
  if ( !psaux || !psaux->afm_parser_funcs )
    return -1;

  /* map the whole file into memory so the parser can scan it */
  error = FT_Stream_EnterFrame( stream, stream->size );
  if ( error )
    return error;

  error = psaux->afm_parser_funcs->init( &parser,
                                         library->memory,
                                         stream->cursor,
                                         stream->limit );
  if ( error )
    return error;

  parser.FontInfo  = fi;
  parser.get_index = dummy_get_index;

  error = psaux->afm_parser_funcs->parse( &parser );

  psaux->afm_parser_funcs->done( &parser );

  return error;
}


/* Usage: test_afm <afm-file>.  Opens the file, parses it as AFM and
 * dumps the result; the process exit code is the FreeType error code. */
int main( int    argc,
          char** argv )
{
  FT_Library       library;
  FT_StreamRec     stream;
  FT_Error         error = FT_Err_Ok;
  AFM_FontInfoRec  fi;

  if ( argc < 2 )
    return FT_Err_Invalid_Argument;

  error = FT_Init_FreeType( &library );
  if ( error )
    return error;

  FT_ZERO( &stream );
  error = FT_Stream_Open( &stream, argv[1] );
  if ( error )
    goto Exit;
  stream.memory = library->memory;

  FT_ZERO( &fi );
  error = parse_afm( library, &stream, &fi );

  if ( !error )
  {
    FT_Memory  memory = library->memory;  /* used implicitly by FT_FREE */

    dump_fontinfo( &fi );

    /* the parser allocated these arrays from `memory'; release them */
    if ( fi.KernPairs )
      FT_FREE( fi.KernPairs );
    if ( fi.TrackKerns )
      FT_FREE( fi.TrackKerns );
  }
  else
    printf( "parse error\n" );

  FT_Stream_Close( &stream );

Exit:
  FT_Done_FreeType( library );

  return error;
}
gpl-2.0
oe5hpm/linux
drivers/leds/led-class-flash.c
579
9875
/*
 * LED Flash class interface
 *
 * Copyright (C) 2015 Samsung Electronics Co., Ltd.
 * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/init.h>
#include <linux/led-class-flash.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "leds.h"

/* True iff the flash classdev exists and its driver implements `op'. */
#define has_flash_op(fled_cdev, op)			\
	(fled_cdev && fled_cdev->ops->op)

/* Invoke a flash op when implemented, otherwise fail with -EINVAL. */
#define call_flash_op(fled_cdev, op, args...)		\
	((has_flash_op(fled_cdev, op)) ?			\
			(fled_cdev->ops->op(fled_cdev, args)) : \
			-EINVAL)

/* Fault names shown by the flash_fault attribute; the array index of
 * each entry corresponds to a bit position in the driver's fault mask. */
static const char * const led_flash_fault_names[] = {
	"led-over-voltage",
	"flash-timeout-exceeded",
	"controller-over-temperature",
	"controller-short-circuit",
	"led-power-supply-over-current",
	"indicator-led-fault",
	"led-under-voltage",
	"controller-under-voltage",
	"led-over-temperature",
};

/* sysfs store: set flash brightness from a decimal string.  Fails with
 * -EBUSY while sysfs access to the LED is disabled (e.g. taken over by
 * a trigger). */
static ssize_t flash_brightness_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
	unsigned long state;
	ssize_t ret;

	mutex_lock(&led_cdev->led_access);

	if (led_sysfs_is_disabled(led_cdev)) {
		ret = -EBUSY;
		goto unlock;
	}

	ret = kstrtoul(buf, 10, &state);
	if (ret)
		goto unlock;

	ret = led_set_flash_brightness(fled_cdev, state);
	if (ret < 0)
		goto unlock;

	/* report full consumption of the input on success */
	ret = size;
unlock:
	mutex_unlock(&led_cdev->led_access);
	return ret;
}

/* sysfs show: refresh the cached brightness from hardware (when the
 * driver provides a getter) and print it. */
static ssize_t flash_brightness_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);

	/* no lock needed for this */
	led_update_flash_brightness(fled_cdev);

	return sprintf(buf, "%u\n", fled_cdev->brightness.val);
}
static DEVICE_ATTR_RW(flash_brightness);

/* sysfs show: maximum flash brightness supported by the device. */
static ssize_t max_flash_brightness_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);

	return sprintf(buf, "%u\n", fled_cdev->brightness.max);
}
static DEVICE_ATTR_RO(max_flash_brightness);

/* sysfs store: arm (1) or disarm (0) the strobe; any other value is
 * rejected. */
static ssize_t flash_strobe_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
	unsigned long state;
	ssize_t ret = -EINVAL;

	mutex_lock(&led_cdev->led_access);

	if (led_sysfs_is_disabled(led_cdev)) {
		ret = -EBUSY;
		goto unlock;
	}

	ret = kstrtoul(buf, 10, &state);
	if (ret)
		goto unlock;

	if (state > 1) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = led_set_flash_strobe(fled_cdev, state);
	if (ret < 0)
		goto unlock;
	ret = size;
unlock:
	mutex_unlock(&led_cdev->led_access);
	return ret;
}

/* sysfs show: current strobe state as reported by the driver. */
static ssize_t flash_strobe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
	bool state;
	int ret;

	/* no lock needed for this */
	ret = led_get_flash_strobe(fled_cdev, &state);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%u\n", state);
}
static DEVICE_ATTR_RW(flash_strobe);

/* sysfs store: set the flash timeout (units defined by the driver --
 * presumably microseconds; confirm against led-class-flash.h). */
static ssize_t flash_timeout_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
	unsigned long flash_timeout;
	ssize_t ret;

	mutex_lock(&led_cdev->led_access);

	if (led_sysfs_is_disabled(led_cdev)) {
		ret = -EBUSY;
		goto unlock;
	}

	ret = kstrtoul(buf, 10, &flash_timeout);
	if (ret)
		goto unlock;

	ret = led_set_flash_timeout(fled_cdev, flash_timeout);
	if (ret < 0)
		goto unlock;

	ret = size;
unlock:
	mutex_unlock(&led_cdev->led_access);
	return ret;
}

/* sysfs show: cached flash timeout value. */
static ssize_t flash_timeout_show(struct
device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); return sprintf(buf, "%u\n", fled_cdev->timeout.val); } static DEVICE_ATTR_RW(flash_timeout); static ssize_t max_flash_timeout_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); return sprintf(buf, "%u\n", fled_cdev->timeout.max); } static DEVICE_ATTR_RO(max_flash_timeout); static ssize_t flash_fault_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); u32 fault, mask = 0x1; char *pbuf = buf; int i, ret, buf_len; ret = led_get_flash_fault(fled_cdev, &fault); if (ret < 0) return -EINVAL; *buf = '\0'; for (i = 0; i < LED_NUM_FLASH_FAULTS; ++i) { if (fault & mask) { buf_len = sprintf(pbuf, "%s ", led_flash_fault_names[i]); pbuf += buf_len; } mask <<= 1; } return sprintf(buf, "%s\n", buf); } static DEVICE_ATTR_RO(flash_fault); static struct attribute *led_flash_strobe_attrs[] = { &dev_attr_flash_strobe.attr, NULL, }; static struct attribute *led_flash_timeout_attrs[] = { &dev_attr_flash_timeout.attr, &dev_attr_max_flash_timeout.attr, NULL, }; static struct attribute *led_flash_brightness_attrs[] = { &dev_attr_flash_brightness.attr, &dev_attr_max_flash_brightness.attr, NULL, }; static struct attribute *led_flash_fault_attrs[] = { &dev_attr_flash_fault.attr, NULL, }; static const struct attribute_group led_flash_strobe_group = { .attrs = led_flash_strobe_attrs, }; static const struct attribute_group led_flash_timeout_group = { .attrs = led_flash_timeout_attrs, }; static const struct attribute_group led_flash_brightness_group = { .attrs = led_flash_brightness_attrs, }; static const struct attribute_group led_flash_fault_group = { 
	.attrs = led_flash_fault_attrs,
};

/* Restore the hardware flash state after resume from the cached
 * brightness and timeout settings. */
static void led_flash_resume(struct led_classdev *led_cdev)
{
	struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);

	call_flash_op(fled_cdev, flash_brightness_set,
					fled_cdev->brightness.val);
	call_flash_op(fled_cdev, timeout_set, fled_cdev->timeout.val);
}

/* Populate fled_cdev->sysfs_groups with the attribute groups matching
 * the ops the driver implements; the strobe group is always present. */
static void led_flash_init_sysfs_groups(struct led_classdev_flash *fled_cdev)
{
	struct led_classdev *led_cdev = &fled_cdev->led_cdev;
	const struct led_flash_ops *ops = fled_cdev->ops;
	const struct attribute_group **flash_groups = fled_cdev->sysfs_groups;

	int num_sysfs_groups = 0;

	flash_groups[num_sysfs_groups++] = &led_flash_strobe_group;

	if (ops->flash_brightness_set)
		flash_groups[num_sysfs_groups++] = &led_flash_brightness_group;

	if (ops->timeout_set)
		flash_groups[num_sysfs_groups++] = &led_flash_timeout_group;

	if (ops->fault_get)
		flash_groups[num_sysfs_groups++] = &led_flash_fault_group;

	led_cdev->groups = flash_groups;
}

/* Register a flash LED classdev.  For devices with LED_DEV_CAP_FLASH,
 * a blocking brightness setter and the strobe_set op are mandatory;
 * the flash sysfs groups are selected before the classdev is created. */
int led_classdev_flash_register(struct device *parent,
				struct led_classdev_flash *fled_cdev)
{
	struct led_classdev *led_cdev;
	const struct led_flash_ops *ops;
	int ret;

	if (!fled_cdev)
		return -EINVAL;

	led_cdev = &fled_cdev->led_cdev;

	if (led_cdev->flags & LED_DEV_CAP_FLASH) {
		if (!led_cdev->brightness_set_blocking)
			return -EINVAL;

		ops = fled_cdev->ops;
		/* strobe_set is the only mandatory flash op */
		if (!ops || !ops->strobe_set)
			return -EINVAL;

		led_cdev->flash_resume = led_flash_resume;

		/* Select the sysfs attributes to be created for the device */
		led_flash_init_sysfs_groups(fled_cdev);
	}

	/* Register led class device */
	ret = led_classdev_register(parent, led_cdev);
	if (ret < 0)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(led_classdev_flash_register);

/* Unregister a previously registered flash LED classdev; a NULL
 * argument is a no-op. */
void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev)
{
	if (!fled_cdev)
		return;

	led_classdev_unregister(&fled_cdev->led_cdev);
}
EXPORT_SYMBOL_GPL(led_classdev_flash_unregister);

/* Clamp s->val into [s->min, s->max] and round it to the nearest
 * multiple of s->step above s->min (half-step added first so the
 * truncating division rounds to nearest). */
static void led_clamp_align(struct led_flash_setting *s)
{
	u32 v, offset;

	v = s->val + s->step / 2;
	v = clamp(v, s->min, s->max);
	offset = v - s->min;
	offset = s->step * (offset / s->step);
	s->val = s->min + offset;
}

/* Set the flash timeout; the value is clamped/aligned to the setting's
 * constraints and pushed to hardware only when the LED is not
 * suspended (resume will replay the cached value). */
int led_set_flash_timeout(struct led_classdev_flash *fled_cdev, u32 timeout)
{
	struct led_classdev *led_cdev = &fled_cdev->led_cdev;
	struct led_flash_setting *s = &fled_cdev->timeout;

	s->val = timeout;
	led_clamp_align(s);

	if (!(led_cdev->flags & LED_SUSPENDED))
		return call_flash_op(fled_cdev, timeout_set, s->val);

	return 0;
}
EXPORT_SYMBOL_GPL(led_set_flash_timeout);

/* Read the driver-reported fault bitmask; -EINVAL if unsupported. */
int led_get_flash_fault(struct led_classdev_flash *fled_cdev, u32 *fault)
{
	return call_flash_op(fled_cdev, fault_get, fault);
}
EXPORT_SYMBOL_GPL(led_get_flash_fault);

/* Set the flash brightness; same clamp/align and suspend handling as
 * led_set_flash_timeout(). */
int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
				u32 brightness)
{
	struct led_classdev *led_cdev = &fled_cdev->led_cdev;
	struct led_flash_setting *s = &fled_cdev->brightness;

	s->val = brightness;
	led_clamp_align(s);

	if (!(led_cdev->flags & LED_SUSPENDED))
		return call_flash_op(fled_cdev, flash_brightness_set, s->val);

	return 0;
}
EXPORT_SYMBOL_GPL(led_set_flash_brightness);

/* Refresh the cached brightness from hardware when the driver provides
 * a getter; drivers without one keep the cached value. */
int led_update_flash_brightness(struct led_classdev_flash *fled_cdev)
{
	struct led_flash_setting *s = &fled_cdev->brightness;
	u32 brightness;

	if (has_flash_op(fled_cdev, flash_brightness_get)) {
		int ret = call_flash_op(fled_cdev, flash_brightness_get,
						&brightness);
		if (ret < 0)
			return ret;

		s->val = brightness;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(led_update_flash_brightness);

MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
MODULE_DESCRIPTION("LED Flash class interface");
MODULE_LICENSE("GPL v2");
gpl-2.0
mantera/WX_435_Kernel-CM7
drivers/pci/hotplug/cpqphp_ctrl.c
579
77832
/* * Compaq Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <greg@kroah.com> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/wait.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/kthread.h> #include "cpqphp.h" static u32 configure_new_device(struct controller* ctrl, struct pci_func *func, u8 behind_bridge, struct resource_lists *resources); static int configure_new_function(struct controller* ctrl, struct pci_func *func, u8 behind_bridge, struct resource_lists *resources); static void interrupt_event_handler(struct controller *ctrl); static struct task_struct *cpqhp_event_thread; static unsigned long pushbutton_pending; /* = 0 */ /* delay is in jiffies to wait for */ static void long_delay(int delay) { /* * XXX(hch): if someone is bored please convert all callers * to call msleep_interruptible directly. They really want * to specify timeouts in natural units and spend a lot of * effort converting them to jiffies.. 
*/ msleep_interruptible(jiffies_to_msecs(delay)); } /* FIXME: The following line needs to be somewhere else... */ #define WRONG_BUS_FREQUENCY 0x07 static u8 handle_switch_change(u8 change, struct controller * ctrl) { int hp_slot; u8 rc = 0; u16 temp_word; struct pci_func *func; struct event_info *taskInfo; if (!change) return 0; /* Switch Change */ dbg("cpqsbd: Switch interrupt received.\n"); for (hp_slot = 0; hp_slot < 6; hp_slot++) { if (change & (0x1L << hp_slot)) { /* * this one changed. */ func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0); /* this is the structure that tells the worker thread * what to do */ taskInfo = &(ctrl->event_queue[ctrl->next_event]); ctrl->next_event = (ctrl->next_event + 1) % 10; taskInfo->hp_slot = hp_slot; rc++; temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { /* * Switch opened */ func->switch_save = 0; taskInfo->event_type = INT_SWITCH_OPEN; } else { /* * Switch closed */ func->switch_save = 0x10; taskInfo->event_type = INT_SWITCH_CLOSE; } } } return rc; } /** * cpqhp_find_slot - find the struct slot of given device * @ctrl: scan lots of this controller * @device: the device id to find */ static struct slot *cpqhp_find_slot(struct controller *ctrl, u8 device) { struct slot *slot = ctrl->slot; while (slot && (slot->device != device)) slot = slot->next; return slot; } static u8 handle_presence_change(u16 change, struct controller * ctrl) { int hp_slot; u8 rc = 0; u8 temp_byte; u16 temp_word; struct pci_func *func; struct event_info *taskInfo; struct slot *p_slot; if (!change) return 0; /* * Presence Change */ dbg("cpqsbd: Presence/Notify input change.\n"); dbg(" Changed bits are 0x%4.4x\n", change ); for (hp_slot = 0; hp_slot < 6; hp_slot++) { if (change & (0x0101 << hp_slot)) { /* * this one changed. 
*/ func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0); taskInfo = &(ctrl->event_queue[ctrl->next_event]); ctrl->next_event = (ctrl->next_event + 1) % 10; taskInfo->hp_slot = hp_slot; rc++; p_slot = cpqhp_find_slot(ctrl, hp_slot + (readb(ctrl->hpc_reg + SLOT_MASK) >> 4)); if (!p_slot) return 0; /* If the switch closed, must be a button * If not in button mode, nevermind */ if (func->switch_save && (ctrl->push_button == 1)) { temp_word = ctrl->ctrl_int_comp >> 16; temp_byte = (temp_word >> hp_slot) & 0x01; temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02; if (temp_byte != func->presence_save) { /* * button Pressed (doesn't do anything) */ dbg("hp_slot %d button pressed\n", hp_slot); taskInfo->event_type = INT_BUTTON_PRESS; } else { /* * button Released - TAKE ACTION!!!! */ dbg("hp_slot %d button released\n", hp_slot); taskInfo->event_type = INT_BUTTON_RELEASE; /* Cancel if we are still blinking */ if ((p_slot->state == BLINKINGON_STATE) || (p_slot->state == BLINKINGOFF_STATE)) { taskInfo->event_type = INT_BUTTON_CANCEL; dbg("hp_slot %d button cancel\n", hp_slot); } else if ((p_slot->state == POWERON_STATE) || (p_slot->state == POWEROFF_STATE)) { /* info(msg_button_ignore, p_slot->number); */ taskInfo->event_type = INT_BUTTON_IGNORE; dbg("hp_slot %d button ignore\n", hp_slot); } } } else { /* Switch is open, assume a presence change * Save the presence state */ temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if ((!(ctrl->ctrl_int_comp & (0x010000 << hp_slot))) || (!(ctrl->ctrl_int_comp & (0x01000000 << hp_slot)))) { /* Present */ taskInfo->event_type = INT_PRESENCE_ON; } else { /* Not Present */ taskInfo->event_type = INT_PRESENCE_OFF; } } } } return rc; } static u8 handle_power_fault(u8 change, struct controller * ctrl) { int hp_slot; u8 rc = 0; struct pci_func *func; struct event_info *taskInfo; if (!change) return 0; /* * power fault */ 
info("power fault interrupt\n"); for (hp_slot = 0; hp_slot < 6; hp_slot++) { if (change & (0x01 << hp_slot)) { /* * this one changed. */ func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0); taskInfo = &(ctrl->event_queue[ctrl->next_event]); ctrl->next_event = (ctrl->next_event + 1) % 10; taskInfo->hp_slot = hp_slot; rc++; if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) { /* * power fault Cleared */ func->status = 0x00; taskInfo->event_type = INT_POWER_FAULT_CLEAR; } else { /* * power fault */ taskInfo->event_type = INT_POWER_FAULT; if (ctrl->rev < 4) { amber_LED_on (ctrl, hp_slot); green_LED_off (ctrl, hp_slot); set_SOGO (ctrl); /* this is a fatal condition, we want * to crash the machine to protect from * data corruption. simulated_NMI * shouldn't ever return */ /* FIXME simulated_NMI(hp_slot, ctrl); */ /* The following code causes a software * crash just in case simulated_NMI did * return */ /*FIXME panic(msg_power_fault); */ } else { /* set power fault status for this board */ func->status = 0xFF; info("power fault bit %x set\n", hp_slot); } } } } return rc; } /** * sort_by_size - sort nodes on the list by their length, smallest first. 
* @head: list to sort
 *
 * Returns 1 for an empty list, 0 once the list has been sorted.
 */
static int sort_by_size(struct pci_resource **head)
{
	struct pci_resource *current_res;
	struct pci_resource *next_res;
	int out_of_order = 1;	/* nonzero while the last pass swapped nodes */

	if (!(*head))
		return 1;

	if (!((*head)->next))
		return 0;

	/* Bubble sort: sweep the list until a full pass makes no swap. */
	while (out_of_order) {
		out_of_order = 0;

		/* Special case for swapping list head */
		if (((*head)->next) &&
		    ((*head)->length > (*head)->next->length)) {
			out_of_order++;
			current_res = *head;
			*head = (*head)->next;
			current_res->next = (*head)->next;
			(*head)->next = current_res;
		}

		current_res = *head;

		/* Swap adjacent out-of-order neighbours in the remainder. */
		while (current_res->next && current_res->next->next) {
			if (current_res->next->length > current_res->next->next->length) {
				out_of_order++;
				next_res = current_res->next;
				current_res->next = current_res->next->next;
				current_res = current_res->next;
				next_res->next = current_res->next;
				current_res->next = next_res;
			} else
				current_res = current_res->next;
		}
	}  /* End of out_of_order loop */

	return 0;
}

/**
 * sort_by_max_size - sort nodes on the list by their length, largest first.
 * @head: list to sort
 *
 * Same bubble sort as sort_by_size() with the comparison reversed.
 * Returns 1 for an empty list, 0 once the list has been sorted.
 */
static int sort_by_max_size(struct pci_resource **head)
{
	struct pci_resource *current_res;
	struct pci_resource *next_res;
	int out_of_order = 1;	/* nonzero while the last pass swapped nodes */

	if (!(*head))
		return 1;

	if (!((*head)->next))
		return 0;

	while (out_of_order) {
		out_of_order = 0;

		/* Special case for swapping list head */
		if (((*head)->next) &&
		    ((*head)->length < (*head)->next->length)) {
			out_of_order++;
			current_res = *head;
			*head = (*head)->next;
			current_res->next = (*head)->next;
			(*head)->next = current_res;
		}

		current_res = *head;

		while (current_res->next && current_res->next->next) {
			if (current_res->next->length < current_res->next->next->length) {
				out_of_order++;
				next_res = current_res->next;
				current_res->next = current_res->next->next;
				current_res = current_res->next;
				next_res->next = current_res->next;
				current_res->next = next_res;
			} else
				current_res = current_res->next;
		}
	}  /* End of out_of_order loop */

	return 0;
}

/**
 * do_pre_bridge_resource_split - find node of resources that are unused
 *
@head: new list head
 * @orig_head: original list head
 * @alignment: max node size (?)
 *
 * Splits an aligned-length chunk off the FRONT of the first resource node
 * and unlinks it from the list.  Returns the unlinked node, or %NULL if the
 * lists are empty, disagree, or no aligned chunk of at least @alignment
 * can be produced.  Caller takes ownership of the returned node.
 */
static struct pci_resource *do_pre_bridge_resource_split(struct pci_resource **head,
				struct pci_resource **orig_head, u32 alignment)
{
	struct pci_resource *prevnode = NULL;
	struct pci_resource *node;
	struct pci_resource *split_node;
	u32 rc;
	u32 temp_dword;
	dbg("do_pre_bridge_resource_split\n");

	if (!(*head) || !(*orig_head))
		return NULL;

	rc = cpqhp_resource_sort_and_combine(head);

	if (rc)
		return NULL;

	/* Both lists must start at the same base but differ in length,
	 * i.e. part of the original resource is already consumed. */
	if ((*head)->base != (*orig_head)->base)
		return NULL;

	if ((*head)->length == (*orig_head)->length)
		return NULL;

	/* If we got here, the bridge requires some of the resource, but
	 * we may be able to split some off of the front */

	node = *head;

	if (node->length & (alignment - 1)) {
		/* this one isn't an aligned length, so we'll make a new entry
		 * and split it up. */
		split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
		if (!split_node)
			return NULL;

		/* Largest multiple of @alignment that fits in node->length. */
		temp_dword = (node->length | (alignment - 1)) + 1 - alignment;

		split_node->base = node->base;
		split_node->length = temp_dword;

		node->length -= temp_dword;
		node->base += split_node->length;

		/* Put it in the list: aligned front piece becomes new head. */
		*head = split_node;
		split_node->next = node;
	}

	if (node->length < alignment)
		return NULL;

	/* Now unlink it */
	if (*head == node) {
		*head = node->next;
	} else {
		prevnode = *head;
		while (prevnode->next != node)
			prevnode = prevnode->next;

		prevnode->next = node->next;
	}
	node->next = NULL;

	return node;
}

/**
 * do_bridge_resource_split - find one node of resources that aren't in use
 * @head: list head
 * @alignment: max node size (?)
 */
static struct pci_resource *do_bridge_resource_split(struct pci_resource **head, u32 alignment)
{
	struct pci_resource *prevnode = NULL;
	struct pci_resource *node;
	u32 rc;
	u32 temp_dword;

	rc = cpqhp_resource_sort_and_combine(head);

	if (rc)
		return NULL;

	node = *head;

	/* Free every node except the last one; only the final (highest-base
	 * after sort/combine) node is kept as the candidate.
	 * NOTE(review): this consumes the whole list, so *head is left
	 * dangling for the caller — presumably the caller discards it. */
	while (node->next) {
		prevnode = node;
		node = node->next;
		kfree(prevnode);
	}

	if (node->length < alignment)
		goto error;

	if (node->base & (alignment - 1)) {
		/* Round base up to the next @alignment boundary. */
		temp_dword = (node->base | (alignment - 1)) + 1;

		/* Short circuit if adjusted size is too small */
		if ((node->length - (temp_dword - node->base)) < alignment)
			goto error;

		node->length -= (temp_dword - node->base);
		node->base = temp_dword;
	}

	if (node->length & (alignment - 1))
		/* There's stuff in use after this node */
		goto error;

	return node;
error:
	kfree(node);
	return NULL;
}

/**
 * get_io_resource - find first node of given size not in ISA aliasing window.
 * @head: list to search
 * @size: size of node to find, must be a power of two.
 *
 * Description: This function sorts the resource list by size and then
 * returns the first node of "size" length that is not in the ISA aliasing
 * window.  If it finds a node larger than "size" it will split it up.
 */
static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size)
{
	struct pci_resource *prevnode;
	struct pci_resource *node;
	struct pci_resource *split_node;
	u32 temp_dword;

	if (!(*head))
		return NULL;

	if (cpqhp_resource_sort_and_combine(head))
		return NULL;

	if (sort_by_size(head))
		return NULL;

	for (node = *head; node; node = node->next) {
		if (node->length < size)
			continue;

		if (node->base & (size - 1)) {
			/* this one isn't base aligned properly
			 * so we'll make a new entry and split it up */
			temp_dword = (node->base | (size - 1)) + 1;

			/* Short circuit if adjusted size is too small */
			if ((node->length - (temp_dword - node->base)) < size)
				continue;

			split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
			if (!split_node)
				return NULL;

			/* Unaligned front remainder stays in the list ahead
			 * of the now-aligned node. */
			split_node->base = node->base;
			split_node->length = temp_dword - node->base;
			node->base = temp_dword;
			node->length -= split_node->length;

			/* Put it in the list */
			split_node->next = node->next;
			node->next = split_node;
		} /* End of non-aligned base */

		/* Don't need to check if too small since we already did */
		if (node->length > size) {
			/* this one is longer than we need
			 * so we'll make a new entry and split it up */
			split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
			if (!split_node)
				return NULL;

			split_node->base = node->base + size;
			split_node->length = node->length - size;
			node->length = size;

			/* Put it in the list */
			split_node->next = node->next;
			node->next = split_node;
		} /* End of too big on top end */

		/* For IO make sure it's not in the ISA aliasing space */
		if (node->base & 0x300L)
			continue;

		/* If we got here, then it is the right size
		 * Now take it out of the list and break */
		if (*head == node) {
			*head = node->next;
		} else {
			prevnode = *head;
			while (prevnode->next != node)
				prevnode = prevnode->next;

			prevnode->next = node->next;
		}
		node->next = NULL;
		break;
	}

	/* NULL when the loop ran off the end without a match. */
	return node;
}

/**
 * get_max_resource - get largest node which has at least the given size.
 * @head: the list to search the node in
 * @size: the minimum size of the node to find
 *
 * Description: Gets the largest node that is at least "size" big from the
 * list pointed to by head.  It aligns the node on top and bottom
 * to "size" alignment before returning it.
 */
static struct pci_resource *get_max_resource(struct pci_resource **head, u32 size)
{
	struct pci_resource *max;
	struct pci_resource *temp;
	struct pci_resource *split_node;
	u32 temp_dword;

	if (cpqhp_resource_sort_and_combine(head))
		return NULL;

	if (sort_by_max_size(head))
		return NULL;

	for (max = *head; max; max = max->next) {
		/* If not big enough we could probably just bail,
		 * instead we'll continue to the next. */
		if (max->length < size)
			continue;

		if (max->base & (size - 1)) {
			/* this one isn't base aligned properly
			 * so we'll make a new entry and split it up */
			temp_dword = (max->base | (size - 1)) + 1;

			/* Short circuit if adjusted size is too small */
			if ((max->length - (temp_dword - max->base)) < size)
				continue;

			split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
			if (!split_node)
				return NULL;

			split_node->base = max->base;
			split_node->length = temp_dword - max->base;
			max->base = temp_dword;
			max->length -= split_node->length;

			split_node->next = max->next;
			max->next = split_node;
		}

		if ((max->base + max->length) & (size - 1)) {
			/* this one isn't end aligned properly at the top
			 * so we'll make a new entry and split it up */
			split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
			if (!split_node)
				return NULL;
			/* Round the end down to a size boundary; the tail
			 * remainder becomes a new node after max. */
			temp_dword = ((max->base + max->length) & ~(size - 1));
			split_node->base = temp_dword;
			split_node->length = max->length + max->base
					     - split_node->base;
			max->length -= split_node->length;

			split_node->next = max->next;
			max->next = split_node;
		}

		/* Make sure it didn't shrink too much when we aligned it */
		if (max->length < size)
			continue;

		/* Now take it out of the list */
		temp = *head;
		if (temp == max) {
			*head = max->next;
		} else {
			while (temp && temp->next != max) {
				temp = temp->next;
			}

			temp->next = max->next;
		}

		max->next = NULL;
		break;
	}

	return max;
}

/**
 * get_resource - find resource of given size and split up larger ones.
 * @head: the list to search for resources
 * @size: the size limit to use
 *
 * Description: This function sorts the resource list by size and then
 * returns the first node of "size" length.  If it finds a node
 * larger than "size" it will split it up.
 *
 * size must be a power of two.
 */
static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
{
	struct pci_resource *prevnode;
	struct pci_resource *node;
	struct pci_resource *split_node;
	u32 temp_dword;

	if (cpqhp_resource_sort_and_combine(head))
		return NULL;

	if (sort_by_size(head))
		return NULL;

	for (node = *head; node; node = node->next) {
		dbg("%s: req_size =%x node=%p, base=%x, length=%x\n",
		    __func__, size, node, node->base, node->length);
		if (node->length < size)
			continue;

		if (node->base & (size - 1)) {
			dbg("%s: not aligned\n", __func__);
			/* this one isn't base aligned properly
			 * so we'll make a new entry and split it up */
			temp_dword = (node->base | (size - 1)) + 1;

			/* Short circuit if adjusted size is too small */
			if ((node->length - (temp_dword - node->base)) < size)
				continue;

			split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
			if (!split_node)
				return NULL;

			split_node->base = node->base;
			split_node->length = temp_dword - node->base;
			node->base = temp_dword;
			node->length -= split_node->length;

			split_node->next = node->next;
			node->next = split_node;
		} /* End of non-aligned base */

		/* Don't need to check if too small since we already did */
		if (node->length > size) {
			dbg("%s: too big\n", __func__);
			/* this one is longer than we need
			 * so we'll make a new entry and split it up */
			split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
			if (!split_node)
				return NULL;

			split_node->base = node->base + size;
			split_node->length = node->length - size;
			node->length = size;

			/* Put it in the list */
			split_node->next = node->next;
			node->next = split_node;
		} /* End of too big on top end
 */
		dbg("%s: got one!!!\n", __func__);
		/* If we got here, then it is the right size
		 * Now take it out of the list */
		if (*head == node) {
			*head = node->next;
		} else {
			prevnode = *head;
			while (prevnode->next != node)
				prevnode = prevnode->next;

			prevnode->next = node->next;
		}
		node->next = NULL;
		break;
	}

	return node;
}

/**
 * cpqhp_resource_sort_and_combine - sort nodes by base addresses and clean up
 * @head: the list to sort and clean up
 *
 * Description: Sorts all of the nodes in the list in ascending order by
 * their base addresses.  Also does garbage collection by
 * combining adjacent nodes.
 *
 * Returns %0 if success.
 */
int cpqhp_resource_sort_and_combine(struct pci_resource **head)
{
	struct pci_resource *node1;
	struct pci_resource *node2;
	int out_of_order = 1;

	dbg("%s: head = %p, *head = %p\n", __func__, head, *head);

	if (!(*head))
		return 1;

	dbg("*head->next = %p\n", (*head)->next);

	if (!(*head)->next)
		return 0;	/* only one item on the list, already sorted! */

	dbg("*head->base = 0x%x\n", (*head)->base);
	dbg("*head->next->base = 0x%x\n", (*head)->next->base);
	/* Relink-based bubble sort on base address, same pattern as
	 * sort_by_size(). */
	while (out_of_order) {
		out_of_order = 0;

		/* Special case for swapping list head */
		if (((*head)->next) &&
		    ((*head)->base > (*head)->next->base)) {
			node1 = *head;
			(*head) = (*head)->next;
			node1->next = (*head)->next;
			(*head)->next = node1;
			out_of_order++;
		}

		node1 = (*head);

		while (node1->next && node1->next->next) {
			if (node1->next->base > node1->next->next->base) {
				out_of_order++;
				node2 = node1->next;
				node1->next = node1->next->next;
				node1 = node1->next;
				node2->next = node1->next;
				node1->next = node2;
			} else
				node1 = node1->next;
		}
	}  /* End of out_of_order loop */

	/* Second pass: merge nodes whose ranges are exactly adjacent,
	 * freeing the absorbed node. */
	node1 = *head;

	while (node1 && node1->next) {
		if ((node1->base + node1->length) == node1->next->base) {
			/* Combine */
			dbg("8..\n");
			node1->length += node1->next->length;
			node2 = node1->next;
			node1->next = node1->next->next;
			kfree(node2);
		} else
			node1 = node1->next;
	}

	return 0;
}

irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data)
{
	struct controller *ctrl =
data;
	u8 schedule_flag = 0;
	u8 reset;
	u16 misc;
	u32 Diff;
	u32 temp_dword;


	misc = readw(ctrl->hpc_reg + MISC);
	/*
	 * Check to see if it was our interrupt
	 */
	if (!(misc & 0x000C)) {
		return IRQ_NONE;
	}

	if (misc & 0x0004) {
		/*
		 * Serial Output interrupt Pending
		 */

		/* Clear the interrupt */
		misc |= 0x0004;
		writew(misc, ctrl->hpc_reg + MISC);

		/* Read to clear posted writes */
		misc = readw(ctrl->hpc_reg + MISC);

		dbg ("%s - waking up\n", __func__);
		wake_up_interruptible(&ctrl->queue);
	}

	if (misc & 0x0008) {
		/* General-interrupt-input interrupt Pending
		 * XOR against the saved copy to find which input bits
		 * changed since the last interrupt. */
		Diff = readl(ctrl->hpc_reg + INT_INPUT_CLEAR) ^ ctrl->ctrl_int_comp;

		ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);

		/* Clear the interrupt */
		writel(Diff, ctrl->hpc_reg + INT_INPUT_CLEAR);

		/* Read it back to clear any posted writes */
		temp_dword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);

		if (!Diff)
			/* Clear all interrupts */
			writel(0xFFFFFFFF, ctrl->hpc_reg + INT_INPUT_CLEAR);

		/* Bits [7:0] switch, [31:16] presence, [15:8] power fault;
		 * each handler returns nonzero when the event thread must
		 * be woken. */
		schedule_flag += handle_switch_change((u8)(Diff & 0xFFL), ctrl);
		schedule_flag += handle_presence_change((u16)((Diff & 0xFFFF0000L) >> 16), ctrl);
		schedule_flag += handle_power_fault((u8)((Diff & 0xFF00L) >> 8), ctrl);
	}

	reset = readb(ctrl->hpc_reg + RESET_FREQ_MODE);
	if (reset & 0x40) {
		/* Bus reset has completed */
		reset &= 0xCF;
		writeb(reset, ctrl->hpc_reg + RESET_FREQ_MODE);
		reset = readb(ctrl->hpc_reg + RESET_FREQ_MODE);
		wake_up_interruptible(&ctrl->queue);
	}

	if (schedule_flag) {
		wake_up_process(cpqhp_event_thread);
		dbg("Waking even thread");
	}
	return IRQ_HANDLED;
}

/**
 * cpqhp_slot_create - Creates a node and adds it to the proper bus.
 * @busnumber: bus where new node is to be located
 *
 * Returns pointer to the new node or %NULL if unsuccessful.
*/ struct pci_func *cpqhp_slot_create(u8 busnumber) { struct pci_func *new_slot; struct pci_func *next; new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL); if (new_slot == NULL) return new_slot; new_slot->next = NULL; new_slot->configured = 1; if (cpqhp_slot_list[busnumber] == NULL) { cpqhp_slot_list[busnumber] = new_slot; } else { next = cpqhp_slot_list[busnumber]; while (next->next != NULL) next = next->next; next->next = new_slot; } return new_slot; } /** * slot_remove - Removes a node from the linked list of slots. * @old_slot: slot to remove * * Returns %0 if successful, !0 otherwise. */ static int slot_remove(struct pci_func * old_slot) { struct pci_func *next; if (old_slot == NULL) return 1; next = cpqhp_slot_list[old_slot->bus]; if (next == NULL) return 1; if (next == old_slot) { cpqhp_slot_list[old_slot->bus] = old_slot->next; cpqhp_destroy_board_resources(old_slot); kfree(old_slot); return 0; } while ((next->next != old_slot) && (next->next != NULL)) next = next->next; if (next->next == old_slot) { next->next = old_slot->next; cpqhp_destroy_board_resources(old_slot); kfree(old_slot); return 0; } else return 2; } /** * bridge_slot_remove - Removes a node from the linked list of slots. * @bridge: bridge to remove * * Returns %0 if successful, !0 otherwise. 
 */
static int bridge_slot_remove(struct pci_func *bridge)
{
	u8 subordinateBus, secondaryBus;
	u8 tempBus;
	struct pci_func *next;

	/* Secondary/subordinate bus numbers live in config_space word 0x06
	 * (bytes 1 and 2 of that dword). */
	secondaryBus = (bridge->config_space[0x06] >> 8) & 0xFF;
	subordinateBus = (bridge->config_space[0x06] >> 16) & 0xFF;

	/* Remove every slot on every bus behind the bridge; slot_remove()
	 * returns nonzero once a bus list is exhausted. */
	for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) {
		next = cpqhp_slot_list[tempBus];

		while (!slot_remove(next))
			next = cpqhp_slot_list[tempBus];
	}

	next = cpqhp_slot_list[bridge->bus];

	if (next == NULL)
		return 1;

	if (next == bridge) {
		cpqhp_slot_list[bridge->bus] = bridge->next;
		goto out;
	}

	while ((next->next != bridge) && (next->next != NULL))
		next = next->next;

	if (next->next != bridge)
		return 2;
	next->next = bridge->next;
out:
	kfree(bridge);
	return 0;
}

/**
 * cpqhp_slot_find - Looks for a node by bus, and device, multiple functions accessed
 * @bus: bus to find
 * @device: device to find
 * @index: is %0 for first function found, %1 for the second...
 *
 * Returns pointer to the node if successful, %NULL otherwise.
 */
struct pci_func *cpqhp_slot_find(u8 bus, u8 device, u8 index)
{
	int found = -1;		/* count of matches seen so far, -1 = none */
	struct pci_func *func;

	func = cpqhp_slot_list[bus];

	/* Empty list returns NULL; a head match with index 0 returns the
	 * head directly. */
	if ((func == NULL) || ((func->device == device) && (index == 0)))
		return func;

	if (func->device == device)
		found++;

	while (func->next != NULL) {
		func = func->next;

		if (func->device == device)
			found++;

		if (found == index)
			return func;
	}

	return NULL;
}

/* DJZ: I don't think is_bridge will work as is.
 * FIXME */
static int is_bridge(struct pci_func *func)
{
	/* Check the header type (byte 2 of config_space dword 0x03);
	 * 0x01 is a PCI-to-PCI bridge header. */
	if (((func->config_space[0x03] >> 16) & 0xFF) == 0x01)
		return 1;
	else
		return 0;
}

/**
 * set_controller_speed - set the frequency and/or mode of a specific controller segment.
 * @ctrl: controller to change frequency/mode for.
 * @adapter_speed: the speed of the adapter we want to match.
 * @hp_slot: the slot number where the adapter is installed.
 *
 * Returns %0 if we successfully change frequency and/or mode to match the
 * adapter speed.
 */
static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot)
{
	struct slot *slot;
	u8 reg;
	u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
	u16 reg16;
	u32 leds = readl(ctrl->hpc_reg + LED_CONTROL);

	if (ctrl->speed == adapter_speed)
		return 0;

	/* We don't allow freq/mode changes if we find another adapter running
	 * in another slot on this controller
	 */
	for (slot = ctrl->slot; slot; slot = slot->next) {
		if (slot->device == (hp_slot + ctrl->slot_device_offset))
			continue;
		if (!slot->hotplug_slot || !slot->hotplug_slot->info)
			continue;
		if (slot->hotplug_slot->info->adapter_status == 0)
			continue;
		/* If another adapter is running on the same segment but at a
		 * lower speed/mode, we allow the new adapter to function at
		 * this rate if supported
		 */
		if (ctrl->speed < adapter_speed)
			return 0;

		return 1;
	}

	/* If the controller doesn't support freq/mode changes and the
	 * controller is running at a higher mode, we bail
	 */
	if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability))
		return 1;

	/* But we allow the adapter to run at a lower rate if possible */
	if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability))
		return 0;

	/* We try to set the max speed supported by both the adapter and
	 * controller
	 */
	if (ctrl->speed_capability < adapter_speed) {
		if (ctrl->speed == ctrl->speed_capability)
			return 0;
		adapter_speed = ctrl->speed_capability;
	}

	/* Quiesce the segment: LEDs off, all slots disabled, before
	 * reprogramming the clock. */
	writel(0x0L, ctrl->hpc_reg + LED_CONTROL);
	writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE);

	set_SOGO(ctrl);
	wait_for_ctrl_irq(ctrl);

	if (adapter_speed != PCI_SPEED_133MHz_PCIX)
		reg = 0xF5;
	else
		reg = 0xF4;
	pci_write_config_byte(ctrl->pci_dev, 0x41, reg);

	/* Program the new frequency/mode into the low nibble of the
	 * NEXT_CURR_FREQ register; `reg` holds the matching value for
	 * config register 0x41.
	 * NOTE(review): the 0x41/0x43 offsets and nibble encodings are
	 * controller-specific magic — verify against the HPC datasheet. */
	reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ);
	reg16 &= ~0x000F;
	switch (adapter_speed) {
		case(PCI_SPEED_133MHz_PCIX):
			reg = 0x75;
			reg16 |= 0xB;
			break;
		case(PCI_SPEED_100MHz_PCIX):
			reg = 0x74;
			reg16 |= 0xA;
			break;
		case(PCI_SPEED_66MHz_PCIX):
			reg = 0x73;
			reg16 |= 0x9;
			break;
		case(PCI_SPEED_66MHz):
			reg = 0x73;
			reg16 |= 0x1;
			break;
		default: /* 33MHz PCI 2.2 */
			reg = 0x71;
			break;
	}
	reg16 |= 0xB << 12;
	writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ);

	mdelay(5);

	/* Reenable interrupts */
	writel(0, ctrl->hpc_reg + INT_MASK);

	pci_write_config_byte(ctrl->pci_dev, 0x41, reg);

	/* Restart state machine */
	reg = ~0xF;
	pci_read_config_byte(ctrl->pci_dev, 0x43, &reg);
	pci_write_config_byte(ctrl->pci_dev, 0x43, reg);

	/* Only if mode change...*/
	if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
		((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
			set_SOGO(ctrl);

	wait_for_ctrl_irq(ctrl);
	mdelay(1100);

	/* Restore LED/Slot state */
	writel(leds, ctrl->hpc_reg + LED_CONTROL);
	writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE);

	set_SOGO(ctrl);
	wait_for_ctrl_irq(ctrl);

	ctrl->speed = adapter_speed;
	slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);

	info("Successfully changed frequency/mode for adapter in slot %d\n",
			slot->number);
	return 0;
}

/* the following routines constitute the bulk of the
 * hotplug controller logic
 */

/**
 * board_replaced - Called after a board has been replaced in the system.
 * @func: PCI device/function information
 * @ctrl: hotplug controller
 *
 * This is only used if we don't have resources for hot add.
 * Turns power on for the board.
 * Checks to see if board is the same.
 * If board is same, reconfigures it.
 * If board isn't same, turns it back off.
 */
static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
{
	u8 hp_slot;
	u8 temp_byte;
	u8 adapter_speed;
	u32 rc = 0;

	hp_slot = func->device - ctrl->slot_device_offset;

	/*
	 * The switch is open.
 */
	if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot))
		rc = INTERLOCK_OPEN;
	/*
	 * The board is already on
	 */
	else if (is_slot_enabled (ctrl, hp_slot))
		rc = CARD_FUNCTIONING;
	else {
		mutex_lock(&ctrl->crit_sect);

		/* turn on board without attaching to the bus */
		enable_slot_power (ctrl, hp_slot);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq (ctrl);

		/* Change bits in slot power register to force another shift out
		 * NOTE: this is to work around the timer bug */
		temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
		writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
		writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq (ctrl);

		adapter_speed = get_adapter_speed(ctrl, hp_slot);
		if (ctrl->speed != adapter_speed)
			if (set_controller_speed(ctrl, adapter_speed, hp_slot))
				rc = WRONG_BUS_FREQUENCY;

		/* turn off board without attaching to the bus */
		disable_slot_power (ctrl, hp_slot);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq (ctrl);

		mutex_unlock(&ctrl->crit_sect);

		if (rc)
			return rc;

		mutex_lock(&ctrl->crit_sect);

		slot_enable (ctrl, hp_slot);
		green_LED_blink (ctrl, hp_slot);

		amber_LED_off (ctrl, hp_slot);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq (ctrl);

		mutex_unlock(&ctrl->crit_sect);

		/* Wait for ~1 second because of hot plug spec */
		long_delay(1*HZ);

		/* Check for a power fault */
		if (func->status == 0xFF) {
			/* power fault occurred, but it was benign */
			rc = POWER_FAILURE;
			func->status = 0;
		} else
			rc = cpqhp_valid_replace(ctrl, func);

		if (!rc) {
			/* It must be the same board */

			rc = cpqhp_configure_board(ctrl, func);

			/* If configuration fails, turn it off
			 * Get slot won't work for devices behind
			 * bridges, but in this case it will always be
			 * called for the "base" bus/dev/func of an
			 * adapter.
			 * NOTE(review): the slot is disabled and the amber
			 * LED lit even when cpqhp_configure_board()
			 * succeeded — in that case 1 is returned below;
			 * presumably intentional for the no-hot-add path.
			 */

			mutex_lock(&ctrl->crit_sect);

			amber_LED_on (ctrl, hp_slot);
			green_LED_off (ctrl, hp_slot);
			slot_disable (ctrl, hp_slot);

			set_SOGO(ctrl);

			/* Wait for SOBS to be unset */
			wait_for_ctrl_irq (ctrl);

			mutex_unlock(&ctrl->crit_sect);

			if (rc)
				return rc;
			else
				return 1;

		} else {
			/* Something is wrong

			 * Get slot won't work for devices behind bridges, but
			 * in this case it will always be called for the "base"
			 * bus/dev/func of an adapter. */

			mutex_lock(&ctrl->crit_sect);

			amber_LED_on (ctrl, hp_slot);
			green_LED_off (ctrl, hp_slot);
			slot_disable (ctrl, hp_slot);

			set_SOGO(ctrl);

			/* Wait for SOBS to be unset */
			wait_for_ctrl_irq (ctrl);

			mutex_unlock(&ctrl->crit_sect);
		}

	}
	return rc;

}

/**
 * board_added - Called after a board has been added to the system.
 * @func: PCI device/function info
 * @ctrl: hotplug controller
 *
 * Turns power on for the board.
 * Configures board.
 */
static u32 board_added(struct pci_func *func, struct controller *ctrl)
{
	u8 hp_slot;
	u8 temp_byte;
	u8 adapter_speed;
	int index;
	u32 temp_register = 0xFFFFFFFF;
	u32 rc = 0;
	struct pci_func *new_slot = NULL;
	struct slot *p_slot;
	struct resource_lists res_lists;

	hp_slot = func->device - ctrl->slot_device_offset;
	dbg("%s: func->device, slot_offset, hp_slot = %d, %d ,%d\n",
	    __func__, func->device, ctrl->slot_device_offset, hp_slot);

	mutex_lock(&ctrl->crit_sect);

	/* turn on board without attaching to the bus */
	enable_slot_power(ctrl, hp_slot);

	set_SOGO(ctrl);

	/* Wait for SOBS to be unset */
	wait_for_ctrl_irq (ctrl);

	/* Change bits in slot power register to force another shift out
	 * NOTE: this is to work around the timer bug */
	temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
	writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
	writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);

	set_SOGO(ctrl);

	/* Wait for SOBS to be unset */
	wait_for_ctrl_irq (ctrl);

	adapter_speed = get_adapter_speed(ctrl, hp_slot);
	if (ctrl->speed != adapter_speed)
		if (set_controller_speed(ctrl, adapter_speed, hp_slot))
			rc = WRONG_BUS_FREQUENCY;

	/* turn off board
without attaching to the bus */
	disable_slot_power (ctrl, hp_slot);

	set_SOGO(ctrl);

	/* Wait for SOBS to be unset */
	wait_for_ctrl_irq(ctrl);

	mutex_unlock(&ctrl->crit_sect);

	if (rc)
		return rc;

	p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);

	/* turn on board and blink green LED */

	dbg("%s: before down\n", __func__);
	mutex_lock(&ctrl->crit_sect);
	dbg("%s: after down\n", __func__);

	dbg("%s: before slot_enable\n", __func__);
	slot_enable (ctrl, hp_slot);

	dbg("%s: before green_LED_blink\n", __func__);
	green_LED_blink (ctrl, hp_slot);

	dbg("%s: before amber_LED_blink\n", __func__);
	amber_LED_off (ctrl, hp_slot);

	dbg("%s: before set_SOGO\n", __func__);
	set_SOGO(ctrl);

	/* Wait for SOBS to be unset */
	dbg("%s: before wait_for_ctrl_irq\n", __func__);
	wait_for_ctrl_irq (ctrl);
	dbg("%s: after wait_for_ctrl_irq\n", __func__);

	dbg("%s: before up\n", __func__);
	mutex_unlock(&ctrl->crit_sect);
	dbg("%s: after up\n", __func__);

	/* Wait for ~1 second because of hot plug spec */
	dbg("%s: before long_delay\n", __func__);
	long_delay(1*HZ);
	dbg("%s: after long_delay\n", __func__);

	dbg("%s: func status = %x\n", __func__, func->status);
	/* Check for a power fault */
	if (func->status == 0xFF) {
		/* power fault occurred, but it was benign */
		temp_register = 0xFFFFFFFF;
		dbg("%s: temp register set to %x by power fault\n", __func__,
		    temp_register);
		rc = POWER_FAILURE;
		func->status = 0;
	} else {
		/* Get vendor/device ID u32 */
		ctrl->pci_bus->number = func->bus;
		rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, func->function), PCI_VENDOR_ID, &temp_register);
		dbg("%s: pci_read_config_dword returns %d\n", __func__, rc);
		dbg("%s: temp_register is %x\n", __func__, temp_register);

		if (rc != 0) {
			/* Something's wrong here */
			temp_register = 0xFFFFFFFF;
			dbg("%s: temp register set to %x by error\n", __func__,
			    temp_register);
		}
		/* Preset return code.  It will be changed later if things go
		 * okay. */
		rc = NO_ADAPTER_PRESENT;
	}

	/* All F's is an empty slot or an invalid board */
	if (temp_register != 0xFFFFFFFF) {
		/* Hand the controller's free-resource lists to the
		 * configuration code, then take back whatever is left. */
		res_lists.io_head = ctrl->io_head;
		res_lists.mem_head = ctrl->mem_head;
		res_lists.p_mem_head = ctrl->p_mem_head;
		res_lists.bus_head = ctrl->bus_head;
		res_lists.irqs = NULL;

		rc = configure_new_device(ctrl, func, 0, &res_lists);
		dbg("%s: back from configure_new_device\n", __func__);

		ctrl->io_head = res_lists.io_head;
		ctrl->mem_head = res_lists.mem_head;
		ctrl->p_mem_head = res_lists.p_mem_head;
		ctrl->bus_head = res_lists.bus_head;

		cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
		cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
		cpqhp_resource_sort_and_combine(&(ctrl->io_head));
		cpqhp_resource_sort_and_combine(&(ctrl->bus_head));

		if (rc) {
			/* Configuration failed: amber LED on, slot off. */
			mutex_lock(&ctrl->crit_sect);

			amber_LED_on (ctrl, hp_slot);
			green_LED_off (ctrl, hp_slot);
			slot_disable (ctrl, hp_slot);

			set_SOGO(ctrl);

			/* Wait for SOBS to be unset */
			wait_for_ctrl_irq (ctrl);

			mutex_unlock(&ctrl->crit_sect);
			return rc;
		} else {
			cpqhp_save_slot_config(ctrl, func);
		}


		func->status = 0;
		func->switch_save = 0x10;
		func->is_a_board = 0x01;

		/* next, we will instantiate the linux pci_dev structures (with
		 * appropriate driver notification, if already present) */
		dbg("%s: configure linux pci_dev structure\n", __func__);
		index = 0;
		do {
			new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++);
			if (new_slot && !new_slot->pci_dev)
				cpqhp_configure_device(ctrl, new_slot);
		} while (new_slot);

		mutex_lock(&ctrl->crit_sect);

		green_LED_on (ctrl, hp_slot);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq (ctrl);

		mutex_unlock(&ctrl->crit_sect);
	} else {
		/* Empty slot or invalid board: signal failure on the LEDs
		 * and power the slot back down. */
		mutex_lock(&ctrl->crit_sect);

		amber_LED_on (ctrl, hp_slot);
		green_LED_off (ctrl, hp_slot);
		slot_disable (ctrl, hp_slot);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq (ctrl);

		mutex_unlock(&ctrl->crit_sect);

		return rc;
	}
	return 0;
}

/**
 * remove_board - Turns off slot and LEDs
 * @func: PCI device/function info
 * @replace_flag:
whether replacing or adding a new device
 * @ctrl: target controller
 */
static u32 remove_board(struct pci_func *func, u32 replace_flag, struct controller *ctrl)
{
	int index;
	u8 skip = 0;
	u8 device;
	u8 hp_slot;
	u8 temp_byte;
	u32 rc;
	struct resource_lists res_lists;
	struct pci_func *temp_func;

	if (cpqhp_unconfigure_device(func))
		return 1;

	device = func->device;

	hp_slot = func->device - ctrl->slot_device_offset;
	dbg("In %s, hp_slot = %d\n", __func__, hp_slot);

	/* When we get here, it is safe to change base address registers.
	 * We will attempt to save the base address register lengths */
	if (replace_flag || !ctrl->add_support)
		rc = cpqhp_save_base_addr_length(ctrl, func);
	else if (!func->bus_head && !func->mem_head &&
		 !func->p_mem_head && !func->io_head) {
		/* Here we check to see if we've saved any of the board's
		 * resources already.  If so, we'll skip the attempt to
		 * determine what's being used. */
		index = 0;
		temp_func = cpqhp_slot_find(func->bus, func->device, index++);
		while (temp_func) {
			if (temp_func->bus_head || temp_func->mem_head
			    || temp_func->p_mem_head || temp_func->io_head) {
				skip = 1;
				break;
			}
			temp_func = cpqhp_slot_find(temp_func->bus, temp_func->device, index++);
		}

		if (!skip)
			rc = cpqhp_save_used_resources(ctrl, func);
	}
	/* Change status to shutdown */
	if (func->is_a_board)
		func->status = 0x01;
	func->configured = 0;

	mutex_lock(&ctrl->crit_sect);

	green_LED_off (ctrl, hp_slot);
	slot_disable (ctrl, hp_slot);

	set_SOGO(ctrl);

	/* turn off SERR for slot */
	temp_byte = readb(ctrl->hpc_reg + SLOT_SERR);
	temp_byte &= ~(0x01 << hp_slot);
	writeb(temp_byte, ctrl->hpc_reg + SLOT_SERR);

	/* Wait for SOBS to be unset */
	wait_for_ctrl_irq (ctrl);

	mutex_unlock(&ctrl->crit_sect);

	if (!replace_flag && ctrl->add_support) {
		/* Return this board's resources to the controller's free
		 * lists, for every function of the device. */
		while (func) {
			res_lists.io_head = ctrl->io_head;
			res_lists.mem_head = ctrl->mem_head;
			res_lists.p_mem_head = ctrl->p_mem_head;
			res_lists.bus_head = ctrl->bus_head;

			cpqhp_return_board_resources(func, &res_lists);

			ctrl->io_head = res_lists.io_head;
			ctrl->mem_head = res_lists.mem_head;
			ctrl->p_mem_head = res_lists.p_mem_head;
			ctrl->bus_head = res_lists.bus_head;

			cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
			cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
			cpqhp_resource_sort_and_combine(&(ctrl->io_head));
			cpqhp_resource_sort_and_combine(&(ctrl->bus_head));

			if (is_bridge(func)) {
				bridge_slot_remove(func);
			} else
				slot_remove(func);

			func = cpqhp_slot_find(ctrl->bus, device, 0);
		}

		/* Setup slot structure with entry for empty slot */
		func = cpqhp_slot_create(ctrl->bus);

		if (func == NULL)
			return 1;

		func->bus = ctrl->bus;
		func->device = device;
		func->function = 0;
		func->configured = 0;
		func->switch_save = 0x10;
		func->is_a_board = 0;
		func->p_task_event = NULL;
	}
	return 0;
}

/* Timer callback: record which slot's pushbutton timer fired and kick the
 * event thread to do the real work in process context. */
static void pushbutton_helper_thread(unsigned long data)
{
	pushbutton_pending = data;

	wake_up_process(cpqhp_event_thread);
}

/* this is the main worker thread */
static int event_thread(void* data)
{
	struct controller *ctrl;

	while (1) {
		dbg("!!!!event_thread sleeping\n");
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		if (kthread_should_stop())
			break;
		/* Do stuff here */
		if (pushbutton_pending)
			cpqhp_pushbutton_thread(pushbutton_pending);
		else
			for (ctrl = cpqhp_ctrl_list; ctrl; ctrl=ctrl->next)
				interrupt_event_handler(ctrl);
	}
	dbg("event_thread signals exit\n");
	return 0;
}

int cpqhp_event_start_thread(void)
{
	cpqhp_event_thread = kthread_run(event_thread, NULL, "phpd_event");
	if (IS_ERR(cpqhp_event_thread)) {
		err ("Can't start up our event thread\n");
		return PTR_ERR(cpqhp_event_thread);
	}

	return 0;
}

void cpqhp_event_stop_thread(void)
{
	kthread_stop(cpqhp_event_thread);
}

/* Push the slot's current hardware state (power/attention/latch/presence)
 * to the PCI hotplug core.  Returns 0 or a negative errno. */
static int update_slot_info(struct controller *ctrl, struct slot *slot)
{
	struct hotplug_slot_info *info;
	int result;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->power_status = get_slot_enabled(ctrl, slot);
	info->attention_status = cpq_get_attention_status(ctrl, slot);
	info->latch_status = cpq_get_latch_status(ctrl, slot);
info->adapter_status = get_presence_status(ctrl, slot);
	result = pci_hp_change_slot_info(slot->hotplug_slot, info);
	kfree (info);
	return result;
}

/* Drain the controller's event queue (filled by the interrupt handler) and
 * react to button press/cancel/release and power-fault events. */
static void interrupt_event_handler(struct controller *ctrl)
{
	int loop = 0;
	int change = 1;
	struct pci_func *func;
	u8 hp_slot;
	struct slot *p_slot;

	/* Keep rescanning the queue until a full pass finds no events. */
	while (change) {
		change = 0;

		for (loop = 0; loop < 10; loop++) {
			/* dbg("loop %d\n", loop); */
			if (ctrl->event_queue[loop].event_type != 0) {
				hp_slot = ctrl->event_queue[loop].hp_slot;

				func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0);
				if (!func)
					return;

				p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
				if (!p_slot)
					return;

				dbg("hp_slot %d, func %p, p_slot %p\n",
				    hp_slot, func, p_slot);

				if (ctrl->event_queue[loop].event_type == INT_BUTTON_PRESS) {
					dbg("button pressed\n");
				} else if (ctrl->event_queue[loop].event_type ==
					   INT_BUTTON_CANCEL) {
					/* Second press within the grace
					 * period cancels the pending
					 * power change. */
					dbg("button cancel\n");
					del_timer(&p_slot->task_event);

					mutex_lock(&ctrl->crit_sect);

					if (p_slot->state == BLINKINGOFF_STATE) {
						/* slot is on */
						dbg("turn on green LED\n");
						green_LED_on (ctrl, hp_slot);
					} else if (p_slot->state == BLINKINGON_STATE) {
						/* slot is off */
						dbg("turn off green LED\n");
						green_LED_off (ctrl, hp_slot);
					}

					info(msg_button_cancel, p_slot->number);

					p_slot->state = STATIC_STATE;

					amber_LED_off (ctrl, hp_slot);

					set_SOGO(ctrl);

					/* Wait for SOBS to be unset */
					wait_for_ctrl_irq (ctrl);

					mutex_unlock(&ctrl->crit_sect);
				}
				/*** button Released (No action on press...)
				 */
				else if (ctrl->event_queue[loop].event_type == INT_BUTTON_RELEASE) {
					dbg("button release\n");

					if (is_slot_enabled (ctrl, hp_slot)) {
						dbg("slot is on\n");
						p_slot->state = BLINKINGOFF_STATE;
						info(msg_button_off, p_slot->number);
					} else {
						dbg("slot is off\n");
						p_slot->state = BLINKINGON_STATE;
						info(msg_button_on, p_slot->number);
					}
					mutex_lock(&ctrl->crit_sect);

					dbg("blink green LED and turn off amber\n");

					amber_LED_off (ctrl, hp_slot);
					green_LED_blink (ctrl, hp_slot);

					set_SOGO(ctrl);

					/* Wait for SOBS to be unset */
					wait_for_ctrl_irq (ctrl);

					mutex_unlock(&ctrl->crit_sect);
					/* Arm the 5-second grace timer; on
					 * expiry pushbutton_helper_thread()
					 * wakes the event thread to act. */
					init_timer(&p_slot->task_event);
					p_slot->hp_slot = hp_slot;
					p_slot->ctrl = ctrl;
/*					p_slot->physical_slot = physical_slot; */
					p_slot->task_event.expires = jiffies + 5 * HZ;   /* 5 second delay */
					p_slot->task_event.function = pushbutton_helper_thread;
					p_slot->task_event.data = (u32) p_slot;

					dbg("add_timer p_slot = %p\n", p_slot);
					add_timer(&p_slot->task_event);
				}
				/***********POWER FAULT */
				else if (ctrl->event_queue[loop].event_type == INT_POWER_FAULT) {
					dbg("power fault\n");
				} else {
					/* refresh notification */
					if (p_slot)
						update_slot_info(ctrl, p_slot);
				}

				ctrl->event_queue[loop].event_type = 0;

				change = 1;
			}
		}		/* End of FOR loop */
	}

	return;
}

/**
 * cpqhp_pushbutton_thread - handle pushbutton events
 * @slot: target slot (struct)
 *
 * Scheduled procedure to handle blocking stuff for the pushbuttons.
 * Handles all pending events and exits.
 */
void cpqhp_pushbutton_thread(unsigned long slot)
{
	u8 hp_slot;
	u8 device;
	struct pci_func *func;
	struct slot *p_slot = (struct slot *) slot;
	struct controller *ctrl = (struct controller *) p_slot->ctrl;

	pushbutton_pending = 0;
	hp_slot = p_slot->hp_slot;

	device = p_slot->device;

	if (is_slot_enabled(ctrl, hp_slot)) {
		p_slot->state = POWEROFF_STATE;
		/* power Down board */
		func = cpqhp_slot_find(p_slot->bus, p_slot->device, 0);
		dbg("In power_down_board, func = %p, ctrl = %p\n", func, ctrl);
		if (!func) {
			dbg("Error!
func NULL in %s\n", __func__); return ; } if (cpqhp_process_SS(ctrl, func) != 0) { amber_LED_on(ctrl, hp_slot); green_LED_on(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); } p_slot->state = STATIC_STATE; } else { p_slot->state = POWERON_STATE; /* slot is off */ func = cpqhp_slot_find(p_slot->bus, p_slot->device, 0); dbg("In add_board, func = %p, ctrl = %p\n", func, ctrl); if (!func) { dbg("Error! func NULL in %s\n", __func__); return ; } if (ctrl != NULL) { if (cpqhp_process_SI(ctrl, func) != 0) { amber_LED_on(ctrl, hp_slot); green_LED_off(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); } } p_slot->state = STATIC_STATE; } return; } int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func) { u8 device, hp_slot; u16 temp_word; u32 tempdword; int rc; struct slot* p_slot; int physical_slot = 0; tempdword = 0; device = func->device; hp_slot = device - ctrl->slot_device_offset; p_slot = cpqhp_find_slot(ctrl, device); if (p_slot) physical_slot = p_slot->number; /* Check to see if the interlock is closed */ tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); if (tempdword & (0x01 << hp_slot)) { return 1; } if (func->is_a_board) { rc = board_replaced(func, ctrl); } else { /* add board */ slot_remove(func); func = cpqhp_slot_create(ctrl->bus); if (func == NULL) return 1; func->bus = ctrl->bus; func->device = device; func->function = 0; func->configured = 0; func->is_a_board = 1; /* We have to save the presence info for these slots */ temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { func->switch_save = 0; } else { func->switch_save = 0x10; } rc = board_added(func, ctrl); if (rc) { if (is_bridge(func)) { bridge_slot_remove(func); } else slot_remove(func); /* Setup slot structure with entry for empty slot */ func = 
cpqhp_slot_create(ctrl->bus); if (func == NULL) return 1; func->bus = ctrl->bus; func->device = device; func->function = 0; func->configured = 0; func->is_a_board = 0; /* We have to save the presence info for these slots */ temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { func->switch_save = 0; } else { func->switch_save = 0x10; } } } if (rc) { dbg("%s: rc = %d\n", __func__, rc); } if (p_slot) update_slot_info(ctrl, p_slot); return rc; } int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func) { u8 device, class_code, header_type, BCR; u8 index = 0; u8 replace_flag; u32 rc = 0; unsigned int devfn; struct slot* p_slot; struct pci_bus *pci_bus = ctrl->pci_bus; int physical_slot=0; device = func->device; func = cpqhp_slot_find(ctrl->bus, device, index++); p_slot = cpqhp_find_slot(ctrl, device); if (p_slot) { physical_slot = p_slot->number; } /* Make sure there are no video controllers here */ while (func && !rc) { pci_bus->number = func->bus; devfn = PCI_DEVFN(func->device, func->function); /* Check the Class Code */ rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code); if (rc) return rc; if (class_code == PCI_BASE_CLASS_DISPLAY) { /* Display/Video adapter (not supported) */ rc = REMOVE_NOT_SUPPORTED; } else { /* See if it's a bridge */ rc = pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type); if (rc) return rc; /* If it's a bridge, check the VGA Enable bit */ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { rc = pci_bus_read_config_byte (pci_bus, devfn, PCI_BRIDGE_CONTROL, &BCR); if (rc) return rc; /* If the VGA Enable bit is set, remove isn't * supported */ if (BCR & PCI_BRIDGE_CTL_VGA) rc = REMOVE_NOT_SUPPORTED; } } func = cpqhp_slot_find(ctrl->bus, device, index++); } func = cpqhp_slot_find(ctrl->bus, device, 0); if ((func != NULL) && !rc) { /* FIXME: Replace flag 
should be passed into process_SS */ replace_flag = !(ctrl->add_support); rc = remove_board(func, replace_flag, ctrl); } else if (!rc) { rc = 1; } if (p_slot) update_slot_info(ctrl, p_slot); return rc; } /** * switch_leds - switch the leds, go from one site to the other. * @ctrl: controller to use * @num_of_slots: number of slots to use * @work_LED: LED control value * @direction: 1 to start from the left side, 0 to start right. */ static void switch_leds(struct controller *ctrl, const int num_of_slots, u32 *work_LED, const int direction) { int loop; for (loop = 0; loop < num_of_slots; loop++) { if (direction) *work_LED = *work_LED >> 1; else *work_LED = *work_LED << 1; writel(*work_LED, ctrl->hpc_reg + LED_CONTROL); set_SOGO(ctrl); /* Wait for SOGO interrupt */ wait_for_ctrl_irq(ctrl); /* Get ready for next iteration */ long_delay((2*HZ)/10); } } /** * cpqhp_hardware_test - runs hardware tests * @ctrl: target controller * @test_num: the number written to the "test" file in sysfs. * * For hot plug ctrl folks to play with. */ int cpqhp_hardware_test(struct controller *ctrl, int test_num) { u32 save_LED; u32 work_LED; int loop; int num_of_slots; num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f; switch (test_num) { case 1: /* Do stuff here! 
*/ /* Do that funky LED thing */ /* so we can restore them later */ save_LED = readl(ctrl->hpc_reg + LED_CONTROL); work_LED = 0x01010101; switch_leds(ctrl, num_of_slots, &work_LED, 0); switch_leds(ctrl, num_of_slots, &work_LED, 1); switch_leds(ctrl, num_of_slots, &work_LED, 0); switch_leds(ctrl, num_of_slots, &work_LED, 1); work_LED = 0x01010000; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); switch_leds(ctrl, num_of_slots, &work_LED, 0); switch_leds(ctrl, num_of_slots, &work_LED, 1); work_LED = 0x00000101; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); switch_leds(ctrl, num_of_slots, &work_LED, 0); switch_leds(ctrl, num_of_slots, &work_LED, 1); work_LED = 0x01010000; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); for (loop = 0; loop < num_of_slots; loop++) { set_SOGO(ctrl); /* Wait for SOGO interrupt */ wait_for_ctrl_irq (ctrl); /* Get ready for next iteration */ long_delay((3*HZ)/10); work_LED = work_LED >> 16; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); set_SOGO(ctrl); /* Wait for SOGO interrupt */ wait_for_ctrl_irq (ctrl); /* Get ready for next iteration */ long_delay((3*HZ)/10); work_LED = work_LED << 16; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); work_LED = work_LED << 1; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); } /* put it back the way it was */ writel(save_LED, ctrl->hpc_reg + LED_CONTROL); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); break; case 2: /* Do other stuff here! */ break; case 3: /* and more... */ break; } return 0; } /** * configure_new_device - Configures the PCI header information of one board. * @ctrl: pointer to controller structure * @func: pointer to function structure * @behind_bridge: 1 if this is a recursive call, 0 if not * @resources: pointer to set of resource lists * * Returns 0 if success. 
*/ static u32 configure_new_device(struct controller * ctrl, struct pci_func * func, u8 behind_bridge, struct resource_lists * resources) { u8 temp_byte, function, max_functions, stop_it; int rc; u32 ID; struct pci_func *new_slot; int index; new_slot = func; dbg("%s\n", __func__); /* Check for Multi-function device */ ctrl->pci_bus->number = func->bus; rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(func->device, func->function), 0x0E, &temp_byte); if (rc) { dbg("%s: rc = %d\n", __func__, rc); return rc; } if (temp_byte & 0x80) /* Multi-function device */ max_functions = 8; else max_functions = 1; function = 0; do { rc = configure_new_function(ctrl, new_slot, behind_bridge, resources); if (rc) { dbg("configure_new_function failed %d\n",rc); index = 0; while (new_slot) { new_slot = cpqhp_slot_find(new_slot->bus, new_slot->device, index++); if (new_slot) cpqhp_return_board_resources(new_slot, resources); } return rc; } function++; stop_it = 0; /* The following loop skips to the next present function * and creates a board structure */ while ((function < max_functions) && (!stop_it)) { pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID); if (ID == 0xFFFFFFFF) { function++; } else { /* Setup slot structure. 
*/ new_slot = cpqhp_slot_create(func->bus); if (new_slot == NULL) return 1; new_slot->bus = func->bus; new_slot->device = func->device; new_slot->function = function; new_slot->is_a_board = 1; new_slot->status = 0; stop_it++; } } } while (function < max_functions); dbg("returning from configure_new_device\n"); return 0; } /* * Configuration logic that involves the hotplug data structures and * their bookkeeping */ /** * configure_new_function - Configures the PCI header information of one device * @ctrl: pointer to controller structure * @func: pointer to function structure * @behind_bridge: 1 if this is a recursive call, 0 if not * @resources: pointer to set of resource lists * * Calls itself recursively for bridged devices. * Returns 0 if success. */ static int configure_new_function(struct controller *ctrl, struct pci_func *func, u8 behind_bridge, struct resource_lists *resources) { int cloop; u8 IRQ = 0; u8 temp_byte; u8 device; u8 class_code; u16 command; u16 temp_word; u32 temp_dword; u32 rc; u32 temp_register; u32 base; u32 ID; unsigned int devfn; struct pci_resource *mem_node; struct pci_resource *p_mem_node; struct pci_resource *io_node; struct pci_resource *bus_node; struct pci_resource *hold_mem_node; struct pci_resource *hold_p_mem_node; struct pci_resource *hold_IO_node; struct pci_resource *hold_bus_node; struct irq_mapping irqs; struct pci_func *new_slot; struct pci_bus *pci_bus; struct resource_lists temp_resources; pci_bus = ctrl->pci_bus; pci_bus->number = func->bus; devfn = PCI_DEVFN(func->device, func->function); /* Check for Bridge */ rc = pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &temp_byte); if (rc) return rc; if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* set Primary bus */ dbg("set Primary bus = %d\n", func->bus); rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus); if (rc) return rc; /* find range of busses to use */ dbg("find ranges of buses to use\n"); bus_node = 
get_max_resource(&(resources->bus_head), 1); /* If we don't have any busses to allocate, we can't continue */ if (!bus_node) return -ENOMEM; /* set Secondary bus */ temp_byte = bus_node->base; dbg("set Secondary bus = %d\n", bus_node->base); rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, temp_byte); if (rc) return rc; /* set subordinate bus */ temp_byte = bus_node->base + bus_node->length - 1; dbg("set subordinate bus = %d\n", bus_node->base + bus_node->length - 1); rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SUBORDINATE_BUS, temp_byte); if (rc) return rc; /* set subordinate Latency Timer and base Latency Timer */ temp_byte = 0x40; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SEC_LATENCY_TIMER, temp_byte); if (rc) return rc; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_LATENCY_TIMER, temp_byte); if (rc) return rc; /* set Cache Line size */ temp_byte = 0x08; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_CACHE_LINE_SIZE, temp_byte); if (rc) return rc; /* Setup the IO, memory, and prefetchable windows */ io_node = get_max_resource(&(resources->io_head), 0x1000); if (!io_node) return -ENOMEM; mem_node = get_max_resource(&(resources->mem_head), 0x100000); if (!mem_node) return -ENOMEM; p_mem_node = get_max_resource(&(resources->p_mem_head), 0x100000); if (!p_mem_node) return -ENOMEM; dbg("Setup the IO, memory, and prefetchable windows\n"); dbg("io_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", io_node->base, io_node->length, io_node->next); dbg("mem_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", mem_node->base, mem_node->length, mem_node->next); dbg("p_mem_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", p_mem_node->base, p_mem_node->length, p_mem_node->next); /* set up the IRQ info */ if (!resources->irqs) { irqs.barber_pole = 0; irqs.interrupt[0] = 0; irqs.interrupt[1] = 0; irqs.interrupt[2] = 0; irqs.interrupt[3] = 0; irqs.valid_INT = 0; } else { irqs.barber_pole = resources->irqs->barber_pole; irqs.interrupt[0] = 
resources->irqs->interrupt[0]; irqs.interrupt[1] = resources->irqs->interrupt[1]; irqs.interrupt[2] = resources->irqs->interrupt[2]; irqs.interrupt[3] = resources->irqs->interrupt[3]; irqs.valid_INT = resources->irqs->valid_INT; } /* set up resource lists that are now aligned on top and bottom * for anything behind the bridge. */ temp_resources.bus_head = bus_node; temp_resources.io_head = io_node; temp_resources.mem_head = mem_node; temp_resources.p_mem_head = p_mem_node; temp_resources.irqs = &irqs; /* Make copies of the nodes we are going to pass down so that * if there is a problem,we can just use these to free resources */ hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL); hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL); hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL); hold_p_mem_node = kmalloc(sizeof(*hold_p_mem_node), GFP_KERNEL); if (!hold_bus_node || !hold_IO_node || !hold_mem_node || !hold_p_mem_node) { kfree(hold_bus_node); kfree(hold_IO_node); kfree(hold_mem_node); kfree(hold_p_mem_node); return 1; } memcpy(hold_bus_node, bus_node, sizeof(struct pci_resource)); bus_node->base += 1; bus_node->length -= 1; bus_node->next = NULL; /* If we have IO resources copy them and fill in the bridge's * IO range registers */ if (io_node) { memcpy(hold_IO_node, io_node, sizeof(struct pci_resource)); io_node->next = NULL; /* set IO base and Limit registers */ temp_byte = io_node->base >> 8; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_BASE, temp_byte); temp_byte = (io_node->base + io_node->length - 1) >> 8; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_LIMIT, temp_byte); } else { kfree(hold_IO_node); hold_IO_node = NULL; } /* If we have memory resources copy them and fill in the * bridge's memory range registers. Otherwise, fill in the * range registers with values that disable them. 
*/ if (mem_node) { memcpy(hold_mem_node, mem_node, sizeof(struct pci_resource)); mem_node->next = NULL; /* set Mem base and Limit registers */ temp_word = mem_node->base >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word); temp_word = (mem_node->base + mem_node->length - 1) >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); } else { temp_word = 0xFFFF; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word); temp_word = 0x0000; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); kfree(hold_mem_node); hold_mem_node = NULL; } memcpy(hold_p_mem_node, p_mem_node, sizeof(struct pci_resource)); p_mem_node->next = NULL; /* set Pre Mem base and Limit registers */ temp_word = p_mem_node->base >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word); temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); /* Adjust this to compensate for extra adjustment in first loop */ irqs.barber_pole--; rc = 0; /* Here we actually find the devices and configure them */ for (device = 0; (device <= 0x1F) && !rc; device++) { irqs.barber_pole = (irqs.barber_pole + 1) & 0x03; ID = 0xFFFFFFFF; pci_bus->number = hold_bus_node->base; pci_bus_read_config_dword (pci_bus, PCI_DEVFN(device, 0), 0x00, &ID); pci_bus->number = func->bus; if (ID != 0xFFFFFFFF) { /* device present */ /* Setup slot structure. */ new_slot = cpqhp_slot_create(hold_bus_node->base); if (new_slot == NULL) { rc = -ENOMEM; continue; } new_slot->bus = hold_bus_node->base; new_slot->device = device; new_slot->function = 0; new_slot->is_a_board = 1; new_slot->status = 0; rc = configure_new_device(ctrl, new_slot, 1, &temp_resources); dbg("configure_new_device rc=0x%x\n",rc); } /* End of IF (device in slot?) 
*/ } /* End of FOR loop */ if (rc) goto free_and_out; /* save the interrupt routing information */ if (resources->irqs) { resources->irqs->interrupt[0] = irqs.interrupt[0]; resources->irqs->interrupt[1] = irqs.interrupt[1]; resources->irqs->interrupt[2] = irqs.interrupt[2]; resources->irqs->interrupt[3] = irqs.interrupt[3]; resources->irqs->valid_INT = irqs.valid_INT; } else if (!behind_bridge) { /* We need to hook up the interrupts here */ for (cloop = 0; cloop < 4; cloop++) { if (irqs.valid_INT & (0x01 << cloop)) { rc = cpqhp_set_irq(func->bus, func->device, cloop + 1, irqs.interrupt[cloop]); if (rc) goto free_and_out; } } /* end of for loop */ } /* Return unused bus resources * First use the temporary node to store information for * the board */ if (hold_bus_node && bus_node && temp_resources.bus_head) { hold_bus_node->length = bus_node->base - hold_bus_node->base; hold_bus_node->next = func->bus_head; func->bus_head = hold_bus_node; temp_byte = temp_resources.bus_head->base - 1; /* set subordinate bus */ rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_SUBORDINATE_BUS, temp_byte); if (temp_resources.bus_head->length == 0) { kfree(temp_resources.bus_head); temp_resources.bus_head = NULL; } else { return_resource(&(resources->bus_head), temp_resources.bus_head); } } /* If we have IO space available and there is some left, * return the unused portion */ if (hold_IO_node && temp_resources.io_head) { io_node = do_pre_bridge_resource_split(&(temp_resources.io_head), &hold_IO_node, 0x1000); /* Check if we were able to split something off */ if (io_node) { hold_IO_node->base = io_node->base + io_node->length; temp_byte = (hold_IO_node->base) >> 8; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_IO_BASE, temp_byte); return_resource(&(resources->io_head), io_node); } io_node = do_bridge_resource_split(&(temp_resources.io_head), 0x1000); /* Check if we were able to split something off */ if (io_node) { /* First use the temporary node to store * information for the 
board */ hold_IO_node->length = io_node->base - hold_IO_node->base; /* If we used any, add it to the board's list */ if (hold_IO_node->length) { hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; temp_byte = (io_node->base - 1) >> 8; rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_IO_LIMIT, temp_byte); return_resource(&(resources->io_head), io_node); } else { /* it doesn't need any IO */ temp_word = 0x0000; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_IO_LIMIT, temp_word); return_resource(&(resources->io_head), io_node); kfree(hold_IO_node); } } else { /* it used most of the range */ hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; } } else if (hold_IO_node) { /* it used the whole range */ hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; } /* If we have memory space available and there is some left, * return the unused portion */ if (hold_mem_node && temp_resources.mem_head) { mem_node = do_pre_bridge_resource_split(&(temp_resources. 
mem_head), &hold_mem_node, 0x100000); /* Check if we were able to split something off */ if (mem_node) { hold_mem_node->base = mem_node->base + mem_node->length; temp_word = (hold_mem_node->base) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_BASE, temp_word); return_resource(&(resources->mem_head), mem_node); } mem_node = do_bridge_resource_split(&(temp_resources.mem_head), 0x100000); /* Check if we were able to split something off */ if (mem_node) { /* First use the temporary node to store * information for the board */ hold_mem_node->length = mem_node->base - hold_mem_node->base; if (hold_mem_node->length) { hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; /* configure end address */ temp_word = (mem_node->base - 1) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); /* Return unused resources to the pool */ return_resource(&(resources->mem_head), mem_node); } else { /* it doesn't need any Mem */ temp_word = 0x0000; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); return_resource(&(resources->mem_head), mem_node); kfree(hold_mem_node); } } else { /* it used most of the range */ hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; } } else if (hold_mem_node) { /* it used the whole range */ hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; } /* If we have prefetchable memory space available and there * is some left at the end, return the unused portion */ if (hold_p_mem_node && temp_resources.p_mem_head) { p_mem_node = do_pre_bridge_resource_split(&(temp_resources.p_mem_head), &hold_p_mem_node, 0x100000); /* Check if we were able to split something off */ if (p_mem_node) { hold_p_mem_node->base = p_mem_node->base + p_mem_node->length; temp_word = (hold_p_mem_node->base) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); } p_mem_node = 
do_bridge_resource_split(&(temp_resources.p_mem_head), 0x100000); /* Check if we were able to split something off */ if (p_mem_node) { /* First use the temporary node to store * information for the board */ hold_p_mem_node->length = p_mem_node->base - hold_p_mem_node->base; /* If we used any, add it to the board's list */ if (hold_p_mem_node->length) { hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; temp_word = (p_mem_node->base - 1) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); } else { /* it doesn't need any PMem */ temp_word = 0x0000; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); kfree(hold_p_mem_node); } } else { /* it used the most of the range */ hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; } } else if (hold_p_mem_node) { /* it used the whole range */ hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; } /* We should be configuring an IRQ and the bridge's base address * registers if it needs them. 
Although we have never seen such * a device */ /* enable card */ command = 0x0157; /* = PCI_COMMAND_IO | * PCI_COMMAND_MEMORY | * PCI_COMMAND_MASTER | * PCI_COMMAND_INVALIDATE | * PCI_COMMAND_PARITY | * PCI_COMMAND_SERR */ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_COMMAND, command); /* set Bridge Control Register */ command = 0x07; /* = PCI_BRIDGE_CTL_PARITY | * PCI_BRIDGE_CTL_SERR | * PCI_BRIDGE_CTL_NO_ISA */ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_BRIDGE_CONTROL, command); } else if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_NORMAL) { /* Standard device */ rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code); if (class_code == PCI_BASE_CLASS_DISPLAY) { /* Display (video) adapter (not supported) */ return DEVICE_TYPE_NOT_SUPPORTED; } /* Figure out IO and memory needs */ for (cloop = 0x10; cloop <= 0x24; cloop += 4) { temp_register = 0xFFFFFFFF; dbg("CND: bus=%d, devfn=%d, offset=%d\n", pci_bus->number, devfn, cloop); rc = pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); rc = pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp_register); dbg("CND: base = 0x%x\n", temp_register); if (temp_register) { /* If this register is implemented */ if ((temp_register & 0x03L) == 0x01) { /* Map IO */ /* set base = amount of IO space */ base = temp_register & 0xFFFFFFFC; base = ~base + 1; dbg("CND: length = 0x%x\n", base); io_node = get_io_resource(&(resources->io_head), base); dbg("Got io_node start = %8.8x, length = %8.8x next (%p)\n", io_node->base, io_node->length, io_node->next); dbg("func (%p) io_head (%p)\n", func, func->io_head); /* allocate the resource to the board */ if (io_node) { base = io_node->base; io_node->next = func->io_head; func->io_head = io_node; } else return -ENOMEM; } else if ((temp_register & 0x0BL) == 0x08) { /* Map prefetchable memory */ base = temp_register & 0xFFFFFFF0; base = ~base + 1; dbg("CND: length = 0x%x\n", base); p_mem_node = get_resource(&(resources->p_mem_head), base); /* allocate the 
resource to the board */ if (p_mem_node) { base = p_mem_node->base; p_mem_node->next = func->p_mem_head; func->p_mem_head = p_mem_node; } else return -ENOMEM; } else if ((temp_register & 0x0BL) == 0x00) { /* Map memory */ base = temp_register & 0xFFFFFFF0; base = ~base + 1; dbg("CND: length = 0x%x\n", base); mem_node = get_resource(&(resources->mem_head), base); /* allocate the resource to the board */ if (mem_node) { base = mem_node->base; mem_node->next = func->mem_head; func->mem_head = mem_node; } else return -ENOMEM; } else if ((temp_register & 0x0BL) == 0x04) { /* Map memory */ base = temp_register & 0xFFFFFFF0; base = ~base + 1; dbg("CND: length = 0x%x\n", base); mem_node = get_resource(&(resources->mem_head), base); /* allocate the resource to the board */ if (mem_node) { base = mem_node->base; mem_node->next = func->mem_head; func->mem_head = mem_node; } else return -ENOMEM; } else if ((temp_register & 0x0BL) == 0x06) { /* Those bits are reserved, we can't handle this */ return 1; } else { /* Requesting space below 1M */ return NOT_ENOUGH_RESOURCES; } rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, base); /* Check for 64-bit base */ if ((temp_register & 0x07L) == 0x04) { cloop += 4; /* Upper 32 bits of address always zero * on today's systems */ /* FIXME this is probably not true on * Alpha and ia64??? 
*/ base = 0; rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, base); } } } /* End of base register loop */ if (cpqhp_legacy_mode) { /* Figure out which interrupt pin this function uses */ rc = pci_bus_read_config_byte (pci_bus, devfn, PCI_INTERRUPT_PIN, &temp_byte); /* If this function needs an interrupt and we are behind * a bridge and the pin is tied to something that's * alread mapped, set this one the same */ if (temp_byte && resources->irqs && (resources->irqs->valid_INT & (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) { /* We have to share with something already set up */ IRQ = resources->irqs->interrupt[(temp_byte + resources->irqs->barber_pole - 1) & 0x03]; } else { /* Program IRQ based on card type */ rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code); if (class_code == PCI_BASE_CLASS_STORAGE) IRQ = cpqhp_disk_irq; else IRQ = cpqhp_nic_irq; } /* IRQ Line */ rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_INTERRUPT_LINE, IRQ); } if (!behind_bridge) { rc = cpqhp_set_irq(func->bus, func->device, temp_byte, IRQ); if (rc) return 1; } else { /* TBD - this code may also belong in the other clause * of this If statement */ resources->irqs->interrupt[(temp_byte + resources->irqs->barber_pole - 1) & 0x03] = IRQ; resources->irqs->valid_INT |= 0x01 << (temp_byte + resources->irqs->barber_pole - 1) & 0x03; } /* Latency Timer */ temp_byte = 0x40; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_LATENCY_TIMER, temp_byte); /* Cache Line size */ temp_byte = 0x08; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_CACHE_LINE_SIZE, temp_byte); /* disable ROM base Address */ temp_dword = 0x00L; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_ROM_ADDRESS, temp_dword); /* enable card */ temp_word = 0x0157; /* = PCI_COMMAND_IO | * PCI_COMMAND_MEMORY | * PCI_COMMAND_MASTER | * PCI_COMMAND_INVALIDATE | * PCI_COMMAND_PARITY | * PCI_COMMAND_SERR */ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_COMMAND, temp_word); } else { /* 
End of Not-A-Bridge else */ /* It's some strange type of PCI adapter (Cardbus?) */ return DEVICE_TYPE_NOT_SUPPORTED; } func->configured = 1; return 0; free_and_out: cpqhp_destroy_resource_list (&temp_resources); return_resource(&(resources-> bus_head), hold_bus_node); return_resource(&(resources-> io_head), hold_IO_node); return_resource(&(resources-> mem_head), hold_mem_node); return_resource(&(resources-> p_mem_head), hold_p_mem_node); return rc; }
gpl-2.0
gic4107/HSA-linux
arch/metag/kernel/perf/perf_event.c
579
21968
/* * Meta performance counter support. * Copyright (C) 2012 Imagination Technologies Ltd * * This code is based on the sh pmu code: * Copyright (C) 2009 Paul Mundt * * and on the arm pmu code: * Copyright (C) 2009 picoChip Designs, Ltd., James Iles * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/atomic.h> #include <linux/export.h> #include <linux/init.h> #include <linux/irqchip/metag.h> #include <linux/perf_event.h> #include <linux/slab.h> #include <asm/core_reg.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/processor.h> #include "perf_event.h" static int _hw_perf_event_init(struct perf_event *); static void _hw_perf_event_destroy(struct perf_event *); /* Determines which core type we are */ static struct metag_pmu *metag_pmu __read_mostly; /* Processor specific data */ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); /* PMU admin */ const char *perf_pmu_name(void) { if (!metag_pmu) return NULL; return metag_pmu->name; } EXPORT_SYMBOL_GPL(perf_pmu_name); int perf_num_counters(void) { if (metag_pmu) return metag_pmu->max_events; return 0; } EXPORT_SYMBOL_GPL(perf_num_counters); static inline int metag_pmu_initialised(void) { return !!metag_pmu; } static void release_pmu_hardware(void) { int irq; unsigned int version = (metag_pmu->version & (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >> METAC_ID_REV_S; /* Early cores don't have overflow interrupts */ if (version < 0x0104) return; irq = internal_irq_map(17); if (irq >= 0) free_irq(irq, (void *)1); irq = internal_irq_map(16); if (irq >= 0) free_irq(irq, (void *)0); } static int reserve_pmu_hardware(void) { int err = 0, irq[2]; unsigned int version = (metag_pmu->version & (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >> METAC_ID_REV_S; /* Early cores don't have overflow interrupts */ if (version < 
0x0104) goto out; /* * Bit 16 on HWSTATMETA is the interrupt for performance counter 0; * similarly, 17 is the interrupt for performance counter 1. * We can't (yet) interrupt on the cycle counter, because it's a * register, however it holds a 32-bit value as opposed to 24-bit. */ irq[0] = internal_irq_map(16); if (irq[0] < 0) { pr_err("unable to map internal IRQ %d\n", 16); goto out; } err = request_irq(irq[0], metag_pmu->handle_irq, IRQF_NOBALANCING, "metagpmu0", (void *)0); if (err) { pr_err("unable to request IRQ%d for metag PMU counters\n", irq[0]); goto out; } irq[1] = internal_irq_map(17); if (irq[1] < 0) { pr_err("unable to map internal IRQ %d\n", 17); goto out_irq1; } err = request_irq(irq[1], metag_pmu->handle_irq, IRQF_NOBALANCING, "metagpmu1", (void *)1); if (err) { pr_err("unable to request IRQ%d for metag PMU counters\n", irq[1]); goto out_irq1; } return 0; out_irq1: free_irq(irq[0], (void *)0); out: return err; } /* PMU operations */ static void metag_pmu_enable(struct pmu *pmu) { } static void metag_pmu_disable(struct pmu *pmu) { } static int metag_pmu_event_init(struct perf_event *event) { int err = 0; atomic_t *active_events = &metag_pmu->active_events; if (!metag_pmu_initialised()) { err = -ENODEV; goto out; } if (has_branch_stack(event)) return -EOPNOTSUPP; event->destroy = _hw_perf_event_destroy; if (!atomic_inc_not_zero(active_events)) { mutex_lock(&metag_pmu->reserve_mutex); if (atomic_read(active_events) == 0) err = reserve_pmu_hardware(); if (!err) atomic_inc(active_events); mutex_unlock(&metag_pmu->reserve_mutex); } /* Hardware and caches counters */ switch (event->attr.type) { case PERF_TYPE_HARDWARE: case PERF_TYPE_HW_CACHE: case PERF_TYPE_RAW: err = _hw_perf_event_init(event); break; default: return -ENOENT; } if (err) event->destroy(event); out: return err; } void metag_pmu_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx) { u64 prev_raw_count, new_raw_count; s64 delta; /* * If this counter is chained, it may be 
that the previous counter * value has been changed beneath us. * * To get around this, we read and exchange the new raw count, then * add the delta (new - prev) to the generic counter atomically. * * Without interrupts, this is the simplest approach. */ again: prev_raw_count = local64_read(&hwc->prev_count); new_raw_count = metag_pmu->read(idx); if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) goto again; /* * Calculate the delta and add it to the counter. */ delta = (new_raw_count - prev_raw_count) & MAX_PERIOD; local64_add(delta, &event->count); local64_sub(delta, &hwc->period_left); } int metag_pmu_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, int idx) { s64 left = local64_read(&hwc->period_left); s64 period = hwc->sample_period; int ret = 0; /* The period may have been changed */ if (unlikely(period != hwc->last_period)) left += period - hwc->last_period; if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (unlikely(left <= 0)) { left += period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (left > (s64)metag_pmu->max_period) left = metag_pmu->max_period; if (metag_pmu->write) { local64_set(&hwc->prev_count, -(s32)left); metag_pmu->write(idx, -left & MAX_PERIOD); } perf_event_update_userpage(event); return ret; } static void metag_pmu_start(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; if (WARN_ON_ONCE(idx == -1)) return; /* * We always have to reprogram the period, so ignore PERF_EF_RELOAD. */ if (flags & PERF_EF_RELOAD) WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); hwc->state = 0; /* * Reset the period. * Some counters can't be stopped (i.e. are core global), so when the * counter was 'stopped' we merely disabled the IRQ. 
If we don't reset * the period, then we'll either: a) get an overflow too soon; * or b) too late if the overflow happened since disabling. * Obviously, this has little bearing on cores without the overflow * interrupt, as the performance counter resets to zero on write * anyway. */ if (metag_pmu->max_period) metag_pmu_event_set_period(event, hwc, hwc->idx); cpuc->events[idx] = event; metag_pmu->enable(hwc, idx); } static void metag_pmu_stop(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; /* * We should always update the counter on stop; see comment above * why. */ if (!(hwc->state & PERF_HES_STOPPED)) { metag_pmu_event_update(event, hwc, hwc->idx); metag_pmu->disable(hwc, hwc->idx); hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; } } static int metag_pmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; int idx = 0, ret = 0; perf_pmu_disable(event->pmu); /* check whether we're counting instructions */ if (hwc->config == 0x100) { if (__test_and_set_bit(METAG_INST_COUNTER, cpuc->used_mask)) { ret = -EAGAIN; goto out; } idx = METAG_INST_COUNTER; } else { /* Check whether we have a spare counter */ idx = find_first_zero_bit(cpuc->used_mask, atomic_read(&metag_pmu->active_events)); if (idx >= METAG_INST_COUNTER) { ret = -EAGAIN; goto out; } __set_bit(idx, cpuc->used_mask); } hwc->idx = idx; /* Make sure the counter is disabled */ metag_pmu->disable(hwc, idx); hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; if (flags & PERF_EF_START) metag_pmu_start(event, PERF_EF_RELOAD); perf_event_update_userpage(event); out: perf_pmu_enable(event->pmu); return ret; } static void metag_pmu_del(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; WARN_ON(idx < 0); metag_pmu_stop(event, PERF_EF_UPDATE); cpuc->events[idx] = NULL; __clear_bit(idx, 
cpuc->used_mask); perf_event_update_userpage(event); } static void metag_pmu_read(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; /* Don't read disabled counters! */ if (hwc->idx < 0) return; metag_pmu_event_update(event, hwc, hwc->idx); } static struct pmu pmu = { .pmu_enable = metag_pmu_enable, .pmu_disable = metag_pmu_disable, .event_init = metag_pmu_event_init, .add = metag_pmu_add, .del = metag_pmu_del, .start = metag_pmu_start, .stop = metag_pmu_stop, .read = metag_pmu_read, }; /* Core counter specific functions */ static const int metag_general_events[] = { [PERF_COUNT_HW_CPU_CYCLES] = 0x03, [PERF_COUNT_HW_INSTRUCTIONS] = 0x100, [PERF_COUNT_HW_CACHE_REFERENCES] = -1, [PERF_COUNT_HW_CACHE_MISSES] = -1, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1, [PERF_COUNT_HW_BRANCH_MISSES] = -1, [PERF_COUNT_HW_BUS_CYCLES] = -1, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = -1, [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = -1, [PERF_COUNT_HW_REF_CPU_CYCLES] = -1, }; static const int metag_pmu_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = 0x08, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = 0x09, [C(RESULT_MISS)] = 0x0a, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(LL)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = 
CACHE_OP_UNSUPPORTED, }, }, [C(DTLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = 0xd0, [C(RESULT_MISS)] = 0xd2, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = 0xd4, [C(RESULT_MISS)] = 0xd5, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = 0xd1, [C(RESULT_MISS)] = 0xd3, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(BPU)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(NODE)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, }; static void _hw_perf_event_destroy(struct perf_event *event) { atomic_t *active_events = &metag_pmu->active_events; struct mutex *pmu_mutex = &metag_pmu->reserve_mutex; if (atomic_dec_and_mutex_lock(active_events, pmu_mutex)) { release_pmu_hardware(); mutex_unlock(pmu_mutex); } } static int _hw_perf_cache_event(int config, int *evp) { unsigned long type, op, result; int ev; if (!metag_pmu->cache_events) return -EINVAL; /* Unpack config */ type = config & 0xff; op = (config >> 8) & 0xff; result = (config >> 16) & 0xff; if (type >= PERF_COUNT_HW_CACHE_MAX || op >= PERF_COUNT_HW_CACHE_OP_MAX || result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; ev = (*metag_pmu->cache_events)[type][op][result]; if (ev == 0) return 
-EOPNOTSUPP; if (ev == -1) return -EINVAL; *evp = ev; return 0; } static int _hw_perf_event_init(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; struct hw_perf_event *hwc = &event->hw; int mapping = 0, err; switch (attr->type) { case PERF_TYPE_HARDWARE: if (attr->config >= PERF_COUNT_HW_MAX) return -EINVAL; mapping = metag_pmu->event_map(attr->config); break; case PERF_TYPE_HW_CACHE: err = _hw_perf_cache_event(attr->config, &mapping); if (err) return err; break; case PERF_TYPE_RAW: mapping = attr->config; break; } /* Return early if the event is unsupported */ if (mapping == -1) return -EINVAL; /* * Early cores have "limited" counters - they have no overflow * interrupts - and so are unable to do sampling without extra work * and timer assistance. */ if (metag_pmu->max_period == 0) { if (hwc->sample_period) return -EINVAL; } /* * Don't assign an index until the event is placed into the hardware. * -1 signifies that we're still deciding where to put it. On SMP * systems each core has its own set of counters, so we can't do any * constraint checking yet. */ hwc->idx = -1; /* Store the event encoding */ hwc->config |= (unsigned long)mapping; /* * For non-sampling runs, limit the sample_period to half of the * counter width. This way, the new counter value should be less * likely to overtake the previous one (unless there are IRQ latency * issues...) 
*/ if (metag_pmu->max_period) { if (!hwc->sample_period) { hwc->sample_period = metag_pmu->max_period >> 1; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); } } return 0; } static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx) { struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events); unsigned int config = event->config; unsigned int tmp = config & 0xf0; unsigned long flags; raw_spin_lock_irqsave(&events->pmu_lock, flags); /* * Check if we're enabling the instruction counter (index of * MAX_HWEVENTS - 1) */ if (METAG_INST_COUNTER == idx) { WARN_ONCE((config != 0x100), "invalid configuration (%d) for counter (%d)\n", config, idx); local64_set(&event->prev_count, __core_reg_get(TXTACTCYC)); goto unlock; } /* Check for a core internal or performance channel event. */ if (tmp) { void *perf_addr; /* * Anything other than a cycle count will write the low- * nibble to the correct counter register. */ switch (tmp) { case 0xd0: perf_addr = (void *)PERF_ICORE(idx); break; case 0xf0: perf_addr = (void *)PERF_CHAN(idx); break; default: perf_addr = NULL; break; } if (perf_addr) metag_out32((config & 0x0f), perf_addr); /* * Now we use the high nibble as the performance event to * to count. */ config = tmp >> 4; } tmp = ((config & 0xf) << 28) | ((1 << 24) << hard_processor_id()); if (metag_pmu->max_period) /* * Cores supporting overflow interrupts may have had the counter * set to a specific value that needs preserving. */ tmp |= metag_in32(PERF_COUNT(idx)) & 0x00ffffff; else /* * Older cores reset the counter on write, so prev_count needs * resetting too so we can calculate a correct delta. 
*/ local64_set(&event->prev_count, 0); metag_out32(tmp, PERF_COUNT(idx)); unlock: raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void metag_pmu_disable_counter(struct hw_perf_event *event, int idx) { struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events); unsigned int tmp = 0; unsigned long flags; /* * The cycle counter can't be disabled per se, as it's a hardware * thread register which is always counting. We merely return if this * is the counter we're attempting to disable. */ if (METAG_INST_COUNTER == idx) return; /* * The counter value _should_ have been read prior to disabling, * as if we're running on an early core then the value gets reset to * 0, and any read after that would be useless. On the newer cores, * however, it's better to read-modify-update this for purposes of * the overflow interrupt. * Here we remove the thread id AND the event nibble (there are at * least two events that count events that are core global and ignore * the thread id mask). This only works because we don't mix thread * performance counts, and event 0x00 requires a thread id mask! */ raw_spin_lock_irqsave(&events->pmu_lock, flags); tmp = metag_in32(PERF_COUNT(idx)); tmp &= 0x00ffffff; metag_out32(tmp, PERF_COUNT(idx)); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static u64 metag_pmu_read_counter(int idx) { u32 tmp = 0; if (METAG_INST_COUNTER == idx) { tmp = __core_reg_get(TXTACTCYC); goto out; } tmp = metag_in32(PERF_COUNT(idx)) & 0x00ffffff; out: return tmp; } static void metag_pmu_write_counter(int idx, u32 val) { struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events); u32 tmp = 0; unsigned long flags; /* * This _shouldn't_ happen, but if it does, then we can just * ignore the write, as the register is read-only and clear-on-write. */ if (METAG_INST_COUNTER == idx) return; /* * We'll keep the thread mask and event id, and just update the * counter itself. Also , we should bound the value to 24-bits. 
*/ raw_spin_lock_irqsave(&events->pmu_lock, flags); val &= 0x00ffffff; tmp = metag_in32(PERF_COUNT(idx)) & 0xff000000; val |= tmp; metag_out32(val, PERF_COUNT(idx)); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static int metag_pmu_event_map(int idx) { return metag_general_events[idx]; } static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev) { int idx = (int)dev; struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); struct perf_event *event = cpuhw->events[idx]; struct hw_perf_event *hwc = &event->hw; struct pt_regs *regs = get_irq_regs(); struct perf_sample_data sampledata; unsigned long flags; u32 counter = 0; /* * We need to stop the core temporarily from generating another * interrupt while we disable this counter. However, we don't want * to flag the counter as free */ __global_lock2(flags); counter = metag_in32(PERF_COUNT(idx)); metag_out32((counter & 0x00ffffff), PERF_COUNT(idx)); __global_unlock2(flags); /* Update the counts and reset the sample period */ metag_pmu_event_update(event, hwc, idx); perf_sample_data_init(&sampledata, 0, hwc->last_period); metag_pmu_event_set_period(event, hwc, idx); /* * Enable the counter again once core overflow processing has * completed. Note the counter value may have been modified while it was * inactive to set it up ready for the next interrupt. 
*/ if (!perf_event_overflow(event, &sampledata, regs)) { __global_lock2(flags); counter = (counter & 0xff000000) | (metag_in32(PERF_COUNT(idx)) & 0x00ffffff); metag_out32(counter, PERF_COUNT(idx)); __global_unlock2(flags); } return IRQ_HANDLED; } static struct metag_pmu _metag_pmu = { .handle_irq = metag_pmu_counter_overflow, .enable = metag_pmu_enable_counter, .disable = metag_pmu_disable_counter, .read = metag_pmu_read_counter, .write = metag_pmu_write_counter, .event_map = metag_pmu_event_map, .cache_events = &metag_pmu_cache_events, .max_period = MAX_PERIOD, .max_events = MAX_HWEVENTS, }; /* PMU CPU hotplug notifier */ static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)hcpu; struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) return NOTIFY_DONE; memset(cpuc, 0, sizeof(struct cpu_hw_events)); raw_spin_lock_init(&cpuc->pmu_lock); return NOTIFY_OK; } static struct notifier_block metag_pmu_notifier = { .notifier_call = metag_pmu_cpu_notify, }; /* PMU Initialisation */ static int __init init_hw_perf_events(void) { int ret = 0, cpu; u32 version = *(u32 *)METAC_ID; int major = (version & METAC_ID_MAJOR_BITS) >> METAC_ID_MAJOR_S; int min_rev = (version & (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >> METAC_ID_REV_S; /* Not a Meta 2 core, then not supported */ if (0x02 > major) { pr_info("no hardware counter support available\n"); goto out; } else if (0x02 == major) { metag_pmu = &_metag_pmu; if (min_rev < 0x0104) { /* * A core without overflow interrupts, and clear-on- * write counters. 
*/ metag_pmu->handle_irq = NULL; metag_pmu->write = NULL; metag_pmu->max_period = 0; } metag_pmu->name = "meta2"; metag_pmu->version = version; metag_pmu->pmu = pmu; } pr_info("enabled with %s PMU driver, %d counters available\n", metag_pmu->name, metag_pmu->max_events); /* Initialise the active events and reservation mutex */ atomic_set(&metag_pmu->active_events, 0); mutex_init(&metag_pmu->reserve_mutex); /* Clear the counters */ metag_out32(0, PERF_COUNT(0)); metag_out32(0, PERF_COUNT(1)); for_each_possible_cpu(cpu) { struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); memset(cpuc, 0, sizeof(struct cpu_hw_events)); raw_spin_lock_init(&cpuc->pmu_lock); } register_cpu_notifier(&metag_pmu_notifier); ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW); out: return ret; } early_initcall(init_hw_perf_events);
gpl-2.0
tq-systems/linux-2.6_tqc_denx
drivers/power/da9030_battery.c
579
16256
/* * Battery charger driver for Dialog Semiconductor DA9030 * * Copyright (C) 2008 Compulab, Ltd. * Mike Rapoport <mike@compulab.co.il> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> #include <linux/workqueue.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/mfd/da903x.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #define DA9030_FAULT_LOG 0x0a #define DA9030_FAULT_LOG_OVER_TEMP (1 << 7) #define DA9030_FAULT_LOG_VBAT_OVER (1 << 4) #define DA9030_CHARGE_CONTROL 0x28 #define DA9030_CHRG_CHARGER_ENABLE (1 << 7) #define DA9030_ADC_MAN_CONTROL 0x30 #define DA9030_ADC_TBATREF_ENABLE (1 << 5) #define DA9030_ADC_LDO_INT_ENABLE (1 << 4) #define DA9030_ADC_AUTO_CONTROL 0x31 #define DA9030_ADC_TBAT_ENABLE (1 << 5) #define DA9030_ADC_VBAT_IN_TXON (1 << 4) #define DA9030_ADC_VCH_ENABLE (1 << 3) #define DA9030_ADC_ICH_ENABLE (1 << 2) #define DA9030_ADC_VBAT_ENABLE (1 << 1) #define DA9030_ADC_AUTO_SLEEP_ENABLE (1 << 0) #define DA9030_VBATMON 0x32 #define DA9030_VBATMONTXON 0x33 #define DA9030_TBATHIGHP 0x34 #define DA9030_TBATHIGHN 0x35 #define DA9030_TBATLOW 0x36 #define DA9030_VBAT_RES 0x41 #define DA9030_VBATMIN_RES 0x42 #define DA9030_VBATMINTXON_RES 0x43 #define DA9030_ICHMAX_RES 0x44 #define DA9030_ICHMIN_RES 0x45 #define DA9030_ICHAVERAGE_RES 0x46 #define DA9030_VCHMAX_RES 0x47 #define DA9030_VCHMIN_RES 0x48 #define DA9030_TBAT_RES 0x49 struct da9030_adc_res { uint8_t vbat_res; uint8_t vbatmin_res; uint8_t vbatmintxon; uint8_t ichmax_res; uint8_t ichmin_res; uint8_t ichaverage_res; uint8_t vchmax_res; uint8_t vchmin_res; uint8_t tbat_res; uint8_t adc_in4_res; uint8_t adc_in5_res; }; struct da9030_battery_thresholds { int tbat_low; int tbat_high; int tbat_restart; 
int vbat_low; int vbat_crit; int vbat_charge_start; int vbat_charge_stop; int vbat_charge_restart; int vcharge_min; int vcharge_max; }; struct da9030_charger { struct power_supply psy; struct device *master; struct da9030_adc_res adc; struct delayed_work work; unsigned int interval; struct power_supply_info *battery_info; struct da9030_battery_thresholds thresholds; unsigned int charge_milliamp; unsigned int charge_millivolt; /* charger status */ bool chdet; uint8_t fault; int mA; int mV; bool is_on; struct notifier_block nb; /* platform callbacks for battery low and critical events */ void (*battery_low)(void); void (*battery_critical)(void); struct dentry *debug_file; }; static inline int da9030_reg_to_mV(int reg) { return ((reg * 2650) >> 8) + 2650; } static inline int da9030_millivolt_to_reg(int mV) { return ((mV - 2650) << 8) / 2650; } static inline int da9030_reg_to_mA(int reg) { return ((reg * 24000) >> 8) / 15; } #ifdef CONFIG_DEBUG_FS static int bat_debug_show(struct seq_file *s, void *data) { struct da9030_charger *charger = s->private; seq_printf(s, "charger is %s\n", charger->is_on ? 
"on" : "off"); if (charger->chdet) { seq_printf(s, "iset = %dmA, vset = %dmV\n", charger->mA, charger->mV); } seq_printf(s, "vbat_res = %d (%dmV)\n", charger->adc.vbat_res, da9030_reg_to_mV(charger->adc.vbat_res)); seq_printf(s, "vbatmin_res = %d (%dmV)\n", charger->adc.vbatmin_res, da9030_reg_to_mV(charger->adc.vbatmin_res)); seq_printf(s, "vbatmintxon = %d (%dmV)\n", charger->adc.vbatmintxon, da9030_reg_to_mV(charger->adc.vbatmintxon)); seq_printf(s, "ichmax_res = %d (%dmA)\n", charger->adc.ichmax_res, da9030_reg_to_mV(charger->adc.ichmax_res)); seq_printf(s, "ichmin_res = %d (%dmA)\n", charger->adc.ichmin_res, da9030_reg_to_mA(charger->adc.ichmin_res)); seq_printf(s, "ichaverage_res = %d (%dmA)\n", charger->adc.ichaverage_res, da9030_reg_to_mA(charger->adc.ichaverage_res)); seq_printf(s, "vchmax_res = %d (%dmV)\n", charger->adc.vchmax_res, da9030_reg_to_mA(charger->adc.vchmax_res)); seq_printf(s, "vchmin_res = %d (%dmV)\n", charger->adc.vchmin_res, da9030_reg_to_mV(charger->adc.vchmin_res)); return 0; } static int debug_open(struct inode *inode, struct file *file) { return single_open(file, bat_debug_show, inode->i_private); } static const struct file_operations bat_debug_fops = { .open = debug_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct dentry *da9030_bat_create_debugfs(struct da9030_charger *charger) { charger->debug_file = debugfs_create_file("charger", 0666, 0, charger, &bat_debug_fops); return charger->debug_file; } static void da9030_bat_remove_debugfs(struct da9030_charger *charger) { debugfs_remove(charger->debug_file); } #else static inline struct dentry *da9030_bat_create_debugfs(struct da9030_charger *charger) { return NULL; } static inline void da9030_bat_remove_debugfs(struct da9030_charger *charger) { } #endif static inline void da9030_read_adc(struct da9030_charger *charger, struct da9030_adc_res *adc) { da903x_reads(charger->master, DA9030_VBAT_RES, sizeof(*adc), (uint8_t *)adc); } static void 
da9030_charger_update_state(struct da9030_charger *charger) { uint8_t val; da903x_read(charger->master, DA9030_CHARGE_CONTROL, &val); charger->is_on = (val & DA9030_CHRG_CHARGER_ENABLE) ? 1 : 0; charger->mA = ((val >> 3) & 0xf) * 100; charger->mV = (val & 0x7) * 50 + 4000; da9030_read_adc(charger, &charger->adc); da903x_read(charger->master, DA9030_FAULT_LOG, &charger->fault); charger->chdet = da903x_query_status(charger->master, DA9030_STATUS_CHDET); } static void da9030_set_charge(struct da9030_charger *charger, int on) { uint8_t val; if (on) { val = DA9030_CHRG_CHARGER_ENABLE; val |= (charger->charge_milliamp / 100) << 3; val |= (charger->charge_millivolt - 4000) / 50; charger->is_on = 1; } else { val = 0; charger->is_on = 0; } da903x_write(charger->master, DA9030_CHARGE_CONTROL, val); power_supply_changed(&charger->psy); } static void da9030_charger_check_state(struct da9030_charger *charger) { da9030_charger_update_state(charger); /* we wake or boot with external power on */ if (!charger->is_on) { if ((charger->chdet) && (charger->adc.vbat_res < charger->thresholds.vbat_charge_start)) { da9030_set_charge(charger, 1); } } else { /* Charger has been pulled out */ if (!charger->chdet) { da9030_set_charge(charger, 0); return; } if (charger->adc.vbat_res >= charger->thresholds.vbat_charge_stop) { da9030_set_charge(charger, 0); da903x_write(charger->master, DA9030_VBATMON, charger->thresholds.vbat_charge_restart); } else if (charger->adc.vbat_res > charger->thresholds.vbat_low) { /* we are charging and passed LOW_THRESH, so upate DA9030 VBAT threshold */ da903x_write(charger->master, DA9030_VBATMON, charger->thresholds.vbat_low); } if (charger->adc.vchmax_res > charger->thresholds.vcharge_max || charger->adc.vchmin_res < charger->thresholds.vcharge_min || /* Tempreture readings are negative */ charger->adc.tbat_res < charger->thresholds.tbat_high || charger->adc.tbat_res > charger->thresholds.tbat_low) { /* disable charger */ da9030_set_charge(charger, 0); } } } 
/* Delayed-work handler: run the charge state machine and re-arm. */
static void da9030_charging_monitor(struct work_struct *work)
{
	struct da9030_charger *charger;

	charger = container_of(work, struct da9030_charger, work.work);

	da9030_charger_check_state(charger);

	/* reschedule for the next time */
	schedule_delayed_work(&charger->work, charger->interval);
}

/* Properties reported by da9030_battery_get_property() below. */
static enum power_supply_property da9030_battery_props[] = {
	POWER_SUPPLY_PROP_MODEL_NAME,
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_HEALTH,
	POWER_SUPPLY_PROP_TECHNOLOGY,
	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_AVG,
};

/* Map charger-detect + enable flags onto the power-supply STATUS enum. */
static void da9030_battery_check_status(struct da9030_charger *charger,
				    union power_supply_propval *val)
{
	if (charger->chdet) {
		if (charger->is_on)
			val->intval = POWER_SUPPLY_STATUS_CHARGING;
		else
			val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
	} else {
		val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
	}
}

/* Translate the latched FAULT_LOG bits into a HEALTH value. */
static void da9030_battery_check_health(struct da9030_charger *charger,
				    union power_supply_propval *val)
{
	if (charger->fault & DA9030_FAULT_LOG_OVER_TEMP)
		val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
	else if (charger->fault & DA9030_FAULT_LOG_VBAT_OVER)
		val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
	else
		val->intval = POWER_SUPPLY_HEALTH_GOOD;
}

/*
 * power_supply get_property callback.  Values come from the cached
 * charger state / last ADC snapshot; voltage and current are converted
 * from register counts to uV / uA (hence the * 1000 on the mV/mA
 * conversions).  Always returns 0; unknown properties leave *val as-is.
 */
static int da9030_battery_get_property(struct power_supply *psy,
				   enum power_supply_property psp,
				   union power_supply_propval *val)
{
	struct da9030_charger *charger;
	charger = container_of(psy, struct da9030_charger, psy);

	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		da9030_battery_check_status(charger, val);
		break;
	case POWER_SUPPLY_PROP_HEALTH:
		da9030_battery_check_health(charger, val);
		break;
	case POWER_SUPPLY_PROP_TECHNOLOGY:
		val->intval = charger->battery_info->technology;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
		val->intval = charger->battery_info->voltage_max_design;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
		val->intval = charger->battery_info->voltage_min_design;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		val->intval = da9030_reg_to_mV(charger->adc.vbat_res) * 1000;
		break;
	case POWER_SUPPLY_PROP_CURRENT_AVG:
		val->intval =
			da9030_reg_to_mA(charger->adc.ichaverage_res) * 1000;
		break;
	case POWER_SUPPLY_PROP_MODEL_NAME:
		val->strval = charger->battery_info->name;
		break;
	default:
		break;
	}

	return 0;
}

/*
 * VBATMON interrupt path: battery voltage crossed the programmed
 * threshold.  Only acts while not charging; escalates the threshold to
 * the critical level and fires the platform battery_low/battery_critical
 * callbacks.
 *
 * NOTE(review): with vbat_crit < vbat_low the "else if" branch looks
 * unreachable (vbat_res >= vbat_low implies vbat_res >= vbat_crit) —
 * verify the intended threshold ordering against the platform data.
 */
static void da9030_battery_vbat_event(struct da9030_charger *charger)
{
	da9030_read_adc(charger, &charger->adc);

	if (charger->is_on)
		return;

	if (charger->adc.vbat_res < charger->thresholds.vbat_low) {
		/* set VBAT threshold for critical */
		da903x_write(charger->master, DA9030_VBATMON,
			     charger->thresholds.vbat_crit);
		if (charger->battery_low)
			charger->battery_low();
	} else if (charger->adc.vbat_res <
		   charger->thresholds.vbat_crit) {
		/* notify the system of battery critical */
		if (charger->battery_critical)
			charger->battery_critical();
	}
}

/* DA903x event notifier: dispatch charger-related interrupt events. */
static int da9030_battery_event(struct notifier_block *nb, unsigned long event,
				void *data)
{
	struct da9030_charger *charger =
		container_of(nb, struct da9030_charger, nb);

	switch (event) {
	case DA9030_EVENT_CHDET:
		/* charger plug/unplug: re-run the state machine now */
		cancel_delayed_work_sync(&charger->work);
		schedule_work(&charger->work.work);
		break;
	case DA9030_EVENT_VBATMON:
		da9030_battery_vbat_event(charger);
		break;
	case DA9030_EVENT_CHIOVER:
	case DA9030_EVENT_TBAT:
		/* over-current / battery-temperature fault: stop charging */
		da9030_set_charge(charger, 0);
		break;
	}

	return 0;
}

/*
 * Convert platform-data thresholds (millivolts, raw temperature codes)
 * into the register units the monitor logic compares against.
 */
static void da9030_battery_convert_thresholds(struct da9030_charger *charger,
					      struct da9030_battery_info *pdata)
{
	charger->thresholds.tbat_low = pdata->tbat_low;
	charger->thresholds.tbat_high = pdata->tbat_high;
	charger->thresholds.tbat_restart  = pdata->tbat_restart;

	charger->thresholds.vbat_low =
		da9030_millivolt_to_reg(pdata->vbat_low);
	charger->thresholds.vbat_crit =
		da9030_millivolt_to_reg(pdata->vbat_crit);
	charger->thresholds.vbat_charge_start =
		da9030_millivolt_to_reg(pdata->vbat_charge_start);
	charger->thresholds.vbat_charge_stop =
		da9030_millivolt_to_reg(pdata->vbat_charge_stop);
	charger->thresholds.vbat_charge_restart =
		da9030_millivolt_to_reg(pdata->vbat_charge_restart);

	charger->thresholds.vcharge_min =
		da9030_millivolt_to_reg(pdata->vcharge_min);
	charger->thresholds.vcharge_max =
		da9030_millivolt_to_reg(pdata->vcharge_max);
}

/* Fill in the embedded power_supply from the platform battery info. */
static void da9030_battery_setup_psy(struct da9030_charger *charger)
{
	struct power_supply *psy = &charger->psy;
	struct power_supply_info *info = charger->battery_info;

	psy->name = info->name;
	psy->use_for_apm = info->use_for_apm;
	psy->type = POWER_SUPPLY_TYPE_BATTERY;
	psy->get_property = da9030_battery_get_property;

	psy->properties = da9030_battery_props;
	psy->num_properties = ARRAY_SIZE(da9030_battery_props);
};

/*
 * One-time hardware setup: program the VBATMON/TBAT threshold registers
 * and enable the ADC reference plus automatic measurements.
 * Returns 0 or a negative error from the da903x accessors.
 */
static int da9030_battery_charger_init(struct da9030_charger *charger)
{
	char v[5];
	int ret;

	/* v[0..4] map onto the five registers starting at DA9030_VBATMON */
	v[0] = v[1] = charger->thresholds.vbat_low;
	v[2] = charger->thresholds.tbat_high;
	v[3] = charger->thresholds.tbat_restart;
	v[4] = charger->thresholds.tbat_low;

	ret = da903x_writes(charger->master, DA9030_VBATMON, 5, v);
	if (ret)
		return ret;

	/*
	 * Enable reference voltage supply for ADC from the LDO_INTERNAL
	 * regulator. Must be set before ADC measurements can be made.
	 */
	ret = da903x_write(charger->master, DA9030_ADC_MAN_CONTROL,
			   DA9030_ADC_LDO_INT_ENABLE |
			   DA9030_ADC_TBATREF_ENABLE);
	if (ret)
		return ret;

	/* enable auto ADC measurements */
	return da903x_write(charger->master, DA9030_ADC_AUTO_CONTROL,
			    DA9030_ADC_TBAT_ENABLE | DA9030_ADC_VBAT_IN_TXON |
			    DA9030_ADC_VCH_ENABLE | DA9030_ADC_ICH_ENABLE |
			    DA9030_ADC_VBAT_ENABLE |
			    DA9030_ADC_AUTO_SLEEP_ENABLE);
}

/*
 * Platform probe: validate platform data, allocate and initialize the
 * charger state, start the monitor work, register the DA903x notifier
 * and the power supply.  Error paths unwind in reverse order.
 */
static int da9030_battery_probe(struct platform_device *pdev)
{
	struct da9030_charger *charger;
	struct da9030_battery_info *pdata = pdev->dev.platform_data;
	int ret;

	if (pdata == NULL)
		return -EINVAL;

	/* sanity-check the requested charge current/voltage envelope */
	if (pdata->charge_milliamp >= 1500 ||
			pdata->charge_millivolt < 4000 ||
			pdata->charge_millivolt > 4350)
		return -EINVAL;

	charger = kzalloc(sizeof(*charger), GFP_KERNEL);
	if (charger == NULL)
		return -ENOMEM;

	charger->master = pdev->dev.parent;

	/* 10 seconds between monitor runs unless platform defines
	   other interval */
	charger->interval = msecs_to_jiffies(
		(pdata->batmon_interval ? : 10) * 1000);

	charger->charge_milliamp = pdata->charge_milliamp;
	charger->charge_millivolt = pdata->charge_millivolt;
	charger->battery_info = pdata->battery_info;
	charger->battery_low = pdata->battery_low;
	charger->battery_critical = pdata->battery_critical;

	da9030_battery_convert_thresholds(charger, pdata);

	ret = da9030_battery_charger_init(charger);
	if (ret)
		goto err_charger_init;

	INIT_DELAYED_WORK(&charger->work, da9030_charging_monitor);
	schedule_delayed_work(&charger->work, charger->interval);

	charger->nb.notifier_call = da9030_battery_event;
	ret = da903x_register_notifier(charger->master, &charger->nb,
				       DA9030_EVENT_CHDET |
				       DA9030_EVENT_VBATMON |
				       DA9030_EVENT_CHIOVER |
				       DA9030_EVENT_TBAT);
	if (ret)
		goto err_notifier;

	da9030_battery_setup_psy(charger);
	ret = power_supply_register(&pdev->dev, &charger->psy);
	if (ret)
		goto err_ps_register;

	charger->debug_file = da9030_bat_create_debugfs(charger);
	platform_set_drvdata(pdev, charger);
	return 0;

err_ps_register:
	da903x_unregister_notifier(charger->master, &charger->nb,
				   DA9030_EVENT_CHDET | DA9030_EVENT_VBATMON |
				   DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT);
err_notifier:
	cancel_delayed_work(&charger->work);

err_charger_init:
	kfree(charger);

	return ret;
}

/* Platform remove: tear down in reverse of probe and stop charging. */
static int da9030_battery_remove(struct platform_device *dev)
{
	struct da9030_charger *charger = platform_get_drvdata(dev);

	da9030_bat_remove_debugfs(charger);

	da903x_unregister_notifier(charger->master, &charger->nb,
				   DA9030_EVENT_CHDET | DA9030_EVENT_VBATMON |
				   DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT);
	cancel_delayed_work_sync(&charger->work);
	da9030_set_charge(charger, 0);
	power_supply_unregister(&charger->psy);

	kfree(charger);

	return 0;
}

static struct platform_driver da903x_battery_driver = {
	.driver	= {
		.name	= "da903x-battery",
		.owner	= THIS_MODULE,
	},
	.probe = da9030_battery_probe,
	.remove = da9030_battery_remove,
};

static int da903x_battery_init(void)
{
	return platform_driver_register(&da903x_battery_driver);
}

static void da903x_battery_exit(void)
{
	platform_driver_unregister(&da903x_battery_driver);
}

module_init(da903x_battery_init);
module_exit(da903x_battery_exit);

MODULE_DESCRIPTION("DA9030 battery charger driver");
MODULE_AUTHOR("Mike Rapoport, CompuLab");
MODULE_LICENSE("GPL");
gpl-2.0
OMFGB/htc-kernel-msm7x30_omfgb
drivers/i2c/i2c-boardinfo.c
1091
3033
/* * i2c-boardinfo.h - collect pre-declarations of I2C devices * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/rwsem.h> #include "i2c-core.h" /* These symbols are exported ONLY FOR the i2c core. * No other users will be supported. */ DECLARE_RWSEM(__i2c_board_lock); EXPORT_SYMBOL_GPL(__i2c_board_lock); LIST_HEAD(__i2c_board_list); EXPORT_SYMBOL_GPL(__i2c_board_list); int __i2c_first_dynamic_bus_num; EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num); /** * i2c_register_board_info - statically declare I2C devices * @busnum: identifies the bus to which these devices belong * @info: vector of i2c device descriptors * @len: how many descriptors in the vector; may be zero to reserve * the specified bus number. * * Systems using the Linux I2C driver stack can declare tables of board info * while they initialize. This should be done in board-specific init code * near arch_initcall() time, or equivalent, before any I2C adapter driver is * registered. For example, mainboard init code could define several devices, * as could the init code for each daughtercard in a board stack. * * The I2C devices will be created later, after the adapter for the relevant * bus has been registered. 
After that moment, standard driver model tools * are used to bind "new style" I2C drivers to the devices. The bus number * for any device declared using this routine is not available for dynamic * allocation. * * The board info passed can safely be __initdata, but be careful of embedded * pointers (for platform_data, functions, etc) since that won't be copied. */ int __init i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len) { int status; down_write(&__i2c_board_lock); /* dynamic bus numbers will be assigned after the last static one */ if (busnum >= __i2c_first_dynamic_bus_num) __i2c_first_dynamic_bus_num = busnum + 1; for (status = 0; len; len--, info++) { struct i2c_devinfo *devinfo; devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL); if (!devinfo) { pr_debug("i2c-core: can't register boardinfo!\n"); status = -ENOMEM; break; } devinfo->busnum = busnum; devinfo->board_info = *info; list_add_tail(&devinfo->list, &__i2c_board_list); } up_write(&__i2c_board_lock); return status; }
gpl-2.0
MoKee/android_kernel_lge_sniper
arch/powerpc/kernel/traps.c
1347
40889
/* * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * Copyright 2007-2010 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Modified by Cort Dougan (cort@cs.nmt.edu) * and Paul Mackerras (paulus@samba.org) */ /* * This file handles the architecture-dependent parts of hardware exceptions */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/module.h> #include <linux/prctl.h> #include <linux/delay.h> #include <linux/kprobes.h> #include <linux/kexec.h> #include <linux/backlight.h> #include <linux/bug.h> #include <linux/kdebug.h> #include <linux/debugfs.h> #include <linux/ratelimit.h> #include <asm/emulated_ops.h> #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/rtas.h> #include <asm/pmc.h> #ifdef CONFIG_PPC32 #include <asm/reg.h> #endif #ifdef CONFIG_PMAC_BACKLIGHT #include <asm/backlight.h> #endif #ifdef CONFIG_PPC64 #include <asm/firmware.h> #include <asm/processor.h> #endif #include <asm/kexec.h> #include <asm/ppc-opcode.h> #include <asm/rio.h> #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) int (*__debugger)(struct pt_regs *regs) __read_mostly; int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly; int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly; int (*__debugger_dabr_match)(struct pt_regs *regs) __read_mostly; int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly; 
EXPORT_SYMBOL(__debugger); EXPORT_SYMBOL(__debugger_ipi); EXPORT_SYMBOL(__debugger_bpt); EXPORT_SYMBOL(__debugger_sstep); EXPORT_SYMBOL(__debugger_iabr_match); EXPORT_SYMBOL(__debugger_dabr_match); EXPORT_SYMBOL(__debugger_fault_handler); #endif /* * Trap & Exception support */ #ifdef CONFIG_PMAC_BACKLIGHT static void pmac_backlight_unblank(void) { mutex_lock(&pmac_backlight_mutex); if (pmac_backlight) { struct backlight_properties *props; props = &pmac_backlight->props; props->brightness = props->max_brightness; props->power = FB_BLANK_UNBLANK; backlight_update_status(pmac_backlight); } mutex_unlock(&pmac_backlight_mutex); } #else static inline void pmac_backlight_unblank(void) { } #endif int die(const char *str, struct pt_regs *regs, long err) { static struct { raw_spinlock_t lock; u32 lock_owner; int lock_owner_depth; } die = { .lock = __RAW_SPIN_LOCK_UNLOCKED(die.lock), .lock_owner = -1, .lock_owner_depth = 0 }; static int die_counter; unsigned long flags; if (debugger(regs)) return 1; oops_enter(); if (die.lock_owner != raw_smp_processor_id()) { console_verbose(); raw_spin_lock_irqsave(&die.lock, flags); die.lock_owner = smp_processor_id(); die.lock_owner_depth = 0; bust_spinlocks(1); if (machine_is(powermac)) pmac_backlight_unblank(); } else { local_save_flags(flags); } if (++die.lock_owner_depth < 3) { printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); #ifdef CONFIG_PREEMPT printk("PREEMPT "); #endif #ifdef CONFIG_SMP printk("SMP NR_CPUS=%d ", NR_CPUS); #endif #ifdef CONFIG_DEBUG_PAGEALLOC printk("DEBUG_PAGEALLOC "); #endif #ifdef CONFIG_NUMA printk("NUMA "); #endif printk("%s\n", ppc_md.name ? 
ppc_md.name : ""); if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) return 1; print_modules(); show_regs(regs); } else { printk("Recursive die() failure, output suppressed\n"); } bust_spinlocks(0); die.lock_owner = -1; add_taint(TAINT_DIE); raw_spin_unlock_irqrestore(&die.lock, flags); if (kexec_should_crash(current) || kexec_sr_activated(smp_processor_id())) crash_kexec(regs); crash_kexec_secondary(regs); if (in_interrupt()) panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); oops_exit(); do_exit(err); return 0; } void user_single_step_siginfo(struct task_struct *tsk, struct pt_regs *regs, siginfo_t *info) { memset(info, 0, sizeof(*info)); info->si_signo = SIGTRAP; info->si_code = TRAP_TRACE; info->si_addr = (void __user *)regs->nip; } void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) { siginfo_t info; const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \ "at %08lx nip %08lx lr %08lx code %x\n"; const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \ "at %016lx nip %016lx lr %016lx code %x\n"; if (!user_mode(regs)) { if (die("Exception in kernel mode", regs, signr)) return; } else if (show_unhandled_signals && unhandled_signal(current, signr)) { printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, current->comm, current->pid, signr, addr, regs->nip, regs->link, code); } memset(&info, 0, sizeof(info)); info.si_signo = signr; info.si_code = code; info.si_addr = (void __user *) addr; force_sig_info(signr, &info, current); } #ifdef CONFIG_PPC64 void system_reset_exception(struct pt_regs *regs) { /* See if any machine dependent calls */ if (ppc_md.system_reset_exception) { if (ppc_md.system_reset_exception(regs)) return; } #ifdef CONFIG_KEXEC cpumask_set_cpu(smp_processor_id(), &cpus_in_sr); #endif die("System Reset", regs, SIGABRT); /* * Some CPUs when released from the debugger will execute this path. * These CPUs entered the debugger via a soft-reset. 
If the CPU was * hung before entering the debugger it will return to the hung * state when exiting this function. This causes a problem in * kdump since the hung CPU(s) will not respond to the IPI sent * from kdump. To prevent the problem we call crash_kexec_secondary() * here. If a kdump had not been initiated or we exit the debugger * with the "exit and recover" command (x) crash_kexec_secondary() * will return after 5ms and the CPU returns to its previous state. */ crash_kexec_secondary(regs); /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) panic("Unrecoverable System Reset"); /* What should we do here? We could issue a shutdown or hard reset. */ } #endif /* * I/O accesses can cause machine checks on powermacs. * Check if the NIP corresponds to the address of a sync * instruction for which there is an entry in the exception * table. * Note that the 601 only takes a machine check on TEA * (transfer error ack) signal assertion, and does not * set any of the top 16 bits of SRR1. * -- paulus. */ static inline int check_io_access(struct pt_regs *regs) { #ifdef CONFIG_PPC32 unsigned long msr = regs->msr; const struct exception_table_entry *entry; unsigned int *nip = (unsigned int *)regs->nip; if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) && (entry = search_exception_tables(regs->nip)) != NULL) { /* * Check that it's a sync instruction, or somewhere * in the twi; isync; nop sequence that inb/inw/inl uses. * As the address is in the exception table * we should be able to read the instr there. * For the debug message, we look at the preceding * load or store. */ if (*nip == 0x60000000) /* nop */ nip -= 2; else if (*nip == 0x4c00012c) /* isync */ --nip; if (*nip == 0x7c0004ac || (*nip >> 26) == 3) { /* sync or twi */ unsigned int rb; --nip; rb = (*nip >> 11) & 0x1f; printk(KERN_DEBUG "%s bad port %lx at %p\n", (*nip & 0x100)? 
"OUT to": "IN from", regs->gpr[rb] - _IO_BASE, nip); regs->msr |= MSR_RI; regs->nip = entry->fixup; return 1; } } #endif /* CONFIG_PPC32 */ return 0; } #ifdef CONFIG_PPC_ADV_DEBUG_REGS /* On 4xx, the reason for the machine check or program exception is in the ESR. */ #define get_reason(regs) ((regs)->dsisr) #ifndef CONFIG_FSL_BOOKE #define get_mc_reason(regs) ((regs)->dsisr) #else #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) #endif #define REASON_FP ESR_FP #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) #define REASON_PRIVILEGED ESR_PPR #define REASON_TRAP ESR_PTR /* single-step stuff */ #define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC) #define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC) #else /* On non-4xx, the reason for the machine check or program exception is in the MSR. */ #define get_reason(regs) ((regs)->msr) #define get_mc_reason(regs) ((regs)->msr) #define REASON_FP 0x100000 #define REASON_ILLEGAL 0x80000 #define REASON_PRIVILEGED 0x40000 #define REASON_TRAP 0x20000 #define single_stepping(regs) ((regs)->msr & MSR_SE) #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) #endif #if defined(CONFIG_4xx) int machine_check_4xx(struct pt_regs *regs) { unsigned long reason = get_mc_reason(regs); if (reason & ESR_IMCP) { printk("Instruction"); mtspr(SPRN_ESR, reason & ~ESR_IMCP); } else printk("Data"); printk(" machine check in kernel mode.\n"); return 0; } int machine_check_440A(struct pt_regs *regs) { unsigned long reason = get_mc_reason(regs); printk("Machine check in kernel mode.\n"); if (reason & ESR_IMCP){ printk("Instruction Synchronous Machine Check exception\n"); mtspr(SPRN_ESR, reason & ~ESR_IMCP); } else { u32 mcsr = mfspr(SPRN_MCSR); if (mcsr & MCSR_IB) printk("Instruction Read PLB Error\n"); if (mcsr & MCSR_DRB) printk("Data Read PLB Error\n"); if (mcsr & MCSR_DWB) printk("Data Write PLB Error\n"); if (mcsr & MCSR_TLBP) printk("TLB Parity Error\n"); if (mcsr & MCSR_ICP){ flush_instruction_cache(); printk("I-Cache 
Parity Error\n"); } if (mcsr & MCSR_DCSP) printk("D-Cache Search Parity Error\n"); if (mcsr & MCSR_DCFP) printk("D-Cache Flush Parity Error\n"); if (mcsr & MCSR_IMPE) printk("Machine Check exception is imprecise\n"); /* Clear MCSR */ mtspr(SPRN_MCSR, mcsr); } return 0; } int machine_check_47x(struct pt_regs *regs) { unsigned long reason = get_mc_reason(regs); u32 mcsr; printk(KERN_ERR "Machine check in kernel mode.\n"); if (reason & ESR_IMCP) { printk(KERN_ERR "Instruction Synchronous Machine Check exception\n"); mtspr(SPRN_ESR, reason & ~ESR_IMCP); return 0; } mcsr = mfspr(SPRN_MCSR); if (mcsr & MCSR_IB) printk(KERN_ERR "Instruction Read PLB Error\n"); if (mcsr & MCSR_DRB) printk(KERN_ERR "Data Read PLB Error\n"); if (mcsr & MCSR_DWB) printk(KERN_ERR "Data Write PLB Error\n"); if (mcsr & MCSR_TLBP) printk(KERN_ERR "TLB Parity Error\n"); if (mcsr & MCSR_ICP) { flush_instruction_cache(); printk(KERN_ERR "I-Cache Parity Error\n"); } if (mcsr & MCSR_DCSP) printk(KERN_ERR "D-Cache Search Parity Error\n"); if (mcsr & PPC47x_MCSR_GPR) printk(KERN_ERR "GPR Parity Error\n"); if (mcsr & PPC47x_MCSR_FPR) printk(KERN_ERR "FPR Parity Error\n"); if (mcsr & PPC47x_MCSR_IPR) printk(KERN_ERR "Machine Check exception is imprecise\n"); /* Clear MCSR */ mtspr(SPRN_MCSR, mcsr); return 0; } #elif defined(CONFIG_E500) int machine_check_e500mc(struct pt_regs *regs) { unsigned long mcsr = mfspr(SPRN_MCSR); unsigned long reason = mcsr; int recoverable = 1; if (reason & MCSR_LD) { recoverable = fsl_rio_mcheck_exception(regs); if (recoverable == 1) goto silent_out; } printk("Machine check in kernel mode.\n"); printk("Caused by (from MCSR=%lx): ", reason); if (reason & MCSR_MCP) printk("Machine Check Signal\n"); if (reason & MCSR_ICPERR) { printk("Instruction Cache Parity Error\n"); /* * This is recoverable by invalidating the i-cache. 
*/ mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI); while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI) ; /* * This will generally be accompanied by an instruction * fetch error report -- only treat MCSR_IF as fatal * if it wasn't due to an L1 parity error. */ reason &= ~MCSR_IF; } if (reason & MCSR_DCPERR_MC) { printk("Data Cache Parity Error\n"); recoverable = 0; } if (reason & MCSR_L2MMU_MHIT) { printk("Hit on multiple TLB entries\n"); recoverable = 0; } if (reason & MCSR_NMI) printk("Non-maskable interrupt\n"); if (reason & MCSR_IF) { printk("Instruction Fetch Error Report\n"); recoverable = 0; } if (reason & MCSR_LD) { printk("Load Error Report\n"); recoverable = 0; } if (reason & MCSR_ST) { printk("Store Error Report\n"); recoverable = 0; } if (reason & MCSR_LDG) { printk("Guarded Load Error Report\n"); recoverable = 0; } if (reason & MCSR_TLBSYNC) printk("Simultaneous tlbsync operations\n"); if (reason & MCSR_BSL2_ERR) { printk("Level 2 Cache Error\n"); recoverable = 0; } if (reason & MCSR_MAV) { u64 addr; addr = mfspr(SPRN_MCAR); addr |= (u64)mfspr(SPRN_MCARU) << 32; printk("Machine Check %s Address: %#llx\n", reason & MCSR_MEA ? 
"Effective" : "Physical", addr); } silent_out: mtspr(SPRN_MCSR, mcsr); return mfspr(SPRN_MCSR) == 0 && recoverable; } int machine_check_e500(struct pt_regs *regs) { unsigned long reason = get_mc_reason(regs); if (reason & MCSR_BUS_RBERR) { if (fsl_rio_mcheck_exception(regs)) return 1; } printk("Machine check in kernel mode.\n"); printk("Caused by (from MCSR=%lx): ", reason); if (reason & MCSR_MCP) printk("Machine Check Signal\n"); if (reason & MCSR_ICPERR) printk("Instruction Cache Parity Error\n"); if (reason & MCSR_DCP_PERR) printk("Data Cache Push Parity Error\n"); if (reason & MCSR_DCPERR) printk("Data Cache Parity Error\n"); if (reason & MCSR_BUS_IAERR) printk("Bus - Instruction Address Error\n"); if (reason & MCSR_BUS_RAERR) printk("Bus - Read Address Error\n"); if (reason & MCSR_BUS_WAERR) printk("Bus - Write Address Error\n"); if (reason & MCSR_BUS_IBERR) printk("Bus - Instruction Data Error\n"); if (reason & MCSR_BUS_RBERR) printk("Bus - Read Data Bus Error\n"); if (reason & MCSR_BUS_WBERR) printk("Bus - Read Data Bus Error\n"); if (reason & MCSR_BUS_IPERR) printk("Bus - Instruction Parity Error\n"); if (reason & MCSR_BUS_RPERR) printk("Bus - Read Parity Error\n"); return 0; } int machine_check_generic(struct pt_regs *regs) { return 0; } #elif defined(CONFIG_E200) int machine_check_e200(struct pt_regs *regs) { unsigned long reason = get_mc_reason(regs); printk("Machine check in kernel mode.\n"); printk("Caused by (from MCSR=%lx): ", reason); if (reason & MCSR_MCP) printk("Machine Check Signal\n"); if (reason & MCSR_CP_PERR) printk("Cache Push Parity Error\n"); if (reason & MCSR_CPERR) printk("Cache Parity Error\n"); if (reason & MCSR_EXCP_ERR) printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); if (reason & MCSR_BUS_IRERR) printk("Bus - Read Bus Error on instruction fetch\n"); if (reason & MCSR_BUS_DRERR) printk("Bus - Read Bus Error on data load\n"); if (reason & MCSR_BUS_WRERR) printk("Bus - Write Bus Error on 
buffered store or cache line push\n"); return 0; } #else int machine_check_generic(struct pt_regs *regs) { unsigned long reason = get_mc_reason(regs); printk("Machine check in kernel mode.\n"); printk("Caused by (from SRR1=%lx): ", reason); switch (reason & 0x601F0000) { case 0x80000: printk("Machine check signal\n"); break; case 0: /* for 601 */ case 0x40000: case 0x140000: /* 7450 MSS error and TEA */ printk("Transfer error ack signal\n"); break; case 0x20000: printk("Data parity error signal\n"); break; case 0x10000: printk("Address parity error signal\n"); break; case 0x20000000: printk("L1 Data Cache error\n"); break; case 0x40000000: printk("L1 Instruction Cache error\n"); break; case 0x00100000: printk("L2 data cache parity error\n"); break; default: printk("Unknown values in msr\n"); } return 0; } #endif /* everything else */ void machine_check_exception(struct pt_regs *regs) { int recover = 0; __get_cpu_var(irq_stat).mce_exceptions++; /* See if any machine dependent calls. In theory, we would want * to call the CPU first, and call the ppc_md. one if the CPU * one returns a positive number. However there is existing code * that assumes the board gets a first chance, so let's keep it * that way for now and fix things later. --BenH. */ if (ppc_md.machine_check_exception) recover = ppc_md.machine_check_exception(regs); else if (cur_cpu_spec->machine_check) recover = cur_cpu_spec->machine_check(regs); if (recover > 0) return; #if defined(CONFIG_8xx) && defined(CONFIG_PCI) /* the qspan pci read routines can cause machine checks -- Cort * * yuck !!! that totally needs to go away ! There are better ways * to deal with that than having a wart in the mcheck handler. 
* -- BenH */ bad_page_fault(regs, regs->dar, SIGBUS); return; #endif if (debugger_fault_handler(regs)) return; if (check_io_access(regs)) return; die("Machine check", regs, SIGBUS); /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) panic("Unrecoverable Machine check"); } void SMIException(struct pt_regs *regs) { die("System Management Interrupt", regs, SIGABRT); } void unknown_exception(struct pt_regs *regs) { printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", regs->nip, regs->msr, regs->trap); _exception(SIGTRAP, regs, 0, 0); } void instruction_breakpoint_exception(struct pt_regs *regs) { if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) return; if (debugger_iabr_match(regs)) return; _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); } void RunModeException(struct pt_regs *regs) { _exception(SIGTRAP, regs, 0, 0); } void __kprobes single_step_exception(struct pt_regs *regs) { clear_single_step(regs); if (notify_die(DIE_SSTEP, "single_step", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) return; if (debugger_sstep(regs)) return; _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); } /* * After we have successfully emulated an instruction, we have to * check if the instruction was being single-stepped, and if so, * pretend we got a single-step exception. This was pointed out * by Kumar Gala. 
-- paulus */ static void emulate_single_step(struct pt_regs *regs) { if (single_stepping(regs)) single_step_exception(regs); } static inline int __parse_fpscr(unsigned long fpscr) { int ret = 0; /* Invalid operation */ if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) ret = FPE_FLTINV; /* Overflow */ else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX)) ret = FPE_FLTOVF; /* Underflow */ else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX)) ret = FPE_FLTUND; /* Divide by zero */ else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX)) ret = FPE_FLTDIV; /* Inexact result */ else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX)) ret = FPE_FLTRES; return ret; } static void parse_fpe(struct pt_regs *regs) { int code = 0; flush_fp_to_thread(current); code = __parse_fpscr(current->thread.fpscr.val); _exception(SIGFPE, regs, code, regs->nip); } /* * Illegal instruction emulation support. Originally written to * provide the PVR to user applications using the mfspr rd, PVR. * Return non-zero if we can't emulate, or -EFAULT if the associated * memory access caused an access fault. Return zero on success. * * There are a couple of ways to do this, either "decode" the instruction * or directly match lots of bits. In this case, matching lots of * bits is faster and easier. * */ static int emulate_string_inst(struct pt_regs *regs, u32 instword) { u8 rT = (instword >> 21) & 0x1f; u8 rA = (instword >> 16) & 0x1f; u8 NB_RB = (instword >> 11) & 0x1f; u32 num_bytes; unsigned long EA; int pos = 0; /* Early out if we are an invalid form of lswx */ if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX) if ((rT == rA) || (rT == NB_RB)) return -EINVAL; EA = (rA == 0) ? 0 : regs->gpr[rA]; switch (instword & PPC_INST_STRING_MASK) { case PPC_INST_LSWX: case PPC_INST_STSWX: EA += NB_RB; num_bytes = regs->xer & 0x7f; break; case PPC_INST_LSWI: case PPC_INST_STSWI: num_bytes = (NB_RB == 0) ? 
32 : NB_RB; break; default: return -EINVAL; } while (num_bytes != 0) { u8 val; u32 shift = 8 * (3 - (pos & 0x3)); switch ((instword & PPC_INST_STRING_MASK)) { case PPC_INST_LSWX: case PPC_INST_LSWI: if (get_user(val, (u8 __user *)EA)) return -EFAULT; /* first time updating this reg, * zero it out */ if (pos == 0) regs->gpr[rT] = 0; regs->gpr[rT] |= val << shift; break; case PPC_INST_STSWI: case PPC_INST_STSWX: val = regs->gpr[rT] >> shift; if (put_user(val, (u8 __user *)EA)) return -EFAULT; break; } /* move EA to next address */ EA += 1; num_bytes--; /* manage our position within the register */ if (++pos == 4) { pos = 0; if (++rT == 32) rT = 0; } } return 0; } static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword) { u32 ra,rs; unsigned long tmp; ra = (instword >> 16) & 0x1f; rs = (instword >> 21) & 0x1f; tmp = regs->gpr[rs]; tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL); tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL); tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL; regs->gpr[ra] = tmp; return 0; } static int emulate_isel(struct pt_regs *regs, u32 instword) { u8 rT = (instword >> 21) & 0x1f; u8 rA = (instword >> 16) & 0x1f; u8 rB = (instword >> 11) & 0x1f; u8 BC = (instword >> 6) & 0x1f; u8 bit; unsigned long tmp; tmp = (rA == 0) ? 0 : regs->gpr[rA]; bit = (regs->ccr >> (31 - BC)) & 0x1; regs->gpr[rT] = bit ? tmp : regs->gpr[rB]; return 0; } static int emulate_instruction(struct pt_regs *regs) { u32 instword; u32 rd; if (!user_mode(regs) || (regs->msr & MSR_LE)) return -EINVAL; CHECK_FULL_REGS(regs); if (get_user(instword, (u32 __user *)(regs->nip))) return -EFAULT; /* Emulate the mfspr rD, PVR. */ if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { PPC_WARN_EMULATED(mfpvr, regs); rd = (instword >> 21) & 0x1f; regs->gpr[rd] = mfspr(SPRN_PVR); return 0; } /* Emulating the dcba insn is just a no-op. 
*/ if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { PPC_WARN_EMULATED(dcba, regs); return 0; } /* Emulate the mcrxr insn. */ if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { int shift = (instword >> 21) & 0x1c; unsigned long msk = 0xf0000000UL >> shift; PPC_WARN_EMULATED(mcrxr, regs); regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); regs->xer &= ~0xf0000000UL; return 0; } /* Emulate load/store string insn. */ if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { PPC_WARN_EMULATED(string, regs); return emulate_string_inst(regs, instword); } /* Emulate the popcntb (Population Count Bytes) instruction. */ if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { PPC_WARN_EMULATED(popcntb, regs); return emulate_popcntb_inst(regs, instword); } /* Emulate isel (Integer Select) instruction */ if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { PPC_WARN_EMULATED(isel, regs); return emulate_isel(regs, instword); } #ifdef CONFIG_PPC64 /* Emulate the mfspr rD, DSCR. */ if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) && cpu_has_feature(CPU_FTR_DSCR)) { PPC_WARN_EMULATED(mfdscr, regs); rd = (instword >> 21) & 0x1f; regs->gpr[rd] = mfspr(SPRN_DSCR); return 0; } /* Emulate the mtspr DSCR, rD. 
*/ if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) && cpu_has_feature(CPU_FTR_DSCR)) { PPC_WARN_EMULATED(mtdscr, regs); rd = (instword >> 21) & 0x1f; mtspr(SPRN_DSCR, regs->gpr[rd]); current->thread.dscr_inherit = 1; return 0; } #endif return -EINVAL; } int is_valid_bugaddr(unsigned long addr) { return is_kernel_addr(addr); } void __kprobes program_check_exception(struct pt_regs *regs) { unsigned int reason = get_reason(regs); extern int do_mathemu(struct pt_regs *regs); /* We can now get here via a FP Unavailable exception if the core * has no FPU, in that case the reason flags will be 0 */ if (reason & REASON_FP) { /* IEEE FP exception */ parse_fpe(regs); return; } if (reason & REASON_TRAP) { /* Debugger is first in line to stop recursive faults in * rcu_lock, notify_die, or atomic_notifier_call_chain */ if (debugger_bpt(regs)) return; /* trap exception */ if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) return; if (!(regs->msr & MSR_PR) && /* not user-mode */ report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { regs->nip += 4; return; } _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); return; } local_irq_enable(); #ifdef CONFIG_MATH_EMULATION /* (reason & REASON_ILLEGAL) would be the obvious thing here, * but there seems to be a hardware bug on the 405GP (RevD) * that means ESR is sometimes set incorrectly - either to * ESR_DST (!?) or 0. In the process of chasing this with the * hardware people - not sure if it can happen on any illegal * instruction or only on FP instructions, whether there is a * pattern to occurrences etc. 
-dgibson 31/Mar/2003 */ switch (do_mathemu(regs)) { case 0: emulate_single_step(regs); return; case 1: { int code = 0; code = __parse_fpscr(current->thread.fpscr.val); _exception(SIGFPE, regs, code, regs->nip); return; } case -EFAULT: _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); return; } /* fall through on any other errors */ #endif /* CONFIG_MATH_EMULATION */ /* Try to emulate it if we should. */ if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { switch (emulate_instruction(regs)) { case 0: regs->nip += 4; emulate_single_step(regs); return; case -EFAULT: _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); return; } } if (reason & REASON_PRIVILEGED) _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); else _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); } void alignment_exception(struct pt_regs *regs) { int sig, code, fixed = 0; /* we don't implement logging of alignment exceptions */ if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) fixed = fix_alignment(regs); if (fixed == 1) { regs->nip += 4; /* skip over emulated instruction */ emulate_single_step(regs); return; } /* Operand address was bad */ if (fixed == -EFAULT) { sig = SIGSEGV; code = SEGV_ACCERR; } else { sig = SIGBUS; code = BUS_ADRALN; } if (user_mode(regs)) _exception(sig, regs, code, regs->dar); else bad_page_fault(regs, regs->dar, sig); } void StackOverflow(struct pt_regs *regs) { printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", current, regs->gpr[1]); debugger(regs); show_regs(regs); panic("kernel stack overflow"); } void nonrecoverable_exception(struct pt_regs *regs) { printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", regs->nip, regs->msr); debugger(regs); die("nonrecoverable exception", regs, SIGKILL); } void trace_syscall(struct pt_regs *regs) { printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0], regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); } void 
kernel_fp_unavailable_exception(struct pt_regs *regs) { printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " "%lx at %lx\n", regs->trap, regs->nip); die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); } void altivec_unavailable_exception(struct pt_regs *regs) { if (user_mode(regs)) { /* A user program has executed an altivec instruction, but this kernel doesn't support altivec. */ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); return; } printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " "%lx at %lx\n", regs->trap, regs->nip); die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); } void vsx_unavailable_exception(struct pt_regs *regs) { if (user_mode(regs)) { /* A user program has executed an vsx instruction, but this kernel doesn't support vsx. */ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); return; } printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception " "%lx at %lx\n", regs->trap, regs->nip); die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); } void performance_monitor_exception(struct pt_regs *regs) { __get_cpu_var(irq_stat).pmu_irqs++; perf_irq(regs); } #ifdef CONFIG_8xx void SoftwareEmulation(struct pt_regs *regs) { extern int do_mathemu(struct pt_regs *); extern int Soft_emulate_8xx(struct pt_regs *); #if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU) int errcode; #endif CHECK_FULL_REGS(regs); if (!user_mode(regs)) { debugger(regs); die("Kernel Mode Software FPU Emulation", regs, SIGFPE); } #ifdef CONFIG_MATH_EMULATION errcode = do_mathemu(regs); if (errcode >= 0) PPC_WARN_EMULATED(math, regs); switch (errcode) { case 0: emulate_single_step(regs); return; case 1: { int code = 0; code = __parse_fpscr(current->thread.fpscr.val); _exception(SIGFPE, regs, code, regs->nip); return; } case -EFAULT: _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); return; default: _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); return; } #elif defined(CONFIG_8XX_MINIMAL_FPEMU) errcode 
= Soft_emulate_8xx(regs); if (errcode >= 0) PPC_WARN_EMULATED(8xx, regs); switch (errcode) { case 0: emulate_single_step(regs); return; case 1: _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); return; case -EFAULT: _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); return; } #else _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); #endif } #endif /* CONFIG_8xx */ #ifdef CONFIG_PPC_ADV_DEBUG_REGS static void handle_debug(struct pt_regs *regs, unsigned long debug_status) { int changed = 0; /* * Determine the cause of the debug event, clear the * event flags and send a trap to the handler. Torez */ if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE current->thread.dbcr2 &= ~DBCR2_DAC12MODE; #endif do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT, 5); changed |= 0x01; } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT, 6); changed |= 0x01; } else if (debug_status & DBSR_IAC1) { current->thread.dbcr0 &= ~DBCR0_IAC1; dbcr_iac_range(current) &= ~DBCR_IAC12MODE; do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT, 1); changed |= 0x01; } else if (debug_status & DBSR_IAC2) { current->thread.dbcr0 &= ~DBCR0_IAC2; do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT, 2); changed |= 0x01; } else if (debug_status & DBSR_IAC3) { current->thread.dbcr0 &= ~DBCR0_IAC3; dbcr_iac_range(current) &= ~DBCR_IAC34MODE; do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT, 3); changed |= 0x01; } else if (debug_status & DBSR_IAC4) { current->thread.dbcr0 &= ~DBCR0_IAC4; do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT, 4); changed |= 0x01; } /* * At the point this routine was called, the MSR(DE) was turned off. * Check all other debug flags and see if that bit needs to be turned * back on or not. 
*/ if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1)) regs->msr |= MSR_DE; else /* Make sure the IDM flag is off */ current->thread.dbcr0 &= ~DBCR0_IDM; if (changed & 0x01) mtspr(SPRN_DBCR0, current->thread.dbcr0); } void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) { current->thread.dbsr = debug_status; /* Hack alert: On BookE, Branch Taken stops on the branch itself, while * on server, it stops on the target of the branch. In order to simulate * the server behaviour, we thus restart right away with a single step * instead of stopping here when hitting a BT */ if (debug_status & DBSR_BT) { regs->msr &= ~MSR_DE; /* Disable BT */ mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT); /* Clear the BT event */ mtspr(SPRN_DBSR, DBSR_BT); /* Do the single step trick only when coming from userspace */ if (user_mode(regs)) { current->thread.dbcr0 &= ~DBCR0_BT; current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; regs->msr |= MSR_DE; return; } if (notify_die(DIE_SSTEP, "block_step", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) { return; } if (debugger_sstep(regs)) return; } else if (debug_status & DBSR_IC) { /* Instruction complete */ regs->msr &= ~MSR_DE; /* Disable instruction completion */ mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); /* Clear the instruction completion event */ mtspr(SPRN_DBSR, DBSR_IC); if (notify_die(DIE_SSTEP, "single_step", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) { return; } if (debugger_sstep(regs)) return; if (user_mode(regs)) { current->thread.dbcr0 &= ~DBCR0_IC; #ifdef CONFIG_PPC_ADV_DEBUG_REGS if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1)) regs->msr |= MSR_DE; else /* Make sure the IDM bit is off */ current->thread.dbcr0 &= ~DBCR0_IDM; #endif } _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); } else handle_debug(regs, debug_status); } #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ #if !defined(CONFIG_TAU_INT) void TAUException(struct pt_regs *regs) { printk("TAU trap at PC: %lx, MSR: %lx, 
vector=%lx %s\n", regs->nip, regs->msr, regs->trap, print_tainted()); } #endif /* CONFIG_INT_TAU */ #ifdef CONFIG_ALTIVEC void altivec_assist_exception(struct pt_regs *regs) { int err; if (!user_mode(regs)) { printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" " at %lx\n", regs->nip); die("Kernel VMX/Altivec assist exception", regs, SIGILL); } flush_altivec_to_thread(current); PPC_WARN_EMULATED(altivec, regs); err = emulate_altivec(regs); if (err == 0) { regs->nip += 4; /* skip emulated instruction */ emulate_single_step(regs); return; } if (err == -EFAULT) { /* got an error reading the instruction */ _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); } else { /* didn't recognize the instruction */ /* XXX quick hack for now: set the non-Java bit in the VSCR */ printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " "in %s at %lx\n", current->comm, regs->nip); current->thread.vscr.u[3] |= 0x10000; } } #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX void vsx_assist_exception(struct pt_regs *regs) { if (!user_mode(regs)) { printk(KERN_EMERG "VSX assist exception in kernel mode" " at %lx\n", regs->nip); die("Kernel VSX assist exception", regs, SIGILL); } flush_vsx_to_thread(current); printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip); _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); } #endif /* CONFIG_VSX */ #ifdef CONFIG_FSL_BOOKE void CacheLockingException(struct pt_regs *regs, unsigned long address, unsigned long error_code) { /* We treat cache locking instructions from the user * as priv ops, in the future we could try to do * something smarter */ if (error_code & (ESR_DLK|ESR_ILK)) _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); return; } #endif /* CONFIG_FSL_BOOKE */ #ifdef CONFIG_SPE void SPEFloatingPointException(struct pt_regs *regs) { extern int do_spe_mathemu(struct pt_regs *regs); unsigned long spefscr; int fpexc_mode; int code = 0; int err; preempt_disable(); if (regs->msr & MSR_SPE) giveup_spe(current); preempt_enable(); 
spefscr = current->thread.spefscr; fpexc_mode = current->thread.fpexc_mode; if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { code = FPE_FLTOVF; } else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { code = FPE_FLTUND; } else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) code = FPE_FLTDIV; else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { code = FPE_FLTINV; } else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) code = FPE_FLTRES; err = do_spe_mathemu(regs); if (err == 0) { regs->nip += 4; /* skip emulated instruction */ emulate_single_step(regs); return; } if (err == -EFAULT) { /* got an error reading the instruction */ _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); } else if (err == -EINVAL) { /* didn't recognize the instruction */ printk(KERN_ERR "unrecognized spe instruction " "in %s at %lx\n", current->comm, regs->nip); } else { _exception(SIGFPE, regs, code, regs->nip); } return; } void SPEFloatingPointRoundException(struct pt_regs *regs) { extern int speround_handler(struct pt_regs *regs); int err; preempt_disable(); if (regs->msr & MSR_SPE) giveup_spe(current); preempt_enable(); regs->nip -= 4; err = speround_handler(regs); if (err == 0) { regs->nip += 4; /* skip emulated instruction */ emulate_single_step(regs); return; } if (err == -EFAULT) { /* got an error reading the instruction */ _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); } else if (err == -EINVAL) { /* didn't recognize the instruction */ printk(KERN_ERR "unrecognized spe instruction " "in %s at %lx\n", current->comm, regs->nip); } else { _exception(SIGFPE, regs, 0, regs->nip); return; } } #endif /* * We enter here if we get an unrecoverable exception, that is, one * that happened at a point where the RI (recoverable interrupt) bit * in the MSR is 0. This indicates that SRR0/1 are live, and that * we therefore lost state by taking this exception. 
*/ void unrecoverable_exception(struct pt_regs *regs) { printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n", regs->trap, regs->nip); die("Unrecoverable exception", regs, SIGABRT); } #ifdef CONFIG_BOOKE_WDT /* * Default handler for a Watchdog exception, * spins until a reboot occurs */ void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) { /* Generic WatchdogHandler, implement your own */ mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); return; } void WatchdogException(struct pt_regs *regs) { printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); WatchdogHandler(regs); } #endif /* * We enter here if we discover during exception entry that we are * running in supervisor mode with a userspace value in the stack pointer. */ void kernel_bad_stack(struct pt_regs *regs) { printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", regs->gpr[1], regs->nip); die("Bad kernel stack pointer", regs, SIGABRT); } void __init trap_init(void) { } #ifdef CONFIG_PPC_EMULATED_STATS #define WARN_EMULATED_SETUP(type) .type = { .name = #type } struct ppc_emulated ppc_emulated = { #ifdef CONFIG_ALTIVEC WARN_EMULATED_SETUP(altivec), #endif WARN_EMULATED_SETUP(dcba), WARN_EMULATED_SETUP(dcbz), WARN_EMULATED_SETUP(fp_pair), WARN_EMULATED_SETUP(isel), WARN_EMULATED_SETUP(mcrxr), WARN_EMULATED_SETUP(mfpvr), WARN_EMULATED_SETUP(multiple), WARN_EMULATED_SETUP(popcntb), WARN_EMULATED_SETUP(spe), WARN_EMULATED_SETUP(string), WARN_EMULATED_SETUP(unaligned), #ifdef CONFIG_MATH_EMULATION WARN_EMULATED_SETUP(math), #elif defined(CONFIG_8XX_MINIMAL_FPEMU) WARN_EMULATED_SETUP(8xx), #endif #ifdef CONFIG_VSX WARN_EMULATED_SETUP(vsx), #endif #ifdef CONFIG_PPC64 WARN_EMULATED_SETUP(mfdscr), WARN_EMULATED_SETUP(mtdscr), #endif }; u32 ppc_warn_emulated; void ppc_warn_emulated_print(const char *type) { pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, type); } static int __init ppc_warn_emulated_init(void) { struct dentry *dir, *d; unsigned int i; struct 
ppc_emulated_entry *entries = (void *)&ppc_emulated; if (!powerpc_debugfs_root) return -ENODEV; dir = debugfs_create_dir("emulated_instructions", powerpc_debugfs_root); if (!dir) return -ENOMEM; d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir, &ppc_warn_emulated); if (!d) goto fail; for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir, (u32 *)&entries[i].val.counter); if (!d) goto fail; } return 0; fail: debugfs_remove_recursive(dir); return -ENOMEM; } device_initcall(ppc_warn_emulated_init); #endif /* CONFIG_PPC_EMULATED_STATS */
gpl-2.0
IxLabs/net-next
drivers/net/ethernet/ti/cpsw_ale.c
2371
17526
/* * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine * * Copyright (C) 2012 Texas Instruments * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/io.h> #include <linux/stat.h> #include <linux/sysfs.h> #include <linux/etherdevice.h> #include "cpsw_ale.h" #define BITMASK(bits) (BIT(bits) - 1) #define ALE_ENTRY_BITS 68 #define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32) #define ALE_VERSION_MAJOR(rev) ((rev >> 8) & 0xff) #define ALE_VERSION_MINOR(rev) (rev & 0xff) /* ALE Registers */ #define ALE_IDVER 0x00 #define ALE_CONTROL 0x08 #define ALE_PRESCALE 0x10 #define ALE_UNKNOWNVLAN 0x18 #define ALE_TABLE_CONTROL 0x20 #define ALE_TABLE 0x34 #define ALE_PORTCTL 0x40 #define ALE_TABLE_WRITE BIT(31) #define ALE_TYPE_FREE 0 #define ALE_TYPE_ADDR 1 #define ALE_TYPE_VLAN 2 #define ALE_TYPE_VLAN_ADDR 3 #define ALE_UCAST_PERSISTANT 0 #define ALE_UCAST_UNTOUCHED 1 #define ALE_UCAST_OUI 2 #define ALE_UCAST_TOUCHED 3 static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) { int idx; idx = start / 32; start -= idx * 32; idx = 2 - idx; /* flip */ return (ale_entry[idx] >> start) & BITMASK(bits); } static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits, u32 value) { int idx; value &= BITMASK(bits); idx = start / 32; start -= idx * 32; idx = 2 - idx; /* flip */ ale_entry[idx] &= ~(BITMASK(bits) << start); ale_entry[idx] |= (value << start); } #define DEFINE_ALE_FIELD(name, start, bits) \ static inline 
int cpsw_ale_get_##name(u32 *ale_entry) \ { \ return cpsw_ale_get_field(ale_entry, start, bits); \ } \ static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \ { \ cpsw_ale_set_field(ale_entry, start, bits, value); \ } DEFINE_ALE_FIELD(entry_type, 60, 2) DEFINE_ALE_FIELD(vlan_id, 48, 12) DEFINE_ALE_FIELD(mcast_state, 62, 2) DEFINE_ALE_FIELD(port_mask, 66, 3) DEFINE_ALE_FIELD(super, 65, 1) DEFINE_ALE_FIELD(ucast_type, 62, 2) DEFINE_ALE_FIELD(port_num, 66, 2) DEFINE_ALE_FIELD(blocked, 65, 1) DEFINE_ALE_FIELD(secure, 64, 1) DEFINE_ALE_FIELD(vlan_untag_force, 24, 3) DEFINE_ALE_FIELD(vlan_reg_mcast, 16, 3) DEFINE_ALE_FIELD(vlan_unreg_mcast, 8, 3) DEFINE_ALE_FIELD(vlan_member_list, 0, 3) DEFINE_ALE_FIELD(mcast, 40, 1) /* The MAC address field in the ALE entry cannot be macroized as above */ static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr) { int i; for (i = 0; i < 6; i++) addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8); } static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr) { int i; for (i = 0; i < 6; i++) cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]); } static int cpsw_ale_read(struct cpsw_ale *ale, int idx, u32 *ale_entry) { int i; WARN_ON(idx > ale->params.ale_entries); __raw_writel(idx, ale->params.ale_regs + ALE_TABLE_CONTROL); for (i = 0; i < ALE_ENTRY_WORDS; i++) ale_entry[i] = __raw_readl(ale->params.ale_regs + ALE_TABLE + 4 * i); return idx; } static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry) { int i; WARN_ON(idx > ale->params.ale_entries); for (i = 0; i < ALE_ENTRY_WORDS; i++) __raw_writel(ale_entry[i], ale->params.ale_regs + ALE_TABLE + 4 * i); __raw_writel(idx | ALE_TABLE_WRITE, ale->params.ale_regs + ALE_TABLE_CONTROL); return idx; } int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS]; int type, idx; for (idx = 0; idx < ale->params.ale_entries; idx++) { u8 entry_addr[6]; cpsw_ale_read(ale, idx, ale_entry); type = 
cpsw_ale_get_entry_type(ale_entry); if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR) continue; if (cpsw_ale_get_vlan_id(ale_entry) != vid) continue; cpsw_ale_get_addr(ale_entry, entry_addr); if (memcmp(entry_addr, addr, 6) == 0) return idx; } return -ENOENT; } int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS]; int type, idx; for (idx = 0; idx < ale->params.ale_entries; idx++) { cpsw_ale_read(ale, idx, ale_entry); type = cpsw_ale_get_entry_type(ale_entry); if (type != ALE_TYPE_VLAN) continue; if (cpsw_ale_get_vlan_id(ale_entry) == vid) return idx; } return -ENOENT; } static int cpsw_ale_match_free(struct cpsw_ale *ale) { u32 ale_entry[ALE_ENTRY_WORDS]; int type, idx; for (idx = 0; idx < ale->params.ale_entries; idx++) { cpsw_ale_read(ale, idx, ale_entry); type = cpsw_ale_get_entry_type(ale_entry); if (type == ALE_TYPE_FREE) return idx; } return -ENOENT; } static int cpsw_ale_find_ageable(struct cpsw_ale *ale) { u32 ale_entry[ALE_ENTRY_WORDS]; int type, idx; for (idx = 0; idx < ale->params.ale_entries; idx++) { cpsw_ale_read(ale, idx, ale_entry); type = cpsw_ale_get_entry_type(ale_entry); if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR) continue; if (cpsw_ale_get_mcast(ale_entry)) continue; type = cpsw_ale_get_ucast_type(ale_entry); if (type != ALE_UCAST_PERSISTANT && type != ALE_UCAST_OUI) return idx; } return -ENOENT; } static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry, int port_mask) { int mask; mask = cpsw_ale_get_port_mask(ale_entry); if ((mask & port_mask) == 0) return; /* ports dont intersect, not interested */ mask &= ~port_mask; /* free if only remaining port is host port */ if (mask) cpsw_ale_set_port_mask(ale_entry, mask); else cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); } int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask) { u32 ale_entry[ALE_ENTRY_WORDS]; int ret, idx; for (idx = 0; idx < ale->params.ale_entries; idx++) { cpsw_ale_read(ale, idx, ale_entry); ret 
= cpsw_ale_get_entry_type(ale_entry); if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR) continue; if (cpsw_ale_get_mcast(ale_entry)) { u8 addr[6]; cpsw_ale_get_addr(ale_entry, addr); if (!is_broadcast_ether_addr(addr)) cpsw_ale_flush_mcast(ale, ale_entry, port_mask); } cpsw_ale_write(ale, idx, ale_entry); } return 0; } static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry, int port_mask) { int port; port = cpsw_ale_get_port_num(ale_entry); if ((BIT(port) & port_mask) == 0) return; /* ports dont intersect, not interested */ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); } int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask) { u32 ale_entry[ALE_ENTRY_WORDS]; int ret, idx; for (idx = 0; idx < ale->params.ale_entries; idx++) { cpsw_ale_read(ale, idx, ale_entry); ret = cpsw_ale_get_entry_type(ale_entry); if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR) continue; if (cpsw_ale_get_mcast(ale_entry)) cpsw_ale_flush_mcast(ale, ale_entry, port_mask); else cpsw_ale_flush_ucast(ale, ale_entry, port_mask); cpsw_ale_write(ale, idx, ale_entry); } return 0; } static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry, int flags, u16 vid) { if (flags & ALE_VLAN) { cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR); cpsw_ale_set_vlan_id(ale_entry, vid); } else { cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR); } } int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid); cpsw_ale_set_addr(ale_entry, addr); cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT); cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0); cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); cpsw_ale_set_port_num(ale_entry, port); idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? 
vid : 0); if (idx < 0) idx = cpsw_ale_match_free(ale); if (idx < 0) idx = cpsw_ale_find_ageable(ale); if (idx < 0) return -ENOMEM; cpsw_ale_write(ale, idx, ale_entry); return 0; } int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx < 0) return -ENOENT; cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); cpsw_ale_write(ale, idx, ale_entry); return 0; } int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, int flags, u16 vid, int mcast_state) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx, mask; idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx >= 0) cpsw_ale_read(ale, idx, ale_entry); cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid); cpsw_ale_set_addr(ale_entry, addr); cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); cpsw_ale_set_mcast_state(ale_entry, mcast_state); mask = cpsw_ale_get_port_mask(ale_entry); port_mask |= mask; cpsw_ale_set_port_mask(ale_entry, port_mask); if (idx < 0) idx = cpsw_ale_match_free(ale); if (idx < 0) idx = cpsw_ale_find_ageable(ale); if (idx < 0) return -ENOMEM; cpsw_ale_write(ale, idx, ale_entry); return 0; } int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? 
vid : 0); if (idx < 0) return -EINVAL; cpsw_ale_read(ale, idx, ale_entry); if (port_mask) cpsw_ale_set_port_mask(ale_entry, port_mask); else cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); cpsw_ale_write(ale, idx, ale_entry); return 0; } int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, int reg_mcast, int unreg_mcast) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; idx = cpsw_ale_match_vlan(ale, vid); if (idx >= 0) cpsw_ale_read(ale, idx, ale_entry); cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN); cpsw_ale_set_vlan_id(ale_entry, vid); cpsw_ale_set_vlan_untag_force(ale_entry, untag); cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast); cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); cpsw_ale_set_vlan_member_list(ale_entry, port); if (idx < 0) idx = cpsw_ale_match_free(ale); if (idx < 0) idx = cpsw_ale_find_ageable(ale); if (idx < 0) return -ENOMEM; cpsw_ale_write(ale, idx, ale_entry); return 0; } int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; idx = cpsw_ale_match_vlan(ale, vid); if (idx < 0) return -ENOENT; cpsw_ale_read(ale, idx, ale_entry); if (port_mask) cpsw_ale_set_vlan_member_list(ale_entry, port_mask); else cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); cpsw_ale_write(ale, idx, ale_entry); return 0; } struct ale_control_info { const char *name; int offset, port_offset; int shift, port_shift; int bits; }; static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = { [ALE_ENABLE] = { .name = "enable", .offset = ALE_CONTROL, .port_offset = 0, .shift = 31, .port_shift = 0, .bits = 1, }, [ALE_CLEAR] = { .name = "clear", .offset = ALE_CONTROL, .port_offset = 0, .shift = 30, .port_shift = 0, .bits = 1, }, [ALE_AGEOUT] = { .name = "ageout", .offset = ALE_CONTROL, .port_offset = 0, .shift = 29, .port_shift = 0, .bits = 1, }, [ALE_VLAN_NOLEARN] = { .name = "vlan_nolearn", .offset = ALE_CONTROL, .port_offset = 0, .shift = 7, .port_shift = 
0, .bits = 1, }, [ALE_NO_PORT_VLAN] = { .name = "no_port_vlan", .offset = ALE_CONTROL, .port_offset = 0, .shift = 6, .port_shift = 0, .bits = 1, }, [ALE_OUI_DENY] = { .name = "oui_deny", .offset = ALE_CONTROL, .port_offset = 0, .shift = 5, .port_shift = 0, .bits = 1, }, [ALE_BYPASS] = { .name = "bypass", .offset = ALE_CONTROL, .port_offset = 0, .shift = 4, .port_shift = 0, .bits = 1, }, [ALE_RATE_LIMIT_TX] = { .name = "rate_limit_tx", .offset = ALE_CONTROL, .port_offset = 0, .shift = 3, .port_shift = 0, .bits = 1, }, [ALE_VLAN_AWARE] = { .name = "vlan_aware", .offset = ALE_CONTROL, .port_offset = 0, .shift = 2, .port_shift = 0, .bits = 1, }, [ALE_AUTH_ENABLE] = { .name = "auth_enable", .offset = ALE_CONTROL, .port_offset = 0, .shift = 1, .port_shift = 0, .bits = 1, }, [ALE_RATE_LIMIT] = { .name = "rate_limit", .offset = ALE_CONTROL, .port_offset = 0, .shift = 0, .port_shift = 0, .bits = 1, }, [ALE_PORT_STATE] = { .name = "port_state", .offset = ALE_PORTCTL, .port_offset = 4, .shift = 0, .port_shift = 0, .bits = 2, }, [ALE_PORT_DROP_UNTAGGED] = { .name = "drop_untagged", .offset = ALE_PORTCTL, .port_offset = 4, .shift = 2, .port_shift = 0, .bits = 1, }, [ALE_PORT_DROP_UNKNOWN_VLAN] = { .name = "drop_unknown", .offset = ALE_PORTCTL, .port_offset = 4, .shift = 3, .port_shift = 0, .bits = 1, }, [ALE_PORT_NOLEARN] = { .name = "nolearn", .offset = ALE_PORTCTL, .port_offset = 4, .shift = 4, .port_shift = 0, .bits = 1, }, [ALE_PORT_MCAST_LIMIT] = { .name = "mcast_limit", .offset = ALE_PORTCTL, .port_offset = 4, .shift = 16, .port_shift = 0, .bits = 8, }, [ALE_PORT_BCAST_LIMIT] = { .name = "bcast_limit", .offset = ALE_PORTCTL, .port_offset = 4, .shift = 24, .port_shift = 0, .bits = 8, }, [ALE_PORT_UNKNOWN_VLAN_MEMBER] = { .name = "unknown_vlan_member", .offset = ALE_UNKNOWNVLAN, .port_offset = 0, .shift = 0, .port_shift = 0, .bits = 6, }, [ALE_PORT_UNKNOWN_MCAST_FLOOD] = { .name = "unknown_mcast_flood", .offset = ALE_UNKNOWNVLAN, .port_offset = 0, .shift = 8, .port_shift = 
0, .bits = 6, }, [ALE_PORT_UNKNOWN_REG_MCAST_FLOOD] = { .name = "unknown_reg_flood", .offset = ALE_UNKNOWNVLAN, .port_offset = 0, .shift = 16, .port_shift = 0, .bits = 6, }, [ALE_PORT_UNTAGGED_EGRESS] = { .name = "untagged_egress", .offset = ALE_UNKNOWNVLAN, .port_offset = 0, .shift = 24, .port_shift = 0, .bits = 6, }, }; int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control, int value) { const struct ale_control_info *info; int offset, shift; u32 tmp, mask; if (control < 0 || control >= ARRAY_SIZE(ale_controls)) return -EINVAL; info = &ale_controls[control]; if (info->port_offset == 0 && info->port_shift == 0) port = 0; /* global, port is a dont care */ if (port < 0 || port > ale->params.ale_ports) return -EINVAL; mask = BITMASK(info->bits); if (value & ~mask) return -EINVAL; offset = info->offset + (port * info->port_offset); shift = info->shift + (port * info->port_shift); tmp = __raw_readl(ale->params.ale_regs + offset); tmp = (tmp & ~(mask << shift)) | (value << shift); __raw_writel(tmp, ale->params.ale_regs + offset); return 0; } int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control) { const struct ale_control_info *info; int offset, shift; u32 tmp; if (control < 0 || control >= ARRAY_SIZE(ale_controls)) return -EINVAL; info = &ale_controls[control]; if (info->port_offset == 0 && info->port_shift == 0) port = 0; /* global, port is a dont care */ if (port < 0 || port > ale->params.ale_ports) return -EINVAL; offset = info->offset + (port * info->port_offset); shift = info->shift + (port * info->port_shift); tmp = __raw_readl(ale->params.ale_regs + offset) >> shift; return tmp & BITMASK(info->bits); } static void cpsw_ale_timer(unsigned long arg) { struct cpsw_ale *ale = (struct cpsw_ale *)arg; cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); if (ale->ageout) { ale->timer.expires = jiffies + ale->ageout; add_timer(&ale->timer); } } int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout) { del_timer_sync(&ale->timer); ale->ageout = 
ageout * HZ; if (ale->ageout) { ale->timer.expires = jiffies + ale->ageout; add_timer(&ale->timer); } return 0; } void cpsw_ale_start(struct cpsw_ale *ale) { u32 rev; rev = __raw_readl(ale->params.ale_regs + ALE_IDVER); dev_dbg(ale->params.dev, "initialized cpsw ale revision %d.%d\n", ALE_VERSION_MAJOR(rev), ALE_VERSION_MINOR(rev)); cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1); cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); init_timer(&ale->timer); ale->timer.data = (unsigned long)ale; ale->timer.function = cpsw_ale_timer; if (ale->ageout) { ale->timer.expires = jiffies + ale->ageout; add_timer(&ale->timer); } } void cpsw_ale_stop(struct cpsw_ale *ale) { del_timer_sync(&ale->timer); } struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) { struct cpsw_ale *ale; ale = kzalloc(sizeof(*ale), GFP_KERNEL); if (!ale) return NULL; ale->params = *params; ale->ageout = ale->params.ale_ageout * HZ; return ale; } int cpsw_ale_destroy(struct cpsw_ale *ale) { if (!ale) return -EINVAL; cpsw_ale_stop(ale); cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); kfree(ale); return 0; }
gpl-2.0