repo_name
string
path
string
copies
string
size
string
content
string
license
string
MoltenMotherBoard/platform_kernel_samsung_msm7x27
drivers/ieee1394/hosts.c
1688
6421
/* * IEEE 1394 for Linux * * Low level (host adapter) management. * * Copyright (C) 1999 Andreas E. Bombe * Copyright (C) 1999 Emanuel Pirker * * This code is licensed under the GPL. See the file COPYING in the root * directory of the kernel sources for details. */ #include <linux/module.h> #include <linux/types.h> #include <linux/list.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/timer.h> #include <linux/jiffies.h> #include <linux/mutex.h> #include "csr1212.h" #include "ieee1394.h" #include "ieee1394_types.h" #include "hosts.h" #include "ieee1394_core.h" #include "highlevel.h" #include "nodemgr.h" #include "csr.h" #include "config_roms.h" static void delayed_reset_bus(struct work_struct *work) { struct hpsb_host *host = container_of(work, struct hpsb_host, delayed_reset.work); u8 generation = host->csr.generation + 1; /* The generation field rolls over to 2 rather than 0 per IEEE * 1394a-2000. */ if (generation > 0xf || generation < 2) generation = 2; csr_set_bus_info_generation(host->csr.rom, generation); if (csr1212_generate_csr_image(host->csr.rom) != CSR1212_SUCCESS) { /* CSR image creation failed. * Reset generation field and do not issue a bus reset. 
*/ csr_set_bus_info_generation(host->csr.rom, host->csr.generation); return; } host->csr.generation = generation; host->update_config_rom = 0; if (host->driver->set_hw_config_rom) host->driver->set_hw_config_rom(host, host->csr.rom->bus_info_data); host->csr.gen_timestamp[host->csr.generation] = jiffies; hpsb_reset_bus(host, SHORT_RESET); } static int dummy_transmit_packet(struct hpsb_host *h, struct hpsb_packet *p) { return 0; } static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg) { return -1; } static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg) { return -1; } static struct hpsb_host_driver dummy_driver = { .transmit_packet = dummy_transmit_packet, .devctl = dummy_devctl, .isoctl = dummy_isoctl }; static int alloc_hostnum_cb(struct hpsb_host *host, void *__data) { int *hostnum = __data; if (host->id == *hostnum) return 1; return 0; } static DEFINE_MUTEX(host_num_alloc); /** * hpsb_alloc_host - allocate a new host controller. * @drv: the driver that will manage the host controller * @extra: number of extra bytes to allocate for the driver * * Allocate a &hpsb_host and initialize the general subsystem specific * fields. If the driver needs to store per host data, as drivers * usually do, the amount of memory required can be specified by the * @extra parameter. Once allocated, the driver should initialize the * driver specific parts, enable the controller and make it available * to the general subsystem using hpsb_add_host(). * * Return Value: a pointer to the &hpsb_host if successful, %NULL if * no memory was available. 
*/ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, struct device *dev) { struct hpsb_host *h; int i; int hostnum = 0; h = kzalloc(sizeof(*h) + extra, GFP_KERNEL); if (!h) return NULL; h->csr.rom = csr1212_create_csr(&csr_bus_ops, CSR_BUS_INFO_SIZE, h); if (!h->csr.rom) goto fail; h->hostdata = h + 1; h->driver = drv; INIT_LIST_HEAD(&h->pending_packets); INIT_LIST_HEAD(&h->addr_space); for (i = 2; i < 16; i++) h->csr.gen_timestamp[i] = jiffies - 60 * HZ; atomic_set(&h->generation, 0); INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus); init_timer(&h->timeout); h->timeout.data = (unsigned long) h; h->timeout.function = abort_timedouts; h->timeout_interval = HZ / 20; /* 50ms, half of minimum SPLIT_TIMEOUT */ h->topology_map = h->csr.topology_map + 3; h->speed_map = (u8 *)(h->csr.speed_map + 2); mutex_lock(&host_num_alloc); while (nodemgr_for_each_host(&hostnum, alloc_hostnum_cb)) hostnum++; mutex_unlock(&host_num_alloc); h->id = hostnum; memcpy(&h->device, &nodemgr_dev_template_host, sizeof(h->device)); h->device.parent = dev; set_dev_node(&h->device, dev_to_node(dev)); dev_set_name(&h->device, "fw-host%d", h->id); h->host_dev.parent = &h->device; h->host_dev.class = &hpsb_host_class; dev_set_name(&h->host_dev, "fw-host%d", h->id); if (device_register(&h->device)) goto fail; if (device_register(&h->host_dev)) { device_unregister(&h->device); goto fail; } get_device(&h->device); return h; fail: kfree(h); return NULL; } int hpsb_add_host(struct hpsb_host *host) { if (hpsb_default_host_entry(host)) return -ENOMEM; highlevel_add_host(host); return 0; } void hpsb_resume_host(struct hpsb_host *host) { if (host->driver->set_hw_config_rom) host->driver->set_hw_config_rom(host, host->csr.rom->bus_info_data); host->driver->devctl(host, RESET_BUS, SHORT_RESET); } void hpsb_remove_host(struct hpsb_host *host) { host->is_shutdown = 1; cancel_delayed_work(&host->delayed_reset); flush_scheduled_work(); host->driver = &dummy_driver; 
highlevel_remove_host(host); device_unregister(&host->host_dev); device_unregister(&host->device); } /** * hpsb_update_config_rom_image - updates configuration ROM image of a host * * Updates the configuration ROM image of a host. rom_version must be the * current version, otherwise it will fail with return value -1. If this * host does not support config-rom-update, it will return -%EINVAL. * Return value 0 indicates success. */ int hpsb_update_config_rom_image(struct hpsb_host *host) { unsigned long reset_delay; int next_gen = host->csr.generation + 1; if (!host->update_config_rom) return -EINVAL; if (next_gen > 0xf) next_gen = 2; /* Stop the delayed interrupt, we're about to change the config rom and * it would be a waste to do a bus reset twice. */ cancel_delayed_work(&host->delayed_reset); /* IEEE 1394a-2000 prohibits using the same generation number * twice in a 60 second period. */ if (time_before(jiffies, host->csr.gen_timestamp[next_gen] + 60 * HZ)) /* Wait 60 seconds from the last time this generation number was * used. */ reset_delay = (60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies; else /* Wait 1 second in case some other code wants to change the * Config ROM in the near future. */ reset_delay = HZ; PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus); schedule_delayed_work(&host->delayed_reset, reset_delay); return 0; }
gpl-2.0
fentensoft/kernel_xt701
drivers/ieee1394/hosts.c
1688
6421
/* * IEEE 1394 for Linux * * Low level (host adapter) management. * * Copyright (C) 1999 Andreas E. Bombe * Copyright (C) 1999 Emanuel Pirker * * This code is licensed under the GPL. See the file COPYING in the root * directory of the kernel sources for details. */ #include <linux/module.h> #include <linux/types.h> #include <linux/list.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/timer.h> #include <linux/jiffies.h> #include <linux/mutex.h> #include "csr1212.h" #include "ieee1394.h" #include "ieee1394_types.h" #include "hosts.h" #include "ieee1394_core.h" #include "highlevel.h" #include "nodemgr.h" #include "csr.h" #include "config_roms.h" static void delayed_reset_bus(struct work_struct *work) { struct hpsb_host *host = container_of(work, struct hpsb_host, delayed_reset.work); u8 generation = host->csr.generation + 1; /* The generation field rolls over to 2 rather than 0 per IEEE * 1394a-2000. */ if (generation > 0xf || generation < 2) generation = 2; csr_set_bus_info_generation(host->csr.rom, generation); if (csr1212_generate_csr_image(host->csr.rom) != CSR1212_SUCCESS) { /* CSR image creation failed. * Reset generation field and do not issue a bus reset. 
*/ csr_set_bus_info_generation(host->csr.rom, host->csr.generation); return; } host->csr.generation = generation; host->update_config_rom = 0; if (host->driver->set_hw_config_rom) host->driver->set_hw_config_rom(host, host->csr.rom->bus_info_data); host->csr.gen_timestamp[host->csr.generation] = jiffies; hpsb_reset_bus(host, SHORT_RESET); } static int dummy_transmit_packet(struct hpsb_host *h, struct hpsb_packet *p) { return 0; } static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg) { return -1; } static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg) { return -1; } static struct hpsb_host_driver dummy_driver = { .transmit_packet = dummy_transmit_packet, .devctl = dummy_devctl, .isoctl = dummy_isoctl }; static int alloc_hostnum_cb(struct hpsb_host *host, void *__data) { int *hostnum = __data; if (host->id == *hostnum) return 1; return 0; } static DEFINE_MUTEX(host_num_alloc); /** * hpsb_alloc_host - allocate a new host controller. * @drv: the driver that will manage the host controller * @extra: number of extra bytes to allocate for the driver * * Allocate a &hpsb_host and initialize the general subsystem specific * fields. If the driver needs to store per host data, as drivers * usually do, the amount of memory required can be specified by the * @extra parameter. Once allocated, the driver should initialize the * driver specific parts, enable the controller and make it available * to the general subsystem using hpsb_add_host(). * * Return Value: a pointer to the &hpsb_host if successful, %NULL if * no memory was available. 
*/ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, struct device *dev) { struct hpsb_host *h; int i; int hostnum = 0; h = kzalloc(sizeof(*h) + extra, GFP_KERNEL); if (!h) return NULL; h->csr.rom = csr1212_create_csr(&csr_bus_ops, CSR_BUS_INFO_SIZE, h); if (!h->csr.rom) goto fail; h->hostdata = h + 1; h->driver = drv; INIT_LIST_HEAD(&h->pending_packets); INIT_LIST_HEAD(&h->addr_space); for (i = 2; i < 16; i++) h->csr.gen_timestamp[i] = jiffies - 60 * HZ; atomic_set(&h->generation, 0); INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus); init_timer(&h->timeout); h->timeout.data = (unsigned long) h; h->timeout.function = abort_timedouts; h->timeout_interval = HZ / 20; /* 50ms, half of minimum SPLIT_TIMEOUT */ h->topology_map = h->csr.topology_map + 3; h->speed_map = (u8 *)(h->csr.speed_map + 2); mutex_lock(&host_num_alloc); while (nodemgr_for_each_host(&hostnum, alloc_hostnum_cb)) hostnum++; mutex_unlock(&host_num_alloc); h->id = hostnum; memcpy(&h->device, &nodemgr_dev_template_host, sizeof(h->device)); h->device.parent = dev; set_dev_node(&h->device, dev_to_node(dev)); dev_set_name(&h->device, "fw-host%d", h->id); h->host_dev.parent = &h->device; h->host_dev.class = &hpsb_host_class; dev_set_name(&h->host_dev, "fw-host%d", h->id); if (device_register(&h->device)) goto fail; if (device_register(&h->host_dev)) { device_unregister(&h->device); goto fail; } get_device(&h->device); return h; fail: kfree(h); return NULL; } int hpsb_add_host(struct hpsb_host *host) { if (hpsb_default_host_entry(host)) return -ENOMEM; highlevel_add_host(host); return 0; } void hpsb_resume_host(struct hpsb_host *host) { if (host->driver->set_hw_config_rom) host->driver->set_hw_config_rom(host, host->csr.rom->bus_info_data); host->driver->devctl(host, RESET_BUS, SHORT_RESET); } void hpsb_remove_host(struct hpsb_host *host) { host->is_shutdown = 1; cancel_delayed_work(&host->delayed_reset); flush_scheduled_work(); host->driver = &dummy_driver; 
highlevel_remove_host(host); device_unregister(&host->host_dev); device_unregister(&host->device); } /** * hpsb_update_config_rom_image - updates configuration ROM image of a host * * Updates the configuration ROM image of a host. rom_version must be the * current version, otherwise it will fail with return value -1. If this * host does not support config-rom-update, it will return -%EINVAL. * Return value 0 indicates success. */ int hpsb_update_config_rom_image(struct hpsb_host *host) { unsigned long reset_delay; int next_gen = host->csr.generation + 1; if (!host->update_config_rom) return -EINVAL; if (next_gen > 0xf) next_gen = 2; /* Stop the delayed interrupt, we're about to change the config rom and * it would be a waste to do a bus reset twice. */ cancel_delayed_work(&host->delayed_reset); /* IEEE 1394a-2000 prohibits using the same generation number * twice in a 60 second period. */ if (time_before(jiffies, host->csr.gen_timestamp[next_gen] + 60 * HZ)) /* Wait 60 seconds from the last time this generation number was * used. */ reset_delay = (60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies; else /* Wait 1 second in case some other code wants to change the * Config ROM in the near future. */ reset_delay = HZ; PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus); schedule_delayed_work(&host->delayed_reset, reset_delay); return 0; }
gpl-2.0
tdm/kernel_huawei_msm8928
drivers/gpu/ion/ion_chunk_heap.c
1944
4874
/* * drivers/gpu/ion/ion_chunk_heap.c * * Copyright (C) 2012 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ //#include <linux/spinlock.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/genalloc.h> #include <linux/io.h> #include <linux/ion.h> #include <linux/mm.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include "ion_priv.h" #include <asm/mach/map.h> struct ion_chunk_heap { struct ion_heap heap; struct gen_pool *pool; ion_phys_addr_t base; unsigned long chunk_size; unsigned long size; unsigned long allocated; }; static int ion_chunk_heap_allocate(struct ion_heap *heap, struct ion_buffer *buffer, unsigned long size, unsigned long align, unsigned long flags) { struct ion_chunk_heap *chunk_heap = container_of(heap, struct ion_chunk_heap, heap); struct sg_table *table; struct scatterlist *sg; int ret, i; unsigned long num_chunks; if (ion_buffer_fault_user_mappings(buffer)) return -ENOMEM; num_chunks = ALIGN(size, chunk_heap->chunk_size) / chunk_heap->chunk_size; buffer->size = num_chunks * chunk_heap->chunk_size; if (buffer->size > chunk_heap->size - chunk_heap->allocated) return -ENOMEM; table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); if (!table) return -ENOMEM; ret = sg_alloc_table(table, num_chunks, GFP_KERNEL); if (ret) { kfree(table); return ret; } sg = table->sgl; for (i = 0; i < num_chunks; i++) { unsigned long paddr = gen_pool_alloc(chunk_heap->pool, chunk_heap->chunk_size); if (!paddr) goto err; sg_set_page(sg, phys_to_page(paddr), chunk_heap->chunk_size, 0); sg = sg_next(sg); } 
buffer->priv_virt = table; chunk_heap->allocated += buffer->size; return 0; err: sg = table->sgl; for (i -= 1; i >= 0; i--) { gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), sg_dma_len(sg)); sg = sg_next(sg); } sg_free_table(table); kfree(table); return -ENOMEM; } static void ion_chunk_heap_free(struct ion_buffer *buffer) { struct ion_heap *heap = buffer->heap; struct ion_chunk_heap *chunk_heap = container_of(heap, struct ion_chunk_heap, heap); struct sg_table *table = buffer->priv_virt; struct scatterlist *sg; int i; ion_heap_buffer_zero(buffer); for_each_sg(table->sgl, sg, table->nents, i) { if (ion_buffer_cached(buffer)) dma_sync_sg_for_device(NULL, sg, 1, DMA_BIDIRECTIONAL); gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), sg_dma_len(sg)); } chunk_heap->allocated -= buffer->size; sg_free_table(table); kfree(table); } struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap, struct ion_buffer *buffer) { return buffer->priv_virt; } void ion_chunk_heap_unmap_dma(struct ion_heap *heap, struct ion_buffer *buffer) { return; } static struct ion_heap_ops chunk_heap_ops = { .allocate = ion_chunk_heap_allocate, .free = ion_chunk_heap_free, .map_dma = ion_chunk_heap_map_dma, .unmap_dma = ion_chunk_heap_unmap_dma, .map_user = ion_heap_map_user, .map_kernel = ion_heap_map_kernel, .unmap_kernel = ion_heap_unmap_kernel, }; struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) { struct ion_chunk_heap *chunk_heap; struct scatterlist sg; chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL); if (!chunk_heap) return ERR_PTR(-ENOMEM); chunk_heap->chunk_size = (unsigned long)heap_data->priv; chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) + PAGE_SHIFT, -1); if (!chunk_heap->pool) { kfree(chunk_heap); return ERR_PTR(-ENOMEM); } chunk_heap->base = heap_data->base; chunk_heap->size = heap_data->size; chunk_heap->allocated = 0; sg_init_table(&sg, 1); sg_set_page(&sg, phys_to_page(heap_data->base), 
heap_data->size, 0); dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL); gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1); chunk_heap->heap.ops = &chunk_heap_ops; chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK; chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; pr_info("%s: base %pa size %zd align %pa\n", __func__, &chunk_heap->base, heap_data->size, &heap_data->align); return &chunk_heap->heap; } void ion_chunk_heap_destroy(struct ion_heap *heap) { struct ion_chunk_heap *chunk_heap = container_of(heap, struct ion_chunk_heap, heap); gen_pool_destroy(chunk_heap->pool); kfree(chunk_heap); chunk_heap = NULL; }
gpl-2.0
The-Sickness/S6-MM
arch/mips/mm/uasm.c
1944
12865
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * A small micro-assembler. It is intentionally kept simple, does only * support a subset of instructions, and does not try to hide pipeline * effects like branch delay slots. * * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer * Copyright (C) 2005, 2007 Maciej W. Rozycki * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved. */ enum fields { RS = 0x001, RT = 0x002, RD = 0x004, RE = 0x008, SIMM = 0x010, UIMM = 0x020, BIMM = 0x040, JIMM = 0x080, FUNC = 0x100, SET = 0x200, SCIMM = 0x400 }; #define OP_MASK 0x3f #define OP_SH 26 #define RD_MASK 0x1f #define RD_SH 11 #define RE_MASK 0x1f #define RE_SH 6 #define IMM_MASK 0xffff #define IMM_SH 0 #define JIMM_MASK 0x3ffffff #define JIMM_SH 0 #define FUNC_MASK 0x3f #define FUNC_SH 0 #define SET_MASK 0x7 #define SET_SH 0 enum opcode { insn_invalid, insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1, insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl, insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm, insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret, insn_ext, insn_ins, insn_j, insn_jal, insn_jr, insn_ld, insn_ldx, insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori, }; struct insn { enum opcode opcode; u32 match; enum fields fields; }; static inline __uasminit u32 build_rs(u32 arg) { WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return (arg & RS_MASK) << RS_SH; } static inline __uasminit u32 build_rt(u32 
arg) { WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return (arg & RT_MASK) << RT_SH; } static inline __uasminit u32 build_rd(u32 arg) { WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return (arg & RD_MASK) << RD_SH; } static inline __uasminit u32 build_re(u32 arg) { WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return (arg & RE_MASK) << RE_SH; } static inline __uasminit u32 build_simm(s32 arg) { WARN(arg > 0x7fff || arg < -0x8000, KERN_WARNING "Micro-assembler field overflow\n"); return arg & 0xffff; } static inline __uasminit u32 build_uimm(u32 arg) { WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return arg & IMM_MASK; } static inline __uasminit u32 build_scimm(u32 arg) { WARN(arg & ~SCIMM_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return (arg & SCIMM_MASK) << SCIMM_SH; } static inline __uasminit u32 build_func(u32 arg) { WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return arg & FUNC_MASK; } static inline __uasminit u32 build_set(u32 arg) { WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return arg & SET_MASK; } static void __uasminit build_insn(u32 **buf, enum opcode opc, ...); #define I_u1u2u3(op) \ Ip_u1u2u3(op) \ { \ build_insn(buf, insn##op, a, b, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2u1u3(op) \ Ip_u2u1u3(op) \ { \ build_insn(buf, insn##op, b, a, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u3u1u2(op) \ Ip_u3u1u2(op) \ { \ build_insn(buf, insn##op, b, c, a); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u1u2s3(op) \ Ip_u1u2s3(op) \ { \ build_insn(buf, insn##op, a, b, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2s3u1(op) \ Ip_u2s3u1(op) \ { \ build_insn(buf, insn##op, c, a, b); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2u1s3(op) \ Ip_u2u1s3(op) \ { \ build_insn(buf, insn##op, b, a, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2u1msbu3(op) \ 
Ip_u2u1msbu3(op) \ { \ build_insn(buf, insn##op, b, a, c+d-1, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2u1msb32u3(op) \ Ip_u2u1msbu3(op) \ { \ build_insn(buf, insn##op, b, a, c+d-33, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2u1msbdu3(op) \ Ip_u2u1msbu3(op) \ { \ build_insn(buf, insn##op, b, a, d-1, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u1u2(op) \ Ip_u1u2(op) \ { \ build_insn(buf, insn##op, a, b); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u1s2(op) \ Ip_u1s2(op) \ { \ build_insn(buf, insn##op, a, b); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u1(op) \ Ip_u1(op) \ { \ build_insn(buf, insn##op, a); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_0(op) \ Ip_0(op) \ { \ build_insn(buf, insn##op); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); I_u2u1s3(_addiu) I_u3u1u2(_addu) I_u2u1u3(_andi) I_u3u1u2(_and) I_u1u2s3(_beq) I_u1u2s3(_beql) I_u1s2(_bgez) I_u1s2(_bgezl) I_u1s2(_bltz) I_u1s2(_bltzl) I_u1u2s3(_bne) I_u2s3u1(_cache) I_u1u2u3(_dmfc0) I_u1u2u3(_dmtc0) I_u2u1s3(_daddiu) I_u3u1u2(_daddu) I_u2u1u3(_dsll) I_u2u1u3(_dsll32) I_u2u1u3(_dsra) I_u2u1u3(_dsrl) I_u2u1u3(_dsrl32) I_u2u1u3(_drotr) I_u2u1u3(_drotr32) I_u3u1u2(_dsubu) I_0(_eret) I_u2u1msbdu3(_ext) I_u2u1msbu3(_ins) I_u1(_j) I_u1(_jal) I_u1(_jr) I_u2s3u1(_ld) I_u2s3u1(_ll) I_u2s3u1(_lld) I_u1s2(_lui) I_u2s3u1(_lw) I_u1u2u3(_mfc0) I_u1u2u3(_mtc0) I_u2u1u3(_ori) I_u3u1u2(_or) I_0(_rfe) I_u2s3u1(_sc) I_u2s3u1(_scd) I_u2s3u1(_sd) I_u2u1u3(_sll) I_u2u1u3(_sra) I_u2u1u3(_srl) I_u2u1u3(_rotr) I_u3u1u2(_subu) I_u2s3u1(_sw) I_0(_tlbp) I_0(_tlbr) I_0(_tlbwi) I_0(_tlbwr) I_u3u1u2(_xor) I_u2u1u3(_xori) I_u2u1msbu3(_dins); I_u2u1msb32u3(_dinsm); I_u1(_syscall); I_u1u2s3(_bbit0); I_u1u2s3(_bbit1); I_u3u1u2(_lwx) I_u3u1u2(_ldx) #ifdef CONFIG_CPU_CAVIUM_OCTEON #include <asm/octeon/octeon.h> void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, unsigned int c) { if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) /* * As per erratum Core-14449, replace 
prefetches 0-4, * 6-24 with 'pref 28'. */ build_insn(buf, insn_pref, c, 28, b); else build_insn(buf, insn_pref, c, a, b); } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref)); #else I_u2s3u1(_pref) #endif /* Handle labels. */ void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid) { (*lab)->addr = addr; (*lab)->lab = lid; (*lab)++; } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label)); int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr) { /* Is this address in 32bit compat space? */ #ifdef CONFIG_64BIT return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L); #else return 1; #endif } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p)); static int __uasminit uasm_rel_highest(long val) { #ifdef CONFIG_64BIT return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000; #else return 0; #endif } static int __uasminit uasm_rel_higher(long val) { #ifdef CONFIG_64BIT return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000; #else return 0; #endif } int __uasminit ISAFUNC(uasm_rel_hi)(long val) { return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi)); int __uasminit ISAFUNC(uasm_rel_lo)(long val) { return ((val & 0xffff) ^ 0x8000) - 0x8000; } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo)); void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr) { if (!ISAFUNC(uasm_in_compat_space_p)(addr)) { ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr)); if (uasm_rel_higher(addr)) ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr)); if (ISAFUNC(uasm_rel_hi(addr))) { ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16); ISAFUNC(uasm_i_daddiu)(buf, rs, rs, ISAFUNC(uasm_rel_hi)(addr)); ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16); } else ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0); } else ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr))); } UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly)); void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr) { 
ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr); if (ISAFUNC(uasm_rel_lo(addr))) { if (!ISAFUNC(uasm_in_compat_space_p)(addr)) ISAFUNC(uasm_i_daddiu)(buf, rs, rs, ISAFUNC(uasm_rel_lo(addr))); else ISAFUNC(uasm_i_addiu)(buf, rs, rs, ISAFUNC(uasm_rel_lo(addr))); } } UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA)); /* Handle relocations. */ void __uasminit ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid) { (*rel)->addr = addr; (*rel)->type = R_MIPS_PC16; (*rel)->lab = lid; (*rel)++; } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16)); static inline void __uasminit __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab); void __uasminit ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab) { struct uasm_label *l; for (; rel->lab != UASM_LABEL_INVALID; rel++) for (l = lab; l->lab != UASM_LABEL_INVALID; l++) if (rel->lab == l->lab) __resolve_relocs(rel, l); } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs)); void __uasminit ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off) { for (; rel->lab != UASM_LABEL_INVALID; rel++) if (rel->addr >= first && rel->addr < end) rel->addr += off; } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs)); void __uasminit ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off) { for (; lab->lab != UASM_LABEL_INVALID; lab++) if (lab->addr >= first && lab->addr < end) lab->addr += off; } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels)); void __uasminit ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, u32 *end, u32 *target) { long off = (long)(target - first); memcpy(target, first, (end - first) * sizeof(u32)); ISAFUNC(uasm_move_relocs(rel, first, end, off)); ISAFUNC(uasm_move_labels(lab, first, end, off)); } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler)); int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr) { for (; rel->lab != UASM_LABEL_INVALID; rel++) { if (rel->addr == addr && (rel->type == 
R_MIPS_PC16 || rel->type == R_MIPS_26)) return 1; } return 0; } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay)); /* Convenience functions for labeled branches. */ void __uasminit ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); ISAFUNC(uasm_i_bltz)(p, reg, 0); } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz)); void __uasminit ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid) { uasm_r_mips_pc16(r, *p, lid); ISAFUNC(uasm_i_b)(p, 0); } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b)); void __uasminit ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); ISAFUNC(uasm_i_beqz)(p, reg, 0); } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz)); void __uasminit ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); ISAFUNC(uasm_i_beqzl)(p, reg, 0); } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl)); void __uasminit ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1, unsigned int reg2, int lid) { uasm_r_mips_pc16(r, *p, lid); ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0); } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne)); void __uasminit ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); ISAFUNC(uasm_i_bnez)(p, reg, 0); } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez)); void __uasminit ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); ISAFUNC(uasm_i_bgezl)(p, reg, 0); } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl)); void __uasminit ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); ISAFUNC(uasm_i_bgez)(p, reg, 0); } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez)); void __uasminit ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid) { uasm_r_mips_pc16(r, *p, lid); ISAFUNC(uasm_i_bbit0)(p, reg, 
bit, 0); } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0)); void __uasminit ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid) { uasm_r_mips_pc16(r, *p, lid); ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0); } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));
gpl-2.0
marc1706/hd2_kernel
arch/arm/mach-ux500/board-u5500-sdi.c
2712
2105
/* * Copyright (C) ST-Ericsson SA 2010 * * Author: Hanumath Prasad <ulf.hansson@stericsson.com> * License terms: GNU General Public License (GPL) version 2 */ #include <linux/amba/mmci.h> #include <linux/mmc/host.h> #include <linux/gpio.h> #include <plat/pincfg.h> #include <mach/db5500-regs.h> #include <plat/ste_dma40.h> #include "pins-db5500.h" #include "devices-db5500.h" #include "ste-dma40-db5500.h" static pin_cfg_t u5500_sdi_pins[] = { /* SDI0 (POP eMMC) */ GPIO5_MC0_DAT0 | PIN_DIR_INPUT | PIN_PULL_UP, GPIO6_MC0_DAT1 | PIN_DIR_INPUT | PIN_PULL_UP, GPIO7_MC0_DAT2 | PIN_DIR_INPUT | PIN_PULL_UP, GPIO8_MC0_DAT3 | PIN_DIR_INPUT | PIN_PULL_UP, GPIO9_MC0_DAT4 | PIN_DIR_INPUT | PIN_PULL_UP, GPIO10_MC0_DAT5 | PIN_DIR_INPUT | PIN_PULL_UP, GPIO11_MC0_DAT6 | PIN_DIR_INPUT | PIN_PULL_UP, GPIO12_MC0_DAT7 | PIN_DIR_INPUT | PIN_PULL_UP, GPIO13_MC0_CMD | PIN_DIR_INPUT | PIN_PULL_UP, GPIO14_MC0_CLK | PIN_DIR_OUTPUT | PIN_VAL_LOW, }; #ifdef CONFIG_STE_DMA40 struct stedma40_chan_cfg u5500_sdi0_dma_cfg_rx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_PERIPH_TO_MEM, .src_dev_type = DB5500_DMA_DEV24_SDMMC0_RX, .dst_dev_type = STEDMA40_DEV_DST_MEMORY, .src_info.data_width = STEDMA40_WORD_WIDTH, .dst_info.data_width = STEDMA40_WORD_WIDTH, }; static struct stedma40_chan_cfg u5500_sdi0_dma_cfg_tx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_MEM_TO_PERIPH, .src_dev_type = STEDMA40_DEV_SRC_MEMORY, .dst_dev_type = DB5500_DMA_DEV24_SDMMC0_TX, .src_info.data_width = STEDMA40_WORD_WIDTH, .dst_info.data_width = STEDMA40_WORD_WIDTH, }; #endif static struct mmci_platform_data u5500_sdi0_data = { .ocr_mask = MMC_VDD_165_195, .f_max = 50000000, .capabilities = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA | MMC_CAP_MMC_HIGHSPEED, .gpio_cd = -1, .gpio_wp = -1, #ifdef CONFIG_STE_DMA40 .dma_filter = stedma40_filter, .dma_rx_param = &u5500_sdi0_dma_cfg_rx, .dma_tx_param = &u5500_sdi0_dma_cfg_tx, #endif }; void __init u5500_sdi_init(void) { nmk_config_pins(u5500_sdi_pins, 
ARRAY_SIZE(u5500_sdi_pins)); db5500_add_sdi0(&u5500_sdi0_data); }
gpl-2.0
chris4824/kernel_samsung_jf
arch/blackfin/mach-bf548/boards/cm_bf548.c
4504
28341
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *           2008-2009 Bluetechnix
 *                2005 National ICT Australia (NICTA)
 *                     Aidan Williams <aidan@nicta.com.au>
 *
 * Licensed under the GPL-2 or later.
 */

/*
 * Board-support file for the Bluetechnix CM-BF548 module (Blackfin BF548).
 * It declares the static platform devices/resources for the on-chip and
 * on-board peripherals and registers them from an arch_initcall.  All
 * device tables below are conditional on the corresponding driver being
 * enabled in the kernel configuration.
 */

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb/musb.h>
#include <asm/bfin5xx_spi.h>
#include <asm/dma.h>
#include <asm/gpio.h>
#include <asm/nand.h>
#include <asm/portmux.h>
#include <asm/bfin_sdh.h>
#include <mach/bf54x_keys.h>
#include <asm/dpmc.h>
#include <linux/input.h>
#include <linux/spi/ad7877.h>

/*
 * Name the Board for the /proc/cpuinfo
 */
const char bfin_board_name[] = "Bluetechnix CM-BF548";

/*
 * Driver needs to know address, irq and flag pin.
 */

/* ---------------- LCD framebuffer (bf54x-lq043) ---------------- */
#if defined(CONFIG_FB_BF54X_LQ043) || defined(CONFIG_FB_BF54X_LQ043_MODULE)
#include <mach/bf54x-lq043.h>

/* Panel geometry and the GPIO used as the display-enable pin. */
static struct bfin_bf54xfb_mach_info bf54x_lq043_data = {
	.width = 480,
	.height = 272,
	.xres = {480, 480, 480},
	.yres = {272, 272, 272},
	.bpp = {24, 24, 24},
	.disp = GPIO_PE3,
};

static struct resource bf54x_lq043_resources[] = {
	{
		/* EPPI0 error interrupt is the only resource the driver needs */
		.start = IRQ_EPPI0_ERR,
		.end = IRQ_EPPI0_ERR,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bf54x_lq043_device = {
	.name = "bf54x-lq043",
	.id = -1,
	.num_resources = ARRAY_SIZE(bf54x_lq043_resources),
	.resource = bf54x_lq043_resources,
	.dev = {
		.platform_data = &bf54x_lq043_data,
	},
};
#endif

/* ---------------- 4x4 matrix keypad (bf54x-keys) ---------------- */
#if defined(CONFIG_KEYBOARD_BFIN) || defined(CONFIG_KEYBOARD_BFIN_MODULE)
/* Row/column -> keycode map; KEYVAL packs (row, col, keycode). */
static unsigned int bf548_keymap[] = {
	KEYVAL(0, 0, KEY_ENTER),
	KEYVAL(0, 1, KEY_HELP),
	KEYVAL(0, 2, KEY_0),
	KEYVAL(0, 3, KEY_BACKSPACE),
	KEYVAL(1, 0, KEY_TAB),
	KEYVAL(1, 1, KEY_9),
	KEYVAL(1, 2, KEY_8),
	KEYVAL(1, 3, KEY_7),
	KEYVAL(2, 0, KEY_DOWN),
	KEYVAL(2, 1, KEY_6),
	KEYVAL(2, 2, KEY_5),
	KEYVAL(2, 3, KEY_4),
	KEYVAL(3, 0, KEY_UP),
	KEYVAL(3, 1, KEY_3),
	KEYVAL(3, 2, KEY_2),
	KEYVAL(3, 3, KEY_1),
};

static struct bfin_kpad_platform_data bf54x_kpad_data = {
	.rows = 4,
	.cols = 4,
	.keymap = bf548_keymap,
	.keymapsize = ARRAY_SIZE(bf548_keymap),
	.repeat = 0,
	.debounce_time = 5000,	/* ns (5ms) */
	.coldrive_time = 1000,	/* ns (1ms) */
	.keyup_test_interval = 50,	/* ms (50ms) */
};

static struct resource bf54x_kpad_resources[] = {
	{
		.start = IRQ_KEY,
		.end = IRQ_KEY,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bf54x_kpad_device = {
	.name = "bf54x-keys",
	.id = -1,
	.num_resources = ARRAY_SIZE(bf54x_kpad_resources),
	.resource = bf54x_kpad_resources,
	.dev = {
		.platform_data = &bf54x_kpad_data,
	},
};
#endif

/* ---------------- On-chip RTC ---------------- */
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
static struct platform_device rtc_device = {
	.name = "rtc-bfin",
	.id = -1,
};
#endif

/* ---------------- On-chip UARTs 0-3 (bfin-uart) ----------------
 * Each UART exports its MMR window, TX/RX/error IRQs and TX/RX DMA
 * channels; the peripheral pin list is handed over as platform_data.
 */
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
static struct resource bfin_uart0_resources[] = {
	{
		.start = UART0_DLL,
		.end = UART0_RBR+2,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART0_TX,
		.end = IRQ_UART0_TX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART0_RX,
		.end = IRQ_UART0_RX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART0_ERROR,
		.end = IRQ_UART0_ERROR,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART0_TX,
		.end = CH_UART0_TX,
		.flags = IORESOURCE_DMA,
	},
	{
		.start = CH_UART0_RX,
		.end = CH_UART0_RX,
		.flags = IORESOURCE_DMA,
	},
};

/* Zero-terminated portmux list claimed by the driver. */
static unsigned short bfin_uart0_peripherals[] = {
	P_UART0_TX, P_UART0_RX, 0
};

static struct platform_device bfin_uart0_device = {
	.name = "bfin-uart",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_uart0_resources),
	.resource = bfin_uart0_resources,
	.dev = {
		.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
	},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
static struct resource bfin_uart1_resources[] = {
	{
		.start = UART1_DLL,
		.end = UART1_RBR+2,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART1_TX,
		.end = IRQ_UART1_TX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART1_RX,
		.end = IRQ_UART1_RX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART1_ERROR,
		.end = IRQ_UART1_ERROR,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART1_TX,
		.end = CH_UART1_TX,
		.flags = IORESOURCE_DMA,
	},
	{
		.start = CH_UART1_RX,
		.end = CH_UART1_RX,
		.flags = IORESOURCE_DMA,
	},
#ifdef CONFIG_BFIN_UART1_CTSRTS
	{	/* CTS pin -- 0 means not supported */
		.start = GPIO_PE10,
		.end = GPIO_PE10,
		.flags = IORESOURCE_IO,
	},
	{	/* RTS pin -- 0 means not supported */
		.start = GPIO_PE9,
		.end = GPIO_PE9,
		.flags = IORESOURCE_IO,
	},
#endif
};

static unsigned short bfin_uart1_peripherals[] = {
	P_UART1_TX, P_UART1_RX,
#ifdef CONFIG_BFIN_UART1_CTSRTS
	P_UART1_RTS, P_UART1_CTS,
#endif
	0
};

static struct platform_device bfin_uart1_device = {
	.name = "bfin-uart",
	.id = 1,
	.num_resources = ARRAY_SIZE(bfin_uart1_resources),
	.resource = bfin_uart1_resources,
	.dev = {
		.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
	},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART2
static struct resource bfin_uart2_resources[] = {
	{
		.start = UART2_DLL,
		.end = UART2_RBR+2,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART2_TX,
		.end = IRQ_UART2_TX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART2_RX,
		.end = IRQ_UART2_RX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART2_ERROR,
		.end = IRQ_UART2_ERROR,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART2_TX,
		.end = CH_UART2_TX,
		.flags = IORESOURCE_DMA,
	},
	{
		.start = CH_UART2_RX,
		.end = CH_UART2_RX,
		.flags = IORESOURCE_DMA,
	},
};

static unsigned short bfin_uart2_peripherals[] = {
	P_UART2_TX, P_UART2_RX, 0
};

static struct platform_device bfin_uart2_device = {
	.name = "bfin-uart",
	.id = 2,
	.num_resources = ARRAY_SIZE(bfin_uart2_resources),
	.resource = bfin_uart2_resources,
	.dev = {
		.platform_data = &bfin_uart2_peripherals, /* Passed to driver */
	},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART3
static struct resource bfin_uart3_resources[] = {
	{
		.start = UART3_DLL,
		.end = UART3_RBR+2,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART3_TX,
		.end = IRQ_UART3_TX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART3_RX,
		.end = IRQ_UART3_RX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART3_ERROR,
		.end = IRQ_UART3_ERROR,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART3_TX,
		.end = CH_UART3_TX,
		.flags = IORESOURCE_DMA,
	},
	{
		.start = CH_UART3_RX,
		.end = CH_UART3_RX,
		.flags = IORESOURCE_DMA,
	},
#ifdef CONFIG_BFIN_UART3_CTSRTS
	{	/* CTS pin -- 0 means not supported */
		.start = GPIO_PB3,
		.end = GPIO_PB3,
		.flags = IORESOURCE_IO,
	},
	{	/* RTS pin -- 0 means not supported */
		.start = GPIO_PB2,
		.end = GPIO_PB2,
		.flags = IORESOURCE_IO,
	},
#endif
};

static unsigned short bfin_uart3_peripherals[] = {
	P_UART3_TX, P_UART3_RX,
#ifdef CONFIG_BFIN_UART3_CTSRTS
	P_UART3_RTS, P_UART3_CTS,
#endif
	0
};

static struct platform_device bfin_uart3_device = {
	.name = "bfin-uart",
	.id = 3,
	.num_resources = ARRAY_SIZE(bfin_uart3_resources),
	.resource = bfin_uart3_resources,
	.dev = {
		.platform_data = &bfin_uart3_peripherals, /* Passed to driver */
	},
};
#endif
#endif

/* ---------------- IrDA SIR over the UARTs (bfin_sir) ----------------
 * NOTE(review): the MEM ranges below are the raw UART MMR windows
 * (e.g. 0xFFC00400 == UART0) — the SIR driver shares the UART hardware,
 * so a UART and its SIR device should not be enabled simultaneously.
 */
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
static struct resource bfin_sir0_resources[] = {
	{
		.start = 0xFFC00400,
		.end = 0xFFC004FF,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART0_RX,
		.end = IRQ_UART0_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART0_RX,
		.end = CH_UART0_RX+1,
		.flags = IORESOURCE_DMA,
	},
};

static struct platform_device bfin_sir0_device = {
	.name = "bfin_sir",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
	.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
static struct resource bfin_sir1_resources[] = {
	{
		.start = 0xFFC02000,
		.end = 0xFFC020FF,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART1_RX,
		.end = IRQ_UART1_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART1_RX,
		.end = CH_UART1_RX+1,
		.flags = IORESOURCE_DMA,
	},
};

static struct platform_device bfin_sir1_device = {
	.name = "bfin_sir",
	.id = 1,
	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
	.resource = bfin_sir1_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR2
static struct resource bfin_sir2_resources[] = {
	{
		.start = 0xFFC02100,
		.end = 0xFFC021FF,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART2_RX,
		.end = IRQ_UART2_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART2_RX,
		.end = CH_UART2_RX+1,
		.flags = IORESOURCE_DMA,
	},
};

static struct platform_device bfin_sir2_device = {
	.name = "bfin_sir",
	.id = 2,
	.num_resources = ARRAY_SIZE(bfin_sir2_resources),
	.resource = bfin_sir2_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR3
static struct resource bfin_sir3_resources[] = {
	{
		.start = 0xFFC03100,
		.end = 0xFFC031FF,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART3_RX,
		.end = IRQ_UART3_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART3_RX,
		.end = CH_UART3_RX+1,
		.flags = IORESOURCE_DMA,
	},
};

static struct platform_device bfin_sir3_device = {
	.name = "bfin_sir",
	.id = 3,
	.num_resources = ARRAY_SIZE(bfin_sir3_resources),
	.resource = bfin_sir3_resources,
};
#endif
#endif

/* ---------------- SMSC LAN911x Ethernet on async memory ---------------- */
#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
#include <linux/smsc911x.h>

static struct resource smsc911x_resources[] = {
	{
		.name = "smsc911x-memory",
		.start = 0x24000000,
		.end = 0x24000000 + 0xFF,
		.flags = IORESOURCE_MEM,
	},
	{
		/* Chip interrupt wired to PE6, active low */
		.start = IRQ_PE6,
		.end = IRQ_PE6,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
	},
};

static struct smsc911x_platform_config smsc911x_config = {
	.flags = SMSC911X_USE_16BIT,
	.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
	.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
	.phy_interface = PHY_INTERFACE_MODE_MII,
};

static struct platform_device smsc911x_device = {
	.name = "smsc911x",
	.id = 0,
	.num_resources = ARRAY_SIZE(smsc911x_resources),
	.resource = smsc911x_resources,
	.dev = {
		.platform_data = &smsc911x_config,
	},
};
#endif

/* ---------------- MUSB USB OTG controller ---------------- */
#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
static struct resource musb_resources[] = {
	[0] = {
		.start = 0xFFC03C00,
		.end = 0xFFC040FF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {	/* general IRQ */
		.start = IRQ_USB_INT0,
		.end = IRQ_USB_INT0,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
		.name = "mc"
	},
	[2] = {	/* DMA IRQ */
		.start = IRQ_USB_DMA,
		.end = IRQ_USB_DMA,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
		.name = "dma"
	},
};

static struct musb_hdrc_config musb_config = {
	.multipoint = 0,
	.dyn_fifo = 0,
	.soft_con = 1,
	.dma = 1,
	.num_eps = 8,
	.dma_channels = 8,
	.gpio_vrsel = GPIO_PH6,
	/* Some custom boards need to be active low, just set it to "0"
	 * if it is the case.
	 */
	.gpio_vrsel_active = 1,
	.clkin = 24,		/* musb CLKIN in MHZ */
};

/* MUSB role is chosen at build time from the enabled config options. */
static struct musb_hdrc_platform_data musb_plat = {
#if defined(CONFIG_USB_MUSB_OTG)
	.mode = MUSB_OTG,
#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
	.mode = MUSB_HOST,
#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
	.mode = MUSB_PERIPHERAL,
#endif
	.config = &musb_config,
};

/* 32-bit DMA mask: the controller addresses the full 4GB space. */
static u64 musb_dmamask = ~(u32)0;

static struct platform_device musb_device = {
	.name = "musb-blackfin",
	.id = 0,
	.dev = {
		.dma_mask = &musb_dmamask,
		.coherent_dma_mask = 0xffffffff,
		.platform_data = &musb_plat,
	},
	.num_resources = ARRAY_SIZE(musb_resources),
	.resource = musb_resources,
};
#endif

/* ---------------- SPORTs used as extra UARTs (bfin-sport-uart) ---------------- */
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
static struct resource bfin_sport0_uart_resources[] = {
	{
		.start = SPORT0_TCR1,
		.end = SPORT0_MRCS3+4,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_SPORT0_RX,
		.end = IRQ_SPORT0_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_SPORT0_ERROR,
		.end = IRQ_SPORT0_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static unsigned short bfin_sport0_peripherals[] = {
	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};

static struct platform_device bfin_sport0_uart_device = {
	.name = "bfin-sport-uart",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
	.resource = bfin_sport0_uart_resources,
	.dev = {
		.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
	},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
static struct resource bfin_sport1_uart_resources[] = {
	{
		.start = SPORT1_TCR1,
		.end = SPORT1_MRCS3+4,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_SPORT1_RX,
		.end = IRQ_SPORT1_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_SPORT1_ERROR,
		.end = IRQ_SPORT1_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static unsigned short bfin_sport1_peripherals[] = {
	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};

static struct platform_device bfin_sport1_uart_device = {
	.name = "bfin-sport-uart",
	.id = 1,
	.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
	.resource = bfin_sport1_uart_resources,
	.dev = {
		.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
	},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
static struct resource bfin_sport2_uart_resources[] = {
	{
		.start = SPORT2_TCR1,
		.end = SPORT2_MRCS3+4,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_SPORT2_RX,
		.end = IRQ_SPORT2_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_SPORT2_ERROR,
		.end = IRQ_SPORT2_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static unsigned short bfin_sport2_peripherals[] = {
	P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
	P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
};

static struct platform_device bfin_sport2_uart_device = {
	.name = "bfin-sport-uart",
	.id = 2,
	.num_resources = ARRAY_SIZE(bfin_sport2_uart_resources),
	.resource = bfin_sport2_uart_resources,
	.dev = {
		.platform_data = &bfin_sport2_peripherals, /* Passed to driver */
	},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
static struct resource bfin_sport3_uart_resources[] = {
	{
		.start = SPORT3_TCR1,
		.end = SPORT3_MRCS3+4,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_SPORT3_RX,
		.end = IRQ_SPORT3_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_SPORT3_ERROR,
		.end = IRQ_SPORT3_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static unsigned short bfin_sport3_peripherals[] = {
	P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
	P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
};

static struct platform_device bfin_sport3_uart_device = {
	.name = "bfin-sport-uart",
	.id = 3,
	.num_resources = ARRAY_SIZE(bfin_sport3_uart_resources),
	.resource = bfin_sport3_uart_resources,
	.dev = {
		.platform_data = &bfin_sport3_peripherals, /* Passed to driver */
	},
};
#endif
#endif

/* ---------------- ATAPI controller (pata-bf54x) ---------------- */
#if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE)
static struct resource bfin_atapi_resources[] = {
	{
		.start = 0xFFC03800,
		.end = 0xFFC0386F,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_ATAPI_ERR,
		.end = IRQ_ATAPI_ERR,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_atapi_device = {
	.name = "pata-bf54x",
	.id = -1,
	.num_resources = ARRAY_SIZE(bfin_atapi_resources),
	.resource = bfin_atapi_resources,
};
#endif

/* ---------------- NAND flash controller (bf5xx-nand) ----------------
 * Partition layout: 4MB kernel followed by the rest of a 256MB chip
 * as the filesystem.
 */
#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
static struct mtd_partition partition_info[] = {
	{
		.name = "linux kernel(nand)",
		.offset = 0,
		.size = 4 * 1024 * 1024,
	},
	{
		.name = "file system(nand)",
		.offset = 4 * 1024 * 1024,
		.size = (256 - 4) * 1024 * 1024,
	},
};

static struct bf5xx_nand_platform bf5xx_nand_platform = {
	.data_width = NFC_NWIDTH_8,
	.partitions = partition_info,
	.nr_partitions = ARRAY_SIZE(partition_info),
	.rd_dly = 3,
	.wr_dly = 3,
};

static struct resource bf5xx_nand_resources[] = {
	{
		.start = 0xFFC03B00,
		.end = 0xFFC03B4F,
		.flags = IORESOURCE_MEM,
	},
	{
		/* NOTE(review): CH_NFC is the NFC DMA channel but is exported
		 * with IORESOURCE_IRQ — this matches what the bf5xx-nand
		 * driver expects; confirm against the driver before changing.
		 */
		.start = CH_NFC,
		.end = CH_NFC,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bf5xx_nand_device = {
	.name = "bf5xx-nand",
	.id = 0,
	.num_resources = ARRAY_SIZE(bf5xx_nand_resources),
	.resource = bf5xx_nand_resources,
	.dev = {
		.platform_data = &bf5xx_nand_platform,
	},
};
#endif

/* ---------------- SD/SDIO host (bfin-sdh) ---------------- */
#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE)
static struct bfin_sd_host bfin_sdh_data = {
	.dma_chan = CH_SDH,
	.irq_int0 = IRQ_SDH_MASK0,
	.pin_req = {P_SD_D0, P_SD_D1, P_SD_D2, P_SD_D3, P_SD_CLK, P_SD_CMD, 0},
};

static struct platform_device bf54x_sdh_device = {
	.name = "bfin-sdh",
	.id = 0,
	.dev = {
		.platform_data = &bfin_sdh_data,
	},
};
#endif

/* ---------------- CAN controller (bfin_can) ---------------- */
#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
static unsigned short bfin_can_peripherals[] = {
	P_CAN0_RX, P_CAN0_TX, 0
};

static struct resource bfin_can_resources[] = {
	{
		.start = 0xFFC02A00,
		.end = 0xFFC02FFF,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_CAN0_RX,
		.end = IRQ_CAN0_RX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_CAN0_TX,
		.end = IRQ_CAN0_TX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_CAN0_ERROR,
		.end = IRQ_CAN0_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_can_device = {
	.name = "bfin_can",
	.num_resources = ARRAY_SIZE(bfin_can_resources),
	.resource = bfin_can_resources,
	.dev = {
		.platform_data = &bfin_can_peripherals, /* Passed to driver */
	},
};
#endif

/* ---------------- Parallel NOR flash (physmap) ----------------
 * 8MB NOR at async bank 0: bootloader / kernel / filesystem.
 */
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static struct mtd_partition para_partitions[] = {
	{
		.name = "bootloader(nor)",
		.size = 0x40000,
		.offset = 0,
	},
	{
		.name = "linux kernel(nor)",
		.size = 0x100000,
		.offset = MTDPART_OFS_APPEND,
	},
	{
		.name = "file system(nor)",
		.size = MTDPART_SIZ_FULL,
		.offset = MTDPART_OFS_APPEND,
	}
};

static struct physmap_flash_data para_flash_data = {
	.width = 2,
	.parts = para_partitions,
	.nr_parts = ARRAY_SIZE(para_partitions),
};

static struct resource para_flash_resource = {
	.start = 0x20000000,
	.end = 0x207fffff,
	.flags = IORESOURCE_MEM,
};

static struct platform_device para_flash_device = {
	.name = "physmap-flash",
	.id = 0,
	.dev = {
		.platform_data = &para_flash_data,
	},
	.num_resources = 1,
	.resource = &para_flash_resource,
};
#endif

/* ---------------- SPI controllers and SPI slave devices ---------------- */
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) \
	|| defined(CONFIG_MTD_M25P80_MODULE)
/* SPI flash chip (m25p16) */
static struct mtd_partition bfin_spi_flash_partitions[] = {
	{
		.name = "bootloader(spi)",
		.size = 0x00040000,
		.offset = 0,
		.mask_flags = MTD_CAP_ROM
	}, {
		.name = "linux kernel(spi)",
		.size = 0x1c0000,
		.offset = 0x40000
	}
};

static struct flash_platform_data bfin_spi_flash_data = {
	.name = "m25p80",
	.parts = bfin_spi_flash_partitions,
	.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
	.type = "m25p16",
};

static struct bfin5xx_spi_chip spi_flash_chip_info = {
	.enable_dma = 0,	/* use dma transfer with this chip*/
};
#endif

#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
/* AD7877 touchscreen controller calibration/timing parameters. */
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
	.model = 7877,
	.vref_delay_usecs = 50,		/* internal, no capacitor */
	.x_plate_ohms = 419,
	.y_plate_ohms = 486,
	.pressure_max = 1000,
	.pressure_min = 0,
	.stopacq_polarity = 1,
	.first_conversion_delay = 3,
	.acquisition_time = 1,
	.averaging = 1,
	.pen_down_acc_interval = 1,
};
#endif

/*
 * SPI slave table registered at init time.
 * NOTE(review): when both M25P80 and SPIDEV are enabled, both entries
 * claim bus 0 / chip_select 1 — only one can bind; verify intent.
 */
static struct spi_board_info bf54x_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
	|| defined(CONFIG_MTD_M25P80_MODULE)
	{
		/* the modalias must be the same as spi device driver name */
		.modalias = "m25p80", /* Name of spi_driver for this device */
		.max_speed_hz = 25000000,	/* max spi clock (SCK) speed in HZ */
		.bus_num = 0, /* Framework bus number */
		.chip_select = 1, /* SPI_SSEL1*/
		.platform_data = &bfin_spi_flash_data,
		.controller_data = &spi_flash_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
	{
		.modalias = "ad7877",
		.platform_data = &bfin_ad7877_ts_info,
		.irq = IRQ_PJ11,
		.max_speed_hz = 12500000,	/* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 2,
	},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
	{
		.modalias = "spidev",
		.max_speed_hz = 3125000,	/* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,
	},
#endif
};

/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
	[0] = {
		.start = SPI0_REGBASE,
		.end = SPI0_REGBASE + 0xFF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = CH_SPI0,
		.end = CH_SPI0,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = IRQ_SPI0,
		.end = IRQ_SPI0,
		.flags = IORESOURCE_IRQ,
	}
};

/* SPI (1) */
static struct resource bfin_spi1_resource[] = {
	[0] = {
		.start = SPI1_REGBASE,
		.end = SPI1_REGBASE + 0xFF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = CH_SPI1,
		.end = CH_SPI1,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = IRQ_SPI1,
		.end = IRQ_SPI1,
		.flags = IORESOURCE_IRQ,
	}
};

/* SPI controller data */
static struct bfin5xx_spi_master bf54x_spi_master_info0 = {
	.num_chipselect = 4,
	.enable_dma = 1,  /* master has the ability to do dma transfer */
	.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};

static struct platform_device bf54x_spi_master0 = {
	.name = "bfin-spi",
	.id = 0, /* Bus number */
	.num_resources = ARRAY_SIZE(bfin_spi0_resource),
	.resource = bfin_spi0_resource,
	.dev = {
		.platform_data = &bf54x_spi_master_info0, /* Passed to driver */
	},
};

static struct bfin5xx_spi_master bf54x_spi_master_info1 = {
	.num_chipselect = 4,
	.enable_dma = 1,  /* master has the ability to do dma transfer */
	.pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0},
};

static struct platform_device bf54x_spi_master1 = {
	.name = "bfin-spi",
	.id = 1, /* Bus number */
	.num_resources = ARRAY_SIZE(bfin_spi1_resource),
	.resource = bfin_spi1_resource,
	.dev = {
		.platform_data = &bf54x_spi_master_info1, /* Passed to driver */
	},
};
#endif  /* spi master and devices */

/* ---------------- I2C (TWI) controllers ---------------- */
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
static struct resource bfin_twi0_resource[] = {
	[0] = {
		.start = TWI0_REGBASE,
		.end = TWI0_REGBASE + 0xFF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_TWI0,
		.end = IRQ_TWI0,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device i2c_bfin_twi0_device = {
	.name = "i2c-bfin-twi",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
	.resource = bfin_twi0_resource,
};

#if !defined(CONFIG_BF542)	/* The BF542 only has 1 TWI */
static struct resource bfin_twi1_resource[] = {
	[0] = {
		.start = TWI1_REGBASE,
		.end = TWI1_REGBASE + 0xFF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_TWI1,
		.end = IRQ_TWI1,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device i2c_bfin_twi1_device = {
	.name = "i2c-bfin-twi",
	.id = 1,
	.num_resources = ARRAY_SIZE(bfin_twi1_resource),
	.resource = bfin_twi1_resource,
};
#endif
#endif

/* ---------------- GPIO push button (gpio-keys) ---------------- */
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/gpio_keys.h>

static struct gpio_keys_button bfin_gpio_keys_table[] = {
	{BTN_0, GPIO_PH7, 1, "gpio-keys: BTN0"},
};

static struct gpio_keys_platform_data bfin_gpio_keys_data = {
	.buttons = bfin_gpio_keys_table,
	.nbuttons = ARRAY_SIZE(bfin_gpio_keys_table),
};

static struct platform_device bfin_device_gpiokeys = {
	.name = "gpio-keys",
	.dev = {
		.platform_data = &bfin_gpio_keys_data,
	},
};
#endif

/* ---------------- Dynamic power management (bfin dpmc) ----------------
 * Core-clock frequency -> internal voltage-level pairs.
 */
static const unsigned int cclk_vlev_datasheet[] = {
/*
 * Internal VLEV BF54XSBBC1533
 ****temporarily using these values until data sheet is updated
 */
	VRPAIR(VLEV_085, 150000000),
	VRPAIR(VLEV_090, 250000000),
	VRPAIR(VLEV_110, 276000000),
	VRPAIR(VLEV_115, 301000000),
	VRPAIR(VLEV_120, 525000000),
	VRPAIR(VLEV_125, 550000000),
	VRPAIR(VLEV_130, 600000000),
};

static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
	.tuple_tab = cclk_vlev_datasheet,
	.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
	.vr_settling_time = 25 /* us */,
};

static struct platform_device bfin_dpmc = {
	.name = "bfin dpmc",
	.dev = {
		.platform_data = &bfin_dmpc_vreg_data,
	},
};

/*
 * Master list of devices registered at arch_initcall time; membership
 * mirrors the #ifdef guards on the definitions above.
 */
static struct platform_device *cm_bf548_devices[] __initdata = {

	&bfin_dpmc,

#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
	&rtc_device,
#endif

#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
	&bfin_uart1_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART2
	&bfin_uart2_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART3
	&bfin_uart3_device,
#endif
#endif

#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
	&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
	&bfin_sir1_device,
#endif
#ifdef CONFIG_BFIN_SIR2
	&bfin_sir2_device,
#endif
#ifdef CONFIG_BFIN_SIR3
	&bfin_sir3_device,
#endif
#endif

#if defined(CONFIG_FB_BF54X_LQ043) || defined(CONFIG_FB_BF54X_LQ043_MODULE)
	&bf54x_lq043_device,
#endif

#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
	&smsc911x_device,
#endif

#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
	&musb_device,
#endif

#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
	&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
	&bfin_sport1_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
	&bfin_sport2_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
	&bfin_sport3_uart_device,
#endif
#endif

#if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE)
	&bfin_atapi_device,
#endif

#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
	&bf5xx_nand_device,
#endif

#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE)
	&bf54x_sdh_device,
#endif

#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
	&bf54x_spi_master0,
	&bf54x_spi_master1,
#endif

#if defined(CONFIG_KEYBOARD_BFIN) || defined(CONFIG_KEYBOARD_BFIN_MODULE)
	&bf54x_kpad_device,
#endif

#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
	&i2c_bfin_twi0_device,
#if !defined(CONFIG_BF542)
	&i2c_bfin_twi1_device,
#endif
#endif

#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
	&bfin_device_gpiokeys,
#endif

#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
	&para_flash_device,
#endif

#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
	&bfin_can_device,
#endif
};

/*
 * Board init: register every platform device above and, when the SPI
 * master driver is enabled, announce the SPI slave devices to the SPI
 * core.  Runs once at arch_initcall time.
 */
static int __init cm_bf548_init(void)
{
	printk(KERN_INFO "%s(): registering device resources\n", __func__);
	platform_add_devices(cm_bf548_devices, ARRAY_SIZE(cm_bf548_devices));

#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
	spi_register_board_info(bf54x_spi_board_info,
			ARRAY_SIZE(bf54x_spi_board_info));
#endif

	return 0;
}

arch_initcall(cm_bf548_init);

/*
 * Subset of devices needed before the regular initcalls run — the
 * console UART/SPORT for early printk output.
 */
static struct platform_device *cm_bf548_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
	&bfin_uart1_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART2
	&bfin_uart2_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART3
	&bfin_uart3_device,
#endif
#endif

#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
	&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
	&bfin_sport1_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
	&bfin_sport2_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
	&bfin_sport3_uart_device,
#endif
#endif
};

/* Hook called by the Blackfin early-platform code before initcalls. */
void __init native_machine_early_platform_add_devices(void)
{
	printk(KERN_INFO "register early platform devices\n");
	early_platform_add_devices(cm_bf548_early_devices,
		ARRAY_SIZE(cm_bf548_early_devices));
}
gpl-2.0
hehopmajieh/linux-3.4-h3
drivers/usb/gadget/amd5536udc.c
4760
86145
/* * amd5536.c -- AMD 5536 UDC high/full speed USB device controller * * Copyright (C) 2005-2007 AMD (http://www.amd.com) * Author: Thomas Dahlmann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /* * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536. * It is a USB Highspeed DMA capable USB device controller. Beside ep0 it * provides 4 IN and 4 OUT endpoints (bulk or interrupt type). * * Make sure that UDC is assigned to port 4 by BIOS settings (port can also * be used as host port) and UOC bits PAD_EN and APU are set (should be done * by BIOS init). * * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0") * can be used with gadget ether. */ /* debug control */ /* #define UDC_VERBOSE */ /* Driver strings */ #define UDC_MOD_DESCRIPTION "AMD 5536 UDC - USB Device Controller" #define UDC_DRIVER_VERSION_STRING "01.00.0206" /* system */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/ioctl.h> #include <linux/fs.h> #include <linux/dmapool.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/prefetch.h> #include <asm/byteorder.h> #include <asm/unaligned.h> /* gadget stack */ #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> /* udc specific */ #include "amd5536udc.h" static void udc_tasklet_disconnect(unsigned long); static void empty_req_queue(struct udc_ep *); static int udc_probe(struct udc *dev); static void 
udc_basic_init(struct udc *dev); static void udc_setup_endpoints(struct udc *dev); static void udc_soft_reset(struct udc *dev); static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep); static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq); static int udc_free_dma_chain(struct udc *dev, struct udc_request *req); static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req, unsigned long buf_len, gfp_t gfp_flags); static int udc_remote_wakeup(struct udc *dev); static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void udc_pci_remove(struct pci_dev *pdev); /* description */ static const char mod_desc[] = UDC_MOD_DESCRIPTION; static const char name[] = "amd5536udc"; /* structure to hold endpoint function pointers */ static const struct usb_ep_ops udc_ep_ops; /* received setup data */ static union udc_setup_data setup_data; /* pointer to device object */ static struct udc *udc; /* irq spin lock for soft reset */ static DEFINE_SPINLOCK(udc_irq_spinlock); /* stall spin lock */ static DEFINE_SPINLOCK(udc_stall_spinlock); /* * slave mode: pending bytes in rx fifo after nyet, * used if EPIN irq came but no req was available */ static unsigned int udc_rxfifo_pending; /* count soft resets after suspend to avoid loop */ static int soft_reset_occured; static int soft_reset_after_usbreset_occured; /* timer */ static struct timer_list udc_timer; static int stop_timer; /* set_rde -- Is used to control enabling of RX DMA. Problem is * that UDC has only one bit (RDE) to enable/disable RX DMA for * all OUT endpoints. So we have to handle race conditions like * when OUT data reaches the fifo but no request was queued yet. * This cannot be solved by letting the RX DMA disabled until a * request gets queued because there may be other OUT packets * in the FIFO (important for not blocking control traffic). * The value of set_rde controls the correspondig timer. 
* * set_rde -1 == not used, means it is alloed to be set to 0 or 1 * set_rde 0 == do not touch RDE, do no start the RDE timer * set_rde 1 == timer function will look whether FIFO has data * set_rde 2 == set by timer function to enable RX DMA on next call */ static int set_rde = -1; static DECLARE_COMPLETION(on_exit); static struct timer_list udc_pollstall_timer; static int stop_pollstall_timer; static DECLARE_COMPLETION(on_pollstall_exit); /* tasklet for usb disconnect */ static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect, (unsigned long) &udc); /* endpoint names used for print */ static const char ep0_string[] = "ep0in"; static const char *const ep_string[] = { ep0_string, "ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk", "ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk", "ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk", "ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk", "ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk", "ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk", "ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk" }; /* DMA usage flag */ static bool use_dma = 1; /* packet per buffer dma */ static bool use_dma_ppb = 1; /* with per descr. 
update */ static bool use_dma_ppb_du; /* buffer fill mode */ static int use_dma_bufferfill_mode; /* full speed only mode */ static bool use_fullspeed; /* tx buffer size for high speed */ static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE; /* module parameters */ module_param(use_dma, bool, S_IRUGO); MODULE_PARM_DESC(use_dma, "true for DMA"); module_param(use_dma_ppb, bool, S_IRUGO); MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode"); module_param(use_dma_ppb_du, bool, S_IRUGO); MODULE_PARM_DESC(use_dma_ppb_du, "true for DMA in packet per buffer mode with descriptor update"); module_param(use_fullspeed, bool, S_IRUGO); MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only"); /*---------------------------------------------------------------------------*/ /* Prints UDC device registers and endpoint irq registers */ static void print_regs(struct udc *dev) { DBG(dev, "------- Device registers -------\n"); DBG(dev, "dev config = %08x\n", readl(&dev->regs->cfg)); DBG(dev, "dev control = %08x\n", readl(&dev->regs->ctl)); DBG(dev, "dev status = %08x\n", readl(&dev->regs->sts)); DBG(dev, "\n"); DBG(dev, "dev int's = %08x\n", readl(&dev->regs->irqsts)); DBG(dev, "dev intmask = %08x\n", readl(&dev->regs->irqmsk)); DBG(dev, "\n"); DBG(dev, "dev ep int's = %08x\n", readl(&dev->regs->ep_irqsts)); DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk)); DBG(dev, "\n"); DBG(dev, "USE DMA = %d\n", use_dma); if (use_dma && use_dma_ppb && !use_dma_ppb_du) { DBG(dev, "DMA mode = PPBNDU (packet per buffer " "WITHOUT desc. update)\n"); dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU"); } else if (use_dma && use_dma_ppb && use_dma_ppb_du) { DBG(dev, "DMA mode = PPBDU (packet per buffer " "WITH desc. 
update)\n"); dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU"); } if (use_dma && use_dma_bufferfill_mode) { DBG(dev, "DMA mode = BF (buffer fill mode)\n"); dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF"); } if (!use_dma) dev_info(&dev->pdev->dev, "FIFO mode\n"); DBG(dev, "-------------------------------------------------------\n"); } /* Masks unused interrupts */ static int udc_mask_unused_interrupts(struct udc *dev) { u32 tmp; /* mask all dev interrupts */ tmp = AMD_BIT(UDC_DEVINT_SVC) | AMD_BIT(UDC_DEVINT_ENUM) | AMD_BIT(UDC_DEVINT_US) | AMD_BIT(UDC_DEVINT_UR) | AMD_BIT(UDC_DEVINT_ES) | AMD_BIT(UDC_DEVINT_SI) | AMD_BIT(UDC_DEVINT_SOF)| AMD_BIT(UDC_DEVINT_SC); writel(tmp, &dev->regs->irqmsk); /* mask all ep interrupts */ writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk); return 0; } /* Enables endpoint 0 interrupts */ static int udc_enable_ep0_interrupts(struct udc *dev) { u32 tmp; DBG(dev, "udc_enable_ep0_interrupts()\n"); /* read irq mask */ tmp = readl(&dev->regs->ep_irqmsk); /* enable ep0 irq's */ tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0) & AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0); writel(tmp, &dev->regs->ep_irqmsk); return 0; } /* Enables device interrupts for SET_INTF and SET_CONFIG */ static int udc_enable_dev_setup_interrupts(struct udc *dev) { u32 tmp; DBG(dev, "enable device interrupts for setup data\n"); /* read irq mask */ tmp = readl(&dev->regs->irqmsk); /* enable SET_INTERFACE, SET_CONFIG and other needed irq's */ tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI) & AMD_UNMASK_BIT(UDC_DEVINT_SC) & AMD_UNMASK_BIT(UDC_DEVINT_UR) & AMD_UNMASK_BIT(UDC_DEVINT_SVC) & AMD_UNMASK_BIT(UDC_DEVINT_ENUM); writel(tmp, &dev->regs->irqmsk); return 0; } /* Calculates fifo start of endpoint based on preceding endpoints */ static int udc_set_txfifo_addr(struct udc_ep *ep) { struct udc *dev; u32 tmp; int i; if (!ep || !(ep->in)) return -EINVAL; dev = ep->dev; ep->txfifo = dev->txfifo; /* traverse ep's */ for (i = 0; i < ep->num; i++) { if (dev->ep[i].regs) { /* read fifo size 
*/ tmp = readl(&dev->ep[i].regs->bufin_framenum); tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE); ep->txfifo += tmp; } } return 0; } /* CNAK pending field: bit0 = ep0in, bit16 = ep0out */ static u32 cnak_pending; static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num) { if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) { DBG(ep->dev, "NAK could not be cleared for ep%d\n", num); cnak_pending |= 1 << (num); ep->naking = 1; } else cnak_pending = cnak_pending & (~(1 << (num))); } /* Enables endpoint, is called by gadget driver */ static int udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc) { struct udc_ep *ep; struct udc *dev; u32 tmp; unsigned long iflags; u8 udc_csr_epix; unsigned maxpacket; if (!usbep || usbep->name == ep0_string || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) return -EINVAL; ep = container_of(usbep, struct udc_ep, ep); dev = ep->dev; DBG(dev, "udc_ep_enable() ep %d\n", ep->num); if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; spin_lock_irqsave(&dev->lock, iflags); ep->desc = desc; ep->halted = 0; /* set traffic type */ tmp = readl(&dev->ep[ep->num].regs->ctl); tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET); writel(tmp, &dev->ep[ep->num].regs->ctl); /* set max packet size */ maxpacket = usb_endpoint_maxp(desc); tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt); tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE); ep->ep.maxpacket = maxpacket; writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt); /* IN ep */ if (ep->in) { /* ep ix in UDC CSR register space */ udc_csr_epix = ep->num; /* set buffer size (tx fifo entries) */ tmp = readl(&dev->ep[ep->num].regs->bufin_framenum); /* double buffering: fifo size = 2 x max packet size */ tmp = AMD_ADDBITS( tmp, maxpacket * UDC_EPIN_BUFF_SIZE_MULT / UDC_DWORD_BYTES, UDC_EPIN_BUFF_SIZE); writel(tmp, &dev->ep[ep->num].regs->bufin_framenum); /* calc. 
tx fifo base addr */ udc_set_txfifo_addr(ep); /* flush fifo */ tmp = readl(&ep->regs->ctl); tmp |= AMD_BIT(UDC_EPCTL_F); writel(tmp, &ep->regs->ctl); /* OUT ep */ } else { /* ep ix in UDC CSR register space */ udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS; /* set max packet size UDC CSR */ tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]); tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT); writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]); if (use_dma && !ep->in) { /* alloc and init BNA dummy request */ ep->bna_dummy_req = udc_alloc_bna_dummy(ep); ep->bna_occurred = 0; } if (ep->num != UDC_EP0OUT_IX) dev->data_ep_enabled = 1; } /* set ep values */ tmp = readl(&dev->csr->ne[udc_csr_epix]); /* max packet */ tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT); /* ep number */ tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM); /* ep direction */ tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR); /* ep type */ tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE); /* ep config */ tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG); /* ep interface */ tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF); /* ep alt */ tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT); /* write reg */ writel(tmp, &dev->csr->ne[udc_csr_epix]); /* enable ep irq */ tmp = readl(&dev->regs->ep_irqmsk); tmp &= AMD_UNMASK_BIT(ep->num); writel(tmp, &dev->regs->ep_irqmsk); /* * clear NAK by writing CNAK * avoid BNA for OUT DMA, don't clear NAK until DMA desc. 
written */ if (!use_dma || ep->in) { tmp = readl(&ep->regs->ctl); tmp |= AMD_BIT(UDC_EPCTL_CNAK); writel(tmp, &ep->regs->ctl); ep->naking = 0; UDC_QUEUE_CNAK(ep, ep->num); } tmp = desc->bEndpointAddress; DBG(dev, "%s enabled\n", usbep->name); spin_unlock_irqrestore(&dev->lock, iflags); return 0; } /* Resets endpoint */ static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep) { u32 tmp; VDBG(ep->dev, "ep-%d reset\n", ep->num); ep->desc = NULL; ep->ep.desc = NULL; ep->ep.ops = &udc_ep_ops; INIT_LIST_HEAD(&ep->queue); ep->ep.maxpacket = (u16) ~0; /* set NAK */ tmp = readl(&ep->regs->ctl); tmp |= AMD_BIT(UDC_EPCTL_SNAK); writel(tmp, &ep->regs->ctl); ep->naking = 1; /* disable interrupt */ tmp = readl(&regs->ep_irqmsk); tmp |= AMD_BIT(ep->num); writel(tmp, &regs->ep_irqmsk); if (ep->in) { /* unset P and IN bit of potential former DMA */ tmp = readl(&ep->regs->ctl); tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P); writel(tmp, &ep->regs->ctl); tmp = readl(&ep->regs->sts); tmp |= AMD_BIT(UDC_EPSTS_IN); writel(tmp, &ep->regs->sts); /* flush the fifo */ tmp = readl(&ep->regs->ctl); tmp |= AMD_BIT(UDC_EPCTL_F); writel(tmp, &ep->regs->ctl); } /* reset desc pointer */ writel(0, &ep->regs->desptr); } /* Disables endpoint, is called by gadget driver */ static int udc_ep_disable(struct usb_ep *usbep) { struct udc_ep *ep = NULL; unsigned long iflags; if (!usbep) return -EINVAL; ep = container_of(usbep, struct udc_ep, ep); if (usbep->name == ep0_string || !ep->desc) return -EINVAL; DBG(ep->dev, "Disable ep-%d\n", ep->num); spin_lock_irqsave(&ep->dev->lock, iflags); udc_free_request(&ep->ep, &ep->bna_dummy_req->req); empty_req_queue(ep); ep_init(ep->dev->regs, ep); spin_unlock_irqrestore(&ep->dev->lock, iflags); return 0; } /* Allocates request packet, called by gadget driver */ static struct usb_request * udc_alloc_request(struct usb_ep *usbep, gfp_t gfp) { struct udc_request *req; struct udc_data_dma *dma_desc; struct udc_ep *ep; if (!usbep) return NULL; ep = container_of(usbep, 
struct udc_ep, ep); VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num); req = kzalloc(sizeof(struct udc_request), gfp); if (!req) return NULL; req->req.dma = DMA_DONT_USE; INIT_LIST_HEAD(&req->queue); if (ep->dma) { /* ep0 in requests are allocated from data pool here */ dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp, &req->td_phys); if (!dma_desc) { kfree(req); return NULL; } VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, " "td_phys = %lx\n", req, dma_desc, (unsigned long)req->td_phys); /* prevent from using desc. - set HOST BUSY */ dma_desc->status = AMD_ADDBITS(dma_desc->status, UDC_DMA_STP_STS_BS_HOST_BUSY, UDC_DMA_STP_STS_BS); dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE); req->td_data = dma_desc; req->td_data_last = NULL; req->chain_len = 1; } return &req->req; } /* Frees request packet, called by gadget driver */ static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq) { struct udc_ep *ep; struct udc_request *req; if (!usbep || !usbreq) return; ep = container_of(usbep, struct udc_ep, ep); req = container_of(usbreq, struct udc_request, req); VDBG(ep->dev, "free_req req=%p\n", req); BUG_ON(!list_empty(&req->queue)); if (req->td_data) { VDBG(ep->dev, "req->td_data=%p\n", req->td_data); /* free dma chain if created */ if (req->chain_len > 1) udc_free_dma_chain(ep->dev, req); pci_pool_free(ep->dev->data_requests, req->td_data, req->td_phys); } kfree(req); } /* Init BNA dummy descriptor for HOST BUSY and pointing to itself */ static void udc_init_bna_dummy(struct udc_request *req) { if (req) { /* set last bit */ req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L); /* set next pointer to itself */ req->td_data->next = req->td_phys; /* set HOST BUSY */ req->td_data->status = AMD_ADDBITS(req->td_data->status, UDC_DMA_STP_STS_BS_DMA_DONE, UDC_DMA_STP_STS_BS); #ifdef UDC_VERBOSE pr_debug("bna desc = %p, sts = %08x\n", req->td_data, req->td_data->status); #endif } } /* Allocate BNA dummy descriptor */ static struct udc_request 
*udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct udc_request *req = NULL;
	struct usb_request *_req = NULL;

	/* alloc the dummy request */
	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (_req) {
		req = container_of(_req, struct udc_request, req);
		ep->bna_dummy_req = req;
		udc_init_bna_dummy(req);
	}
	/* NOTE(review): NULL on allocation failure — callers must check */
	return req;
}

/* Write data to TX fifo for IN packets */
static void udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8	*req_buf;
	u32	*buf;
	int	i, j;
	unsigned bytes = 0;
	unsigned remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	prefetch(req_buf);
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	/* write at most one max-packet-sized chunk per call */
	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		writel(*(buf + i), ep->txfifo);

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
							ep->txfifo);
	}

	/* dummy write confirm */
	writel(0, &ep->regs->confirm);
}

/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int	i;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (i = 0; i < dwords; i++)
		*(buf + i) = readl(dev->rxfifo);
	return 0;
}

/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	int	i, j;
	u32	tmp;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);

	/* remaining bytes must be read by byte access */
	if (bytes % UDC_DWORD_BYTES) {
		tmp = readl(dev->rxfifo);
		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
			/* peel one byte at a time off the last dword */
			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
			tmp = tmp >> UDC_BITS_PER_BYTE;
		}
	}
	return 0;
}

/* Read data from RX fifo for OUT transfers */
static int udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8 *buf;
	unsigned
buf_space; unsigned bytes = 0; unsigned finished = 0; /* received number bytes */ bytes = readl(&ep->regs->sts); bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE); buf_space = req->req.length - req->req.actual; buf = req->req.buf + req->req.actual; if (bytes > buf_space) { if ((buf_space % ep->ep.maxpacket) != 0) { DBG(ep->dev, "%s: rx %d bytes, rx-buf space = %d bytesn\n", ep->ep.name, bytes, buf_space); req->req.status = -EOVERFLOW; } bytes = buf_space; } req->req.actual += bytes; /* last packet ? */ if (((bytes % ep->ep.maxpacket) != 0) || (!bytes) || ((req->req.actual == req->req.length) && !req->req.zero)) finished = 1; /* read rx fifo bytes */ VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes); udc_rxfifo_read_bytes(ep->dev, buf, bytes); return finished; } /* create/re-init a DMA descriptor or a DMA descriptor chain */ static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp) { int retval = 0; u32 tmp; VDBG(ep->dev, "prep_dma\n"); VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n", ep->num, req->td_data); /* set buffer pointer */ req->td_data->bufptr = req->req.dma; /* set last bit */ req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L); /* build/re-init dma chain if maxpkt scatter mode, not for EP0 */ if (use_dma_ppb) { retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp); if (retval != 0) { if (retval == -ENOMEM) DBG(ep->dev, "Out of DMA memory\n"); return retval; } if (ep->in) { if (req->req.length == ep->ep.maxpacket) { /* write tx bytes */ req->td_data->status = AMD_ADDBITS(req->td_data->status, ep->ep.maxpacket, UDC_DMA_IN_STS_TXBYTES); } } } if (ep->in) { VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d " "maxpacket=%d ep%d\n", use_dma_ppb, req->req.length, ep->ep.maxpacket, ep->num); /* * if bytes < max packet then tx bytes must * be written in packet per buffer mode */ if (!use_dma_ppb || req->req.length < ep->ep.maxpacket || ep->num == UDC_EP0OUT_IX || ep->num == UDC_EP0IN_IX) { /* write tx bytes */ 
req->td_data->status = AMD_ADDBITS(req->td_data->status, req->req.length, UDC_DMA_IN_STS_TXBYTES); /* reset frame num */ req->td_data->status = AMD_ADDBITS(req->td_data->status, 0, UDC_DMA_IN_STS_FRAMENUM); } /* set HOST BUSY */ req->td_data->status = AMD_ADDBITS(req->td_data->status, UDC_DMA_STP_STS_BS_HOST_BUSY, UDC_DMA_STP_STS_BS); } else { VDBG(ep->dev, "OUT set host ready\n"); /* set HOST READY */ req->td_data->status = AMD_ADDBITS(req->td_data->status, UDC_DMA_STP_STS_BS_HOST_READY, UDC_DMA_STP_STS_BS); /* clear NAK by writing CNAK */ if (ep->naking) { tmp = readl(&ep->regs->ctl); tmp |= AMD_BIT(UDC_EPCTL_CNAK); writel(tmp, &ep->regs->ctl); ep->naking = 0; UDC_QUEUE_CNAK(ep, ep->num); } } return retval; } /* Completes request packet ... caller MUST hold lock */ static void complete_req(struct udc_ep *ep, struct udc_request *req, int sts) __releases(ep->dev->lock) __acquires(ep->dev->lock) { struct udc *dev; unsigned halted; VDBG(ep->dev, "complete_req(): ep%d\n", ep->num); dev = ep->dev; /* unmap DMA */ if (ep->dma) usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in); halted = ep->halted; ep->halted = 1; /* set new status if pending */ if (req->req.status == -EINPROGRESS) req->req.status = sts; /* remove from ep queue */ list_del_init(&req->queue); VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n", &req->req, req->req.length, ep->ep.name, sts); spin_unlock(&dev->lock); req->req.complete(&ep->ep, &req->req); spin_lock(&dev->lock); ep->halted = halted; } /* frees pci pool descriptors of a DMA chain */ static int udc_free_dma_chain(struct udc *dev, struct udc_request *req) { int ret_val = 0; struct udc_data_dma *td; struct udc_data_dma *td_last = NULL; unsigned int i; DBG(dev, "free chain req = %p\n", req); /* do not free first desc., will be done by free for request */ td_last = req->td_data; td = phys_to_virt(td_last->next); for (i = 1; i < req->chain_len; i++) { pci_pool_free(dev->data_requests, td, (dma_addr_t) td_last->next); td_last = 
td; td = phys_to_virt(td_last->next); } return ret_val; } /* Iterates to the end of a DMA chain and returns last descriptor */ static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req) { struct udc_data_dma *td; td = req->td_data; while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) td = phys_to_virt(td->next); return td; } /* Iterates to the end of a DMA chain and counts bytes received */ static u32 udc_get_ppbdu_rxbytes(struct udc_request *req) { struct udc_data_dma *td; u32 count; td = req->td_data; /* received number bytes */ count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES); while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) { td = phys_to_virt(td->next); /* received number bytes */ if (td) { count += AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES); } } return count; } /* Creates or re-inits a DMA chain */ static int udc_create_dma_chain( struct udc_ep *ep, struct udc_request *req, unsigned long buf_len, gfp_t gfp_flags ) { unsigned long bytes = req->req.length; unsigned int i; dma_addr_t dma_addr; struct udc_data_dma *td = NULL; struct udc_data_dma *last = NULL; unsigned long txbytes; unsigned create_new_chain = 0; unsigned len; VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n", bytes, buf_len); dma_addr = DMA_DONT_USE; /* unset L bit in first desc for OUT */ if (!ep->in) req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L); /* alloc only new desc's if not already available */ len = req->req.length / ep->ep.maxpacket; if (req->req.length % ep->ep.maxpacket) len++; if (len > req->chain_len) { /* shorter chain already allocated before */ if (req->chain_len > 1) udc_free_dma_chain(ep->dev, req); req->chain_len = len; create_new_chain = 1; } td = req->td_data; /* gen. required number of descriptors and buffers */ for (i = buf_len; i < bytes; i += buf_len) { /* create or determine next desc. 
*/ if (create_new_chain) { td = pci_pool_alloc(ep->dev->data_requests, gfp_flags, &dma_addr); if (!td) return -ENOMEM; td->status = 0; } else if (i == buf_len) { /* first td */ td = (struct udc_data_dma *) phys_to_virt( req->td_data->next); td->status = 0; } else { td = (struct udc_data_dma *) phys_to_virt(last->next); td->status = 0; } if (td) td->bufptr = req->req.dma + i; /* assign buffer */ else break; /* short packet ? */ if ((bytes - i) >= buf_len) { txbytes = buf_len; } else { /* short packet */ txbytes = bytes - i; } /* link td and assign tx bytes */ if (i == buf_len) { if (create_new_chain) req->td_data->next = dma_addr; /* else req->td_data->next = virt_to_phys(td); */ /* write tx bytes */ if (ep->in) { /* first desc */ req->td_data->status = AMD_ADDBITS(req->td_data->status, ep->ep.maxpacket, UDC_DMA_IN_STS_TXBYTES); /* second desc */ td->status = AMD_ADDBITS(td->status, txbytes, UDC_DMA_IN_STS_TXBYTES); } } else { if (create_new_chain) last->next = dma_addr; /* else last->next = virt_to_phys(td); */ if (ep->in) { /* write tx bytes */ td->status = AMD_ADDBITS(td->status, txbytes, UDC_DMA_IN_STS_TXBYTES); } } last = td; } /* set last bit */ if (td) { td->status |= AMD_BIT(UDC_DMA_IN_STS_L); /* last desc. 
points to itself */ req->td_data_last = td; } return 0; } /* Enabling RX DMA */ static void udc_set_rde(struct udc *dev) { u32 tmp; VDBG(dev, "udc_set_rde()\n"); /* stop RDE timer */ if (timer_pending(&udc_timer)) { set_rde = 0; mod_timer(&udc_timer, jiffies - 1); } /* set RDE */ tmp = readl(&dev->regs->ctl); tmp |= AMD_BIT(UDC_DEVCTL_RDE); writel(tmp, &dev->regs->ctl); } /* Queues a request packet, called by gadget driver */ static int udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp) { int retval = 0; u8 open_rxfifo = 0; unsigned long iflags; struct udc_ep *ep; struct udc_request *req; struct udc *dev; u32 tmp; /* check the inputs */ req = container_of(usbreq, struct udc_request, req); if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf || !list_empty(&req->queue)) return -EINVAL; ep = container_of(usbep, struct udc_ep, ep); if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX)) return -EINVAL; VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in); dev = ep->dev; if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; /* map dma (usually done before) */ if (ep->dma) { VDBG(dev, "DMA map req %p\n", req); retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in); if (retval) return retval; } VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n", usbep->name, usbreq, usbreq->length, req->td_data, usbreq->buf); spin_lock_irqsave(&dev->lock, iflags); usbreq->actual = 0; usbreq->status = -EINPROGRESS; req->dma_done = 0; /* on empty queue just do first transfer */ if (list_empty(&ep->queue)) { /* zlp */ if (usbreq->length == 0) { /* IN zlp's are handled by hardware */ complete_req(ep, req, 0); VDBG(dev, "%s: zlp\n", ep->ep.name); /* * if set_config or set_intf is waiting for ack by zlp * then set CSR_DONE */ if (dev->set_cfg_not_acked) { tmp = readl(&dev->regs->ctl); tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE); writel(tmp, &dev->regs->ctl); dev->set_cfg_not_acked = 0; } /* setup command is ACK'ed now by 
zlp */ if (dev->waiting_zlp_ack_ep0in) { /* clear NAK by writing CNAK in EP0_IN */ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); tmp |= AMD_BIT(UDC_EPCTL_CNAK); writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); dev->ep[UDC_EP0IN_IX].naking = 0; UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX); dev->waiting_zlp_ack_ep0in = 0; } goto finished; } if (ep->dma) { retval = prep_dma(ep, req, gfp); if (retval != 0) goto finished; /* write desc pointer to enable DMA */ if (ep->in) { /* set HOST READY */ req->td_data->status = AMD_ADDBITS(req->td_data->status, UDC_DMA_IN_STS_BS_HOST_READY, UDC_DMA_IN_STS_BS); } /* disabled rx dma while descriptor update */ if (!ep->in) { /* stop RDE timer */ if (timer_pending(&udc_timer)) { set_rde = 0; mod_timer(&udc_timer, jiffies - 1); } /* clear RDE */ tmp = readl(&dev->regs->ctl); tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE); writel(tmp, &dev->regs->ctl); open_rxfifo = 1; /* * if BNA occurred then let BNA dummy desc. * point to current desc. */ if (ep->bna_occurred) { VDBG(dev, "copy to BNA dummy desc.\n"); memcpy(ep->bna_dummy_req->td_data, req->td_data, sizeof(struct udc_data_dma)); } } /* write desc pointer */ writel(req->td_phys, &ep->regs->desptr); /* clear NAK by writing CNAK */ if (ep->naking) { tmp = readl(&ep->regs->ctl); tmp |= AMD_BIT(UDC_EPCTL_CNAK); writel(tmp, &ep->regs->ctl); ep->naking = 0; UDC_QUEUE_CNAK(ep, ep->num); } if (ep->in) { /* enable ep irq */ tmp = readl(&dev->regs->ep_irqmsk); tmp &= AMD_UNMASK_BIT(ep->num); writel(tmp, &dev->regs->ep_irqmsk); } } else if (ep->in) { /* enable ep irq */ tmp = readl(&dev->regs->ep_irqmsk); tmp &= AMD_UNMASK_BIT(ep->num); writel(tmp, &dev->regs->ep_irqmsk); } } else if (ep->dma) { /* * prep_dma not used for OUT ep's, this is not possible * for PPB modes, because of chain creation reasons */ if (ep->in) { retval = prep_dma(ep, req, gfp); if (retval != 0) goto finished; } } VDBG(dev, "list_add\n"); /* add request to ep queue */ if (req) { list_add_tail(&req->queue, &ep->queue); /* 
open rxfifo if out data queued */ if (open_rxfifo) { /* enable DMA */ req->dma_going = 1; udc_set_rde(dev); if (ep->num != UDC_EP0OUT_IX) dev->data_ep_queued = 1; } /* stop OUT naking */ if (!ep->in) { if (!use_dma && udc_rxfifo_pending) { DBG(dev, "udc_queue(): pending bytes in " "rxfifo after nyet\n"); /* * read pending bytes afer nyet: * referring to isr */ if (udc_rxfifo_read(ep, req)) { /* finish */ complete_req(ep, req, 0); } udc_rxfifo_pending = 0; } } } finished: spin_unlock_irqrestore(&dev->lock, iflags); return retval; } /* Empty request queue of an endpoint; caller holds spinlock */ static void empty_req_queue(struct udc_ep *ep) { struct udc_request *req; ep->halted = 1; while (!list_empty(&ep->queue)) { req = list_entry(ep->queue.next, struct udc_request, queue); complete_req(ep, req, -ESHUTDOWN); } } /* Dequeues a request packet, called by gadget driver */ static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq) { struct udc_ep *ep; struct udc_request *req; unsigned halted; unsigned long iflags; ep = container_of(usbep, struct udc_ep, ep); if (!usbep || !usbreq || (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))) return -EINVAL; req = container_of(usbreq, struct udc_request, req); spin_lock_irqsave(&ep->dev->lock, iflags); halted = ep->halted; ep->halted = 1; /* request in processing or next one */ if (ep->queue.next == &req->queue) { if (ep->dma && req->dma_going) { if (ep->in) ep->cancel_transfer = 1; else { u32 tmp; u32 dma_sts; /* stop potential receive DMA */ tmp = readl(&udc->regs->ctl); writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE), &udc->regs->ctl); /* * Cancel transfer later in ISR * if descriptor was touched. 
*/ dma_sts = AMD_GETBITS(req->td_data->status, UDC_DMA_OUT_STS_BS); if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY) ep->cancel_transfer = 1; else { udc_init_bna_dummy(ep->req); writel(ep->bna_dummy_req->td_phys, &ep->regs->desptr); } writel(tmp, &udc->regs->ctl); } } } complete_req(ep, req, -ECONNRESET); ep->halted = halted; spin_unlock_irqrestore(&ep->dev->lock, iflags); return 0; } /* Halt or clear halt of endpoint */ static int udc_set_halt(struct usb_ep *usbep, int halt) { struct udc_ep *ep; u32 tmp; unsigned long iflags; int retval = 0; if (!usbep) return -EINVAL; pr_debug("set_halt %s: halt=%d\n", usbep->name, halt); ep = container_of(usbep, struct udc_ep, ep); if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX)) return -EINVAL; if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; spin_lock_irqsave(&udc_stall_spinlock, iflags); /* halt or clear halt */ if (halt) { if (ep->num == 0) ep->dev->stall_ep0in = 1; else { /* * set STALL * rxfifo empty not taken into acount */ tmp = readl(&ep->regs->ctl); tmp |= AMD_BIT(UDC_EPCTL_S); writel(tmp, &ep->regs->ctl); ep->halted = 1; /* setup poll timer */ if (!timer_pending(&udc_pollstall_timer)) { udc_pollstall_timer.expires = jiffies + HZ * UDC_POLLSTALL_TIMER_USECONDS / (1000 * 1000); if (!stop_pollstall_timer) { DBG(ep->dev, "start polltimer\n"); add_timer(&udc_pollstall_timer); } } } } else { /* ep is halted by set_halt() before */ if (ep->halted) { tmp = readl(&ep->regs->ctl); /* clear stall bit */ tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S); /* clear NAK by writing CNAK */ tmp |= AMD_BIT(UDC_EPCTL_CNAK); writel(tmp, &ep->regs->ctl); ep->halted = 0; UDC_QUEUE_CNAK(ep, ep->num); } } spin_unlock_irqrestore(&udc_stall_spinlock, iflags); return retval; } /* gadget interface */ static const struct usb_ep_ops udc_ep_ops = { .enable = udc_ep_enable, .disable = udc_ep_disable, .alloc_request = udc_alloc_request, .free_request = udc_free_request, .queue = udc_queue, .dequeue = udc_dequeue, 
	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};

/*-------------------------------------------------------------------------*/

/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}

/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
	struct udc	*dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct udc, gadget);
	udc_remote_wakeup(dev);

	return 0;
}

/* forward declarations for the usb_gadget_ops table below */
static int amd5536_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *));
static int amd5536_stop(struct usb_gadget_driver *driver);

/* gadget operations */
static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
	.start		= amd5536_start,
	.stop		= amd5536_stop,
};

/* Setups endpoint parameters, adds endpoints to linked list */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
						&dev->gadget.ep_list);

	/* fifo config */
	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		/* hs_tx_buf is a module parameter (default UDC_EPIN_BUFF_SIZE) */
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}

/* init registers at driver load time */
static int startup_registers(struct udc *dev)
{
	u32 tmp;

	/* init controller by soft reset */
	udc_soft_reset(dev);

	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);

	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);

	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed)
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	else
		tmp =
AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD); writel(tmp, &dev->regs->cfg); return 0; } /* Inits UDC context */ static void udc_basic_init(struct udc *dev) { u32 tmp; DBG(dev, "udc_basic_init()\n"); dev->gadget.speed = USB_SPEED_UNKNOWN; /* stop RDE timer */ if (timer_pending(&udc_timer)) { set_rde = 0; mod_timer(&udc_timer, jiffies - 1); } /* stop poll stall timer */ if (timer_pending(&udc_pollstall_timer)) mod_timer(&udc_pollstall_timer, jiffies - 1); /* disable DMA */ tmp = readl(&dev->regs->ctl); tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE); tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE); writel(tmp, &dev->regs->ctl); /* enable dynamic CSR programming */ tmp = readl(&dev->regs->cfg); tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG); /* set self powered */ tmp |= AMD_BIT(UDC_DEVCFG_SP); /* set remote wakeupable */ tmp |= AMD_BIT(UDC_DEVCFG_RWKP); writel(tmp, &dev->regs->cfg); make_ep_lists(dev); dev->data_ep_enabled = 0; dev->data_ep_queued = 0; } /* Sets initial endpoint parameters */ static void udc_setup_endpoints(struct udc *dev) { struct udc_ep *ep; u32 tmp; u32 reg; DBG(dev, "udc_setup_endpoints()\n"); /* read enum speed */ tmp = readl(&dev->regs->sts); tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED); if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) dev->gadget.speed = USB_SPEED_HIGH; else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) dev->gadget.speed = USB_SPEED_FULL; /* set basic ep parameters */ for (tmp = 0; tmp < UDC_EP_NUM; tmp++) { ep = &dev->ep[tmp]; ep->dev = dev; ep->ep.name = ep_string[tmp]; ep->num = tmp; /* txfifo size is calculated at enable time */ ep->txfifo = dev->txfifo; /* fifo size */ if (tmp < UDC_EPIN_NUM) { ep->fifo_depth = UDC_TXFIFO_SIZE; ep->in = 1; } else { ep->fifo_depth = UDC_RXFIFO_SIZE; ep->in = 0; } ep->regs = &dev->ep_regs[tmp]; /* * ep will be reset only if ep was not enabled before to avoid * disabling ep interrupts when ENUM interrupt occurs but ep is * not enabled by gadget driver */ if (!ep->desc) ep_init(dev->regs, ep); if (use_dma) { /* * ep->dma is not 
really used, just to indicate that * DMA is active: remove this * dma regs = dev control regs */ ep->dma = &dev->regs->ctl; /* nak OUT endpoints until enable - not for ep0 */ if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX && tmp > UDC_EPIN_NUM) { /* set NAK */ reg = readl(&dev->ep[tmp].regs->ctl); reg |= AMD_BIT(UDC_EPCTL_SNAK); writel(reg, &dev->ep[tmp].regs->ctl); dev->ep[tmp].naking = 1; } } } /* EP0 max packet */ if (dev->gadget.speed == USB_SPEED_FULL) { dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE; dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_FS_EP0OUT_MAX_PKT_SIZE; } else if (dev->gadget.speed == USB_SPEED_HIGH) { dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE; dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE; } /* * with suspend bug workaround, ep0 params for gadget driver * are set at gadget driver bind() call */ dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep; dev->ep[UDC_EP0IN_IX].halted = 0; INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); /* init cfg/alt/int */ dev->cur_config = 0; dev->cur_intf = 0; dev->cur_alt = 0; } /* Bringup after Connect event, initial bringup to be ready for ep0 events */ static void usb_connect(struct udc *dev) { dev_info(&dev->pdev->dev, "USB Connect\n"); dev->connected = 1; /* put into initial config */ udc_basic_init(dev); /* enable device setup interrupts */ udc_enable_dev_setup_interrupts(dev); } /* * Calls gadget with disconnect event and resets the UDC and makes * initial bringup to be ready for ep0 events */ static void usb_disconnect(struct udc *dev) { dev_info(&dev->pdev->dev, "USB Disconnect\n"); dev->connected = 0; /* mask interrupts */ udc_mask_unused_interrupts(dev); /* REVISIT there doesn't seem to be a point to having this * talk to a tasklet ... do it directly, we already hold * the spinlock needed to process the disconnect. 
*/ tasklet_schedule(&disconnect_tasklet); } /* Tasklet for disconnect to be outside of interrupt context */ static void udc_tasklet_disconnect(unsigned long par) { struct udc *dev = (struct udc *)(*((struct udc **) par)); u32 tmp; DBG(dev, "Tasklet disconnect\n"); spin_lock_irq(&dev->lock); if (dev->driver) { spin_unlock(&dev->lock); dev->driver->disconnect(&dev->gadget); spin_lock(&dev->lock); /* empty queues */ for (tmp = 0; tmp < UDC_EP_NUM; tmp++) empty_req_queue(&dev->ep[tmp]); } /* disable ep0 */ ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]); if (!soft_reset_occured) { /* init controller by soft reset */ udc_soft_reset(dev); soft_reset_occured++; } /* re-enable dev interrupts */ udc_enable_dev_setup_interrupts(dev); /* back to full speed ? */ if (use_fullspeed) { tmp = readl(&dev->regs->cfg); tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD); writel(tmp, &dev->regs->cfg); } spin_unlock_irq(&dev->lock); } /* Reset the UDC core */ static void udc_soft_reset(struct udc *dev) { unsigned long flags; DBG(dev, "Soft reset\n"); /* * reset possible waiting interrupts, because int. * status is lost after soft reset, * ep int. status reset */ writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts); /* device int. 
status reset */ writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts); spin_lock_irqsave(&udc_irq_spinlock, flags); writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg); readl(&dev->regs->cfg); spin_unlock_irqrestore(&udc_irq_spinlock, flags); } /* RDE timer callback to set RDE bit */ static void udc_timer_function(unsigned long v) { u32 tmp; spin_lock_irq(&udc_irq_spinlock); if (set_rde > 0) { /* * open the fifo if fifo was filled on last timer call * conditionally */ if (set_rde > 1) { /* set RDE to receive setup data */ tmp = readl(&udc->regs->ctl); tmp |= AMD_BIT(UDC_DEVCTL_RDE); writel(tmp, &udc->regs->ctl); set_rde = -1; } else if (readl(&udc->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) { /* * if fifo empty setup polling, do not just * open the fifo */ udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV; if (!stop_timer) add_timer(&udc_timer); } else { /* * fifo contains data now, setup timer for opening * the fifo when timer expires to be able to receive * setup packets, when data packets gets queued by * gadget layer then timer will forced to expire with * set_rde=0 (RDE is set in udc_queue()) */ set_rde++; /* debug: lhadmot_timer_start = 221070 */ udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS; if (!stop_timer) add_timer(&udc_timer); } } else set_rde = -1; /* RDE was set by udc_queue() */ spin_unlock_irq(&udc_irq_spinlock); if (stop_timer) complete(&on_exit); } /* Handle halt state, used in stall poll timer */ static void udc_handle_halt_state(struct udc_ep *ep) { u32 tmp; /* set stall as long not halted */ if (ep->halted == 1) { tmp = readl(&ep->regs->ctl); /* STALL cleared ? */ if (!(tmp & AMD_BIT(UDC_EPCTL_S))) { /* * FIXME: MSC spec requires that stall remains * even on receivng of CLEAR_FEATURE HALT. So * we would set STALL again here to be compliant. * But with current mass storage drivers this does * not work (would produce endless host retries). * So we clear halt on CLEAR_FEATURE. 
* DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);*/

			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
}

/* Stall timer callback to poll S bit and set it again after */
static void udc_pollstall_timer_function(unsigned long v)
{
	struct udc_ep *ep;
	int halted = 0;

	spin_lock_irq(&udc_stall_spinlock);
	/*
	 * only one IN and OUT endpoints are handled
	 * IN poll stall
	 */
	ep = &udc->ep[UDC_EPIN_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;
	/* OUT poll stall */
	ep = &udc->ep[UDC_EPOUT_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;

	/* setup timer again when still halted */
	if (!stop_pollstall_timer && halted) {
		udc_pollstall_timer.expires = jiffies +
			HZ * UDC_POLLSTALL_TIMER_USECONDS / (1000 * 1000);
		add_timer(&udc_pollstall_timer);
	}
	spin_unlock_irq(&udc_stall_spinlock);

	if (stop_pollstall_timer)
		complete(&on_pollstall_exit);
}

/* Inits endpoint 0 so that SETUP packets are processed */
static void activate_control_endpoints(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "activate_control_endpoints\n");

	/* flush fifo */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_F);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);

	/* set ep0 directions */
	dev->ep[UDC_EP0IN_IX].in = 1;
	dev->ep[UDC_EP0OUT_IX].in = 0;

	/* set buffer size (tx fifo entries) of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);

	/* set max packet size of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0_OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0 in UDC CSR */
	tmp = readl(&dev->csr->ne[0]);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	writel(tmp, &dev->csr->ne[0]);

	if (use_dma) {
		dev->ep[UDC_EP0OUT_IX].td->status |=
			AMD_BIT(UDC_DMA_OUT_STS_L);
		/* write dma desc address */
		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
		/* stop RDE timer */
		if (timer_pending(&udc_timer)) {
			set_rde = 0;
			mod_timer(&udc_timer, jiffies - 1);
		}
		/* stop pollstall timer */
		if (timer_pending(&udc_pollstall_timer))
			mod_timer(&udc_pollstall_timer, jiffies - 1);
		/* enable DMA */
		tmp = readl(&dev->regs->ctl);
		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
				| AMD_BIT(UDC_DEVCTL_RDE)
				| AMD_BIT(UDC_DEVCTL_TDE);
		if (use_dma_bufferfill_mode)
			tmp |= AMD_BIT(UDC_DEVCTL_BF);
		else if (use_dma_ppb_du)
			tmp |= AMD_BIT(UDC_DEVCTL_DU);
		writel(tmp, &dev->regs->ctl);
	}

	/* clear NAK by writing CNAK for EP0IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
	dev->ep[UDC_EP0IN_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

	/* clear NAK by writing CNAK for EP0OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
	dev->ep[UDC_EP0OUT_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
}

/* Make endpoint 0 ready for control traffic */
static int setup_ep0(struct udc *dev)
{
	activate_control_endpoints(dev);
	/* enable ep0 interrupts */
	udc_enable_ep0_interrupts(dev);
	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);

	return 0;
}

/* Called by gadget driver to register itself */
static int amd5536_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
{
	struct udc *dev = udc;
	int retval;
	u32 tmp;

	if (!driver || !bind || !driver->setup
			|| driver->max_speed < USB_SPEED_HIGH)
		return -EINVAL;
	if (!dev)
		return -ENODEV;
	if (dev->driver)
		return -EBUSY;

	driver->driver.bus = NULL;
	dev->driver = driver;
	dev->gadget.dev.driver = &driver->driver;

	retval = bind(&dev->gadget);

	/* Some gadget drivers use both ep0 directions.
	 * NOTE: to gadget driver, ep0 is just one endpoint...
 */
	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
		dev->ep[UDC_EP0IN_IX].ep.driver_data;

	if (retval) {
		DBG(dev, "binding to %s returning %d\n",
				driver->driver.name, retval);
		dev->driver = NULL;
		dev->gadget.dev.driver = NULL;
		return retval;
	}

	/* get ready for ep0 traffic */
	setup_ep0(dev);

	/* clear SD */
	tmp = readl(&dev->regs->ctl);
	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	usb_connect(dev);

	return 0;
}

/* shutdown requests and disconnect from gadget */
static void
shutdown(struct udc *dev, struct usb_gadget_driver *driver)
__releases(dev->lock)
__acquires(dev->lock)
{
	int tmp;

	/* gadget disconnect callback runs unlocked (see sparse
	 * __releases/__acquires annotations above) */
	if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	/* empty queues and init hardware */
	udc_basic_init(dev);

	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
		empty_req_queue(&dev->ep[tmp]);

	udc_setup_endpoints(dev);
}

/* Called by gadget driver to unregister itself */
static int amd5536_stop(struct usb_gadget_driver *driver)
{
	struct udc *dev = udc;
	unsigned long flags;
	u32 tmp;

	if (!dev)
		return -ENODEV;
	if (!driver || driver != dev->driver || !driver->unbind)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	udc_mask_unused_interrupts(dev);
	shutdown(dev, driver);
	spin_unlock_irqrestore(&dev->lock, flags);

	driver->unbind(&dev->gadget);
	dev->gadget.dev.driver = NULL;
	dev->driver = NULL;

	/* set SD */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	DBG(dev, "%s: unregistered\n", driver->driver.name);

	return 0;
}

/* Clear pending NAK bits */
static void udc_process_cnak_queue(struct udc *dev)
{
	u32 tmp;
	u32 reg;

	/* check epin's */
	DBG(dev, "CNAK pending queue processing\n");
	for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
		if (cnak_pending & (1 << tmp)) {
			DBG(dev, "CNAK pending for ep%d\n", tmp);
			/* clear NAK by writing CNAK */
			reg = readl(&dev->ep[tmp].regs->ctl);
			reg |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(reg, &dev->ep[tmp].regs->ctl);
			dev->ep[tmp].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
		}
	}
	/* ... and ep0out */
	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
		/* clear NAK by writing CNAK */
		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
		reg |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
		dev->ep[UDC_EP0OUT_IX].naking = 0;
		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
				dev->ep[UDC_EP0OUT_IX].num);
	}
}

/* Enabling RX DMA after setup packet */
static void udc_ep0_set_rde(struct udc *dev)
{
	if (use_dma) {
		/*
		 * only enable RXDMA when no data endpoint enabled
		 * or data is queued
		 */
		if (!dev->data_ep_enabled || dev->data_ep_queued) {
			udc_set_rde(dev);
		} else {
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data endpoints to early)
			 */
			if (set_rde != 0 && !timer_pending(&udc_timer)) {
				udc_timer.expires =
					jiffies + HZ/UDC_RDE_TIMER_DIV;
				set_rde = 1;
				if (!stop_timer)
					add_timer(&udc_timer);
			}
		}
	}
}

/* Interrupt handler for data OUT traffic */
static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned int count;
	struct udc_data_dma *td = NULL;
	unsigned dma_done;

	VDBG(dev, "ep%d irq\n", ep_ix);
	ep = &dev->ep[ep_ix];

	tmp = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA event ? */
		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
					ep->num, readl(&ep->regs->desptr));
			/* clear BNA */
			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
			if (!ep->cancel_transfer)
				ep->bna_occurred = 1;
			else
				ep->cancel_transfer = 0;
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ?
 */
	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
		/* clear HE */
		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	if (!list_empty(&ep->queue)) {
		/* next request */
		req = list_entry(ep->queue.next,
			struct udc_request, queue);
	} else {
		req = NULL;
		udc_rxfifo_pending = 1;
	}
	VDBG(dev, "req = %p\n", req);
	/* fifo mode */
	if (!use_dma) {
		/* read fifo */
		if (req && udc_rxfifo_read(ep, req)) {
			ret_val = IRQ_HANDLED;
			/* finish */
			complete_req(ep, req, 0);
			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request, queue);
			} else
				req = NULL;
		}
	/* DMA */
	} else if (!ep->cancel_transfer && req != NULL) {
		ret_val = IRQ_HANDLED;

		/* check for DMA done */
		if (!use_dma_ppb) {
			dma_done = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_BS);
		/* packet per buffer mode - rx bytes */
		} else {
			/*
			 * if BNA occurred then recover desc. from
			 * BNA dummy desc.
			 */
			if (ep->bna_occurred) {
				VDBG(dev, "Recover desc. from BNA dummy\n");
				memcpy(req->td_data, ep->bna_dummy_req->td_data,
						sizeof(struct udc_data_dma));
				ep->bna_occurred = 0;
				udc_init_bna_dummy(ep->req);
			}
			td = udc_get_last_dma_desc(req);
			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
		}
		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
			/* buffer fill mode - rx bytes */
			if (!use_dma_ppb) {
				/* received number bytes */
				count = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_RXBYTES);
				VDBG(dev, "rx bytes=%u\n", count);
			/* packet per buffer mode - rx bytes */
			} else {
				VDBG(dev, "req->td_data=%p\n", req->td_data);
				VDBG(dev, "last desc = %p\n", td);
				/* received number bytes */
				if (use_dma_ppb_du) {
					/* every desc. counts bytes */
					count = udc_get_ppbdu_rxbytes(req);
				} else {
					/* last desc. counts bytes */
					count = AMD_GETBITS(td->status,
						UDC_DMA_OUT_STS_RXBYTES);
					if (!count && req->req.length
						== UDC_DMA_MAXPACKET) {
						/*
						 * on 64k packets the RXBYTES
						 * field is zero
						 */
						count = UDC_DMA_MAXPACKET;
					}
				}
				VDBG(dev, "last desc rx bytes=%u\n", count);
			}

			tmp = req->req.length - req->req.actual;
			if (count > tmp) {
				/* short-packet / overflow check */
				if ((tmp % ep->ep.maxpacket) != 0) {
					DBG(dev, "%s: rx %db, space=%db\n",
						ep->ep.name, count, tmp);
					req->req.status = -EOVERFLOW;
				}
				count = tmp;
			}
			req->req.actual += count;
			req->dma_going = 0;
			/* complete request */
			complete_req(ep, req, 0);

			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request,
					queue);
				/*
				 * DMA may be already started by udc_queue()
				 * called by gadget drivers completion
				 * routine. This happens when queue
				 * holds one request only.
				 */
				if (req->dma_going == 0) {
					/* next dma */
					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
						goto finished;
					/* write desc pointer */
					writel(req->td_phys,
						&ep->regs->desptr);
					req->dma_going = 1;
					/* enable DMA */
					udc_set_rde(dev);
				}
			} else {
				/*
				 * implant BNA dummy descriptor to allow
				 * RXFIFO opening by RDE
				 */
				if (ep->bna_dummy_req) {
					/* write desc pointer */
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
					ep->bna_occurred = 0;
				}
				/*
				 * schedule timer for setting RDE if queue
				 * remains empty to allow ep0 packets pass
				 * through
				 */
				if (set_rde != 0
						&& !timer_pending(&udc_timer)) {
					udc_timer.expires =
						jiffies
						+ HZ*UDC_RDE_TIMER_SECONDS;
					set_rde = 1;
					if (!stop_timer)
						add_timer(&udc_timer);
				}
				if (ep->num != UDC_EP0OUT_IX)
					dev->data_ep_queued = 0;
			}

		} else {
			/*
			 * RX DMA must be reenabled for each desc in PPBDU mode
			 * and must be enabled for PPBNDU mode in case of BNA
			 */
			udc_set_rde(dev);
		}

	} else if (ep->cancel_transfer) {
		ret_val = IRQ_HANDLED;
		ep->cancel_transfer = 0;
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAk processing when rxfifo empty only */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
			udc_process_cnak_queue(dev);
	}

	/* clear OUT bits in ep status */
	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);

finished:
	return ret_val;
}

/* Interrupt handler for data IN traffic */
static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 epsts;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc_data_dma *td;
	unsigned dma_done;
	unsigned len;

	ep = &dev->ep[ep_ix];
	epsts = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA ? */
		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
			dev_err(&dev->pdev->dev,
				"BNA ep%din occurred - DESPTR = %08lx\n",
				ep->num,
				(unsigned long) readl(&ep->regs->desptr));

			/* clear BNA */
			writel(epsts, &ep->regs->sts);
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev,
			"HE ep%dn occurred - DESPTR = %08lx\n",
			ep->num, (unsigned long) readl(&ep->regs->desptr));

		/* clear HE */
		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* DMA completion */
	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "TDC set- completion\n");
		ret_val = IRQ_HANDLED;
		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/*
			 * length bytes transferred
			 * check dma done of last desc. in PPBDU mode
			 */
			if (use_dma_ppb_du) {
				td = udc_get_last_dma_desc(req);
				if (td) {
					dma_done =
						AMD_GETBITS(td->status,
						UDC_DMA_IN_STS_BS);
					/* don't care DMA done */
					req->req.actual = req->req.length;
				}
			} else {
				/* assume all bytes transferred */
				req->req.actual = req->req.length;
			}

			if (req->req.actual == req->req.length) {
				/* complete req */
				complete_req(ep, req, 0);
				req->dma_going = 0;
				/* further request available ? */
				if (list_empty(&ep->queue)) {
					/* disable interrupt */
					tmp = readl(&dev->regs->ep_irqmsk);
					tmp |= AMD_BIT(ep->num);
					writel(tmp, &dev->regs->ep_irqmsk);
				}
			}
		}
		ep->cancel_transfer = 0;

	}
	/*
	 * status reg has IN bit set and TDC not set (if TDC was handled,
	 * IN must not be handled (UDC defect) ?
 */
	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
		ret_val = IRQ_HANDLED;
		if (!list_empty(&ep->queue)) {
			/* next request */
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/* FIFO mode */
			if (!use_dma) {
				/* write fifo */
				udc_txfifo_write(ep, &req->req);
				len = req->req.length - req->req.actual;
				if (len > ep->ep.maxpacket)
					len = ep->ep.maxpacket;
				req->req.actual += len;
				if (req->req.actual == req->req.length
					|| (len != ep->ep.maxpacket)) {
					/* complete req */
					complete_req(ep, req, 0);
				}
			/* DMA */
			} else if (req && !req->dma_going) {
				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
					req, req->td_data);
				if (req->td_data) {
					req->dma_going = 1;
					/*
					 * unset L bit of first desc.
					 * for chain
					 */
					if (use_dma_ppb && req->req.length >
							ep->ep.maxpacket) {
						req->td_data->status &=
							AMD_CLEAR_BIT(
							UDC_DMA_IN_STS_L);
					}

					/* write desc pointer */
					writel(req->td_phys,
						&ep->regs->desptr);

					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);

					/* set poll demand bit */
					tmp = readl(&ep->regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp, &ep->regs->ctl);
				}
			}

		} else if (!use_dma && ep->in) {
			/* disable interrupt */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp |= AMD_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}
	}
	/* clear status bits */
	writel(epsts, &ep->regs->sts);

finished:
	return ret_val;

}

/* Interrupt handler for Control OUT traffic */
static irqreturn_t udc_control_out_isr(struct udc *dev)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	int setup_supported;
	u32 count;
	int set = 0;
	struct udc_ep *ep;
	struct udc_ep *ep_tmp;

	ep = &dev->ep[UDC_EP0OUT_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
	/* check BNA and clear if set */
	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
		VDBG(dev, "ep0: BNA set\n");
		writel(AMD_BIT(UDC_EPSTS_BNA),
			&dev->ep[UDC_EP0OUT_IX].regs->sts);
		ep->bna_occurred = 1;
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* type of data: SETUP or DATA 0 bytes */
	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
	VDBG(dev, "data_typ = %x\n", tmp);

	/* setup data */
	if (tmp == UDC_EPSTS_OUT_SETUP) {
		ret_val = IRQ_HANDLED;

		ep->dev->stall_ep0in = 0;
		dev->waiting_zlp_ack_ep0in = 0;

		/* set NAK for EP0_IN */
		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		dev->ep[UDC_EP0IN_IX].naking = 1;
		/* get setup data */
		if (use_dma) {
			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);

			setup_data.data[0] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
			setup_data.data[1] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
			/* set HOST READY */
			dev->ep[UDC_EP0OUT_IX].td_stp->status =
					UDC_DMA_STP_STS_BS_HOST_READY;
		} else {
			/* read fifo */
			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
		}

		/* determine direction of control data */
		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
			/* enable RDE */
			udc_ep0_set_rde(dev);
			set = 0;
		} else {
			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
			/*
			 * implant BNA dummy descriptor to allow RXFIFO opening
			 * by RDE
			 */
			if (ep->bna_dummy_req) {
				/* write desc pointer */
				writel(ep->bna_dummy_req->td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				ep->bna_occurred = 0;
			}

			set = 1;
			dev->ep[UDC_EP0OUT_IX].naking = 1;
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data to early)
			 */
			set_rde = 1;
			if (!timer_pending(&udc_timer)) {
				udc_timer.expires = jiffies +
							HZ/UDC_RDE_TIMER_DIV;
				if (!stop_timer)
					add_timer(&udc_timer);
			}
		}

		/*
		 * mass storage reset must be processed here because
		 * next packet may be a CLEAR_FEATURE HALT which would not
		 * clear the stall bit when no STALL handshake was received
		 * before (autostall can cause this)
		 */
		if (setup_data.data[0] == UDC_MSCRES_DWORD0
				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
			DBG(dev, "MSC Reset\n");
			/*
			 * clear stall bits
			 * only one IN and OUT endpoints are handled
			 */
			ep_tmp = &udc->ep[UDC_EPIN_IX];
			udc_set_halt(&ep_tmp->ep, 0);
			ep_tmp = &udc->ep[UDC_EPOUT_IX];
			udc_set_halt(&ep_tmp->ep, 0);
		}

		/* call gadget with setup data received */
		spin_unlock(&dev->lock);
		setup_supported = dev->driver->setup(&dev->gadget,
						&setup_data.request);
		spin_lock(&dev->lock);

		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		/* ep0 in returns data (not zlp) on IN phase */
		if (setup_supported >= 0 && setup_supported <
				UDC_EP0IN_MAXPACKET) {
			/* clear NAK by writing CNAK in EP0_IN */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
			dev->ep[UDC_EP0IN_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

		/* if unsupported request then stall */
		} else if (setup_supported < 0) {
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		} else
			dev->waiting_zlp_ack_ep0in = 1;

		/* clear NAK by writing CNAK in EP0_OUT */
		if (!set) {
			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
			dev->ep[UDC_EP0OUT_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
		}

		if (!use_dma) {
			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);
		}

	/* data packet 0 bytes */
	} else if (tmp == UDC_EPSTS_OUT_DATA) {
		/* clear OUT bits in ep status */
		writel(UDC_EPSTS_OUT_CLEAR,
			&dev->ep[UDC_EP0OUT_IX].regs->sts);

		/* get setup data: only 0 packet */
		if (use_dma) {
			/* no req if 0 packet, just reactivate */
			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
				VDBG(dev, "ZLP\n");

				/* set HOST READY */
				dev->ep[UDC_EP0OUT_IX].td->status =
					AMD_ADDBITS(
					dev->ep[UDC_EP0OUT_IX].td->status,
					UDC_DMA_OUT_STS_BS_HOST_READY,
					UDC_DMA_OUT_STS_BS);

				/* enable RDE */
				udc_ep0_set_rde(dev);
				ret_val = IRQ_HANDLED;

			} else {
				/* control write */
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
				/* re-program desc. pointer for possible ZLPs */
				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				/* enable RDE */
				udc_ep0_set_rde(dev);
			}
		} else {

			/* received number bytes */
			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
			/* out data for fifo mode not working */
			count = 0;

			/* 0 packet or real data ? */
			if (count != 0) {
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
			} else {
				/* dummy read confirm */
				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
				ret_val = IRQ_HANDLED;
			}
		}
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAk processing when rxfifo empty only */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
			udc_process_cnak_queue(dev);
	}

finished:
	return ret_val;
}

/* Interrupt handler for Control IN traffic */
static irqreturn_t udc_control_in_isr(struct udc *dev)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned len;

	ep = &dev->ep[UDC_EP0IN_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
	/* DMA completion */
	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "isr: TDC clear\n");
		ret_val = IRQ_HANDLED;

		/* clear TDC bit */
		writel(AMD_BIT(UDC_EPSTS_TDC),
			&dev->ep[UDC_EP0IN_IX].regs->sts);

	/* status reg has IN bit set ?
 */
	} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
		ret_val = IRQ_HANDLED;
		if (ep->dma) {
			/* clear IN bit */
			writel(AMD_BIT(UDC_EPSTS_IN),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
		}
		if (dev->stall_ep0in) {
			DBG(dev, "stall ep0in\n");
			/* halt ep0in */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		} else {
			if (!list_empty(&ep->queue)) {
				/* next request */
				req = list_entry(ep->queue.next,
						struct udc_request, queue);

				if (ep->dma) {
					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);
					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_STP_STS_BS_HOST_READY,
						UDC_DMA_STP_STS_BS);

					/* set poll demand bit */
					tmp =
					readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp,
					&dev->ep[UDC_EP0IN_IX].regs->ctl);

					/* all bytes will be transferred */
					req->req.actual = req->req.length;

					/* complete req */
					complete_req(ep, req, 0);

				} else {
					/* write fifo */
					udc_txfifo_write(ep, &req->req);

					/* lengh bytes transferred */
					len = req->req.length - req->req.actual;
					if (len > ep->ep.maxpacket)
						len = ep->ep.maxpacket;

					req->req.actual += len;
					if (req->req.actual == req->req.length
						|| (len != ep->ep.maxpacket)) {
						/* complete req */
						complete_req(ep, req, 0);
					}
				}

			}
		}
		ep->halted = 0;
		dev->stall_ep0in = 0;
		if (!ep->dma) {
			/* clear IN bit */
			writel(AMD_BIT(UDC_EPSTS_IN),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
		}
	}

	return ret_val;
}

/*
 * Interrupt handler for global device events.
 * Called from udc_irq() with dev->lock held; temporarily drops the
 * lock around gadget-driver callbacks (see sparse annotations).
 */
static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 cfg;
	struct udc_ep *ep;
	u16 i;
	u8 udc_csr_epix;

	/* SET_CONFIG irq ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
		ret_val = IRQ_HANDLED;

		/* read config value */
		tmp = readl(&dev->regs->sts);
		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
		dev->cur_config = cfg;
		dev->set_cfg_not_acked = 1;

		/* make usb request for gadget driver */
		memset(&setup_data, 0 , sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
		setup_data.request.wValue = cpu_to_le16(dev->cur_config);

		/* programm the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;

			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep cfg */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
						UDC_CSR_NE_CFG);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}
		/* call gadget zero with setup data received */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	}
	/* SET_INTERFACE ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
		ret_val = IRQ_HANDLED;

		dev->set_cfg_not_acked = 1;
		/* read interface and alt setting values */
		tmp = readl(&dev->regs->sts);
		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);

		/* make usb request for gadget driver */
		memset(&setup_data, 0 , sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);

		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
			dev->cur_alt, dev->cur_intf);

		/* programm the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;

			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			/* UDC CSR reg */
			/* set ep values */
			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep interface */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
						UDC_CSR_NE_INTF);
			/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
			/* ep alt */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
						UDC_CSR_NE_ALT);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}

		/* call gadget zero with setup data received */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	}
	/* USB reset */
	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
		DBG(dev, "USB Reset interrupt\n");
		ret_val = IRQ_HANDLED;

		/* allow soft reset when suspend occurs */
		soft_reset_occured = 0;

		dev->waiting_zlp_ack_ep0in = 0;
		dev->set_cfg_not_acked = 0;

		/* mask not needed interrupts */
		udc_mask_unused_interrupts(dev);

		/* call gadget to resume and reset configs etc. */
		spin_unlock(&dev->lock);
		if (dev->sys_suspended && dev->driver->resume) {
			dev->driver->resume(&dev->gadget);
			dev->sys_suspended = 0;
		}
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* soft reset when rxfifo not empty */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
				&& !soft_reset_after_usbreset_occured) {
			udc_soft_reset(dev);
			soft_reset_after_usbreset_occured++;
		}

		/*
		 * DMA reset to kill potential old DMA hw hang,
		 * POLL bit is already reset by ep_init() through
		 * disconnect()
		 */
		DBG(dev, "DMA machine reset\n");
		tmp = readl(&dev->regs->cfg);
		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
		writel(tmp, &dev->regs->cfg);

		/* put into initial config */
		udc_basic_init(dev);

		/* enable device setup interrupts */
		udc_enable_dev_setup_interrupts(dev);

		/* enable suspend interrupt */
		tmp = readl(&dev->regs->irqmsk);
		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
		writel(tmp, &dev->regs->irqmsk);

	}
	/* USB suspend */
	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
		DBG(dev, "USB Suspend interrupt\n");
		ret_val = IRQ_HANDLED;
		if (dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->sys_suspended = 1;
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}
	}
	/* new speed ?
 */
	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
		DBG(dev, "ENUM interrupt\n");
		ret_val = IRQ_HANDLED;
		soft_reset_after_usbreset_occured = 0;

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* link up all endpoints */
		udc_setup_endpoints(dev);
		dev_info(&dev->pdev->dev, "Connect: %s\n",
			usb_speed_string(dev->gadget.speed));

		/* init ep 0 */
		activate_control_endpoints(dev);

		/* enable ep0 interrupts */
		udc_enable_ep0_interrupts(dev);
	}
	/* session valid change interrupt */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
		DBG(dev, "USB SVC interrupt\n");
		ret_val = IRQ_HANDLED;

		/* check that session is not valid to detect disconnect */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
			/* disable suspend interrupt */
			tmp = readl(&dev->regs->irqmsk);
			tmp |= AMD_BIT(UDC_DEVINT_US);
			writel(tmp, &dev->regs->irqmsk);
			DBG(dev, "USB Disconnect (session valid low)\n");
			/* cleanup on disconnect */
			usb_disconnect(udc);
		}

	}

	return ret_val;
}

/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
static irqreturn_t udc_irq(int irq, void *pdev)
{
	struct udc *dev = pdev;
	u32 reg;
	u16 i;
	u32 ep_irq;
	irqreturn_t ret_val = IRQ_NONE;

	spin_lock(&dev->lock);

	/* check for ep irq */
	reg = readl(&dev->regs->ep_irqsts);
	if (reg) {
		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
			ret_val |= udc_control_out_isr(dev);
		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
			ret_val |= udc_control_in_isr(dev);

		/*
		 * data endpoint
		 * iterate ep's
		 */
		for (i = 1; i < UDC_EP_NUM; i++) {
			ep_irq = 1 << i;
			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
				continue;

			/* clear irq status */
			writel(ep_irq, &dev->regs->ep_irqsts);

			/* irq for out ep ? */
			if (i > UDC_EPIN_NUM)
				ret_val |= udc_data_out_isr(dev, i);
			else
				ret_val |= udc_data_in_isr(dev, i);
		}
	}

	/* check for dev irq */
	reg = readl(&dev->regs->irqsts);
	if (reg) {
		/* clear irq */
		writel(reg, &dev->regs->irqsts);
		ret_val |= udc_dev_isr(dev, reg);
	}

	spin_unlock(&dev->lock);
	return ret_val;
}

/* Tears down device */
static void gadget_release(struct device *pdev)
{
	struct amd5536udc *dev = dev_get_drvdata(pdev);
	kfree(dev);
}

/* Cleanup on device remove */
static void udc_remove(struct udc *dev)
{
	/* remove timer */
	stop_timer++;
	if (timer_pending(&udc_timer))
		wait_for_completion(&on_exit);
	/* .data is used as "timer initialized" flag (set in udc_probe) */
	if (udc_timer.data)
		del_timer_sync(&udc_timer);
	/* remove pollstall timer */
	stop_pollstall_timer++;
	if (timer_pending(&udc_pollstall_timer))
		wait_for_completion(&on_pollstall_exit);
	if (udc_pollstall_timer.data)
		del_timer_sync(&udc_pollstall_timer);

	udc = NULL;
}

/* Reset all pci context */
static void udc_pci_remove(struct pci_dev *pdev)
{
	struct udc *dev;

	dev = pci_get_drvdata(pdev);

	/* NOTE(review): uses the global 'udc' here although 'dev' was just
	 * fetched from drvdata -- verify both always refer to the same
	 * object on every removal path */
	usb_del_gadget_udc(&udc->gadget);
	/* gadget driver must not be registered */
	BUG_ON(dev->driver != NULL);

	/* dma pool cleanup */
	if (dev->data_requests)
		pci_pool_destroy(dev->data_requests);

	if (dev->stp_requests) {
		/* cleanup DMA desc's for ep0in */
		pci_pool_free(dev->stp_requests,
			dev->ep[UDC_EP0OUT_IX].td_stp,
			dev->ep[UDC_EP0OUT_IX].td_stp_dma);
		pci_pool_free(dev->stp_requests,
			dev->ep[UDC_EP0OUT_IX].td,
			dev->ep[UDC_EP0OUT_IX].td_phys);

		pci_pool_destroy(dev->stp_requests);
	}

	/* reset controller */
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	if (dev->irq_registered)
		free_irq(pdev->irq, dev);
	if (dev->regs)
		iounmap(dev->regs);
	if (dev->mem_region)
		release_mem_region(pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0));
	if (dev->active)
		pci_disable_device(pdev);

	device_unregister(&dev->gadget.dev);
	pci_set_drvdata(pdev, NULL);

	udc_remove(dev);
}

/* create dma pools on init */
static int init_dma_pools(struct udc *dev)
{
	struct udc_stp_dma *td_stp;
	struct udc_data_dma
*td_data;
	int retval;

	/* consistent DMA mode setting ? */
	if (use_dma_ppb) {
		use_dma_bufferfill_mode = 0;
	} else {
		use_dma_ppb_du = 0;
		use_dma_bufferfill_mode = 1;
	}

	/* DMA setup */
	dev->data_requests = dma_pool_create("data_requests", NULL,
		sizeof(struct udc_data_dma), 0, 0);
	if (!dev->data_requests) {
		DBG(dev, "can't get request data pool\n");
		retval = -ENOMEM;
		goto finished;
	}

	/* EP0 in dma regs = dev control regs */
	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;

	/* dma desc for setup data */
	dev->stp_requests = dma_pool_create("setup requests", NULL,
		sizeof(struct udc_stp_dma), 0, 0);
	if (!dev->stp_requests) {
		DBG(dev, "can't get stp request pool\n");
		retval = -ENOMEM;
		goto finished;
	}
	/* setup */
	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
	if (td_stp == NULL) {
		retval = -ENOMEM;
		goto finished;
	}
	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;

	/* data: 0 packets !? */
	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_phys);
	if (td_data == NULL) {
		retval = -ENOMEM;
		goto finished;
	}
	dev->ep[UDC_EP0OUT_IX].td = td_data;
	return 0;

finished:
	return retval;
}

/*
 * Called by pci bus driver to init pci context.
 * NOTE(review): on the early failure paths below, 'dev' is freed and set
 * to NULL before 'goto finished', so the 'if (dev) udc_pci_remove()'
 * cleanup never runs -- the enabled PCI device, the reserved mem region
 * and the ioremap mapping leak on those paths. Verify against upstream
 * error-path fixes for this driver.
 */
static int udc_pci_probe(
	struct pci_dev *pdev,
	const struct pci_device_id *id
)
{
	struct udc *dev;
	unsigned long resource;
	unsigned long len;
	int retval = 0;

	/* one udc only */
	if (udc) {
		dev_dbg(&pdev->dev, "already probed\n");
		return -EBUSY;
	}

	/* init */
	dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto finished;
	}

	/* pci setup */
	if (pci_enable_device(pdev) < 0) {
		kfree(dev);
		dev = NULL;
		retval = -ENODEV;
		goto finished;
	}
	dev->active = 1;

	/* PCI resource allocation */
	resource = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);

	if (!request_mem_region(resource, len, name)) {
		dev_dbg(&pdev->dev, "pci device used already\n");
		kfree(dev);
		dev = NULL;
		retval = -EBUSY;
		goto finished;
	}
	dev->mem_region = 1;

	dev->virt_addr = ioremap_nocache(resource, len);
	if (dev->virt_addr == NULL) {
		dev_dbg(&pdev->dev, "start address cannot be mapped\n");
		kfree(dev);
		dev = NULL;
		retval = -EFAULT;
		goto finished;
	}

	if (!pdev->irq) {
		dev_err(&dev->pdev->dev, "irq not set\n");
		kfree(dev);
		dev = NULL;
		retval = -ENODEV;
		goto finished;
	}

	spin_lock_init(&dev->lock);
	/* udc csr registers base */
	dev->csr = dev->virt_addr + UDC_CSR_ADDR;
	/* dev registers base */
	dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
	/* ep registers base */
	dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
	/* fifo's base */
	dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
	dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);

	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
		dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
		kfree(dev);
		dev = NULL;
		retval = -EBUSY;
		goto finished;
	}
	dev->irq_registered = 1;

	pci_set_drvdata(pdev, dev);

	/* chip revision for Hs AMD5536 */
	dev->chiprev = pdev->revision;

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* init dma pools */
	if (use_dma) {
		retval = init_dma_pools(dev);
		if (retval != 0)
			goto finished;
	}

	dev->phys_addr = resource;
	dev->irq = pdev->irq;
	dev->pdev = pdev;
	dev->gadget.dev.parent = &pdev->dev;
	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;

	/* general probing */
	if (udc_probe(dev) == 0)
		return 0;

finished:
	if (dev)
		udc_pci_remove(pdev);
	return retval;
}

/* general probe */
static int udc_probe(struct udc *dev)
{
	char tmp[128];
	u32 reg;
	int retval;

	/* mark timer as not initialized */
	udc_timer.data = 0;
	udc_pollstall_timer.data = 0;

	/* device struct setup */
	dev->gadget.ops = &udc_ops;

	dev_set_name(&dev->gadget.dev, "gadget");
	dev->gadget.dev.release = gadget_release;
	dev->gadget.name = name;
	dev->gadget.max_speed = USB_SPEED_HIGH;

	/* init registers, interrupts, ... */
	startup_registers(dev);

	dev_info(&dev->pdev->dev, "%s\n", mod_desc);

	snprintf(tmp, sizeof tmp, "%d", dev->irq);
	dev_info(&dev->pdev->dev,
		"irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
		tmp, dev->phys_addr, dev->chiprev,
		(dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
	strcpy(tmp, UDC_DRIVER_VERSION_STRING);
	if (dev->chiprev == UDC_HSA0_REV) {
		dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
		retval = -ENODEV;
		goto finished;
	}
	dev_info(&dev->pdev->dev,
		"driver version: %s(for Geode5536 B1)\n", tmp);
	udc = dev;

	retval = usb_add_gadget_udc(&udc->pdev->dev, &dev->gadget);
	if (retval)
		goto finished;

	retval = device_register(&dev->gadget.dev);
	if (retval) {
		usb_del_gadget_udc(&dev->gadget);
		put_device(&dev->gadget.dev);
		goto finished;
	}

	/* timer init; .data = 1 marks the timers as initialized so that
	 * udc_remove() knows del_timer_sync() is safe */
	init_timer(&udc_timer);
	udc_timer.function = udc_timer_function;
	udc_timer.data = 1;
	/* timer pollstall init */
	init_timer(&udc_pollstall_timer);
	udc_pollstall_timer.function = udc_pollstall_timer_function;
	udc_pollstall_timer.data = 1;

	/* set SD */
	reg = readl(&dev->regs->ctl);
	reg |= AMD_BIT(UDC_DEVCTL_SD);
	writel(reg, &dev->regs->ctl);

	/* print dev register info */
	print_regs(dev);

	return 0;

finished:
	return retval;
}

/* Initiates a remote wakeup */
static int udc_remote_wakeup(struct udc *dev)
{
	unsigned long flags;
	u32 tmp;

	DBG(dev, "UDC initiates remote wakeup\n");

	spin_lock_irqsave(&dev->lock, flags);

	/* pulse the RES (resume) bit: set then clear */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);
	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

/* PCI device parameters */
static DEFINE_PCI_DEVICE_TABLE(pci_id) = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask = 0xffffffff,
	},
	{},
};
MODULE_DEVICE_TABLE(pci, pci_id);

/* PCI functions */
static struct pci_driver udc_pci_driver = {
	.name = (char *) name,
	.id_table = pci_id,
	.probe = udc_pci_probe,
	.remove = udc_pci_remove,
};

/*
Inits driver */ static int __init init(void) { return pci_register_driver(&udc_pci_driver); } module_init(init); /* Cleans driver */ static void __exit cleanup(void) { pci_unregister_driver(&udc_pci_driver); } module_exit(cleanup); MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION); MODULE_AUTHOR("Thomas Dahlmann"); MODULE_LICENSE("GPL");
gpl-2.0
RenderBroken/msm8974_OPO-CAF_render_kernel
arch/arm/mach-omap2/common-board-devices.c
4760
3682
/* * common-board-devices.c * * Copyright (C) 2011 CompuLab, Ltd. * Author: Mike Rapoport <mike@compulab.co.il> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/gpio.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #include <plat/mcspi.h> #include <plat/nand.h> #include "common-board-devices.h" #if defined(CONFIG_TOUCHSCREEN_ADS7846) || \ defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) static struct omap2_mcspi_device_config ads7846_mcspi_config = { .turbo_mode = 0, }; static struct ads7846_platform_data ads7846_config = { .x_max = 0x0fff, .y_max = 0x0fff, .x_plate_ohms = 180, .pressure_max = 255, .debounce_max = 10, .debounce_tol = 3, .debounce_rep = 1, .gpio_pendown = -EINVAL, .keep_vref_on = 1, }; static struct spi_board_info ads7846_spi_board_info __initdata = { .modalias = "ads7846", .bus_num = -EINVAL, .chip_select = 0, .max_speed_hz = 1500000, .controller_data = &ads7846_mcspi_config, .irq = -EINVAL, .platform_data = &ads7846_config, }; void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce, struct ads7846_platform_data *board_pdata) { struct spi_board_info *spi_bi = &ads7846_spi_board_info; int err; if (board_pdata && board_pdata->get_pendown_state) { err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown"); if (err) { pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err); return; } gpio_export(gpio_pendown, 0); if 
(gpio_debounce) gpio_set_debounce(gpio_pendown, gpio_debounce); } spi_bi->bus_num = bus_num; spi_bi->irq = gpio_to_irq(gpio_pendown); if (board_pdata) { board_pdata->gpio_pendown = gpio_pendown; spi_bi->platform_data = board_pdata; } else { ads7846_config.gpio_pendown = gpio_pendown; } spi_register_board_info(&ads7846_spi_board_info, 1); } #else void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce, struct ads7846_platform_data *board_pdata) { } #endif #if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE) static struct omap_nand_platform_data nand_data; void __init omap_nand_flash_init(int options, struct mtd_partition *parts, int nr_parts) { u8 cs = 0; u8 nandcs = GPMC_CS_NUM + 1; /* find out the chip-select on which NAND exists */ while (cs < GPMC_CS_NUM) { u32 ret = 0; ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); if ((ret & 0xC00) == 0x800) { printk(KERN_INFO "Found NAND on CS%d\n", cs); if (nandcs > GPMC_CS_NUM) nandcs = cs; } cs++; } if (nandcs > GPMC_CS_NUM) { printk(KERN_INFO "NAND: Unable to find configuration " "in GPMC\n "); return; } if (nandcs < GPMC_CS_NUM) { nand_data.cs = nandcs; nand_data.parts = parts; nand_data.nr_parts = nr_parts; nand_data.devsize = options; printk(KERN_INFO "Registering NAND on CS%d\n", nandcs); if (gpmc_nand_init(&nand_data) < 0) printk(KERN_ERR "Unable to register NAND device\n"); } } #else void __init omap_nand_flash_init(int options, struct mtd_partition *parts, int nr_parts) { } #endif
gpl-2.0
davidmueller13/L900_3.9_Experiment
drivers/rtc/rtc-ds3232.c
153
12157
/* * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C * * Copyright (C) 2009-2011 Freescale Semiconductor. * Author: Jack Lan <jack.lan@freescale.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ /* * It would be more efficient to use i2c msgs/i2c_transfer directly but, as * recommened in .../Documentation/i2c/writing-clients section * "Sending and receiving", using SMBus level communication is preferred. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/rtc.h> #include <linux/bcd.h> #include <linux/workqueue.h> #include <linux/slab.h> #define DS3232_REG_SECONDS 0x00 #define DS3232_REG_MINUTES 0x01 #define DS3232_REG_HOURS 0x02 #define DS3232_REG_AMPM 0x02 #define DS3232_REG_DAY 0x03 #define DS3232_REG_DATE 0x04 #define DS3232_REG_MONTH 0x05 #define DS3232_REG_CENTURY 0x05 #define DS3232_REG_YEAR 0x06 #define DS3232_REG_ALARM1 0x07 /* Alarm 1 BASE */ #define DS3232_REG_ALARM2 0x0B /* Alarm 2 BASE */ #define DS3232_REG_CR 0x0E /* Control register */ # define DS3232_REG_CR_nEOSC 0x80 # define DS3232_REG_CR_INTCN 0x04 # define DS3232_REG_CR_A2IE 0x02 # define DS3232_REG_CR_A1IE 0x01 #define DS3232_REG_SR 0x0F /* control/status register */ # define DS3232_REG_SR_OSF 0x80 # define DS3232_REG_SR_BSY 0x04 # define DS3232_REG_SR_A2F 0x02 # define DS3232_REG_SR_A1F 0x01 struct ds3232 { struct i2c_client *client; struct rtc_device *rtc; struct work_struct work; /* The mutex protects alarm operations, and prevents a race * between the enable_irq() in the workqueue and the free_irq() * in the remove function. 
*/ struct mutex mutex; int exiting; }; static struct i2c_driver ds3232_driver; static int ds3232_check_rtc_status(struct i2c_client *client) { int ret = 0; int control, stat; stat = i2c_smbus_read_byte_data(client, DS3232_REG_SR); if (stat < 0) return stat; if (stat & DS3232_REG_SR_OSF) dev_warn(&client->dev, "oscillator discontinuity flagged, " "time unreliable\n"); stat &= ~(DS3232_REG_SR_OSF | DS3232_REG_SR_A1F | DS3232_REG_SR_A2F); ret = i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat); if (ret < 0) return ret; /* If the alarm is pending, clear it before requesting * the interrupt, so an interrupt event isn't reported * before everything is initialized. */ control = i2c_smbus_read_byte_data(client, DS3232_REG_CR); if (control < 0) return control; control &= ~(DS3232_REG_CR_A1IE | DS3232_REG_CR_A2IE); control |= DS3232_REG_CR_INTCN; return i2c_smbus_write_byte_data(client, DS3232_REG_CR, control); } static int ds3232_read_time(struct device *dev, struct rtc_time *time) { struct i2c_client *client = to_i2c_client(dev); int ret; u8 buf[7]; unsigned int year, month, day, hour, minute, second; unsigned int week, twelve_hr, am_pm; unsigned int century, add_century = 0; ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_SECONDS, 7, buf); if (ret < 0) return ret; if (ret < 7) return -EIO; second = buf[0]; minute = buf[1]; hour = buf[2]; week = buf[3]; day = buf[4]; month = buf[5]; year = buf[6]; /* Extract additional information for AM/PM and century */ twelve_hr = hour & 0x40; am_pm = hour & 0x20; century = month & 0x80; /* Write to rtc_time structure */ time->tm_sec = bcd2bin(second); time->tm_min = bcd2bin(minute); if (twelve_hr) { /* Convert to 24 hr */ if (am_pm) time->tm_hour = bcd2bin(hour & 0x1F) + 12; else time->tm_hour = bcd2bin(hour & 0x1F); } else { time->tm_hour = bcd2bin(hour); } /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */ time->tm_wday = bcd2bin(week) - 1; time->tm_mday = bcd2bin(day); /* linux tm_mon range:0~11, while 
month range is 1~12 in RTC chip */ time->tm_mon = bcd2bin(month & 0x7F) - 1; if (century) add_century = 100; time->tm_year = bcd2bin(year) + add_century; return rtc_valid_tm(time); } static int ds3232_set_time(struct device *dev, struct rtc_time *time) { struct i2c_client *client = to_i2c_client(dev); u8 buf[7]; /* Extract time from rtc_time and load into ds3232*/ buf[0] = bin2bcd(time->tm_sec); buf[1] = bin2bcd(time->tm_min); buf[2] = bin2bcd(time->tm_hour); /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */ buf[3] = bin2bcd(time->tm_wday + 1); buf[4] = bin2bcd(time->tm_mday); /* Date */ /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */ buf[5] = bin2bcd(time->tm_mon + 1); if (time->tm_year >= 100) { buf[5] |= 0x80; buf[6] = bin2bcd(time->tm_year - 100); } else { buf[6] = bin2bcd(time->tm_year); } return i2c_smbus_write_i2c_block_data(client, DS3232_REG_SECONDS, 7, buf); } /* * DS3232 has two alarm, we only use alarm1 * According to linux specification, only support one-shot alarm * no periodic alarm mode */ static int ds3232_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct i2c_client *client = to_i2c_client(dev); struct ds3232 *ds3232 = i2c_get_clientdata(client); int control, stat; int ret; u8 buf[4]; mutex_lock(&ds3232->mutex); ret = i2c_smbus_read_byte_data(client, DS3232_REG_SR); if (ret < 0) goto out; stat = ret; ret = i2c_smbus_read_byte_data(client, DS3232_REG_CR); if (ret < 0) goto out; control = ret; ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf); if (ret < 0) goto out; alarm->time.tm_sec = bcd2bin(buf[0] & 0x7F); alarm->time.tm_min = bcd2bin(buf[1] & 0x7F); alarm->time.tm_hour = bcd2bin(buf[2] & 0x7F); alarm->time.tm_mday = bcd2bin(buf[3] & 0x7F); alarm->time.tm_mon = -1; alarm->time.tm_year = -1; alarm->time.tm_wday = -1; alarm->time.tm_yday = -1; alarm->time.tm_isdst = -1; alarm->enabled = !!(control & DS3232_REG_CR_A1IE); alarm->pending = !!(stat & DS3232_REG_SR_A1F); ret = 0; 
out: mutex_unlock(&ds3232->mutex); return ret; } /* * linux rtc-module does not support wday alarm * and only 24h time mode supported indeed */ static int ds3232_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct i2c_client *client = to_i2c_client(dev); struct ds3232 *ds3232 = i2c_get_clientdata(client); int control, stat; int ret; u8 buf[4]; if (client->irq <= 0) return -EINVAL; mutex_lock(&ds3232->mutex); buf[0] = bin2bcd(alarm->time.tm_sec); buf[1] = bin2bcd(alarm->time.tm_min); buf[2] = bin2bcd(alarm->time.tm_hour); buf[3] = bin2bcd(alarm->time.tm_mday); /* clear alarm interrupt enable bit */ ret = i2c_smbus_read_byte_data(client, DS3232_REG_CR); if (ret < 0) goto out; control = ret; control &= ~(DS3232_REG_CR_A1IE | DS3232_REG_CR_A2IE); ret = i2c_smbus_write_byte_data(client, DS3232_REG_CR, control); if (ret < 0) goto out; /* clear any pending alarm flag */ ret = i2c_smbus_read_byte_data(client, DS3232_REG_SR); if (ret < 0) goto out; stat = ret; stat &= ~(DS3232_REG_SR_A1F | DS3232_REG_SR_A2F); ret = i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat); if (ret < 0) goto out; ret = i2c_smbus_write_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf); if (alarm->enabled) { control |= DS3232_REG_CR_A1IE; ret = i2c_smbus_write_byte_data(client, DS3232_REG_CR, control); } out: mutex_unlock(&ds3232->mutex); return ret; } static void ds3232_update_alarm(struct i2c_client *client) { struct ds3232 *ds3232 = i2c_get_clientdata(client); int control; int ret; u8 buf[4]; mutex_lock(&ds3232->mutex); ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf); if (ret < 0) goto unlock; buf[0] = bcd2bin(buf[0]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ? 0x80 : buf[0]; buf[1] = bcd2bin(buf[1]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ? 0x80 : buf[1]; buf[2] = bcd2bin(buf[2]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ? 0x80 : buf[2]; buf[3] = bcd2bin(buf[3]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ? 
0x80 : buf[3]; ret = i2c_smbus_write_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf); if (ret < 0) goto unlock; control = i2c_smbus_read_byte_data(client, DS3232_REG_CR); if (control < 0) goto unlock; if (ds3232->rtc->irq_data & (RTC_AF | RTC_UF)) /* enable alarm1 interrupt */ control |= DS3232_REG_CR_A1IE; else /* disable alarm1 interrupt */ control &= ~(DS3232_REG_CR_A1IE); i2c_smbus_write_byte_data(client, DS3232_REG_CR, control); unlock: mutex_unlock(&ds3232->mutex); } static int ds3232_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct i2c_client *client = to_i2c_client(dev); struct ds3232 *ds3232 = i2c_get_clientdata(client); if (client->irq <= 0) return -EINVAL; if (enabled) ds3232->rtc->irq_data |= RTC_AF; else ds3232->rtc->irq_data &= ~RTC_AF; ds3232_update_alarm(client); return 0; } static irqreturn_t ds3232_irq(int irq, void *dev_id) { struct i2c_client *client = dev_id; struct ds3232 *ds3232 = i2c_get_clientdata(client); disable_irq_nosync(irq); schedule_work(&ds3232->work); return IRQ_HANDLED; } static void ds3232_work(struct work_struct *work) { struct ds3232 *ds3232 = container_of(work, struct ds3232, work); struct i2c_client *client = ds3232->client; int stat, control; mutex_lock(&ds3232->mutex); stat = i2c_smbus_read_byte_data(client, DS3232_REG_SR); if (stat < 0) goto unlock; if (stat & DS3232_REG_SR_A1F) { control = i2c_smbus_read_byte_data(client, DS3232_REG_CR); if (control < 0) goto out; /* disable alarm1 interrupt */ control &= ~(DS3232_REG_CR_A1IE); i2c_smbus_write_byte_data(client, DS3232_REG_CR, control); /* clear the alarm pend flag */ stat &= ~DS3232_REG_SR_A1F; i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat); rtc_update_irq(ds3232->rtc, 1, RTC_AF | RTC_IRQF); } out: if (!ds3232->exiting) enable_irq(client->irq); unlock: mutex_unlock(&ds3232->mutex); } static const struct rtc_class_ops ds3232_rtc_ops = { .read_time = ds3232_read_time, .set_time = ds3232_set_time, .read_alarm = ds3232_read_alarm, .set_alarm = 
ds3232_set_alarm, .alarm_irq_enable = ds3232_alarm_irq_enable, }; static int ds3232_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ds3232 *ds3232; int ret; ds3232 = kzalloc(sizeof(struct ds3232), GFP_KERNEL); if (!ds3232) return -ENOMEM; ds3232->client = client; i2c_set_clientdata(client, ds3232); INIT_WORK(&ds3232->work, ds3232_work); mutex_init(&ds3232->mutex); ret = ds3232_check_rtc_status(client); if (ret) goto out_free; ds3232->rtc = rtc_device_register(client->name, &client->dev, &ds3232_rtc_ops, THIS_MODULE); if (IS_ERR(ds3232->rtc)) { ret = PTR_ERR(ds3232->rtc); dev_err(&client->dev, "unable to register the class device\n"); goto out_irq; } if (client->irq >= 0) { ret = request_irq(client->irq, ds3232_irq, 0, "ds3232", client); if (ret) { dev_err(&client->dev, "unable to request IRQ\n"); goto out_free; } } return 0; out_irq: if (client->irq >= 0) free_irq(client->irq, client); out_free: kfree(ds3232); return ret; } static int ds3232_remove(struct i2c_client *client) { struct ds3232 *ds3232 = i2c_get_clientdata(client); if (client->irq >= 0) { mutex_lock(&ds3232->mutex); ds3232->exiting = 1; mutex_unlock(&ds3232->mutex); free_irq(client->irq, client); cancel_work_sync(&ds3232->work); } rtc_device_unregister(ds3232->rtc); kfree(ds3232); return 0; } static const struct i2c_device_id ds3232_id[] = { { "ds3232", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ds3232_id); static struct i2c_driver ds3232_driver = { .driver = { .name = "rtc-ds3232", .owner = THIS_MODULE, }, .probe = ds3232_probe, .remove = ds3232_remove, .id_table = ds3232_id, }; module_i2c_driver(ds3232_driver); MODULE_AUTHOR("Srikanth Srinivasan <srikanth.srinivasan@freescale.com>"); MODULE_DESCRIPTION("Maxim/Dallas DS3232 RTC Driver"); MODULE_LICENSE("GPL");
gpl-2.0
cxgbit/cxgbit
arch/x86/platform/intel-mid/intel_mid_vrtc.c
665
4422
/* * intel_mid_vrtc.c: Driver for virtual RTC device on Intel MID platform * * (C) Copyright 2009 Intel Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. * * Note: * VRTC is emulated by system controller firmware, the real HW * RTC is located in the PMIC device. SCU FW shadows PMIC RTC * in a memory mapped IO space that is visible to the host IA * processor. * * This driver is based on RTC CMOS driver. */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/init.h> #include <linux/sfi.h> #include <linux/platform_device.h> #include <asm/intel-mid.h> #include <asm/intel_mid_vrtc.h> #include <asm/time.h> #include <asm/fixmap.h> static unsigned char __iomem *vrtc_virt_base; unsigned char vrtc_cmos_read(unsigned char reg) { unsigned char retval; /* vRTC's registers range from 0x0 to 0xD */ if (reg > 0xd || !vrtc_virt_base) return 0xff; lock_cmos_prefix(reg); retval = __raw_readb(vrtc_virt_base + (reg << 2)); lock_cmos_suffix(reg); return retval; } EXPORT_SYMBOL_GPL(vrtc_cmos_read); void vrtc_cmos_write(unsigned char val, unsigned char reg) { if (reg > 0xd || !vrtc_virt_base) return; lock_cmos_prefix(reg); __raw_writeb(val, vrtc_virt_base + (reg << 2)); lock_cmos_suffix(reg); } EXPORT_SYMBOL_GPL(vrtc_cmos_write); void vrtc_get_time(struct timespec *now) { u8 sec, min, hour, mday, mon; unsigned long flags; u32 year; spin_lock_irqsave(&rtc_lock, flags); while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP)) cpu_relax(); sec = vrtc_cmos_read(RTC_SECONDS); min = vrtc_cmos_read(RTC_MINUTES); hour = vrtc_cmos_read(RTC_HOURS); mday = vrtc_cmos_read(RTC_DAY_OF_MONTH); mon = vrtc_cmos_read(RTC_MONTH); year = vrtc_cmos_read(RTC_YEAR); spin_unlock_irqrestore(&rtc_lock, flags); /* vRTC YEAR reg contains the offset to 1972 */ year += 1972; pr_info("vRTC: sec: %d min: %d hour: %d day: %d " "mon: %d year: 
%d\n", sec, min, hour, mday, mon, year); now->tv_sec = mktime(year, mon, mday, hour, min, sec); now->tv_nsec = 0; } int vrtc_set_mmss(const struct timespec *now) { unsigned long flags; struct rtc_time tm; int year; int retval = 0; rtc_time_to_tm(now->tv_sec, &tm); if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) { /* * tm.year is the number of years since 1900, and the * vrtc need the years since 1972. */ year = tm.tm_year - 72; spin_lock_irqsave(&rtc_lock, flags); vrtc_cmos_write(year, RTC_YEAR); vrtc_cmos_write(tm.tm_mon, RTC_MONTH); vrtc_cmos_write(tm.tm_mday, RTC_DAY_OF_MONTH); vrtc_cmos_write(tm.tm_hour, RTC_HOURS); vrtc_cmos_write(tm.tm_min, RTC_MINUTES); vrtc_cmos_write(tm.tm_sec, RTC_SECONDS); spin_unlock_irqrestore(&rtc_lock, flags); } else { pr_err("%s: Invalid vRTC value: write of %lx to vRTC failed\n", __func__, now->tv_sec); retval = -EINVAL; } return retval; } void __init intel_mid_rtc_init(void) { unsigned long vrtc_paddr; sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc); vrtc_paddr = sfi_mrtc_array[0].phys_addr; if (!sfi_mrtc_num || !vrtc_paddr) return; vrtc_virt_base = (void __iomem *)set_fixmap_offset_nocache(FIX_LNW_VRTC, vrtc_paddr); x86_platform.get_wallclock = vrtc_get_time; x86_platform.set_wallclock = vrtc_set_mmss; } /* * The Moorestown platform has a memory mapped virtual RTC device that emulates * the programming interface of the RTC. 
*/ static struct resource vrtc_resources[] = { [0] = { .flags = IORESOURCE_MEM, }, [1] = { .flags = IORESOURCE_IRQ, } }; static struct platform_device vrtc_device = { .name = "rtc_mrst", .id = -1, .resource = vrtc_resources, .num_resources = ARRAY_SIZE(vrtc_resources), }; /* Register the RTC device if appropriate */ static int __init intel_mid_device_create(void) { /* No Moorestown, no device */ if (!intel_mid_identify_cpu()) return -ENODEV; /* No timer, no device */ if (!sfi_mrtc_num) return -ENODEV; /* iomem resource */ vrtc_resources[0].start = sfi_mrtc_array[0].phys_addr; vrtc_resources[0].end = sfi_mrtc_array[0].phys_addr + MRST_VRTC_MAP_SZ; /* irq resource */ vrtc_resources[1].start = sfi_mrtc_array[0].irq; vrtc_resources[1].end = sfi_mrtc_array[0].irq; return platform_device_register(&vrtc_device); } device_initcall(intel_mid_device_create);
gpl-2.0
shizhai/wprobe
build_dir/target-mips_r2_uClibc-0.9.33.2/linux-ar71xx_generic/linux-3.10.4/arch/arm/mach-bcm2835/bcm2835.c
1177
3505
/*
 * Copyright (C) 2010 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqchip/bcm2835.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk/bcm2835.h>
#include <linux/clocksource.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

/* PM/watchdog block register offsets and magic values. */
#define PM_RSTC				0x1c
#define PM_RSTS				0x20
#define PM_WDOG				0x24

#define PM_PASSWORD			0x5a000000
#define PM_RSTC_WRCFG_MASK		0x00000030
#define PM_RSTC_WRCFG_FULL_RESET	0x00000020
#define PM_RSTS_HADWRH_SET		0x00000040

#define BCM2835_PERIPH_PHYS	0x20000000
#define BCM2835_PERIPH_VIRT	0xf0000000
#define BCM2835_PERIPH_SIZE	SZ_16M

static void __iomem *wdt_regs;

/*
 * Map the watchdog registers up front: the machine restart method can
 * run in atomic context, where ioremap() is not allowed.
 */
static void bcm2835_setup_restart(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "brcm,bcm2835-pm-wdt");
	if (WARN(!np, "unable to setup watchdog restart"))
		return;

	wdt_regs = of_iomap(np, 0);
	WARN(!wdt_regs, "failed to remap watchdog regs");
}

/* Arm the watchdog with a tiny timeout and request a full chip reset. */
static void bcm2835_restart(char mode, const char *cmd)
{
	u32 rstc;

	if (!wdt_regs)
		return;

	/* use a timeout of 10 ticks (~150us) */
	writel_relaxed(10 | PM_PASSWORD, wdt_regs + PM_WDOG);

	rstc = readl_relaxed(wdt_regs + PM_RSTC);
	rstc &= ~PM_RSTC_WRCFG_MASK;
	rstc |= PM_PASSWORD | PM_RSTC_WRCFG_FULL_RESET;
	writel_relaxed(rstc, wdt_regs + PM_RSTC);

	/* No sleeping, possibly atomic. */
	mdelay(1);
}

/*
 * We can't really power off, but if we do the normal reset scheme and
 * indicate to bootcode.bin not to reboot, then most of the chip will
 * be powered off.
 */
static void bcm2835_power_off(void)
{
	u32 rsts;

	/*
	 * Setting the watchdog hard-reset bit distinguishes this reset
	 * from a normal (full) one; bootcode.bin will not reboot after
	 * a hard reset.
	 */
	rsts = readl_relaxed(wdt_regs + PM_RSTS);
	/* NOTE(review): the RSTC config mask is reused on the RSTS
	 * register here, mirroring the original code -- confirm that
	 * is intended. */
	rsts &= ~PM_RSTC_WRCFG_MASK;
	rsts |= PM_PASSWORD | PM_RSTS_HADWRH_SET;
	writel_relaxed(rsts, wdt_regs + PM_RSTS);

	/* Continue with normal reset mechanism */
	bcm2835_restart(0, "");
}

static struct map_desc io_map __initdata = {
	.virtual = BCM2835_PERIPH_VIRT,
	.pfn = __phys_to_pfn(BCM2835_PERIPH_PHYS),
	.length = BCM2835_PERIPH_SIZE,
	.type = MT_DEVICE
};

static void __init bcm2835_map_io(void)
{
	iotable_init(&io_map, 1);
}

static void __init bcm2835_init(void)
{
	int ret;

	bcm2835_setup_restart();
	/* Only offer power-off if the watchdog regs actually mapped. */
	if (wdt_regs)
		pm_power_off = bcm2835_power_off;

	bcm2835_init_clocks();

	ret = of_platform_populate(NULL, of_default_bus_match_table, NULL,
				   NULL);
	if (ret) {
		pr_err("of_platform_populate failed: %d\n", ret);
		BUG();
	}
}

static const char * const bcm2835_compat[] = {
	"brcm,bcm2835",
	NULL
};

DT_MACHINE_START(BCM2835, "BCM2835")
	.map_io = bcm2835_map_io,
	.init_irq = bcm2835_init_irq,
	.handle_irq = bcm2835_handle_irq,
	.init_machine = bcm2835_init,
	.init_time = clocksource_of_init,
	.restart = bcm2835_restart,
	.dt_compat = bcm2835_compat
MACHINE_END
gpl-2.0
azhou-nicira/net-next
block/blk-iopoll.c
1433
5984
/*
 * Functions related to interrupt-poll handling in the block layer. This
 * is similar to NAPI for network devices.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/blk-iopoll.h>
#include <linux/delay.h>

#include "blk.h"

/* Max completions consumed per softirq run before punting to re-raise. */
static unsigned int blk_iopoll_budget __read_mostly = 256;

/* Per-CPU list of blk_iopoll instances pending a poll. */
static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);

/**
 * blk_iopoll_sched - Schedule a run of the iopoll handler
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     Add this blk_iopoll structure to the pending poll list and trigger the
 *     raise of the blk iopoll softirq. The driver must already have gotten a
 *     successful return from blk_iopoll_sched_prep() before calling this.
 **/
void blk_iopoll_sched(struct blk_iopoll *iop)
{
	unsigned long flags;

	/* irqs off: the per-CPU list is also touched from hard irq context */
	local_irq_save(flags);
	list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
	__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_iopoll_sched);

/**
 * __blk_iopoll_complete - Mark this @iop as un-polled again
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     See blk_iopoll_complete(). This function must be called with interrupts
 *     disabled.
 **/
void __blk_iopoll_complete(struct blk_iopoll *iop)
{
	list_del(&iop->list);
	/* order the list removal before releasing the SCHED bit */
	smp_mb__before_atomic();
	clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(__blk_iopoll_complete);

/**
 * blk_iopoll_complete - Mark this @iop as un-polled again
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     If a driver consumes less than the assigned budget in its run of the
 *     iopoll handler, it'll end the polled mode by calling this function. The
 *     iopoll handler will not be invoked again before blk_iopoll_sched_prep()
 *     is called.
 **/
void blk_iopoll_complete(struct blk_iopoll *iop)
{
	unsigned long flags;

	local_irq_save(flags);
	__blk_iopoll_complete(iop);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_iopoll_complete);

/* Softirq handler: drain the per-CPU pending list within a time/budget cap. */
static void blk_iopoll_softirq(struct softirq_action *h)
{
	struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
	int rearm = 0, budget = blk_iopoll_budget;
	unsigned long start_time = jiffies;

	local_irq_disable();

	while (!list_empty(list)) {
		struct blk_iopoll *iop;
		int work, weight;

		/*
		 * If softirq window is exhausted then punt.
		 */
		if (budget <= 0 || time_after(jiffies, start_time)) {
			rearm = 1;
			break;
		}

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		iop = list_entry(list->next, struct blk_iopoll, list);

		weight = iop->weight;
		work = 0;
		if (test_bit(IOPOLL_F_SCHED, &iop->state))
			work = iop->poll(iop, weight);

		budget -= work;

		local_irq_disable();

		/*
		 * Drivers must not modify the iopoll state, if they
		 * consume their assigned weight (or more, some drivers can't
		 * easily just stop processing, they have to complete an
		 * entire mask of commands). In such cases this code
		 * still "owns" the iopoll instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (work >= weight) {
			if (blk_iopoll_disable_pending(iop))
				__blk_iopoll_complete(iop);
			else
				list_move_tail(&iop->list, list);
		}
	}

	if (rearm)
		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);

	local_irq_enable();
}

/**
 * blk_iopoll_disable - Disable iopoll on this @iop
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     Disable io polling and wait for any pending callbacks to have completed.
 **/
void blk_iopoll_disable(struct blk_iopoll *iop)
{
	set_bit(IOPOLL_F_DISABLE, &iop->state);
	/* spin until we take ownership of the SCHED bit ourselves */
	while (test_and_set_bit(IOPOLL_F_SCHED, &iop->state))
		msleep(1);
	clear_bit(IOPOLL_F_DISABLE, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_disable);

/**
 * blk_iopoll_enable - Enable iopoll on this @iop
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     Enable iopoll on this @iop. Note that the handler run will not be
 *     scheduled, it will only mark it as active.
 **/
void blk_iopoll_enable(struct blk_iopoll *iop)
{
	BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
	smp_mb__before_atomic();
	clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_enable);

/**
 * blk_iopoll_init - Initialize this @iop
 * @iop:      The parent iopoll structure
 * @weight:   The default weight (or command completion budget)
 * @poll_fn:  The handler to invoke
 *
 * Description:
 *     Initialize this blk_iopoll structure. Before being actively used, the
 *     driver must call blk_iopoll_enable().
 **/
void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn)
{
	memset(iop, 0, sizeof(*iop));
	INIT_LIST_HEAD(&iop->list);
	iop->weight = weight;
	iop->poll = poll_fn;
	/* starts "scheduled": blk_iopoll_enable() releases the bit */
	set_bit(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_init);

/* CPU hotplug callback: adopt a dead CPU's pending iopoll entries. */
static int blk_iopoll_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
				 this_cpu_ptr(&blk_cpu_iopoll));
		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block blk_iopoll_cpu_notifier = {
	.notifier_call	= blk_iopoll_cpu_notify,
};

/* Boot-time setup: per-CPU lists, softirq handler, hotplug notifier. */
static __init int blk_iopoll_setup(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));

	open_softirq(BLOCK_IOPOLL_SOFTIRQ, blk_iopoll_softirq);
	register_hotcpu_notifier(&blk_iopoll_cpu_notifier);
	return 0;
}
subsys_initcall(blk_iopoll_setup);
gpl-2.0
arunkuttiyara/linux
tools/lib/api/fd/array.c
1433
2477
/*
 * Copyright (C) 2014, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "array.h"
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Reset @fda to an empty state.  Does not release memory, so call it only
 * on a zeroed or already-released array (see fdarray__exit()).
 */
void fdarray__init(struct fdarray *fda, int nr_autogrow)
{
	fda->entries	 = NULL;
	fda->priv	 = NULL;
	fda->nr		 = fda->nr_alloc = 0;
	fda->nr_autogrow = nr_autogrow;
}

/*
 * Grow the capacity of @fda by @nr slots (the pollfd table and the parallel
 * priv table).  Returns 0 on success, -ENOMEM on failure; on failure the
 * array keeps its previous, still-valid contents.
 */
int fdarray__grow(struct fdarray *fda, int nr)
{
	void *priv;
	int nr_alloc = fda->nr_alloc + nr;
	size_t psize = sizeof(fda->priv[0]) * nr_alloc;
	size_t size = sizeof(struct pollfd) * nr_alloc;
	struct pollfd *entries = realloc(fda->entries, size);

	if (entries == NULL)
		return -ENOMEM;

	/*
	 * realloc() may already have freed (or moved) the old entries block,
	 * so publish the new pointer right away.  The previous code instead
	 * did free(entries) when the second allocation failed, which left
	 * fda->entries dangling (double-freed when the blocks coincided).
	 */
	fda->entries = entries;

	priv = realloc(fda->priv, psize);
	if (priv == NULL)
		return -ENOMEM;

	fda->priv = priv;
	fda->nr_alloc = nr_alloc;
	return 0;
}

/*
 * Allocate and initialize a new fdarray with room for @nr_alloc entries,
 * growing by @nr_autogrow entries whenever it fills up.  Returns NULL on
 * allocation failure.
 */
struct fdarray *fdarray__new(int nr_alloc, int nr_autogrow)
{
	struct fdarray *fda = calloc(1, sizeof(*fda));

	if (fda != NULL) {
		if (fdarray__grow(fda, nr_alloc)) {
			fdarray__exit(fda);	/* release any partial allocation */
			free(fda);
			fda = NULL;
		} else {
			fda->nr_autogrow = nr_autogrow;
		}
	}

	return fda;
}

/* Release the tables owned by @fda and reset it to an empty state. */
void fdarray__exit(struct fdarray *fda)
{
	free(fda->entries);
	free(fda->priv);
	fdarray__init(fda, 0);
}

/* Release @fda itself in addition to the tables it owns. */
void fdarray__delete(struct fdarray *fda)
{
	fdarray__exit(fda);
	free(fda);
}

/*
 * Append @fd to @fda, polling for the events given in @revents (stored in
 * ->events).  Returns the position the fd was stored at, or -ENOMEM if the
 * array is full and cannot grow.
 */
int fdarray__add(struct fdarray *fda, int fd, short revents)
{
	int pos = fda->nr;

	if (fda->nr == fda->nr_alloc &&
	    fdarray__grow(fda, fda->nr_autogrow) < 0)
		return -ENOMEM;

	/*
	 * With nr_autogrow == 0 the grow above is a no-op: refuse instead of
	 * writing one past the end of the entries table.
	 */
	if (fda->nr == fda->nr_alloc)
		return -ENOMEM;

	fda->entries[fda->nr].fd = fd;
	fda->entries[fda->nr].events = revents;
	fda->nr++;
	return pos;
}

/*
 * Compact @fda, dropping every entry whose ->revents intersects @revents;
 * @entry_destructor, when non-NULL, is called for each dropped entry.  The
 * priv table is kept in sync.  Returns the new number of entries.
 */
int fdarray__filter(struct fdarray *fda, short revents,
		    void (*entry_destructor)(struct fdarray *fda, int fd))
{
	int fd, nr = 0;

	if (fda->nr == 0)
		return 0;

	for (fd = 0; fd < fda->nr; ++fd) {
		if (fda->entries[fd].revents & revents) {
			if (entry_destructor)
				entry_destructor(fda, fd);

			continue;
		}

		if (fd != nr) {
			fda->entries[nr] = fda->entries[fd];
			fda->priv[nr]	 = fda->priv[fd];
		}

		++nr;
	}

	return fda->nr = nr;
}

/* Thin wrapper: poll(2) over all entries in @fda. */
int fdarray__poll(struct fdarray *fda, int timeout)
{
	return poll(fda->entries, fda->nr, timeout);
}

/* Print "<nr> [ fd0, fd1, ... ]" to @fp; returns the character count. */
int fdarray__fprintf(struct fdarray *fda, FILE *fp)
{
	int fd, printed = fprintf(fp, "%d [ ", fda->nr);

	for (fd = 0; fd < fda->nr; ++fd)
		printed += fprintf(fp, "%s%d", fd ? ", " : "", fda->entries[fd].fd);

	return printed + fprintf(fp, " ]");
}
gpl-2.0
Distrotech/linux
drivers/tty/serial/dz.c
1689
22950
/* * dz.c: Serial port driver for DECstations equipped * with the DZ chipset. * * Copyright (C) 1998 Olivier A. D. Lebaillif * * Email: olivier.lebaillif@ifrsys.com * * Copyright (C) 2004, 2006, 2007 Maciej W. Rozycki * * [31-AUG-98] triemer * Changed IRQ to use Harald's dec internals interrupts.h * removed base_addr code - moving address assignment to setup.c * Changed name of dz_init to rs_init to be consistent with tc code * [13-NOV-98] triemer fixed code to receive characters * after patches by harald to irq code. * [09-JAN-99] triemer minor fix for schedule - due to removal of timeout * field from "current" - somewhere between 2.1.121 and 2.1.131 Qua Jun 27 15:02:26 BRT 2001 * [27-JUN-2001] Arnaldo Carvalho de Melo <acme@conectiva.com.br> - cleanups * * Parts (C) 1999 David Airlie, airlied@linux.ie * [07-SEP-99] Bugfixes * * [06-Jan-2002] Russell King <rmk@arm.linux.org.uk> * Converted to new serial core */ #undef DEBUG_DZ #if defined(CONFIG_SERIAL_DZ_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/bitops.h> #include <linux/compiler.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/module.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/sysrq.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/atomic.h> #include <asm/bootinfo.h> #include <asm/io.h> #include <asm/dec/interrupts.h> #include <asm/dec/kn01.h> #include <asm/dec/kn02.h> #include <asm/dec/machtype.h> #include <asm/dec/prom.h> #include <asm/dec/system.h> #include "dz.h" MODULE_DESCRIPTION("DECstation DZ serial driver"); MODULE_LICENSE("GPL"); static char dz_name[] __initdata = "DECstation DZ serial driver version "; static char dz_version[] __initdata = "1.04"; struct dz_port { struct dz_mux *mux; struct uart_port port; unsigned int cflag; }; 
struct dz_mux { struct dz_port dport[DZ_NB_PORT]; atomic_t map_guard; atomic_t irq_guard; int initialised; }; static struct dz_mux dz_mux; static inline struct dz_port *to_dport(struct uart_port *uport) { return container_of(uport, struct dz_port, port); } /* * ------------------------------------------------------------ * dz_in () and dz_out () * * These routines are used to access the registers of the DZ * chip, hiding relocation differences between implementation. * ------------------------------------------------------------ */ static u16 dz_in(struct dz_port *dport, unsigned offset) { void __iomem *addr = dport->port.membase + offset; return readw(addr); } static void dz_out(struct dz_port *dport, unsigned offset, u16 value) { void __iomem *addr = dport->port.membase + offset; writew(value, addr); } /* * ------------------------------------------------------------ * rs_stop () and rs_start () * * These routines are called before setting or resetting * tty->stopped. They enable or disable transmitter interrupts, * as necessary. * ------------------------------------------------------------ */ static void dz_stop_tx(struct uart_port *uport) { struct dz_port *dport = to_dport(uport); u16 tmp, mask = 1 << dport->port.line; tmp = dz_in(dport, DZ_TCR); /* read the TX flag */ tmp &= ~mask; /* clear the TX flag */ dz_out(dport, DZ_TCR, tmp); } static void dz_start_tx(struct uart_port *uport) { struct dz_port *dport = to_dport(uport); u16 tmp, mask = 1 << dport->port.line; tmp = dz_in(dport, DZ_TCR); /* read the TX flag */ tmp |= mask; /* set the TX flag */ dz_out(dport, DZ_TCR, tmp); } static void dz_stop_rx(struct uart_port *uport) { struct dz_port *dport = to_dport(uport); dport->cflag &= ~DZ_RXENAB; dz_out(dport, DZ_LPR, dport->cflag); } /* * ------------------------------------------------------------ * * Here start the interrupt handling routines. All of the following * subroutines are declared as inline and are folded into * dz_interrupt. 
They were separated out for readability's sake. * * Note: dz_interrupt() is a "fast" interrupt, which means that it * runs with interrupts turned off. People who may want to modify * dz_interrupt() should try to keep the interrupt handler as fast as * possible. After you are done making modifications, it is not a bad * idea to do: * * make drivers/serial/dz.s * * and look at the resulting assemble code in dz.s. * * ------------------------------------------------------------ */ /* * ------------------------------------------------------------ * receive_char () * * This routine deals with inputs from any lines. * ------------------------------------------------------------ */ static inline void dz_receive_chars(struct dz_mux *mux) { struct uart_port *uport; struct dz_port *dport = &mux->dport[0]; struct uart_icount *icount; int lines_rx[DZ_NB_PORT] = { [0 ... DZ_NB_PORT - 1] = 0 }; unsigned char ch, flag; u16 status; int i; while ((status = dz_in(dport, DZ_RBUF)) & DZ_DVAL) { dport = &mux->dport[LINE(status)]; uport = &dport->port; ch = UCHAR(status); /* grab the char */ flag = TTY_NORMAL; icount = &uport->icount; icount->rx++; if (unlikely(status & (DZ_OERR | DZ_FERR | DZ_PERR))) { /* * There is no separate BREAK status bit, so treat * null characters with framing errors as BREAKs; * normally, otherwise. For this move the Framing * Error bit to a simulated BREAK bit. */ if (!ch) { status |= (status & DZ_FERR) >> (ffs(DZ_FERR) - ffs(DZ_BREAK)); status &= ~DZ_FERR; } /* Handle SysRq/SAK & keep track of the statistics. 
*/ if (status & DZ_BREAK) { icount->brk++; if (uart_handle_break(uport)) continue; } else if (status & DZ_FERR) icount->frame++; else if (status & DZ_PERR) icount->parity++; if (status & DZ_OERR) icount->overrun++; status &= uport->read_status_mask; if (status & DZ_BREAK) flag = TTY_BREAK; else if (status & DZ_FERR) flag = TTY_FRAME; else if (status & DZ_PERR) flag = TTY_PARITY; } if (uart_handle_sysrq_char(uport, ch)) continue; uart_insert_char(uport, status, DZ_OERR, ch, flag); lines_rx[LINE(status)] = 1; } for (i = 0; i < DZ_NB_PORT; i++) if (lines_rx[i]) tty_flip_buffer_push(&mux->dport[i].port.state->port); } /* * ------------------------------------------------------------ * transmit_char () * * This routine deals with outputs to any lines. * ------------------------------------------------------------ */ static inline void dz_transmit_chars(struct dz_mux *mux) { struct dz_port *dport = &mux->dport[0]; struct circ_buf *xmit; unsigned char tmp; u16 status; status = dz_in(dport, DZ_CSR); dport = &mux->dport[LINE(status)]; xmit = &dport->port.state->xmit; if (dport->port.x_char) { /* XON/XOFF chars */ dz_out(dport, DZ_TDR, dport->port.x_char); dport->port.icount.tx++; dport->port.x_char = 0; return; } /* If nothing to do or stopped or hardware stopped. */ if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) { spin_lock(&dport->port.lock); dz_stop_tx(&dport->port); spin_unlock(&dport->port.lock); return; } /* * If something to do... (remember the dz has no output fifo, * so we go one char at a time) :-< */ tmp = xmit->buf[xmit->tail]; xmit->tail = (xmit->tail + 1) & (DZ_XMIT_SIZE - 1); dz_out(dport, DZ_TDR, tmp); dport->port.icount.tx++; if (uart_circ_chars_pending(xmit) < DZ_WAKEUP_CHARS) uart_write_wakeup(&dport->port); /* Are we are done. 
*/ if (uart_circ_empty(xmit)) { spin_lock(&dport->port.lock); dz_stop_tx(&dport->port); spin_unlock(&dport->port.lock); } } /* * ------------------------------------------------------------ * check_modem_status() * * DS 3100 & 5100: Only valid for the MODEM line, duh! * DS 5000/200: Valid for the MODEM and PRINTER line. * ------------------------------------------------------------ */ static inline void check_modem_status(struct dz_port *dport) { /* * FIXME: * 1. No status change interrupt; use a timer. * 2. Handle the 3100/5000 as appropriate. --macro */ u16 status; /* If not the modem line just return. */ if (dport->port.line != DZ_MODEM) return; status = dz_in(dport, DZ_MSR); /* it's easy, since DSR2 is the only bit in the register */ if (status) dport->port.icount.dsr++; } /* * ------------------------------------------------------------ * dz_interrupt () * * this is the main interrupt routine for the DZ chip. * It deals with the multiple ports. * ------------------------------------------------------------ */ static irqreturn_t dz_interrupt(int irq, void *dev_id) { struct dz_mux *mux = dev_id; struct dz_port *dport = &mux->dport[0]; u16 status; /* get the reason why we just got an irq */ status = dz_in(dport, DZ_CSR); if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE)) dz_receive_chars(mux); if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE)) dz_transmit_chars(mux); return IRQ_HANDLED; } /* * ------------------------------------------------------------------- * Here ends the DZ interrupt routines. * ------------------------------------------------------------------- */ static unsigned int dz_get_mctrl(struct uart_port *uport) { /* * FIXME: Handle the 3100/5000 as appropriate. 
--macro */ struct dz_port *dport = to_dport(uport); unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; if (dport->port.line == DZ_MODEM) { if (dz_in(dport, DZ_MSR) & DZ_MODEM_DSR) mctrl &= ~TIOCM_DSR; } return mctrl; } static void dz_set_mctrl(struct uart_port *uport, unsigned int mctrl) { /* * FIXME: Handle the 3100/5000 as appropriate. --macro */ struct dz_port *dport = to_dport(uport); u16 tmp; if (dport->port.line == DZ_MODEM) { tmp = dz_in(dport, DZ_TCR); if (mctrl & TIOCM_DTR) tmp &= ~DZ_MODEM_DTR; else tmp |= DZ_MODEM_DTR; dz_out(dport, DZ_TCR, tmp); } } /* * ------------------------------------------------------------------- * startup () * * various initialization tasks * ------------------------------------------------------------------- */ static int dz_startup(struct uart_port *uport) { struct dz_port *dport = to_dport(uport); struct dz_mux *mux = dport->mux; unsigned long flags; int irq_guard; int ret; u16 tmp; irq_guard = atomic_add_return(1, &mux->irq_guard); if (irq_guard != 1) return 0; ret = request_irq(dport->port.irq, dz_interrupt, IRQF_SHARED, "dz", mux); if (ret) { atomic_add(-1, &mux->irq_guard); printk(KERN_ERR "dz: Cannot get IRQ %d!\n", dport->port.irq); return ret; } spin_lock_irqsave(&dport->port.lock, flags); /* Enable interrupts. */ tmp = dz_in(dport, DZ_CSR); tmp |= DZ_RIE | DZ_TIE; dz_out(dport, DZ_CSR, tmp); spin_unlock_irqrestore(&dport->port.lock, flags); return 0; } /* * ------------------------------------------------------------------- * shutdown () * * This routine will shutdown a serial port; interrupts are disabled, and * DTR is dropped if the hangup on close termio flag is on. 
 * -------------------------------------------------------------------
 */
static void dz_shutdown(struct uart_port *uport)
{
	struct dz_port *dport = to_dport(uport);
	struct dz_mux *mux = dport->mux;
	unsigned long flags;
	int irq_guard;
	u16 tmp;

	spin_lock_irqsave(&dport->port.lock, flags);
	dz_stop_tx(&dport->port);
	spin_unlock_irqrestore(&dport->port.lock, flags);

	/* The IRQ is shared by all ports on the mux; release it last. */
	irq_guard = atomic_add_return(-1, &mux->irq_guard);
	if (!irq_guard) {
		/* Disable interrupts.  */
		tmp = dz_in(dport, DZ_CSR);
		tmp &= ~(DZ_RIE | DZ_TIE);
		dz_out(dport, DZ_CSR, tmp);

		free_irq(dport->port.irq, mux);
	}
}

/*
 * -------------------------------------------------------------------
 * dz_tx_empty() -- get the transmitter empty status
 *
 * Purpose: Let user call ioctl() to get info when the UART physically
 *          is emptied.  On bus types like RS485, the transmitter must
 *          release the bus after transmitting.  This must be done when
 *          the transmit shift register is empty, not be done when the
 *          transmit holding register is empty.  This functionality
 *          allows an RS485 driver to be written in user space.
 * -------------------------------------------------------------------
 */
static unsigned int dz_tx_empty(struct uart_port *uport)
{
	struct dz_port *dport = to_dport(uport);
	unsigned short tmp, mask = 1 << dport->port.line;

	/* Empty is reported when this line's TX-enable bit is clear in TCR. */
	tmp = dz_in(dport, DZ_TCR);
	tmp &= mask;

	return tmp ? 0 : TIOCSER_TEMT;
}

static void dz_break_ctl(struct uart_port *uport, int break_state)
{
	/*
	 * FIXME: Can't access BREAK bits in TDR easily;
	 * reuse the code for polled TX. --macro
	 */
	struct dz_port *dport = to_dport(uport);
	unsigned long flags;
	unsigned short tmp, mask = 1 << dport->port.line;

	spin_lock_irqsave(&uport->lock, flags);
	tmp = dz_in(dport, DZ_TCR);
	if (break_state)
		tmp |= mask;
	else
		tmp &= ~mask;
	dz_out(dport, DZ_TCR, tmp);
	spin_unlock_irqrestore(&uport->lock, flags);
}

/*
 * Map a numeric baud rate to the DZ_Bxxx code for the LPR register;
 * returns -1 for rates the chip does not support.
 */
static int dz_encode_baud_rate(unsigned int baud)
{
	switch (baud) {
	case 50:
		return DZ_B50;
	case 75:
		return DZ_B75;
	case 110:
		return DZ_B110;
	case 134:
		return DZ_B134;
	case 150:
		return DZ_B150;
	case 300:
		return DZ_B300;
	case 600:
		return DZ_B600;
	case 1200:
		return DZ_B1200;
	case 1800:
		return DZ_B1800;
	case 2000:
		return DZ_B2000;
	case 2400:
		return DZ_B2400;
	case 3600:
		return DZ_B3600;
	case 4800:
		return DZ_B4800;
	case 7200:
		return DZ_B7200;
	case 9600:
		return DZ_B9600;
	default:
		return -1;
	}
}

/* One-shot chip reset for the whole mux; subsequent calls are no-ops. */
static void dz_reset(struct dz_port *dport)
{
	struct dz_mux *mux = dport->mux;

	if (mux->initialised)
		return;

	/* Issue a clear and busy-wait for the chip to finish it. */
	dz_out(dport, DZ_CSR, DZ_CLR);
	while (dz_in(dport, DZ_CSR) & DZ_CLR);
	iob();

	/* Enable scanning.  */
	dz_out(dport, DZ_CSR, DZ_MSE);

	mux->initialised = 1;
}

/*
 * Program line parameters (character size, stop bits, parity, baud, RX
 * enable) into the LPR register and recompute the status masks used by
 * the receive path.
 */
static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
			   struct ktermios *old_termios)
{
	struct dz_port *dport = to_dport(uport);
	unsigned long flags;
	unsigned int cflag, baud;
	int bflag;

	/* LPR takes the line number in its low bits. */
	cflag = dport->port.line;

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		cflag |= DZ_CS5;
		break;
	case CS6:
		cflag |= DZ_CS6;
		break;
	case CS7:
		cflag |= DZ_CS7;
		break;
	case CS8:
	default:
		cflag |= DZ_CS8;
	}

	if (termios->c_cflag & CSTOPB)
		cflag |= DZ_CSTOPB;
	if (termios->c_cflag & PARENB)
		cflag |= DZ_PARENB;
	if (termios->c_cflag & PARODD)
		cflag |= DZ_PARODD;

	baud = uart_get_baud_rate(uport, termios, old_termios, 50, 9600);
	bflag = dz_encode_baud_rate(baud);
	if (bflag < 0)	{			/* Try to keep unchanged.  */
		baud = uart_get_baud_rate(uport, old_termios, NULL, 50, 9600);
		bflag = dz_encode_baud_rate(baud);
		if (bflag < 0)	{		/* Resort to 9600.  */
			baud = 9600;
			bflag = DZ_B9600;
		}
		/* Report the rate actually used back into termios. */
		tty_termios_encode_baud_rate(termios, baud, baud);
	}
	cflag |= bflag;

	if (termios->c_cflag & CREAD)
		cflag |= DZ_RXENAB;

	spin_lock_irqsave(&dport->port.lock, flags);

	uart_update_timeout(uport, termios->c_cflag, baud);

	dz_out(dport, DZ_LPR, cflag);
	dport->cflag = cflag;

	/* setup accept flag */
	dport->port.read_status_mask = DZ_OERR;
	if (termios->c_iflag & INPCK)
		dport->port.read_status_mask |= DZ_FERR | DZ_PERR;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		dport->port.read_status_mask |= DZ_BREAK;

	/* characters to ignore (uport and &dport->port alias the same port) */
	uport->ignore_status_mask = 0;
	if ((termios->c_iflag & (IGNPAR | IGNBRK)) == (IGNPAR | IGNBRK))
		dport->port.ignore_status_mask |= DZ_OERR;
	if (termios->c_iflag & IGNPAR)
		dport->port.ignore_status_mask |= DZ_FERR | DZ_PERR;
	if (termios->c_iflag & IGNBRK)
		dport->port.ignore_status_mask |= DZ_BREAK;

	spin_unlock_irqrestore(&dport->port.lock, flags);
}

/*
 * Hack alert!
 * Required solely so that the initial PROM-based console
 * works undisturbed in parallel with this one.
*/ static void dz_pm(struct uart_port *uport, unsigned int state, unsigned int oldstate) { struct dz_port *dport = to_dport(uport); unsigned long flags; spin_lock_irqsave(&dport->port.lock, flags); if (state < 3) dz_start_tx(&dport->port); else dz_stop_tx(&dport->port); spin_unlock_irqrestore(&dport->port.lock, flags); } static const char *dz_type(struct uart_port *uport) { return "DZ"; } static void dz_release_port(struct uart_port *uport) { struct dz_mux *mux = to_dport(uport)->mux; int map_guard; iounmap(uport->membase); uport->membase = NULL; map_guard = atomic_add_return(-1, &mux->map_guard); if (!map_guard) release_mem_region(uport->mapbase, dec_kn_slot_size); } static int dz_map_port(struct uart_port *uport) { if (!uport->membase) uport->membase = ioremap_nocache(uport->mapbase, dec_kn_slot_size); if (!uport->membase) { printk(KERN_ERR "dz: Cannot map MMIO\n"); return -ENOMEM; } return 0; } static int dz_request_port(struct uart_port *uport) { struct dz_mux *mux = to_dport(uport)->mux; int map_guard; int ret; map_guard = atomic_add_return(1, &mux->map_guard); if (map_guard == 1) { if (!request_mem_region(uport->mapbase, dec_kn_slot_size, "dz")) { atomic_add(-1, &mux->map_guard); printk(KERN_ERR "dz: Unable to reserve MMIO resource\n"); return -EBUSY; } } ret = dz_map_port(uport); if (ret) { map_guard = atomic_add_return(-1, &mux->map_guard); if (!map_guard) release_mem_region(uport->mapbase, dec_kn_slot_size); return ret; } return 0; } static void dz_config_port(struct uart_port *uport, int flags) { struct dz_port *dport = to_dport(uport); if (flags & UART_CONFIG_TYPE) { if (dz_request_port(uport)) return; uport->type = PORT_DZ; dz_reset(dport); } } /* * Verify the new serial_struct (for TIOCSSERIAL). 
*/ static int dz_verify_port(struct uart_port *uport, struct serial_struct *ser) { int ret = 0; if (ser->type != PORT_UNKNOWN && ser->type != PORT_DZ) ret = -EINVAL; if (ser->irq != uport->irq) ret = -EINVAL; return ret; } static struct uart_ops dz_ops = { .tx_empty = dz_tx_empty, .get_mctrl = dz_get_mctrl, .set_mctrl = dz_set_mctrl, .stop_tx = dz_stop_tx, .start_tx = dz_start_tx, .stop_rx = dz_stop_rx, .break_ctl = dz_break_ctl, .startup = dz_startup, .shutdown = dz_shutdown, .set_termios = dz_set_termios, .pm = dz_pm, .type = dz_type, .release_port = dz_release_port, .request_port = dz_request_port, .config_port = dz_config_port, .verify_port = dz_verify_port, }; static void __init dz_init_ports(void) { static int first = 1; unsigned long base; int line; if (!first) return; first = 0; if (mips_machtype == MACH_DS23100 || mips_machtype == MACH_DS5100) base = dec_kn_slot_base + KN01_DZ11; else base = dec_kn_slot_base + KN02_DZ11; for (line = 0; line < DZ_NB_PORT; line++) { struct dz_port *dport = &dz_mux.dport[line]; struct uart_port *uport = &dport->port; dport->mux = &dz_mux; uport->irq = dec_interrupt[DEC_IRQ_DZ11]; uport->fifosize = 1; uport->iotype = UPIO_MEM; uport->flags = UPF_BOOT_AUTOCONF; uport->ops = &dz_ops; uport->line = line; uport->mapbase = base; } } #ifdef CONFIG_SERIAL_DZ_CONSOLE /* * ------------------------------------------------------------------- * dz_console_putchar() -- transmit a character * * Polled transmission. This is tricky. We need to mask transmit * interrupts so that they do not interfere, enable the transmitter * for the line requested and then wait till the transmit scanner * requests data for this line. But it may request data for another * line first, in which case we have to disable its transmitter and * repeat waiting till our line pops up. Only then the character may * be transmitted. Finally, the state of the transmitter mask is * restored. Welcome to the world of PDP-11! 
* ------------------------------------------------------------------- */ static void dz_console_putchar(struct uart_port *uport, int ch) { struct dz_port *dport = to_dport(uport); unsigned long flags; unsigned short csr, tcr, trdy, mask; int loops = 10000; spin_lock_irqsave(&dport->port.lock, flags); csr = dz_in(dport, DZ_CSR); dz_out(dport, DZ_CSR, csr & ~DZ_TIE); tcr = dz_in(dport, DZ_TCR); tcr |= 1 << dport->port.line; mask = tcr; dz_out(dport, DZ_TCR, mask); iob(); spin_unlock_irqrestore(&dport->port.lock, flags); do { trdy = dz_in(dport, DZ_CSR); if (!(trdy & DZ_TRDY)) continue; trdy = (trdy & DZ_TLINE) >> 8; if (trdy == dport->port.line) break; mask &= ~(1 << trdy); dz_out(dport, DZ_TCR, mask); iob(); udelay(2); } while (--loops); if (loops) /* Cannot send otherwise. */ dz_out(dport, DZ_TDR, ch); dz_out(dport, DZ_TCR, tcr); dz_out(dport, DZ_CSR, csr); } /* * ------------------------------------------------------------------- * dz_console_print () * * dz_console_print is registered for printk. * The console must be locked when we get here. * ------------------------------------------------------------------- */ static void dz_console_print(struct console *co, const char *str, unsigned int count) { struct dz_port *dport = &dz_mux.dport[co->index]; #ifdef DEBUG_DZ prom_printf((char *) str); #endif uart_console_write(&dport->port, str, count, dz_console_putchar); } static int __init dz_console_setup(struct console *co, char *options) { struct dz_port *dport = &dz_mux.dport[co->index]; struct uart_port *uport = &dport->port; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; int ret; ret = dz_map_port(uport); if (ret) return ret; spin_lock_init(&dport->port.lock); /* For dz_pm(). 
*/ dz_reset(dport); dz_pm(uport, 0, -1); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(&dport->port, co, baud, parity, bits, flow); } static struct uart_driver dz_reg; static struct console dz_console = { .name = "ttyS", .write = dz_console_print, .device = uart_console_device, .setup = dz_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &dz_reg, }; static int __init dz_serial_console_init(void) { if (!IOASIC) { dz_init_ports(); register_console(&dz_console); return 0; } else return -ENXIO; } console_initcall(dz_serial_console_init); #define SERIAL_DZ_CONSOLE &dz_console #else #define SERIAL_DZ_CONSOLE NULL #endif /* CONFIG_SERIAL_DZ_CONSOLE */ static struct uart_driver dz_reg = { .owner = THIS_MODULE, .driver_name = "serial", .dev_name = "ttyS", .major = TTY_MAJOR, .minor = 64, .nr = DZ_NB_PORT, .cons = SERIAL_DZ_CONSOLE, }; static int __init dz_init(void) { int ret, i; if (IOASIC) return -ENXIO; printk("%s%s\n", dz_name, dz_version); dz_init_ports(); ret = uart_register_driver(&dz_reg); if (ret) return ret; for (i = 0; i < DZ_NB_PORT; i++) uart_add_one_port(&dz_reg, &dz_mux.dport[i].port); return 0; } module_init(dz_init);
gpl-2.0
SudeepDuhoon/FirSt-kernel
kernel/test_kprobes.c
2713
8674
/*
 * test_kprobes.c - simple sanity test for *probes
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software;  you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY;  without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/random.h>

#define div_factor 3

/* rand1 seeds each test; *h_val globals record that a handler really ran. */
static u32 rand1, preh_val, posth_val, jph_val;
static int errors, handler_errors, num_tests;
/* Indirect calls defeat inlining so the probe points actually get hit. */
static u32 (*target)(u32 value);
static u32 (*target2)(u32 value);

static noinline u32 kprobe_target(u32 value)
{
	return (value / div_factor);
}

/* Pre-handler: record the value the probed function is about to compute. */
static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	preh_val = (rand1 / div_factor);
	return 0;
}

/* Post-handler: check the pre-handler ran first, then leave its own mark. */
static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
		unsigned long flags)
{
	if (preh_val != (rand1 / div_factor)) {
		handler_errors++;
		printk(KERN_ERR "Kprobe smoke test failed: "
				"incorrect value in post_handler\n");
	}
	posth_val = preh_val + div_factor;
}

static struct kprobe kp = {
	.symbol_name = "kprobe_target",
	.pre_handler = kp_pre_handler,
	.post_handler = kp_post_handler
};

/* Register a single kprobe, trigger it, verify both handlers fired. */
static int test_kprobe(void)
{
	int ret;

	ret = register_kprobe(&kp);
	if (ret < 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"register_kprobe returned %d\n", ret);
		return ret;
	}

	ret = target(rand1);
	unregister_kprobe(&kp);

	if (preh_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kprobe pre_handler not called\n");
		handler_errors++;
	}

	if (posth_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kprobe post_handler not called\n");
		handler_errors++;
	}

	return 0;
}

/* Second target with a distinct result so the two probes are tell-apart-able. */
static noinline u32 kprobe_target2(u32 value)
{
	return (value / div_factor) + 1;
}

static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
{
	preh_val = (rand1 / div_factor) + 1;
	return 0;
}

static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
		unsigned long flags)
{
	if (preh_val != (rand1 / div_factor) + 1) {
		handler_errors++;
		printk(KERN_ERR "Kprobe smoke test failed: "
				"incorrect value in post_handler2\n");
	}
	posth_val = preh_val + div_factor;
}

static struct kprobe kp2 = {
	.symbol_name = "kprobe_target2",
	.pre_handler = kp_pre_handler2,
	.post_handler = kp_post_handler2
};

/* Register both kprobes at once via register_kprobes() and exercise each. */
static int test_kprobes(void)
{
	int ret;
	struct kprobe *kps[2] = {&kp, &kp2};

	/* addr and flags should be cleared for reusing kprobe. */
	kp.addr = NULL;
	kp.flags = 0;
	ret = register_kprobes(kps, 2);
	if (ret < 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"register_kprobes returned %d\n", ret);
		return ret;
	}

	preh_val = 0;
	posth_val = 0;
	ret = target(rand1);

	if (preh_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kprobe pre_handler not called\n");
		handler_errors++;
	}

	if (posth_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kprobe post_handler not called\n");
		handler_errors++;
	}

	preh_val = 0;
	posth_val = 0;
	ret = target2(rand1);

	if (preh_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kprobe pre_handler2 not called\n");
		handler_errors++;
	}

	if (posth_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kprobe post_handler2 not called\n");
		handler_errors++;
	}

	unregister_kprobes(kps, 2);
	return 0;
}

/* Jprobe handler: sees the probed function's arguments; must jprobe_return(). */
static u32 j_kprobe_target(u32 value)
{
	if (value != rand1) {
		handler_errors++;
		printk(KERN_ERR "Kprobe smoke test failed: "
				"incorrect value in jprobe handler\n");
	}

	jph_val = rand1;
	jprobe_return();
	return 0;
}

static struct jprobe jp = {
	.entry		= j_kprobe_target,
	.kp.symbol_name = "kprobe_target"
};

/* Register a single jprobe, trigger it, verify the handler fired. */
static int test_jprobe(void)
{
	int ret;

	ret = register_jprobe(&jp);
	if (ret < 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"register_jprobe returned %d\n", ret);
		return ret;
	}

	ret = target(rand1);
unregister_jprobe(&jp); if (jph_val == 0) { printk(KERN_ERR "Kprobe smoke test failed: " "jprobe handler not called\n"); handler_errors++; } return 0; } static struct jprobe jp2 = { .entry = j_kprobe_target, .kp.symbol_name = "kprobe_target2" }; static int test_jprobes(void) { int ret; struct jprobe *jps[2] = {&jp, &jp2}; /* addr and flags should be cleard for reusing kprobe. */ jp.kp.addr = NULL; jp.kp.flags = 0; ret = register_jprobes(jps, 2); if (ret < 0) { printk(KERN_ERR "Kprobe smoke test failed: " "register_jprobes returned %d\n", ret); return ret; } jph_val = 0; ret = target(rand1); if (jph_val == 0) { printk(KERN_ERR "Kprobe smoke test failed: " "jprobe handler not called\n"); handler_errors++; } jph_val = 0; ret = target2(rand1); if (jph_val == 0) { printk(KERN_ERR "Kprobe smoke test failed: " "jprobe handler2 not called\n"); handler_errors++; } unregister_jprobes(jps, 2); return 0; } #ifdef CONFIG_KRETPROBES static u32 krph_val; static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs) { krph_val = (rand1 / div_factor); return 0; } static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs) { unsigned long ret = regs_return_value(regs); if (ret != (rand1 / div_factor)) { handler_errors++; printk(KERN_ERR "Kprobe smoke test failed: " "incorrect value in kretprobe handler\n"); } if (krph_val == 0) { handler_errors++; printk(KERN_ERR "Kprobe smoke test failed: " "call to kretprobe entry handler failed\n"); } krph_val = rand1; return 0; } static struct kretprobe rp = { .handler = return_handler, .entry_handler = entry_handler, .kp.symbol_name = "kprobe_target" }; static int test_kretprobe(void) { int ret; ret = register_kretprobe(&rp); if (ret < 0) { printk(KERN_ERR "Kprobe smoke test failed: " "register_kretprobe returned %d\n", ret); return ret; } ret = target(rand1); unregister_kretprobe(&rp); if (krph_val != rand1) { printk(KERN_ERR "Kprobe smoke test failed: " "kretprobe handler not called\n"); handler_errors++; } 
return 0; } static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs) { unsigned long ret = regs_return_value(regs); if (ret != (rand1 / div_factor) + 1) { handler_errors++; printk(KERN_ERR "Kprobe smoke test failed: " "incorrect value in kretprobe handler2\n"); } if (krph_val == 0) { handler_errors++; printk(KERN_ERR "Kprobe smoke test failed: " "call to kretprobe entry handler failed\n"); } krph_val = rand1; return 0; } static struct kretprobe rp2 = { .handler = return_handler2, .entry_handler = entry_handler, .kp.symbol_name = "kprobe_target2" }; static int test_kretprobes(void) { int ret; struct kretprobe *rps[2] = {&rp, &rp2}; /* addr and flags should be cleard for reusing kprobe. */ rp.kp.addr = NULL; rp.kp.flags = 0; ret = register_kretprobes(rps, 2); if (ret < 0) { printk(KERN_ERR "Kprobe smoke test failed: " "register_kretprobe returned %d\n", ret); return ret; } krph_val = 0; ret = target(rand1); if (krph_val != rand1) { printk(KERN_ERR "Kprobe smoke test failed: " "kretprobe handler not called\n"); handler_errors++; } krph_val = 0; ret = target2(rand1); if (krph_val != rand1) { printk(KERN_ERR "Kprobe smoke test failed: " "kretprobe handler2 not called\n"); handler_errors++; } unregister_kretprobes(rps, 2); return 0; } #endif /* CONFIG_KRETPROBES */ int init_test_probes(void) { int ret; target = kprobe_target; target2 = kprobe_target2; do { rand1 = prandom_u32(); } while (rand1 <= div_factor); printk(KERN_INFO "Kprobe smoke test started\n"); num_tests++; ret = test_kprobe(); if (ret < 0) errors++; num_tests++; ret = test_kprobes(); if (ret < 0) errors++; num_tests++; ret = test_jprobe(); if (ret < 0) errors++; num_tests++; ret = test_jprobes(); if (ret < 0) errors++; #ifdef CONFIG_KRETPROBES num_tests++; ret = test_kretprobe(); if (ret < 0) errors++; num_tests++; ret = test_kretprobes(); if (ret < 0) errors++; #endif /* CONFIG_KRETPROBES */ if (errors) printk(KERN_ERR "BUG: Kprobe smoke test: %d out of " "%d tests failed\n", errors, 
num_tests); else if (handler_errors) printk(KERN_ERR "BUG: Kprobe smoke test: %d error(s) " "running handlers\n", handler_errors); else printk(KERN_INFO "Kprobe smoke test passed successfully\n"); return 0; }
gpl-2.0
h2o64/kernel_msm
drivers/usb/serial/io_edgeport.c
3993
97371
/* * Edgeport USB Serial Converter driver * * Copyright (C) 2000 Inside Out Networks, All rights reserved. * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Supports the following devices: * Edgeport/4 * Edgeport/4t * Edgeport/2 * Edgeport/4i * Edgeport/2i * Edgeport/421 * Edgeport/21 * Rapidport/4 * Edgeport/8 * Edgeport/2D8 * Edgeport/4D8 * Edgeport/8i * * For questions or problems with this driver, contact Inside Out * Networks technical support, or Peter Berger <pberger@brimson.com>, * or Al Borchers <alborchers@steinerpoint.com>. * */ #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/serial.h> #include <linux/ioctl.h> #include <linux/wait.h> #include <linux/firmware.h> #include <linux/ihex.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include "io_edgeport.h" #include "io_ionsp.h" /* info for the iosp messages */ #include "io_16654.h" /* 16654 UART defines */ /* * Version Information */ #define DRIVER_VERSION "v2.7" #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com> and David Iacovelli" #define DRIVER_DESC "Edgeport USB Serial Driver" #define MAX_NAME_LEN 64 #define CHASE_TIMEOUT (5*HZ) /* 5 seconds */ #define OPEN_TIMEOUT (5*HZ) /* 5 seconds */ #define COMMAND_TIMEOUT (5*HZ) /* 5 seconds */ /* receive port state */ enum RXSTATE { EXPECT_HDR1 = 0, /* Expect header byte 1 */ EXPECT_HDR2 = 1, /* Expect header byte 2 */ EXPECT_DATA = 2, /* Expect 'RxBytesRemaining' data */ EXPECT_HDR3 = 3, /* Expect header byte 3 (for status hdrs only) 
*/ }; /* Transmit Fifo * This Transmit queue is an extension of the edgeport Rx buffer. * The maximum amount of data buffered in both the edgeport * Rx buffer (maxTxCredits) and this buffer will never exceed maxTxCredits. */ struct TxFifo { unsigned int head; /* index to head pointer (write) */ unsigned int tail; /* index to tail pointer (read) */ unsigned int count; /* Bytes in queue */ unsigned int size; /* Max size of queue (equal to Max number of TxCredits) */ unsigned char *fifo; /* allocated Buffer */ }; /* This structure holds all of the local port information */ struct edgeport_port { __u16 txCredits; /* our current credits for this port */ __u16 maxTxCredits; /* the max size of the port */ struct TxFifo txfifo; /* transmit fifo -- size will be maxTxCredits */ struct urb *write_urb; /* write URB for this port */ bool write_in_progress; /* 'true' while a write URB is outstanding */ spinlock_t ep_lock; __u8 shadowLCR; /* last LCR value received */ __u8 shadowMCR; /* last MCR value received */ __u8 shadowMSR; /* last MSR value received */ __u8 shadowLSR; /* last LSR value received */ __u8 shadowXonChar; /* last value set as XON char in Edgeport */ __u8 shadowXoffChar; /* last value set as XOFF char in Edgeport */ __u8 validDataMask; __u32 baudRate; bool open; bool openPending; bool commandPending; bool closePending; bool chaseResponsePending; wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ wait_queue_head_t wait_open; /* for handling sleeping while waiting for open to finish */ wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */ wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ struct async_icount icount; struct usb_serial_port *port; /* loop back to the owner of this object */ }; /* This structure holds all of the individual device information */ struct edgeport_serial { char name[MAX_NAME_LEN+2]; /* string name of this 
device */ struct edge_manuf_descriptor manuf_descriptor; /* the manufacturer descriptor */ struct edge_boot_descriptor boot_descriptor; /* the boot firmware descriptor */ struct edgeport_product_info product_info; /* Product Info */ struct edge_compatibility_descriptor epic_descriptor; /* Edgeport compatible descriptor */ int is_epic; /* flag if EPiC device or not */ __u8 interrupt_in_endpoint; /* the interrupt endpoint handle */ unsigned char *interrupt_in_buffer; /* the buffer we use for the interrupt endpoint */ struct urb *interrupt_read_urb; /* our interrupt urb */ __u8 bulk_in_endpoint; /* the bulk in endpoint handle */ unsigned char *bulk_in_buffer; /* the buffer we use for the bulk in endpoint */ struct urb *read_urb; /* our bulk read urb */ bool read_in_progress; spinlock_t es_lock; __u8 bulk_out_endpoint; /* the bulk out endpoint handle */ __s16 rxBytesAvail; /* the number of bytes that we need to read from this device */ enum RXSTATE rxState; /* the current state of the bulk receive processor */ __u8 rxHeader1; /* receive header byte 1 */ __u8 rxHeader2; /* receive header byte 2 */ __u8 rxHeader3; /* receive header byte 3 */ __u8 rxPort; /* the port that we are currently receiving data for */ __u8 rxStatusCode; /* the receive status code */ __u8 rxStatusParam; /* the receive status paramater */ __s16 rxBytesRemaining; /* the number of port bytes left to read */ struct usb_serial *serial; /* loop back to the owner of this object */ }; /* baud rate information */ struct divisor_table_entry { __u32 BaudRate; __u16 Divisor; }; /* * Define table of divisors for Rev A EdgePort/4 hardware * These assume a 3.6864MHz crystal, the standard /16, and * MCR.7 = 0. 
*/ static const struct divisor_table_entry divisor_table[] = { { 50, 4608}, { 75, 3072}, { 110, 2095}, /* 2094.545455 => 230450 => .0217 % over */ { 134, 1713}, /* 1713.011152 => 230398.5 => .00065% under */ { 150, 1536}, { 300, 768}, { 600, 384}, { 1200, 192}, { 1800, 128}, { 2400, 96}, { 4800, 48}, { 7200, 32}, { 9600, 24}, { 14400, 16}, { 19200, 12}, { 38400, 6}, { 57600, 4}, { 115200, 2}, { 230400, 1}, }; /* local variables */ static bool debug; /* Number of outstanding Command Write Urbs */ static atomic_t CmdUrbs = ATOMIC_INIT(0); /* local function prototypes */ /* function prototypes for all URB callbacks */ static void edge_interrupt_callback(struct urb *urb); static void edge_bulk_in_callback(struct urb *urb); static void edge_bulk_out_data_callback(struct urb *urb); static void edge_bulk_out_cmd_callback(struct urb *urb); /* function prototypes for the usbserial callbacks */ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port); static void edge_close(struct usb_serial_port *port); static int edge_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); static int edge_write_room(struct tty_struct *tty); static int edge_chars_in_buffer(struct tty_struct *tty); static void edge_throttle(struct tty_struct *tty); static void edge_unthrottle(struct tty_struct *tty); static void edge_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios); static int edge_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); static void edge_break(struct tty_struct *tty, int break_state); static int edge_tiocmget(struct tty_struct *tty); static int edge_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static int edge_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount); static int edge_startup(struct usb_serial *serial); static void edge_disconnect(struct usb_serial *serial); static void edge_release(struct usb_serial 
*serial); #include "io_tables.h" /* all of the devices that this driver supports */ /* function prototypes for all of our local functions */ static void process_rcvd_data(struct edgeport_serial *edge_serial, unsigned char *buffer, __u16 bufferLength); static void process_rcvd_status(struct edgeport_serial *edge_serial, __u8 byte2, __u8 byte3); static void edge_tty_recv(struct device *dev, struct tty_struct *tty, unsigned char *data, int length); static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr); static void handle_new_lsr(struct edgeport_port *edge_port, __u8 lsrData, __u8 lsr, __u8 data); static int send_iosp_ext_cmd(struct edgeport_port *edge_port, __u8 command, __u8 param); static int calc_baud_rate_divisor(int baud_rate, int *divisor); static int send_cmd_write_baud_rate(struct edgeport_port *edge_port, int baudRate); static void change_port_settings(struct tty_struct *tty, struct edgeport_port *edge_port, struct ktermios *old_termios); static int send_cmd_write_uart_register(struct edgeport_port *edge_port, __u8 regNum, __u8 regValue); static int write_cmd_usb(struct edgeport_port *edge_port, unsigned char *buffer, int writeLength); static void send_more_port_data(struct edgeport_serial *edge_serial, struct edgeport_port *edge_port); static int sram_write(struct usb_serial *serial, __u16 extAddr, __u16 addr, __u16 length, const __u8 *data); static int rom_read(struct usb_serial *serial, __u16 extAddr, __u16 addr, __u16 length, __u8 *data); static int rom_write(struct usb_serial *serial, __u16 extAddr, __u16 addr, __u16 length, const __u8 *data); static void get_manufacturing_desc(struct edgeport_serial *edge_serial); static void get_boot_desc(struct edgeport_serial *edge_serial); static void load_application_firmware(struct edgeport_serial *edge_serial); static void unicode_to_ascii(char *string, int buflen, __le16 *unicode, int unicode_size); /* ************************************************************************ */ /* 
************************************************************************ */ /* ************************************************************************ */ /* ************************************************************************ */ /************************************************************************ * * * update_edgeport_E2PROM() Compare current versions of * * Boot ROM and Manufacture * * Descriptors with versions * * embedded in this driver * * * ************************************************************************/ static void update_edgeport_E2PROM(struct edgeport_serial *edge_serial) { __u32 BootCurVer; __u32 BootNewVer; __u8 BootMajorVersion; __u8 BootMinorVersion; __u16 BootBuildNumber; __u32 Bootaddr; const struct ihex_binrec *rec; const struct firmware *fw; const char *fw_name; int response; switch (edge_serial->product_info.iDownloadFile) { case EDGE_DOWNLOAD_FILE_I930: fw_name = "edgeport/boot.fw"; break; case EDGE_DOWNLOAD_FILE_80251: fw_name = "edgeport/boot2.fw"; break; default: return; } response = request_ihex_firmware(&fw, fw_name, &edge_serial->serial->dev->dev); if (response) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fw_name, response); return; } rec = (const struct ihex_binrec *)fw->data; BootMajorVersion = rec->data[0]; BootMinorVersion = rec->data[1]; BootBuildNumber = (rec->data[2] << 8) | rec->data[3]; /* Check Boot Image Version */ BootCurVer = (edge_serial->boot_descriptor.MajorVersion << 24) + (edge_serial->boot_descriptor.MinorVersion << 16) + le16_to_cpu(edge_serial->boot_descriptor.BuildNumber); BootNewVer = (BootMajorVersion << 24) + (BootMinorVersion << 16) + BootBuildNumber; dbg("Current Boot Image version %d.%d.%d", edge_serial->boot_descriptor.MajorVersion, edge_serial->boot_descriptor.MinorVersion, le16_to_cpu(edge_serial->boot_descriptor.BuildNumber)); if (BootNewVer > BootCurVer) { dbg("**Update Boot Image from %d.%d.%d to %d.%d.%d", edge_serial->boot_descriptor.MajorVersion, 
edge_serial->boot_descriptor.MinorVersion, le16_to_cpu(edge_serial->boot_descriptor.BuildNumber), BootMajorVersion, BootMinorVersion, BootBuildNumber); dbg("Downloading new Boot Image"); for (rec = ihex_next_binrec(rec); rec; rec = ihex_next_binrec(rec)) { Bootaddr = be32_to_cpu(rec->addr); response = rom_write(edge_serial->serial, Bootaddr >> 16, Bootaddr & 0xFFFF, be16_to_cpu(rec->len), &rec->data[0]); if (response < 0) { dev_err(&edge_serial->serial->dev->dev, "rom_write failed (%x, %x, %d)\n", Bootaddr >> 16, Bootaddr & 0xFFFF, be16_to_cpu(rec->len)); break; } } } else { dbg("Boot Image -- already up to date"); } release_firmware(fw); } #if 0 /************************************************************************ * * Get string descriptor from device * ************************************************************************/ static int get_string_desc(struct usb_device *dev, int Id, struct usb_string_descriptor **pRetDesc) { struct usb_string_descriptor StringDesc; struct usb_string_descriptor *pStringDesc; dbg("%s - USB String ID = %d", __func__, Id); if (!usb_get_descriptor(dev, USB_DT_STRING, Id, &StringDesc, sizeof(StringDesc))) return 0; pStringDesc = kmalloc(StringDesc.bLength, GFP_KERNEL); if (!pStringDesc) return -1; if (!usb_get_descriptor(dev, USB_DT_STRING, Id, pStringDesc, StringDesc.bLength)) { kfree(pStringDesc); return -1; } *pRetDesc = pStringDesc; return 0; } #endif static void dump_product_info(struct edgeport_product_info *product_info) { /* Dump Product Info structure */ dbg("**Product Information:"); dbg(" ProductId %x", product_info->ProductId); dbg(" NumPorts %d", product_info->NumPorts); dbg(" ProdInfoVer %d", product_info->ProdInfoVer); dbg(" IsServer %d", product_info->IsServer); dbg(" IsRS232 %d", product_info->IsRS232); dbg(" IsRS422 %d", product_info->IsRS422); dbg(" IsRS485 %d", product_info->IsRS485); dbg(" RomSize %d", product_info->RomSize); dbg(" RamSize %d", product_info->RamSize); dbg(" CpuRev %x", product_info->CpuRev); 
dbg(" BoardRev %x", product_info->BoardRev); dbg(" BootMajorVersion %d.%d.%d", product_info->BootMajorVersion, product_info->BootMinorVersion, le16_to_cpu(product_info->BootBuildNumber)); dbg(" FirmwareMajorVersion %d.%d.%d", product_info->FirmwareMajorVersion, product_info->FirmwareMinorVersion, le16_to_cpu(product_info->FirmwareBuildNumber)); dbg(" ManufactureDescDate %d/%d/%d", product_info->ManufactureDescDate[0], product_info->ManufactureDescDate[1], product_info->ManufactureDescDate[2]+1900); dbg(" iDownloadFile 0x%x", product_info->iDownloadFile); dbg(" EpicVer %d", product_info->EpicVer); } static void get_product_info(struct edgeport_serial *edge_serial) { struct edgeport_product_info *product_info = &edge_serial->product_info; memset(product_info, 0, sizeof(struct edgeport_product_info)); product_info->ProductId = (__u16)(le16_to_cpu(edge_serial->serial->dev->descriptor.idProduct) & ~ION_DEVICE_ID_80251_NETCHIP); product_info->NumPorts = edge_serial->manuf_descriptor.NumPorts; product_info->ProdInfoVer = 0; product_info->RomSize = edge_serial->manuf_descriptor.RomSize; product_info->RamSize = edge_serial->manuf_descriptor.RamSize; product_info->CpuRev = edge_serial->manuf_descriptor.CpuRev; product_info->BoardRev = edge_serial->manuf_descriptor.BoardRev; product_info->BootMajorVersion = edge_serial->boot_descriptor.MajorVersion; product_info->BootMinorVersion = edge_serial->boot_descriptor.MinorVersion; product_info->BootBuildNumber = edge_serial->boot_descriptor.BuildNumber; memcpy(product_info->ManufactureDescDate, edge_serial->manuf_descriptor.DescDate, sizeof(edge_serial->manuf_descriptor.DescDate)); /* check if this is 2nd generation hardware */ if (le16_to_cpu(edge_serial->serial->dev->descriptor.idProduct) & ION_DEVICE_ID_80251_NETCHIP) product_info->iDownloadFile = EDGE_DOWNLOAD_FILE_80251; else product_info->iDownloadFile = EDGE_DOWNLOAD_FILE_I930; /* Determine Product type and set appropriate flags */ switch 
(DEVICE_ID_FROM_USB_PRODUCT_ID(product_info->ProductId)) { case ION_DEVICE_ID_EDGEPORT_COMPATIBLE: case ION_DEVICE_ID_EDGEPORT_4T: case ION_DEVICE_ID_EDGEPORT_4: case ION_DEVICE_ID_EDGEPORT_2: case ION_DEVICE_ID_EDGEPORT_8_DUAL_CPU: case ION_DEVICE_ID_EDGEPORT_8: case ION_DEVICE_ID_EDGEPORT_421: case ION_DEVICE_ID_EDGEPORT_21: case ION_DEVICE_ID_EDGEPORT_2_DIN: case ION_DEVICE_ID_EDGEPORT_4_DIN: case ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU: product_info->IsRS232 = 1; break; case ION_DEVICE_ID_EDGEPORT_2I: /* Edgeport/2 RS422/RS485 */ product_info->IsRS422 = 1; product_info->IsRS485 = 1; break; case ION_DEVICE_ID_EDGEPORT_8I: /* Edgeport/4 RS422 */ case ION_DEVICE_ID_EDGEPORT_4I: /* Edgeport/4 RS422 */ product_info->IsRS422 = 1; break; } dump_product_info(product_info); } static int get_epic_descriptor(struct edgeport_serial *ep) { int result; struct usb_serial *serial = ep->serial; struct edgeport_product_info *product_info = &ep->product_info; struct edge_compatibility_descriptor *epic = &ep->epic_descriptor; struct edge_compatibility_bits *bits; ep->is_epic = 0; result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), USB_REQUEST_ION_GET_EPIC_DESC, 0xC0, 0x00, 0x00, &ep->epic_descriptor, sizeof(struct edge_compatibility_descriptor), 300); dbg("%s result = %d", __func__, result); if (result > 0) { ep->is_epic = 1; memset(product_info, 0, sizeof(struct edgeport_product_info)); product_info->NumPorts = epic->NumPorts; product_info->ProdInfoVer = 0; product_info->FirmwareMajorVersion = epic->MajorVersion; product_info->FirmwareMinorVersion = epic->MinorVersion; product_info->FirmwareBuildNumber = epic->BuildNumber; product_info->iDownloadFile = epic->iDownloadFile; product_info->EpicVer = epic->EpicVer; product_info->Epic = epic->Supports; product_info->ProductId = ION_DEVICE_ID_EDGEPORT_COMPATIBLE; dump_product_info(product_info); bits = &ep->epic_descriptor.Supports; dbg("**EPIC descriptor:"); dbg(" VendEnableSuspend: %s", bits->VendEnableSuspend ? 
"TRUE": "FALSE"); dbg(" IOSPOpen : %s", bits->IOSPOpen ? "TRUE": "FALSE"); dbg(" IOSPClose : %s", bits->IOSPClose ? "TRUE": "FALSE"); dbg(" IOSPChase : %s", bits->IOSPChase ? "TRUE": "FALSE"); dbg(" IOSPSetRxFlow : %s", bits->IOSPSetRxFlow ? "TRUE": "FALSE"); dbg(" IOSPSetTxFlow : %s", bits->IOSPSetTxFlow ? "TRUE": "FALSE"); dbg(" IOSPSetXChar : %s", bits->IOSPSetXChar ? "TRUE": "FALSE"); dbg(" IOSPRxCheck : %s", bits->IOSPRxCheck ? "TRUE": "FALSE"); dbg(" IOSPSetClrBreak : %s", bits->IOSPSetClrBreak ? "TRUE": "FALSE"); dbg(" IOSPWriteMCR : %s", bits->IOSPWriteMCR ? "TRUE": "FALSE"); dbg(" IOSPWriteLCR : %s", bits->IOSPWriteLCR ? "TRUE": "FALSE"); dbg(" IOSPSetBaudRate : %s", bits->IOSPSetBaudRate ? "TRUE": "FALSE"); dbg(" TrueEdgeport : %s", bits->TrueEdgeport ? "TRUE": "FALSE"); } return result; } /************************************************************************/ /************************************************************************/ /* U S B C A L L B A C K F U N C T I O N S */ /* U S B C A L L B A C K F U N C T I O N S */ /************************************************************************/ /************************************************************************/ /***************************************************************************** * edge_interrupt_callback * this is the callback function for when we have received data on the * interrupt endpoint. 
*****************************************************************************/ static void edge_interrupt_callback(struct urb *urb) { struct edgeport_serial *edge_serial = urb->context; struct edgeport_port *edge_port; struct usb_serial_port *port; struct tty_struct *tty; unsigned char *data = urb->transfer_buffer; int length = urb->actual_length; int bytes_avail; int position; int txCredits; int portNumber; int result; int status = urb->status; dbg("%s", __func__); switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dbg("%s - urb shutting down with status: %d", __func__, status); return; default: dbg("%s - nonzero urb status received: %d", __func__, status); goto exit; } /* process this interrupt-read even if there are no ports open */ if (length) { usb_serial_debug_data(debug, &edge_serial->serial->dev->dev, __func__, length, data); if (length > 1) { bytes_avail = data[0] | (data[1] << 8); if (bytes_avail) { spin_lock(&edge_serial->es_lock); edge_serial->rxBytesAvail += bytes_avail; dbg("%s - bytes_avail=%d, rxBytesAvail=%d, read_in_progress=%d", __func__, bytes_avail, edge_serial->rxBytesAvail, edge_serial->read_in_progress); if (edge_serial->rxBytesAvail > 0 && !edge_serial->read_in_progress) { dbg("%s - posting a read", __func__); edge_serial->read_in_progress = true; /* we have pending bytes on the bulk in pipe, send a request */ result = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC); if (result) { dev_err(&edge_serial->serial->dev->dev, "%s - usb_submit_urb(read bulk) failed with result = %d\n", __func__, result); edge_serial->read_in_progress = false; } } spin_unlock(&edge_serial->es_lock); } } /* grab the txcredits for the ports if available */ position = 2; portNumber = 0; while ((position < length) && (portNumber < edge_serial->serial->num_ports)) { txCredits = data[position] | (data[position+1] << 8); if (txCredits) { port = edge_serial->serial->port[portNumber]; 
edge_port = usb_get_serial_port_data(port); if (edge_port->open) { spin_lock(&edge_port->ep_lock); edge_port->txCredits += txCredits; spin_unlock(&edge_port->ep_lock); dbg("%s - txcredits for port%d = %d", __func__, portNumber, edge_port->txCredits); /* tell the tty driver that something has changed */ tty = tty_port_tty_get( &edge_port->port->port); if (tty) { tty_wakeup(tty); tty_kref_put(tty); } /* Since we have more credit, check if more data can be sent */ send_more_port_data(edge_serial, edge_port); } } position += 2; ++portNumber; } } exit: result = usb_submit_urb(urb, GFP_ATOMIC); if (result) dev_err(&urb->dev->dev, "%s - Error %d submitting control urb\n", __func__, result); } /***************************************************************************** * edge_bulk_in_callback * this is the callback function for when we have received data on the * bulk in endpoint. *****************************************************************************/ static void edge_bulk_in_callback(struct urb *urb) { struct edgeport_serial *edge_serial = urb->context; unsigned char *data = urb->transfer_buffer; int retval; __u16 raw_data_length; int status = urb->status; dbg("%s", __func__); if (status) { dbg("%s - nonzero read bulk status received: %d", __func__, status); edge_serial->read_in_progress = false; return; } if (urb->actual_length == 0) { dbg("%s - read bulk callback with no data", __func__); edge_serial->read_in_progress = false; return; } raw_data_length = urb->actual_length; usb_serial_debug_data(debug, &edge_serial->serial->dev->dev, __func__, raw_data_length, data); spin_lock(&edge_serial->es_lock); /* decrement our rxBytes available by the number that we just got */ edge_serial->rxBytesAvail -= raw_data_length; dbg("%s - Received = %d, rxBytesAvail %d", __func__, raw_data_length, edge_serial->rxBytesAvail); process_rcvd_data(edge_serial, data, urb->actual_length); /* check to see if there's any more data for us to read */ if (edge_serial->rxBytesAvail > 0) { 
		dbg("%s - posting a read", __func__);
		retval = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC);
		if (retval) {
			dev_err(&urb->dev->dev,
				"%s - usb_submit_urb(read bulk) failed, "
				"retval = %d\n", __func__, retval);
			edge_serial->read_in_progress = false;
		}
	} else {
		edge_serial->read_in_progress = false;
	}

	spin_unlock(&edge_serial->es_lock);
}


/*****************************************************************************
 * edge_bulk_out_data_callback
 *	this is the callback function for when we have finished sending
 *	serial data on the bulk out endpoint.
 *	Runs in URB completion (interrupt) context: clears the single
 *	outstanding-write flag and tries to queue the next chunk.
 *****************************************************************************/
static void edge_bulk_out_data_callback(struct urb *urb)
{
	struct edgeport_port *edge_port = urb->context;
	struct tty_struct *tty;
	int status = urb->status;

	dbg("%s", __func__);

	if (status) {
		dbg("%s - nonzero write bulk status received: %d",
		    __func__, status);
	}

	tty = tty_port_tty_get(&edge_port->port->port);

	if (tty && edge_port->open) {
		/* let the tty driver wakeup if it has a special
		   write_wakeup function */
		tty_wakeup(tty);
	}
	tty_kref_put(tty);

	/* Release the Write URB */
	edge_port->write_in_progress = false;

	/* Check if more data needs to be sent */
	send_more_port_data((struct edgeport_serial *)
		(usb_get_serial_data(edge_port->port->serial)), edge_port);
}


/*****************************************************************************
 * BulkOutCmdCallback
 *	this is the callback function for when we have finished sending a
 *	command on the bulk out endpoint.
 *	Command URBs are one-shot: both the transfer buffer and the urb
 *	itself are freed here, and CmdUrbs tracks how many are in flight.
 *****************************************************************************/
static void edge_bulk_out_cmd_callback(struct urb *urb)
{
	struct edgeport_port *edge_port = urb->context;
	struct tty_struct *tty;
	int status = urb->status;

	dbg("%s", __func__);

	atomic_dec(&CmdUrbs);
	dbg("%s - FREE URB %p (outstanding %d)", __func__,
			urb, atomic_read(&CmdUrbs));

	/* clean up the transfer buffer */
	kfree(urb->transfer_buffer);

	/* Free the command urb */
	usb_free_urb(urb);

	if (status) {
		dbg("%s - nonzero write bulk status received: %d",
		    __func__, status);
		return;
	}

	/* Get pointer to tty */
	tty = tty_port_tty_get(&edge_port->port->port);

	/* tell the tty driver that something has changed */
	if (tty && edge_port->open)
		tty_wakeup(tty);
	tty_kref_put(tty);

	/* we have completed the command */
	edge_port->commandPending = false;
	wake_up(&edge_port->wait_command);
}


/*****************************************************************************
 * Driver tty interface functions
 *****************************************************************************/

/*****************************************************************************
 * SerialOpen
 *	this function is called by the tty driver when a port is opened
 *	If successful, we return 0
 *	Otherwise we return a negative error number.
 *
 *	On the first open of any port the shared endpoint/urb bookkeeping is
 *	copied from port 0 and the interrupt urb is started.  The open itself
 *	is asynchronous: IOSP_CMD_OPEN_PORT is sent and we block on wait_open
 *	until process_rcvd_status() sees the open response (or we time out).
 *****************************************************************************/
static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	struct usb_serial *serial;
	struct edgeport_serial *edge_serial;
	int response;

	dbg("%s - port %d", __func__, port->number);

	if (edge_port == NULL)
		return -ENODEV;

	/* see if we've set up our endpoint info yet (can't set it up
	   in edge_startup as the structures were not set up at that time.) */
	serial = port->serial;
	edge_serial = usb_get_serial_data(serial);
	if (edge_serial == NULL)
		return -ENODEV;
	if (edge_serial->interrupt_in_buffer == NULL) {
		struct usb_serial_port *port0 = serial->port[0];

		/* not set up yet, so do it now */
		edge_serial->interrupt_in_buffer =
					port0->interrupt_in_buffer;
		edge_serial->interrupt_in_endpoint =
					port0->interrupt_in_endpointAddress;
		edge_serial->interrupt_read_urb = port0->interrupt_in_urb;
		edge_serial->bulk_in_buffer = port0->bulk_in_buffer;
		edge_serial->bulk_in_endpoint =
					port0->bulk_in_endpointAddress;
		edge_serial->read_urb = port0->read_urb;
		edge_serial->bulk_out_endpoint =
					port0->bulk_out_endpointAddress;

		/* set up our interrupt urb */
		usb_fill_int_urb(edge_serial->interrupt_read_urb,
		      serial->dev,
		      usb_rcvintpipe(serial->dev,
				port0->interrupt_in_endpointAddress),
		      port0->interrupt_in_buffer,
		      edge_serial->interrupt_read_urb->transfer_buffer_length,
		      edge_interrupt_callback, edge_serial,
		      edge_serial->interrupt_read_urb->interval);

		/* set up our bulk in urb */
		usb_fill_bulk_urb(edge_serial->read_urb, serial->dev,
			usb_rcvbulkpipe(serial->dev,
				port0->bulk_in_endpointAddress),
			port0->bulk_in_buffer,
			edge_serial->read_urb->transfer_buffer_length,
			edge_bulk_in_callback, edge_serial);
		edge_serial->read_in_progress = false;

		/* start interrupt read for this edgeport
		 * this interrupt will continue as long
		 * as the edgeport is connected */
		response = usb_submit_urb(edge_serial->interrupt_read_urb,
								GFP_KERNEL);
		if (response) {
			dev_err(&port->dev,
				"%s - Error %d submitting control urb\n",
				__func__, response);
		}
	}

	/* initialize our wait queues */
	init_waitqueue_head(&edge_port->wait_open);
	init_waitqueue_head(&edge_port->wait_chase);
	init_waitqueue_head(&edge_port->delta_msr_wait);
	init_waitqueue_head(&edge_port->wait_command);

	/* initialize our icount structure */
	memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount));

	/* initialize our port settings */
	edge_port->txCredits = 0;	/* Can't send any data yet */
	/* Must always set this bit to enable ints! */
	edge_port->shadowMCR = MCR_MASTER_IE;
	edge_port->chaseResponsePending = false;

	/* send a open port command */
	edge_port->openPending = true;
	edge_port->open = false;
	response = send_iosp_ext_cmd(edge_port, IOSP_CMD_OPEN_PORT, 0);

	if (response < 0) {
		dev_err(&port->dev, "%s - error sending open port command\n",
								__func__);
		edge_port->openPending = false;
		return -ENODEV;
	}

	/* now wait for the port to be completely opened */
	wait_event_timeout(edge_port->wait_open, !edge_port->openPending,
								OPEN_TIMEOUT);

	if (!edge_port->open) {
		/* open timed out */
		dbg("%s - open timedout", __func__);
		edge_port->openPending = false;
		return -ENODEV;
	}

	/* create the txfifo; size (maxTxCredits) was reported by the device
	   in the open response */
	edge_port->txfifo.head = 0;
	edge_port->txfifo.tail = 0;
	edge_port->txfifo.count = 0;
	edge_port->txfifo.size = edge_port->maxTxCredits;
	edge_port->txfifo.fifo = kmalloc(edge_port->maxTxCredits, GFP_KERNEL);

	if (!edge_port->txfifo.fifo) {
		dbg("%s - no memory", __func__);
		edge_close(port);
		return -ENOMEM;
	}

	/* Allocate a URB for the write */
	edge_port->write_urb = usb_alloc_urb(0, GFP_KERNEL);
	edge_port->write_in_progress = false;

	if (!edge_port->write_urb) {
		dbg("%s - no memory", __func__);
		edge_close(port);
		return -ENOMEM;
	}

	dbg("%s(%d) - Initialize TX fifo to %d bytes",
			__func__, port->number, edge_port->maxTxCredits);

	dbg("%s exited", __func__);

	return 0;
}


/************************************************************************
 *
 * block_until_chase_response
 *
 *	This function will block the close until one of the following:
 *	1. Response to our Chase comes from Edgeport
 *	2. A timeout of 10 seconds without activity has expired
 *	   (1K of Edgeport data @ 2400 baud ==> 4 sec to empty)
 *
 *	Polls every second (timeout = 1*HZ); "activity" means the device
 *	returned some tx credits since the last poll, which resets the
 *	10-iteration countdown.
 ************************************************************************/
static void block_until_chase_response(struct edgeport_port *edge_port)
{
	DEFINE_WAIT(wait);
	__u16 lastCredits;
	int timeout = 1*HZ;
	int loop = 10;

	while (1) {
		/* Save Last credits */
		lastCredits = edge_port->txCredits;

		/* Did we get our Chase response */
		if (!edge_port->chaseResponsePending) {
			dbg("%s - Got Chase Response", __func__);

			/* did we get all of our credit back? */
			if (edge_port->txCredits == edge_port->maxTxCredits) {
				dbg("%s - Got all credits", __func__);
				return;
			}
		}

		/* Block the thread for a while */
		prepare_to_wait(&edge_port->wait_chase, &wait,
						TASK_UNINTERRUPTIBLE);
		schedule_timeout(timeout);
		finish_wait(&edge_port->wait_chase, &wait);

		if (lastCredits == edge_port->txCredits) {
			/* No activity.. count down. */
			loop--;
			if (loop == 0) {
				edge_port->chaseResponsePending = false;
				dbg("%s - Chase TIMEOUT", __func__);
				return;
			}
		} else {
			/* Reset timeout value back to 10 seconds */
			dbg("%s - Last %d, Current %d", __func__,
					lastCredits, edge_port->txCredits);
			loop = 10;
		}
	}
}


/************************************************************************
 *
 * block_until_tx_empty
 *
 *	This function will block the close until one of the following:
 *	1. TX count are 0
 *	2. The edgeport has stopped
 *	3. A timeout of 3 seconds without activity has expired
 *
 *	Polls every HZ/10; 30 no-progress iterations ==> ~3 s timeout.
 ************************************************************************/
static void block_until_tx_empty(struct edgeport_port *edge_port)
{
	DEFINE_WAIT(wait);
	struct TxFifo *fifo = &edge_port->txfifo;
	__u32 lastCount;
	int timeout = HZ/10;
	int loop = 30;

	while (1) {
		/* Save Last count */
		lastCount = fifo->count;

		/* Is the Edgeport Buffer empty? */
		if (lastCount == 0) {
			dbg("%s - TX Buffer Empty", __func__);
			return;
		}

		/* Block the thread for a while */
		prepare_to_wait(&edge_port->wait_chase, &wait,
						TASK_UNINTERRUPTIBLE);
		schedule_timeout(timeout);
		finish_wait(&edge_port->wait_chase, &wait);

		dbg("%s wait", __func__);

		if (lastCount == fifo->count) {
			/* No activity.. count down. */
			loop--;
			if (loop == 0) {
				dbg("%s - TIMEOUT", __func__);
				return;
			}
		} else {
			/* Reset timeout value back to seconds */
			loop = 30;
		}
	}
}


/*****************************************************************************
 * edge_close
 *	this function is called by the tty driver when a port is closed
 *	Drains the local fifo, optionally chases/closes via IOSP commands
 *	(gated by the EPiC capability bits), then frees the write urb and
 *	tx fifo allocated in edge_open.
 *****************************************************************************/
static void edge_close(struct usb_serial_port *port)
{
	struct edgeport_serial *edge_serial;
	struct edgeport_port *edge_port;
	int status;

	dbg("%s - port %d", __func__, port->number);

	edge_serial = usb_get_serial_data(port->serial);
	edge_port = usb_get_serial_port_data(port);
	if (edge_serial == NULL || edge_port == NULL)
		return;

	/* block until tx is empty */
	block_until_tx_empty(edge_port);

	edge_port->closePending = true;

	if ((!edge_serial->is_epic) ||
	    ((edge_serial->is_epic) &&
	     (edge_serial->epic_descriptor.Supports.IOSPChase))) {
		/* flush and chase */
		edge_port->chaseResponsePending = true;

		dbg("%s - Sending IOSP_CMD_CHASE_PORT", __func__);
		status = send_iosp_ext_cmd(edge_port, IOSP_CMD_CHASE_PORT, 0);
		if (status == 0)
			/* block until chase finished */
			block_until_chase_response(edge_port);
		else
			edge_port->chaseResponsePending = false;
	}

	if ((!edge_serial->is_epic) ||
	    ((edge_serial->is_epic) &&
	     (edge_serial->epic_descriptor.Supports.IOSPClose))) {
		/* close the port */
		dbg("%s - Sending IOSP_CMD_CLOSE_PORT", __func__);
		send_iosp_ext_cmd(edge_port, IOSP_CMD_CLOSE_PORT, 0);
	}

	/* port->close = true; */
	edge_port->closePending = false;
	edge_port->open = false;
	edge_port->openPending = false;

	/* usb_kill_urb() is a no-op on NULL, so killing before the NULL
	   check below is safe */
	usb_kill_urb(edge_port->write_urb);

	if (edge_port->write_urb) {
		/* if this urb had a transfer buffer already
		   (old transfer) free it */
		kfree(edge_port->write_urb->transfer_buffer);
		usb_free_urb(edge_port->write_urb);
		edge_port->write_urb = NULL;
	}
	kfree(edge_port->txfifo.fifo);
	edge_port->txfifo.fifo = NULL;

	dbg("%s exited", __func__);
}

/*****************************************************************************
 * SerialWrite
 *	this function is called by the tty driver when data should be written
 *	to the port.
 *	If successful, we return the number of bytes written, otherwise we
 *	return a negative error number.
 *
 *	Data is only queued into the per-port tx fifo under ep_lock; the
 *	actual USB transfer is kicked off by send_more_port_data().
 *****************************************************************************/
static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
					const unsigned char *data, int count)
{
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	struct TxFifo *fifo;
	int copySize;
	int bytesleft;
	int firsthalf;
	int secondhalf;
	unsigned long flags;

	dbg("%s - port %d", __func__, port->number);

	if (edge_port == NULL)
		return -ENODEV;

	/* get a pointer to the Tx fifo */
	fifo = &edge_port->txfifo;

	spin_lock_irqsave(&edge_port->ep_lock, flags);

	/* calculate number of bytes to put in fifo
	 * NOTE(review): the subtraction is evaluated as unsigned; this
	 * assumes txCredits >= fifo->count (held invariant under ep_lock) --
	 * verify, otherwise min() picks the full count on underflow. */
	copySize = min((unsigned int)count,
				(edge_port->txCredits - fifo->count));

	dbg("%s(%d) of %d byte(s) Fifo room %d -- will copy %d bytes",
			__func__, port->number, count,
			edge_port->txCredits - fifo->count, copySize);

	/* catch writes of 0 bytes which the tty driver likes to give us,
	   and when txCredits is empty */
	if (copySize == 0) {
		dbg("%s - copySize = Zero", __func__);
		goto finish_write;
	}

	/* queue the data
	 * since we can never overflow the buffer we do not have to check for a
	 * full condition
	 *
	 * the copy is done is two parts -- first fill to the end of the buffer
	 * then copy the reset from the start of the buffer
	 */
	bytesleft = fifo->size - fifo->head;
	firsthalf = min(bytesleft, copySize);
	dbg("%s - copy %d bytes of %d into fifo ", __func__,
			firsthalf, bytesleft);

	/* now copy our data */
	memcpy(&fifo->fifo[fifo->head], data, firsthalf);
	usb_serial_debug_data(debug, &port->dev, __func__,
					firsthalf, &fifo->fifo[fifo->head]);

	/* update the index and size */
	fifo->head += firsthalf;
	fifo->count += firsthalf;

	/* wrap the index */
	if (fifo->head == fifo->size)
		fifo->head = 0;

	secondhalf = copySize-firsthalf;

	if (secondhalf) {
		dbg("%s - copy rest of data %d", __func__, secondhalf);
		memcpy(&fifo->fifo[fifo->head], &data[firsthalf], secondhalf);
		usb_serial_debug_data(debug, &port->dev, __func__,
					secondhalf, &fifo->fifo[fifo->head]);
		/* update the index and size */
		fifo->count += secondhalf;
		fifo->head += secondhalf;
		/* No need to check for wrap since we can not get to end of
		 * the fifo in this part
		 */
	}

finish_write:
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	send_more_port_data((struct edgeport_serial *)
			usb_get_serial_data(port->serial), edge_port);

	dbg("%s wrote %d byte(s) TxCredits %d, Fifo %d",
			__func__, copySize, edge_port->txCredits, fifo->count);

	return copySize;
}


/************************************************************************
 *
 * send_more_port_data()
 *
 *	This routine attempts to write additional UART transmit data
 *	to a port over the USB bulk pipe. It is called (1) when new
 *	data has been written to a port's TxBuffer from higher layers
 *	(2) when the peripheral sends us additional TxCredits indicating
 *	that it can accept more Tx data for a given port; and (3) when
 *	a bulk write completes successfully and we want to see if we
 *	can transmit more.
 *
 ************************************************************************/
static void send_more_port_data(struct edgeport_serial *edge_serial,
					struct edgeport_port *edge_port)
{
	struct TxFifo *fifo = &edge_port->txfifo;
	struct urb *urb;
	unsigned char *buffer;
	int status;
	int count;
	int bytesleft;
	int firsthalf;
	int secondhalf;
	unsigned long flags;

	dbg("%s(%d)", __func__, edge_port->port->number);

	/* ep_lock protects the fifo indices, txCredits and the
	   write_in_progress flag against the completion callback */
	spin_lock_irqsave(&edge_port->ep_lock, flags);

	if (edge_port->write_in_progress ||
	    !edge_port->open ||
	    (fifo->count == 0)) {
		dbg("%s(%d) EXIT - fifo %d, PendingWrite = %d",
				__func__, edge_port->port->number,
				fifo->count, edge_port->write_in_progress);
		goto exit_send;
	}

	/* since the amount of data in the fifo will always fit into the
	 * edgeport buffer we do not need to check the write length
	 *
	 * Do we have enough credits for this port to make it worthwhile
	 * to bother queueing a write. If it's too small, say a few bytes,
	 * it's better to wait for more credits so we can do a larger write.
	 */
	if (edge_port->txCredits <
	    EDGE_FW_GET_TX_CREDITS_SEND_THRESHOLD(edge_port->maxTxCredits,
					EDGE_FW_BULK_MAX_PACKET_SIZE)) {
		dbg("%s(%d) Not enough credit - fifo %d TxCredit %d",
			__func__, edge_port->port->number, fifo->count,
			edge_port->txCredits);
		goto exit_send;
	}

	/* lock this write */
	edge_port->write_in_progress = true;

	/* get a pointer to the write_urb */
	urb = edge_port->write_urb;

	/* make sure transfer buffer is freed */
	kfree(urb->transfer_buffer);
	urb->transfer_buffer = NULL;

	/* build the data header for the buffer and port that we are about
	   to send out; GFP_ATOMIC because we hold a spinlock here */
	count = fifo->count;
	buffer = kmalloc(count+2, GFP_ATOMIC);
	if (buffer == NULL) {
		dev_err_console(edge_port->port,
				"%s - no more kernel memory...\n", __func__);
		edge_port->write_in_progress = false;
		goto exit_send;
	}
	buffer[0] = IOSP_BUILD_DATA_HDR1(edge_port->port->number
				- edge_port->port->serial->minor, count);
	buffer[1] = IOSP_BUILD_DATA_HDR2(edge_port->port->number
				- edge_port->port->serial->minor, count);

	/* now copy our data (possibly in two pieces around the fifo wrap) */
	bytesleft = fifo->size - fifo->tail;
	firsthalf = min(bytesleft, count);
	memcpy(&buffer[2], &fifo->fifo[fifo->tail], firsthalf);
	fifo->tail += firsthalf;
	fifo->count -= firsthalf;
	if (fifo->tail == fifo->size)
		fifo->tail = 0;

	secondhalf = count-firsthalf;
	if (secondhalf) {
		memcpy(&buffer[2+firsthalf], &fifo->fifo[fifo->tail],
								secondhalf);
		fifo->tail += secondhalf;
		fifo->count -= secondhalf;
	}

	if (count)
		usb_serial_debug_data(debug, &edge_port->port->dev,
				__func__, count, &buffer[2]);

	/* fill up the urb with all of our data and submit it */
	usb_fill_bulk_urb(urb, edge_serial->serial->dev,
			usb_sndbulkpipe(edge_serial->serial->dev,
					edge_serial->bulk_out_endpoint),
			buffer, count+2,
			edge_bulk_out_data_callback, edge_port);

	/* decrement the number of credits we have by the number we just sent */
	edge_port->txCredits -= count;
	edge_port->icount.tx += count;

	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		/* something went wrong */
		dev_err_console(edge_port->port,
			"%s - usb_submit_urb(write bulk) failed, status = %d, data lost\n",
				__func__, status);
		edge_port->write_in_progress = false;

		/* revert the credits as something bad happened. */
		edge_port->txCredits += count;
		edge_port->icount.tx -= count;
	}
	dbg("%s wrote %d byte(s) TxCredit %d, Fifo %d",
			__func__, count, edge_port->txCredits, fifo->count);

exit_send:
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);
}


/*****************************************************************************
 * edge_write_room
 *	this function is called by the tty driver when it wants to know how
 *	many bytes of data we can accept for a specific port. If successful,
 *	we return the amount of room that we have for this port	(the txCredits)
 *	otherwise we return a negative error number.
 *****************************************************************************/
static int edge_write_room(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int room;
	unsigned long flags;

	dbg("%s", __func__);

	if (edge_port == NULL)
		return 0;
	if (edge_port->closePending)
		return 0;

	dbg("%s - port %d", __func__, port->number);

	if (!edge_port->open) {
		dbg("%s - port not opened", __func__);
		return 0;
	}

	/* total of both buffers is still txCredit */
	spin_lock_irqsave(&edge_port->ep_lock, flags);
	room = edge_port->txCredits - edge_port->txfifo.count;
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	dbg("%s - returns %d", __func__, room);
	return room;
}


/*****************************************************************************
 * edge_chars_in_buffer
 *	this function is called by the tty driver when it wants to know how
 *	many bytes of data we currently have outstanding in the port (data that
 *	has been written, but hasn't made it out the port yet)
 *	If successful, we return the number of bytes left to be written in the
 *	system,
 *	Otherwise we return a negative error number.
 *
 *	Counts both the local fifo and credits consumed by the device
 *	(maxTxCredits - txCredits = bytes buffered in the Edgeport itself).
 *****************************************************************************/
static int edge_chars_in_buffer(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int num_chars;
	unsigned long flags;

	dbg("%s", __func__);

	if (edge_port == NULL)
		return 0;
	if (edge_port->closePending)
		return 0;

	if (!edge_port->open) {
		dbg("%s - port not opened", __func__);
		return 0;
	}

	spin_lock_irqsave(&edge_port->ep_lock, flags);
	num_chars = edge_port->maxTxCredits - edge_port->txCredits +
						edge_port->txfifo.count;
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);
	if (num_chars) {
		dbg("%s(port %d) - returns %d", __func__,
				port->number, num_chars);
	}

	return num_chars;
}


/*****************************************************************************
 * SerialThrottle
 *	this function is called by the tty driver when it wants to stop the data
 *	being read from the port.
 *****************************************************************************/
static void edge_throttle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int status;

	dbg("%s - port %d", __func__, port->number);

	if (edge_port == NULL)
		return;

	if (!edge_port->open) {
		dbg("%s - port not opened", __func__);
		return;
	}

	/* if we are implementing XON/XOFF, send the stop character */
	if (I_IXOFF(tty)) {
		unsigned char stop_char = STOP_CHAR(tty);
		status = edge_write(tty, port, &stop_char, 1);
		if (status <= 0)
			return;
	}

	/* if we are implementing RTS/CTS, toggle that line */
	if (tty->termios->c_cflag & CRTSCTS) {
		edge_port->shadowMCR &= ~MCR_RTS;
		status = send_cmd_write_uart_register(edge_port, MCR,
							edge_port->shadowMCR);
		if (status != 0)
			return;
	}
}


/*****************************************************************************
 * edge_unthrottle
 *	this function is called by the tty driver when it wants to resume the
 *	data being read from the port (called after SerialThrottle is called)
 *****************************************************************************/
static void edge_unthrottle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int status;

	dbg("%s - port %d", __func__, port->number);

	if (edge_port == NULL)
		return;

	if (!edge_port->open) {
		dbg("%s - port not opened", __func__);
		return;
	}

	/* if we are implementing XON/XOFF, send the start character */
	if (I_IXOFF(tty)) {
		unsigned char start_char = START_CHAR(tty);
		status = edge_write(tty, port, &start_char, 1);
		if (status <= 0)
			return;
	}
	/* if we are implementing RTS/CTS, toggle that line */
	if (tty->termios->c_cflag & CRTSCTS) {
		edge_port->shadowMCR |= MCR_RTS;
		send_cmd_write_uart_register(edge_port, MCR,
						edge_port->shadowMCR);
	}
}


/*****************************************************************************
 * SerialSetTermios
 *	this function is called by the tty driver when it wants to change
 *	the termios structure
 *****************************************************************************/
static void edge_set_termios(struct tty_struct *tty,
	struct usb_serial_port *port, struct ktermios *old_termios)
{
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	unsigned int cflag;

	cflag = tty->termios->c_cflag;
	dbg("%s - clfag %08x iflag %08x", __func__,
	    tty->termios->c_cflag, tty->termios->c_iflag);
	dbg("%s - old clfag %08x old iflag %08x", __func__,
	    old_termios->c_cflag, old_termios->c_iflag);

	dbg("%s - port %d", __func__, port->number);

	if (edge_port == NULL)
		return;

	if (!edge_port->open) {
		dbg("%s - port not opened", __func__);
		return;
	}

	/* change the port settings to the new ones specified */
	change_port_settings(tty, edge_port, old_termios);
}


/*****************************************************************************
 * get_lsr_info - get line status register info
 *
 * Purpose: Let user call ioctl() to get info when the UART physically
 *	    is emptied.  On bus types like RS485, the transmitter must
 *	    release the bus after transmitting. This must be done when
 *	    the transmit shift register is empty, not be done when the
 *	    transmit holding register is empty.  This functionality
 *	    allows an RS485 driver to be written in user space.
 *****************************************************************************/
static int get_lsr_info(struct edgeport_port *edge_port,
						unsigned int __user *value)
{
	unsigned int result = 0;
	unsigned long flags;

	spin_lock_irqsave(&edge_port->ep_lock, flags);
	/* transmitter idle == all credits returned and local fifo empty */
	if (edge_port->maxTxCredits == edge_port->txCredits &&
	    edge_port->txfifo.count == 0) {
		dbg("%s -- Empty", __func__);
		result = TIOCSER_TEMT;
	}
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	if (copy_to_user(value, &result, sizeof(int)))
		return -EFAULT;
	return 0;
}

/* Set/clear modem-control bits (RTS/DTR/loopback) and push the shadow
   MCR to the device. */
static int edge_tiocmset(struct tty_struct *tty,
					unsigned int set, unsigned int clear)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	unsigned int mcr;

	dbg("%s - port %d", __func__, port->number);

	mcr = edge_port->shadowMCR;
	if (set & TIOCM_RTS)
		mcr |= MCR_RTS;
	if (set & TIOCM_DTR)
		mcr |= MCR_DTR;
	if (set & TIOCM_LOOP)
		mcr |= MCR_LOOPBACK;

	if (clear & TIOCM_RTS)
		mcr &= ~MCR_RTS;
	if (clear & TIOCM_DTR)
		mcr &= ~MCR_DTR;
	if (clear & TIOCM_LOOP)
		mcr &= ~MCR_LOOPBACK;

	edge_port->shadowMCR = mcr;

	send_cmd_write_uart_register(edge_port, MCR, edge_port->shadowMCR);

	return 0;
}

/* Report modem-control/status lines from the cached shadow registers
   (updated by handle_new_msr(); no device round-trip here). */
static int edge_tiocmget(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	unsigned int result = 0;
	unsigned int msr;
	unsigned int mcr;

	dbg("%s - port %d", __func__, port->number);

	msr = edge_port->shadowMSR;
	mcr = edge_port->shadowMCR;
	result = ((mcr & MCR_DTR)	? TIOCM_DTR: 0)	  /* 0x002 */
		  | ((mcr & MCR_RTS)	? TIOCM_RTS: 0)   /* 0x004 */
		  | ((msr & EDGEPORT_MSR_CTS)	? TIOCM_CTS: 0)   /* 0x020 */
		  | ((msr & EDGEPORT_MSR_CD)	? TIOCM_CAR: 0)   /* 0x040 */
		  | ((msr & EDGEPORT_MSR_RI)	? TIOCM_RI:  0)   /* 0x080 */
		  | ((msr & EDGEPORT_MSR_DSR)	? TIOCM_DSR: 0);  /* 0x100 */

	dbg("%s -- %x", __func__, result);

	return result;
}

/* Copy the accumulated interrupt counters out for TIOCGICOUNT. */
static int edge_get_icount(struct tty_struct *tty,
				struct serial_icounter_struct *icount)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	struct async_icount cnow;
	cnow = edge_port->icount;

	icount->cts = cnow.cts;
	icount->dsr = cnow.dsr;
	icount->rng = cnow.rng;
	icount->dcd = cnow.dcd;
	icount->rx = cnow.rx;
	icount->tx = cnow.tx;
	icount->frame = cnow.frame;
	icount->overrun = cnow.overrun;
	icount->parity = cnow.parity;
	icount->brk = cnow.brk;
	icount->buf_overrun = cnow.buf_overrun;

	dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d",
			__func__,  port->number, icount->rx, icount->tx);
	return 0;
}

/* Fill a serial_struct for TIOCGSERIAL with mostly-synthetic values
   (fixed type/baud_base; real xmit_fifo_size from maxTxCredits). */
static int get_serial_info(struct edgeport_port *edge_port,
				struct serial_struct __user *retinfo)
{
	struct serial_struct tmp;

	if (!retinfo)
		return -EFAULT;

	memset(&tmp, 0, sizeof(tmp));

	tmp.type		= PORT_16550A;
	tmp.line		= edge_port->port->serial->minor;
	tmp.port		= edge_port->port->number;
	tmp.irq			= 0;
	tmp.flags		= ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
	tmp.xmit_fifo_size	= edge_port->maxTxCredits;
	tmp.baud_base		= 9600;
	tmp.close_delay		= 5*HZ;
	tmp.closing_wait	= 30*HZ;

	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
		return -EFAULT;
	return 0;
}


/*****************************************************************************
 * SerialIoctl
 *	this function handles any ioctl calls to the driver
 *****************************************************************************/
static int edge_ioctl(struct tty_struct *tty,
					unsigned int cmd, unsigned long arg)
{
	struct usb_serial_port *port = tty->driver_data;
	DEFINE_WAIT(wait);
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	struct async_icount cnow;
	struct async_icount cprev;

	dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd);

	switch (cmd) {
	case TIOCSERGETLSR:
		dbg("%s (%d) TIOCSERGETLSR", __func__,  port->number);
		return get_lsr_info(edge_port, (unsigned int __user *) arg);

	case TIOCGSERIAL:
		dbg("%s (%d) TIOCGSERIAL", __func__,  port->number);
		return get_serial_info(edge_port,
				(struct serial_struct __user *) arg);

	case TIOCMIWAIT:
		dbg("%s (%d) TIOCMIWAIT", __func__,  port->number);
		cprev = edge_port->icount;
		while (1) {
			/* NOTE(review): counters may change between the
			 * snapshot above and prepare_to_wait(), and there is
			 * no tty-hangup check in this loop -- a hung-up port
			 * would keep a task blocked here; verify against
			 * later mainline TIOCMIWAIT fixes. */
			prepare_to_wait(&edge_port->delta_msr_wait,
						&wait, TASK_INTERRUPTIBLE);
			schedule();
			finish_wait(&edge_port->delta_msr_wait, &wait);
			/* see if a signal did it */
			if (signal_pending(current))
				return -ERESTARTSYS;
			cnow = edge_port->icount;
			if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
			    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
				return -EIO; /* no change => error */
			if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
			    ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
			    ((arg & TIOCM_CD)  && (cnow.dcd != cprev.dcd)) ||
			    ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
				return 0;
			}
			cprev = cnow;
		}
		/* NOTREACHED */
		break;

	}
	return -ENOIOCTLCMD;
}


/*****************************************************************************
 * SerialBreak
 *	this function sends a break to the port
 *****************************************************************************/
static void edge_break(struct tty_struct *tty, int break_state)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	struct edgeport_serial *edge_serial = usb_get_serial_data(port->serial);
	int status;

	if ((!edge_serial->is_epic) ||
	    ((edge_serial->is_epic) &&
	     (edge_serial->epic_descriptor.Supports.IOSPChase))) {
		/* flush and chase */
		edge_port->chaseResponsePending = true;

		dbg("%s - Sending IOSP_CMD_CHASE_PORT", __func__);
		status = send_iosp_ext_cmd(edge_port, IOSP_CMD_CHASE_PORT, 0);
		if (status == 0) {
			/* block until chase finished */
			block_until_chase_response(edge_port);
		} else {
			edge_port->chaseResponsePending = false;
		}
	}

	if ((!edge_serial->is_epic) ||
	    ((edge_serial->is_epic) &&
(edge_serial->epic_descriptor.Supports.IOSPSetClrBreak))) { if (break_state == -1) { dbg("%s - Sending IOSP_CMD_SET_BREAK", __func__); status = send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_BREAK, 0); } else { dbg("%s - Sending IOSP_CMD_CLEAR_BREAK", __func__); status = send_iosp_ext_cmd(edge_port, IOSP_CMD_CLEAR_BREAK, 0); } if (status) dbg("%s - error sending break set/clear command.", __func__); } } /***************************************************************************** * process_rcvd_data * this function handles the data received on the bulk in pipe. *****************************************************************************/ static void process_rcvd_data(struct edgeport_serial *edge_serial, unsigned char *buffer, __u16 bufferLength) { struct usb_serial_port *port; struct edgeport_port *edge_port; struct tty_struct *tty; __u16 lastBufferLength; __u16 rxLen; dbg("%s", __func__); lastBufferLength = bufferLength + 1; while (bufferLength > 0) { /* failsafe incase we get a message that we don't understand */ if (lastBufferLength == bufferLength) { dbg("%s - stuck in loop, exiting it.", __func__); break; } lastBufferLength = bufferLength; switch (edge_serial->rxState) { case EXPECT_HDR1: edge_serial->rxHeader1 = *buffer; ++buffer; --bufferLength; if (bufferLength == 0) { edge_serial->rxState = EXPECT_HDR2; break; } /* otherwise, drop on through */ case EXPECT_HDR2: edge_serial->rxHeader2 = *buffer; ++buffer; --bufferLength; dbg("%s - Hdr1=%02X Hdr2=%02X", __func__, edge_serial->rxHeader1, edge_serial->rxHeader2); /* Process depending on whether this header is * data or status */ if (IS_CMD_STAT_HDR(edge_serial->rxHeader1)) { /* Decode this status header and go to * EXPECT_HDR1 (if we can process the status * with only 2 bytes), or go to EXPECT_HDR3 to * get the third byte. 
*/ edge_serial->rxPort = IOSP_GET_HDR_PORT(edge_serial->rxHeader1); edge_serial->rxStatusCode = IOSP_GET_STATUS_CODE( edge_serial->rxHeader1); if (!IOSP_STATUS_IS_2BYTE( edge_serial->rxStatusCode)) { /* This status needs additional bytes. * Save what we have and then wait for * more data. */ edge_serial->rxStatusParam = edge_serial->rxHeader2; edge_serial->rxState = EXPECT_HDR3; break; } /* We have all the header bytes, process the status now */ process_rcvd_status(edge_serial, edge_serial->rxHeader2, 0); edge_serial->rxState = EXPECT_HDR1; break; } else { edge_serial->rxPort = IOSP_GET_HDR_PORT(edge_serial->rxHeader1); edge_serial->rxBytesRemaining = IOSP_GET_HDR_DATA_LEN( edge_serial->rxHeader1, edge_serial->rxHeader2); dbg("%s - Data for Port %u Len %u", __func__, edge_serial->rxPort, edge_serial->rxBytesRemaining); /* ASSERT(DevExt->RxPort < DevExt->NumPorts); * ASSERT(DevExt->RxBytesRemaining < * IOSP_MAX_DATA_LENGTH); */ if (bufferLength == 0) { edge_serial->rxState = EXPECT_DATA; break; } /* Else, drop through */ } case EXPECT_DATA: /* Expect data */ if (bufferLength < edge_serial->rxBytesRemaining) { rxLen = bufferLength; /* Expect data to start next buffer */ edge_serial->rxState = EXPECT_DATA; } else { /* BufLen >= RxBytesRemaining */ rxLen = edge_serial->rxBytesRemaining; /* Start another header next time */ edge_serial->rxState = EXPECT_HDR1; } bufferLength -= rxLen; edge_serial->rxBytesRemaining -= rxLen; /* spit this data back into the tty driver if this port is open */ if (rxLen) { port = edge_serial->serial->port[ edge_serial->rxPort]; edge_port = usb_get_serial_port_data(port); if (edge_port->open) { tty = tty_port_tty_get( &edge_port->port->port); if (tty) { dbg("%s - Sending %d bytes to TTY for port %d", __func__, rxLen, edge_serial->rxPort); edge_tty_recv(&edge_serial->serial->dev->dev, tty, buffer, rxLen); tty_kref_put(tty); } edge_port->icount.rx += rxLen; } buffer += rxLen; } break; case EXPECT_HDR3: /* Expect 3rd byte of status header */ 
		edge_serial->rxHeader3 = *buffer;
		++buffer;
		--bufferLength;

		/* We have all the header bytes, process the status now */
		process_rcvd_status(edge_serial,
				edge_serial->rxStatusParam,
				edge_serial->rxHeader3);
		edge_serial->rxState = EXPECT_HDR1;
		break;
		}
	}
}


/*****************************************************************************
 * process_rcvd_status
 *	this function handles the any status messages received on the
 *	bulk in pipe.
 *
 *	byte2/byte3 are the raw IOSP status parameter bytes; their meaning
 *	depends on the status code latched earlier in edge_serial->rxStatusCode.
 *****************************************************************************/
static void process_rcvd_status(struct edgeport_serial *edge_serial,
						__u8 byte2, __u8 byte3)
{
	struct usb_serial_port *port;
	struct edgeport_port *edge_port;
	struct tty_struct *tty;
	__u8 code = edge_serial->rxStatusCode;

	/* switch the port pointer to the one being currently talked about */
	port = edge_serial->serial->port[edge_serial->rxPort];
	edge_port = usb_get_serial_port_data(port);
	if (edge_port == NULL) {
		dev_err(&edge_serial->serial->dev->dev,
			"%s - edge_port == NULL for port %d\n",
					__func__, edge_serial->rxPort);
		return;
	}

	dbg("%s - port %d", __func__, edge_serial->rxPort);

	if (code == IOSP_EXT_STATUS) {
		switch (byte2) {
		case IOSP_EXT_STATUS_CHASE_RSP:
			/* we want to do EXT status regardless of port
			 * open/closed */
			dbg("%s - Port %u EXT CHASE_RSP Data = %02x",
					__func__, edge_serial->rxPort, byte3);
			/* Currently, the only EXT_STATUS is Chase, so process
			 * here instead of one more call to one more subroutine
			 * If/when more EXT_STATUS, there'll be more work to do
			 * Also, we currently clear flag and close the port
			 * regardless of content of above's Byte3.
			 * We could choose to do something else when Byte3 says
			 * Timeout on Chase from Edgeport, like wait longer in
			 * block_until_chase_response, but for now we don't.
			 */
			edge_port->chaseResponsePending = false;
			wake_up(&edge_port->wait_chase);
			return;

		case IOSP_EXT_STATUS_RX_CHECK_RSP:
			dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============", __func__, edge_serial->rxPort, byte3);
			/* Port->RxCheckRsp = true; */
			return;
		}
	}

	if (code == IOSP_STATUS_OPEN_RSP) {
		/* byte3 encodes the device-side TX buffer size; this becomes
		 * our credit budget for flow-controlled writes */
		edge_port->txCredits = GET_TX_BUFFER_SIZE(byte3);
		edge_port->maxTxCredits = edge_port->txCredits;
		dbg("%s - Port %u Open Response Initial MSR = %02x TxBufferSize = %d", __func__, edge_serial->rxPort, byte2, edge_port->txCredits);
		handle_new_msr(edge_port, byte2);

		/* send the current line settings to the port so we are
		   in sync with any further termios calls */
		tty = tty_port_tty_get(&edge_port->port->port);
		if (tty) {
			change_port_settings(tty,
				edge_port, tty->termios);
			tty_kref_put(tty);
		}

		/* we have completed the open */
		edge_port->openPending = false;
		edge_port->open = true;
		wake_up(&edge_port->wait_open);
		return;
	}

	/* If port is closed, silently discard all rcvd status. We can
	 * have cases where buffered status is received AFTER the close
	 * port command is sent to the Edgeport.
	 */
	if (!edge_port->open || edge_port->closePending)
		return;

	switch (code) {
	/* Not currently sent by Edgeport */
	case IOSP_STATUS_LSR:
		dbg("%s - Port %u LSR Status = %02x",
				__func__, edge_serial->rxPort, byte2);
		handle_new_lsr(edge_port, false, byte2, 0);
		break;

	case IOSP_STATUS_LSR_DATA:
		dbg("%s - Port %u LSR Status = %02x, Data = %02x",
				__func__, edge_serial->rxPort, byte2, byte3);
		/* byte2 is LSR Register */
		/* byte3 is broken data byte */
		handle_new_lsr(edge_port, true, byte2, byte3);
		break;
	/*
	 *	case IOSP_EXT_4_STATUS:
	 *		dbg("%s - Port %u LSR Status = %02x Data = %02x",
	 *			__func__, edge_serial->rxPort, byte2, byte3);
	 *		break;
	 */
	case IOSP_STATUS_MSR:
		dbg("%s - Port %u MSR Status = %02x",
				__func__, edge_serial->rxPort, byte2);
		/*
		 * Process this new modem status and generate appropriate
		 * events, etc, based on the new status. This routine
		 * also saves the MSR in Port->ShadowMsr.
		 */
		handle_new_msr(edge_port, byte2);
		break;

	default:
		dbg("%s - Unrecognized IOSP status code %u", __func__, code);
		break;
	}
}


/*****************************************************************************
 * edge_tty_recv
 *	this function passes data on to the tty flip buffer
 *
 *	Any bytes the flip buffer cannot accept are dropped (with an error
 *	message); there is no re-queueing of the remainder.
 *****************************************************************************/
static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
					unsigned char *data, int length)
{
	int cnt;

	cnt = tty_insert_flip_string(tty, data, length);
	if (cnt < length) {
		dev_err(dev, "%s - dropping data, %d bytes lost\n",
				__func__, length - cnt);
	}
	data += cnt;
	length -= cnt;

	tty_flip_buffer_push(tty);
}


/*****************************************************************************
 * handle_new_msr
 *	this function handles any change to the msr register for a port.
 *
 *	Bumps the appropriate icount delta counters and wakes any task
 *	sleeping in a TIOCMIWAIT-style wait on delta_msr_wait.
 *****************************************************************************/
static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr)
{
	struct async_icount *icount;

	dbg("%s %02x", __func__, newMsr);

	if (newMsr & (EDGEPORT_MSR_DELTA_CTS | EDGEPORT_MSR_DELTA_DSR |
			EDGEPORT_MSR_DELTA_RI | EDGEPORT_MSR_DELTA_CD)) {
		icount = &edge_port->icount;

		/* update input line counters */
		if (newMsr & EDGEPORT_MSR_DELTA_CTS)
			icount->cts++;
		if (newMsr & EDGEPORT_MSR_DELTA_DSR)
			icount->dsr++;
		if (newMsr & EDGEPORT_MSR_DELTA_CD)
			icount->dcd++;
		if (newMsr & EDGEPORT_MSR_DELTA_RI)
			icount->rng++;
		wake_up_interruptible(&edge_port->delta_msr_wait);
	}

	/* Save the new modem status (only the high nibble carries the
	 * current line states; the low nibble is the delta bits) */
	edge_port->shadowMSR = newMsr & 0xf0;
}


/*****************************************************************************
 * handle_new_lsr
 *	this function handles any change to the lsr register for a port.
 *****************************************************************************/
/*
 * lsrData acts as a boolean flag: when non-zero, 'data' holds the byte that
 * was received along with the line-status event and must be pushed to the tty.
 */
static void handle_new_lsr(struct edgeport_port *edge_port, __u8 lsrData,
							__u8 lsr, __u8 data)
{
	/* keep only the error/break bits we account for */
	__u8 newLsr = (__u8) (lsr & (__u8)
		(LSR_OVER_ERR | LSR_PAR_ERR | LSR_FRM_ERR | LSR_BREAK));
	struct async_icount *icount;

	dbg("%s - %02x", __func__, newLsr);

	edge_port->shadowLSR = lsr;

	if (newLsr & LSR_BREAK) {
		/*
		 * Parity and Framing errors only count if they
		 * occur exclusive of a break being
		 * received.
		 */
		newLsr &= (__u8)(LSR_OVER_ERR | LSR_BREAK);
	}

	/* Place LSR data byte into Rx buffer */
	if (lsrData) {
		struct tty_struct *tty =
				tty_port_tty_get(&edge_port->port->port);
		if (tty) {
			edge_tty_recv(&edge_port->port->dev, tty, &data, 1);
			tty_kref_put(tty);
		}
	}

	/* update input line counters */
	icount = &edge_port->icount;
	if (newLsr & LSR_BREAK)
		icount->brk++;
	if (newLsr & LSR_OVER_ERR)
		icount->overrun++;
	if (newLsr & LSR_PAR_ERR)
		icount->parity++;
	if (newLsr & LSR_FRM_ERR)
		icount->frame++;
}


/****************************************************************************
 * sram_write
 *	writes a number of bytes to the Edgeport device's sram starting at the
 *	given address.
 *	If successful returns the number of bytes written, otherwise it returns
 *	a negative error number of the problem.
 ****************************************************************************/
/*
 * NOTE(review): on success this actually returns the byte count of the LAST
 * 64-byte chunk transferred (the last usb_control_msg result), not the total
 * length — callers here only test for < 0, so this is harmless in practice.
 */
static int sram_write(struct usb_serial *serial, __u16 extAddr, __u16 addr,
					__u16 length, const __u8 *data)
{
	int result;
	__u16 current_length;
	unsigned char *transfer_buffer;

	dbg("%s - %x, %x, %d", __func__, extAddr, addr, length);

	transfer_buffer = kmalloc(64, GFP_KERNEL);
	if (!transfer_buffer) {
		dev_err(&serial->dev->dev, "%s - kmalloc(%d) failed.\n",
							__func__, 64);
		return -ENOMEM;
	}

	/* need to split these writes up into 64 byte chunks */
	result = 0;
	while (length > 0) {
		if (length > 64)
			current_length = 64;
		else
			current_length = length;

/*		dbg("%s - writing %x, %x, %d", __func__,
					extAddr, addr, current_length); */

		/* data may live in const/vmalloc space; bounce through a
		 * kmalloc'd buffer suitable for USB DMA */
		memcpy(transfer_buffer, data, current_length);
		result = usb_control_msg(serial->dev,
					usb_sndctrlpipe(serial->dev, 0),
					USB_REQUEST_ION_WRITE_RAM,
					0x40, addr, extAddr, transfer_buffer,
					current_length, 300);
		if (result < 0)
			break;
		length -= current_length;
		addr += current_length;
		data += current_length;
	}

	kfree(transfer_buffer);
	return result;
}


/****************************************************************************
 * rom_write
 *	writes a number of bytes to the Edgeport device's ROM starting at the
 *	given address.
 *	If successful returns the number of bytes written, otherwise it returns
 *	a negative error number of the problem.
 ****************************************************************************/
/*
 * Same chunked-transfer pattern as sram_write(), but targets the device ROM
 * via USB_REQUEST_ION_WRITE_ROM. Same return-value caveat: the value is the
 * result of the final usb_control_msg, not the cumulative byte count.
 */
static int rom_write(struct usb_serial *serial, __u16 extAddr, __u16 addr,
					__u16 length, const __u8 *data)
{
	int result;
	__u16 current_length;
	unsigned char *transfer_buffer;

/*	dbg("%s - %x, %x, %d", __func__, extAddr, addr, length); */

	transfer_buffer = kmalloc(64, GFP_KERNEL);
	if (!transfer_buffer) {
		dev_err(&serial->dev->dev, "%s - kmalloc(%d) failed.\n",
							__func__, 64);
		return -ENOMEM;
	}

	/* need to split these writes up into 64 byte chunks */
	result = 0;
	while (length > 0) {
		if (length > 64)
			current_length = 64;
		else
			current_length = length;

/*		dbg("%s - writing %x, %x, %d", __func__,
					extAddr, addr, current_length); */

		memcpy(transfer_buffer, data, current_length);
		result = usb_control_msg(serial->dev,
					usb_sndctrlpipe(serial->dev, 0),
					USB_REQUEST_ION_WRITE_ROM, 0x40,
					addr, extAddr,
					transfer_buffer, current_length, 300);
		if (result < 0)
			break;
		length -= current_length;
		addr += current_length;
		data += current_length;
	}

	kfree(transfer_buffer);
	return result;
}


/****************************************************************************
 * rom_read
 *	reads a number of bytes from the Edgeport device starting at the given
 *	address.
 *	If successful returns the number of bytes read, otherwise it returns
 *	a negative error number of the problem.
 ****************************************************************************/
static int rom_read(struct usb_serial *serial, __u16 extAddr,
					__u16 addr, __u16 length, __u8 *data)
{
	int result;
	__u16 current_length;
	unsigned char *transfer_buffer;

	dbg("%s - %x, %x, %d", __func__, extAddr, addr, length);

	transfer_buffer = kmalloc(64, GFP_KERNEL);
	if (!transfer_buffer) {
		dev_err(&serial->dev->dev,
			"%s - kmalloc(%d) failed.\n", __func__, 64);
		return -ENOMEM;
	}

	/* need to split these reads up into 64 byte chunks */
	result = 0;
	while (length > 0) {
		if (length > 64)
			current_length = 64;
		else
			current_length = length;
/*		dbg("%s - %x, %x, %d", __func__,
				extAddr, addr, current_length); */
		result = usb_control_msg(serial->dev,
					usb_rcvctrlpipe(serial->dev, 0),
					USB_REQUEST_ION_READ_ROM,
					0xC0, addr, extAddr, transfer_buffer,
					current_length, 300);
		if (result < 0)
			break;
		memcpy(data, transfer_buffer, current_length);
		length -= current_length;
		addr += current_length;
		data += current_length;
	}

	kfree(transfer_buffer);
	return result;
}


/****************************************************************************
 * send_iosp_ext_cmd
 *	Is used to send a IOSP message to the Edgeport device
 ****************************************************************************/
/*
 * NOTE(review): on success the buffer is NOT freed here — ownership passes to
 * write_cmd_usb()/the URB completion path; presumably the bulk-out callback
 * releases it. Only the error path frees locally.
 */
static int send_iosp_ext_cmd(struct edgeport_port *edge_port,
						__u8 command, __u8 param)
{
	unsigned char   *buffer;
	unsigned char   *currentCommand;
	int             length = 0;
	int             status = 0;

	dbg("%s - %d, %d", __func__, command, param);

	buffer = kmalloc(10, GFP_ATOMIC);
	if (!buffer) {
		dev_err(&edge_port->port->dev,
				"%s - kmalloc(%d) failed.\n", __func__, 10);
		return -ENOMEM;
	}

	currentCommand = buffer;

	MAKE_CMD_EXT_CMD(&currentCommand, &length,
		edge_port->port->number - edge_port->port->serial->minor,
		command, param);

	status = write_cmd_usb(edge_port, buffer, length);
	if (status) {
		/* something bad happened, let's free up the memory */
		kfree(buffer);
	}

	return status;
}
/*****************************************************************************
 * write_cmd_usb
 *	this function writes the given buffer out to the bulk write endpoint.
 *
 *	The buffer is handed to the submitted URB; on submission failure the
 *	URB is killed/freed here but the BUFFER is left for the caller to free
 *	(callers kfree it when a non-zero status is returned).
 *****************************************************************************/
static int write_cmd_usb(struct edgeport_port *edge_port,
					unsigned char *buffer, int length)
{
	struct edgeport_serial *edge_serial =
				usb_get_serial_data(edge_port->port->serial);
	int status = 0;
	struct urb *urb;

	usb_serial_debug_data(debug, &edge_port->port->dev,
						__func__, length, buffer);

	/* Allocate our next urb */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	/* CmdUrbs tracks outstanding command URBs for debugging */
	atomic_inc(&CmdUrbs);
	dbg("%s - ALLOCATE URB %p (outstanding %d)",
			__func__, urb, atomic_read(&CmdUrbs));

	usb_fill_bulk_urb(urb, edge_serial->serial->dev,
			usb_sndbulkpipe(edge_serial->serial->dev,
					edge_serial->bulk_out_endpoint),
			buffer, length, edge_bulk_out_cmd_callback, edge_port);

	edge_port->commandPending = true;
	status = usb_submit_urb(urb, GFP_ATOMIC);

	if (status) {
		/* something went wrong */
		dev_err(&edge_port->port->dev,
		    "%s - usb_submit_urb(write command) failed, status = %d\n",
							__func__, status);
		usb_kill_urb(urb);
		usb_free_urb(urb);
		atomic_dec(&CmdUrbs);
		return status;
	}

#if 0
	wait_event(&edge_port->wait_command, !edge_port->commandPending);

	if (edge_port->commandPending) {
		/* command timed out */
		dbg("%s - command timed out", __func__);
		status = -EINVAL;
	}
#endif
	return status;
}


/*****************************************************************************
 * send_cmd_write_baud_rate
 *	this function sends the proper command to change the baud rate of the
 *	specified port.
 *****************************************************************************/
static int send_cmd_write_baud_rate(struct edgeport_port *edge_port,
								int baudRate)
{
	struct edgeport_serial *edge_serial =
				usb_get_serial_data(edge_port->port->serial);
	unsigned char *cmdBuffer;
	unsigned char *currCmd;
	int cmdLen = 0;
	int divisor;
	int status;
	unsigned char number =
		edge_port->port->number - edge_port->port->serial->minor;

	/* EPiC devices may not support IOSP baud-rate commands at all */
	if (edge_serial->is_epic &&
	    !edge_serial->epic_descriptor.Supports.IOSPSetBaudRate) {
		dbg("SendCmdWriteBaudRate - NOT Setting baud rate for port = %d, baud = %d",
		    edge_port->port->number, baudRate);
		return 0;
	}

	dbg("%s - port = %d, baud = %d", __func__,
					edge_port->port->number, baudRate);

	status = calc_baud_rate_divisor(baudRate, &divisor);
	if (status) {
		dev_err(&edge_port->port->dev, "%s - bad baud rate\n",
								__func__);
		return status;
	}

	/* Alloc memory for the string of commands. */
	cmdBuffer = kmalloc(0x100, GFP_ATOMIC);
	if (!cmdBuffer) {
		dev_err(&edge_port->port->dev,
			"%s - kmalloc(%d) failed.\n", __func__, 0x100);
		return -ENOMEM;
	}
	currCmd = cmdBuffer;

	/* Divisor-latch sequence: enable DLAB, write DLL/DLM, restore LCR */
	/* Enable access to divisor latch */
	MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, number, LCR, LCR_DL_ENABLE);

	/* Write the divisor itself */
	MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, number, DLL, LOW8(divisor));
	MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, number, DLM, HIGH8(divisor));

	/* Restore original value to disable access to divisor latch */
	MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, number, LCR,
						edge_port->shadowLCR);

	/* on success, cmdBuffer ownership passes to the URB path */
	status = write_cmd_usb(edge_port, cmdBuffer, cmdLen);
	if (status) {
		/* something bad happened, let's free up the memory */
		kfree(cmdBuffer);
	}

	return status;
}


/*****************************************************************************
 * calc_baud_rate_divisor
 *	this function calculates the proper baud rate divisor for the specified
 *	baud rate.
*****************************************************************************/ static int calc_baud_rate_divisor(int baudrate, int *divisor) { int i; __u16 custom; dbg("%s - %d", __func__, baudrate); for (i = 0; i < ARRAY_SIZE(divisor_table); i++) { if (divisor_table[i].BaudRate == baudrate) { *divisor = divisor_table[i].Divisor; return 0; } } /* We have tried all of the standard baud rates * lets try to calculate the divisor for this baud rate * Make sure the baud rate is reasonable */ if (baudrate > 50 && baudrate < 230400) { /* get divisor */ custom = (__u16)((230400L + baudrate/2) / baudrate); *divisor = custom; dbg("%s - Baud %d = %d", __func__, baudrate, custom); return 0; } return -1; } /***************************************************************************** * send_cmd_write_uart_register * this function builds up a uart register message and sends to the device. *****************************************************************************/ static int send_cmd_write_uart_register(struct edgeport_port *edge_port, __u8 regNum, __u8 regValue) { struct edgeport_serial *edge_serial = usb_get_serial_data(edge_port->port->serial); unsigned char *cmdBuffer; unsigned char *currCmd; unsigned long cmdLen = 0; int status; dbg("%s - write to %s register 0x%02x", (regNum == MCR) ? "MCR" : "LCR", __func__, regValue); if (edge_serial->is_epic && !edge_serial->epic_descriptor.Supports.IOSPWriteMCR && regNum == MCR) { dbg("SendCmdWriteUartReg - Not writing to MCR Register"); return 0; } if (edge_serial->is_epic && !edge_serial->epic_descriptor.Supports.IOSPWriteLCR && regNum == LCR) { dbg("SendCmdWriteUartReg - Not writing to LCR Register"); return 0; } /* Alloc memory for the string of commands. 
*/ cmdBuffer = kmalloc(0x10, GFP_ATOMIC); if (cmdBuffer == NULL) return -ENOMEM; currCmd = cmdBuffer; /* Build a cmd in the buffer to write the given register */ MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, edge_port->port->number - edge_port->port->serial->minor, regNum, regValue); status = write_cmd_usb(edge_port, cmdBuffer, cmdLen); if (status) { /* something bad happened, let's free up the memory */ kfree(cmdBuffer); } return status; } /***************************************************************************** * change_port_settings * This routine is called to set the UART on the device to match the * specified new settings. *****************************************************************************/ static void change_port_settings(struct tty_struct *tty, struct edgeport_port *edge_port, struct ktermios *old_termios) { struct edgeport_serial *edge_serial = usb_get_serial_data(edge_port->port->serial); int baud; unsigned cflag; __u8 mask = 0xff; __u8 lData; __u8 lParity; __u8 lStop; __u8 rxFlow; __u8 txFlow; int status; dbg("%s - port %d", __func__, edge_port->port->number); if (!edge_port->open && !edge_port->openPending) { dbg("%s - port not opened", __func__); return; } cflag = tty->termios->c_cflag; switch (cflag & CSIZE) { case CS5: lData = LCR_BITS_5; mask = 0x1f; dbg("%s - data bits = 5", __func__); break; case CS6: lData = LCR_BITS_6; mask = 0x3f; dbg("%s - data bits = 6", __func__); break; case CS7: lData = LCR_BITS_7; mask = 0x7f; dbg("%s - data bits = 7", __func__); break; default: case CS8: lData = LCR_BITS_8; dbg("%s - data bits = 8", __func__); break; } lParity = LCR_PAR_NONE; if (cflag & PARENB) { if (cflag & CMSPAR) { if (cflag & PARODD) { lParity = LCR_PAR_MARK; dbg("%s - parity = mark", __func__); } else { lParity = LCR_PAR_SPACE; dbg("%s - parity = space", __func__); } } else if (cflag & PARODD) { lParity = LCR_PAR_ODD; dbg("%s - parity = odd", __func__); } else { lParity = LCR_PAR_EVEN; dbg("%s - parity = even", __func__); } } else { dbg("%s - 
parity = none", __func__); } if (cflag & CSTOPB) { lStop = LCR_STOP_2; dbg("%s - stop bits = 2", __func__); } else { lStop = LCR_STOP_1; dbg("%s - stop bits = 1", __func__); } /* figure out the flow control settings */ rxFlow = txFlow = 0x00; if (cflag & CRTSCTS) { rxFlow |= IOSP_RX_FLOW_RTS; txFlow |= IOSP_TX_FLOW_CTS; dbg("%s - RTS/CTS is enabled", __func__); } else { dbg("%s - RTS/CTS is disabled", __func__); } /* if we are implementing XON/XOFF, set the start and stop character in the device */ if (I_IXOFF(tty) || I_IXON(tty)) { unsigned char stop_char = STOP_CHAR(tty); unsigned char start_char = START_CHAR(tty); if ((!edge_serial->is_epic) || ((edge_serial->is_epic) && (edge_serial->epic_descriptor.Supports.IOSPSetXChar))) { send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_XON_CHAR, start_char); send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_XOFF_CHAR, stop_char); } /* if we are implementing INBOUND XON/XOFF */ if (I_IXOFF(tty)) { rxFlow |= IOSP_RX_FLOW_XON_XOFF; dbg("%s - INBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x", __func__, start_char, stop_char); } else { dbg("%s - INBOUND XON/XOFF is disabled", __func__); } /* if we are implementing OUTBOUND XON/XOFF */ if (I_IXON(tty)) { txFlow |= IOSP_TX_FLOW_XON_XOFF; dbg("%s - OUTBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x", __func__, start_char, stop_char); } else { dbg("%s - OUTBOUND XON/XOFF is disabled", __func__); } } /* Set flow control to the configured value */ if ((!edge_serial->is_epic) || ((edge_serial->is_epic) && (edge_serial->epic_descriptor.Supports.IOSPSetRxFlow))) send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_RX_FLOW, rxFlow); if ((!edge_serial->is_epic) || ((edge_serial->is_epic) && (edge_serial->epic_descriptor.Supports.IOSPSetTxFlow))) send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_TX_FLOW, txFlow); edge_port->shadowLCR &= ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK); edge_port->shadowLCR |= (lData | lParity | lStop); edge_port->validDataMask = mask; /* Send the updated LCR value to the EdgePort */ 
	status = send_cmd_write_uart_register(edge_port, LCR,
						edge_port->shadowLCR);
	if (status != 0)
		return;

	/* set up the MCR register and send it to the EdgePort */
	edge_port->shadowMCR = MCR_MASTER_IE;
	if (cflag & CBAUD)
		edge_port->shadowMCR |= (MCR_DTR | MCR_RTS);

	status = send_cmd_write_uart_register(edge_port, MCR,
						edge_port->shadowMCR);
	if (status != 0)
		return;

	/* Determine divisor based on baud rate */
	baud = tty_get_baud_rate(tty);
	if (!baud) {
		/* pick a default, any default... */
		baud = 9600;
	}

	dbg("%s - baud rate = %d", __func__, baud);
	status = send_cmd_write_baud_rate(edge_port, baud);
	/* NOTE(review): only -1 (calc_baud_rate_divisor's "bad rate") is
	 * handled here; other negative errors from the USB path fall through
	 * silently. */
	if (status == -1) {
		/* Speed change was not possible - put back the old speed */
		baud = tty_termios_baud_rate(old_termios);
		tty_encode_baud_rate(tty, baud, baud);
	}
}


/****************************************************************************
 * unicode_to_ascii
 *	Turns a string from Unicode into ASCII.
 *	Doesn't do a good job with any characters that are outside the normal
 *	ASCII range, but it's only for debugging...
 *	NOTE: expects the unicode in LE format
 ****************************************************************************/
static void unicode_to_ascii(char *string, int buflen,
					__le16 *unicode, int unicode_size)
{
	int i;

	if (buflen <= 0)	/* never happens, but... */
		return;
	--buflen;		/* space for nul */

	for (i = 0; i < unicode_size; i++) {
		if (i >= buflen)
			break;
		/* naive truncation: keeps the low byte of each LE code unit */
		string[i] = (char)(le16_to_cpu(unicode[i]));
	}
	string[i] = 0x00;
}


/****************************************************************************
 * get_manufacturing_desc
 *	reads in the manufacturing descriptor and stores it into the serial
 *	structure.
 ****************************************************************************/
static void get_manufacturing_desc(struct edgeport_serial *edge_serial)
{
	int response;

	dbg("getting manufacturer descriptor");

	/* descriptor lives at a fixed ROM address; split the 32-bit address
	 * into the extended-address / offset halves rom_read() expects */
	response = rom_read(edge_serial->serial,
				(EDGE_MANUF_DESC_ADDR & 0xffff0000) >> 16,
				(__u16)(EDGE_MANUF_DESC_ADDR & 0x0000ffff),
				EDGE_MANUF_DESC_LEN,
				(__u8 *)(&edge_serial->manuf_descriptor));

	if (response < 1)
		dev_err(&edge_serial->serial->dev->dev,
			"error in getting manufacturer descriptor\n");
	else {
		char string[30];
		dbg("**Manufacturer Descriptor");
		dbg("  RomSize:        %dK",
			edge_serial->manuf_descriptor.RomSize);
		dbg("  RamSize:        %dK",
			edge_serial->manuf_descriptor.RamSize);
		dbg("  CpuRev:         %d",
			edge_serial->manuf_descriptor.CpuRev);
		dbg("  BoardRev:       %d",
			edge_serial->manuf_descriptor.BoardRev);
		dbg("  NumPorts:       %d",
			edge_serial->manuf_descriptor.NumPorts);
		dbg("  DescDate:       %d/%d/%d",
			edge_serial->manuf_descriptor.DescDate[0],
			edge_serial->manuf_descriptor.DescDate[1],
			edge_serial->manuf_descriptor.DescDate[2]+1900);

		unicode_to_ascii(string, sizeof(string),
			edge_serial->manuf_descriptor.SerialNumber,
			edge_serial->manuf_descriptor.SerNumLength/2);
		dbg("  SerialNumber: %s", string);

		unicode_to_ascii(string, sizeof(string),
			edge_serial->manuf_descriptor.AssemblyNumber,
			edge_serial->manuf_descriptor.AssemblyNumLength/2);
		dbg("  AssemblyNumber: %s", string);

		unicode_to_ascii(string, sizeof(string),
		    edge_serial->manuf_descriptor.OemAssyNumber,
		    edge_serial->manuf_descriptor.OemAssyNumLength/2);
		dbg("  OemAssyNumber:  %s", string);

		dbg("  UartType:       %d",
			edge_serial->manuf_descriptor.UartType);
		dbg("  IonPid:         %d",
			edge_serial->manuf_descriptor.IonPid);
		dbg("  IonConfig:      %d",
			edge_serial->manuf_descriptor.IonConfig);
	}
}


/****************************************************************************
 * get_boot_desc
 *	reads in the bootloader descriptor and stores it into the serial
 *	structure.
 ****************************************************************************/
static void get_boot_desc(struct edgeport_serial *edge_serial)
{
	int response;

	dbg("getting boot descriptor");

	response = rom_read(edge_serial->serial,
				(EDGE_BOOT_DESC_ADDR & 0xffff0000) >> 16,
				(__u16)(EDGE_BOOT_DESC_ADDR & 0x0000ffff),
				EDGE_BOOT_DESC_LEN,
				(__u8 *)(&edge_serial->boot_descriptor));

	if (response < 1)
		dev_err(&edge_serial->serial->dev->dev,
				"error in getting boot descriptor\n");
	else {
		dbg("**Boot Descriptor:");
		dbg("  BootCodeLength: %d",
		    le16_to_cpu(edge_serial->boot_descriptor.BootCodeLength));
		dbg("  MajorVersion:   %d",
			edge_serial->boot_descriptor.MajorVersion);
		dbg("  MinorVersion:   %d",
			edge_serial->boot_descriptor.MinorVersion);
		dbg("  BuildNumber:    %d",
			le16_to_cpu(edge_serial->boot_descriptor.BuildNumber));
		dbg("  Capabilities:   0x%x",
		      le16_to_cpu(edge_serial->boot_descriptor.Capabilities));
		dbg("  UConfig0:       %d",
			edge_serial->boot_descriptor.UConfig0);
		dbg("  UConfig1:       %d",
			edge_serial->boot_descriptor.UConfig1);
	}
}


/****************************************************************************
 * load_application_firmware
 *	This is called to load the application firmware to the device
 ****************************************************************************/
static void load_application_firmware(struct edgeport_serial *edge_serial)
{
	const struct ihex_binrec *rec;
	const struct firmware *fw;
	const char *fw_name;
	const char *fw_info;
	int response;
	__u32 Operaddr;
	__u16 build;

	/* choose the firmware image matching the device's CPU variant */
	switch (edge_serial->product_info.iDownloadFile) {
	case EDGE_DOWNLOAD_FILE_I930:
		fw_info = "downloading firmware version (930)";
		fw_name	= "edgeport/down.fw";
		break;

	case EDGE_DOWNLOAD_FILE_80251:
		fw_info = "downloading firmware version (80251)";
		fw_name	= "edgeport/down2.fw";
		break;

	case EDGE_DOWNLOAD_FILE_NONE:
		dbg("No download file specified, skipping download");
		return;

	default:
		return;
	}

	response = request_ihex_firmware(&fw, fw_name,
				    &edge_serial->serial->dev->dev);
	if (response) {
		printk(KERN_ERR
		       "Failed to load image \"%s\" err %d\n",
		       fw_name, response);
		return;
	}

	/* the first ihex record carries the firmware version bytes */
	rec = (const struct ihex_binrec *)fw->data;
	build = (rec->data[2] << 8) | rec->data[3];

	dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build);

	edge_serial->product_info.FirmwareMajorVersion = rec->data[0];
	edge_serial->product_info.FirmwareMinorVersion = rec->data[1];
	edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build);

	/* remaining records are (address, data) chunks written to SRAM */
	for (rec = ihex_next_binrec(rec); rec;
	     rec = ihex_next_binrec(rec)) {
		Operaddr = be32_to_cpu(rec->addr);
		response = sram_write(edge_serial->serial,
				     Operaddr >> 16,
				     Operaddr & 0xFFFF,
				     be16_to_cpu(rec->len),
				     &rec->data[0]);
		if (response < 0) {
			dev_err(&edge_serial->serial->dev->dev,
				"sram_write failed (%x, %x, %d)\n",
				Operaddr >> 16, Operaddr & 0xFFFF,
				be16_to_cpu(rec->len));
			break;
		}
	}

	dbg("sending exec_dl_code");
	/* NOTE(review): the result of this control transfer is not checked;
	 * a failed EXEC_DL_CODE leaves the device without running firmware */
	response = usb_control_msg (edge_serial->serial->dev,
				    usb_sndctrlpipe(edge_serial->serial->dev, 0),
				    USB_REQUEST_ION_EXEC_DL_CODE,
				    0x40, 0x4000, 0x0001, NULL, 0, 3000);

	release_firmware(fw);
}


/****************************************************************************
 * edge_startup
 ****************************************************************************/
static int edge_startup(struct usb_serial *serial)
{
	struct edgeport_serial *edge_serial;
	struct edgeport_port *edge_port;
	struct usb_device *dev;
	int i, j;
	int response;
	bool interrupt_in_found;
	bool bulk_in_found;
	bool bulk_out_found;
	static __u32 descriptor[3] = {	EDGE_COMPATIBILITY_MASK0,
					EDGE_COMPATIBILITY_MASK1,
					EDGE_COMPATIBILITY_MASK2 };

	dev = serial->dev;

	/* create our private serial structure */
	edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
	if (edge_serial == NULL) {
		dev_err(&serial->dev->dev, "%s - Out of memory\n", __func__);
		return -ENOMEM;
	}
	spin_lock_init(&edge_serial->es_lock);
	edge_serial->serial = serial;
	usb_set_serial_data(serial, edge_serial);

	/* get the name for the device from the device */
	i = usb_string(dev, dev->descriptor.iManufacturer,
	    &edge_serial->name[0], MAX_NAME_LEN+1);
	if (i < 0)
		i = 0;
	edge_serial->name[i++] = ' ';
	usb_string(dev, dev->descriptor.iProduct,
	    &edge_serial->name[i], MAX_NAME_LEN+2 - i);

	dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name);

	/* Read the epic descriptor */
	if (get_epic_descriptor(edge_serial) <= 0) {
		/* memcpy descriptor to Supports structures */
		memcpy(&edge_serial->epic_descriptor.Supports, descriptor,
		       sizeof(struct edge_compatibility_bits));

		/* get the manufacturing descriptor for this device */
		get_manufacturing_desc(edge_serial);

		/* get the boot descriptor */
		get_boot_desc(edge_serial);

		get_product_info(edge_serial);
	}

	/* set the number of ports from the manufacturing description */
	/* serial->num_ports = serial->product_info.NumPorts; */
	if ((!edge_serial->is_epic) &&
	    (edge_serial->product_info.NumPorts != serial->num_ports)) {
		dev_warn(&serial->dev->dev, "Device Reported %d serial ports "
			 "vs. core thinking we have %d ports, email "
			 "greg@kroah.com this information.\n",
			 edge_serial->product_info.NumPorts,
			 serial->num_ports);
	}

	dbg("%s - time 1 %ld", __func__, jiffies);

	/* If not an EPiC device */
	if (!edge_serial->is_epic) {
		/* now load the application firmware into this device */
		load_application_firmware(edge_serial);

		dbg("%s - time 2 %ld", __func__, jiffies);

		/* Check current Edgeport EEPROM and update if necessary */
		update_edgeport_E2PROM(edge_serial);

		dbg("%s - time 3 %ld", __func__, jiffies);

		/* set the configuration to use #1 */
/*		dbg("set_configuration 1"); */
/*		usb_set_configuration (dev, 1); */
	}
	dbg("  FirmwareMajorVersion  %d.%d.%d",
	    edge_serial->product_info.FirmwareMajorVersion,
	    edge_serial->product_info.FirmwareMinorVersion,
	    le16_to_cpu(edge_serial->product_info.FirmwareBuildNumber));

	/* we set up the pointers to the endpoints in the edge_open function,
	 * as the structures aren't created yet. */

	/* set up our port private structures */
	for (i = 0; i < serial->num_ports; ++i) {
		edge_port = kzalloc(sizeof(struct edgeport_port), GFP_KERNEL);
		if (edge_port == NULL) {
			dev_err(&serial->dev->dev, "%s - Out of memory\n",
								__func__);
			/* unwind the ports allocated so far */
			for (j = 0; j < i; ++j) {
				kfree(usb_get_serial_port_data(serial->port[j]));
				usb_set_serial_port_data(serial->port[j],
									NULL);
			}
			usb_set_serial_data(serial, NULL);
			kfree(edge_serial);
			return -ENOMEM;
		}
		spin_lock_init(&edge_port->ep_lock);
		edge_port->port = serial->port[i];
		usb_set_serial_port_data(serial->port[i], edge_port);
	}

	response = 0;

	if (edge_serial->is_epic) {
		/* EPIC thing, set up our interrupt polling now and our read
		 * urb, so that the device knows it really is connected. */
		interrupt_in_found = bulk_in_found = bulk_out_found = false;
		for (i = 0; i < serial->interface->altsetting[0]
						.desc.bNumEndpoints; ++i) {
			struct usb_endpoint_descriptor *endpoint;
			int buffer_size;

			endpoint = &serial->interface->altsetting[0].
							endpoint[i].desc;
			buffer_size = usb_endpoint_maxp(endpoint);
			if (!interrupt_in_found &&
			    (usb_endpoint_is_int_in(endpoint))) {
				/* we found a interrupt in endpoint */
				dbg("found interrupt in");

				/* not set up yet, so do it now */
				edge_serial->interrupt_read_urb =
						usb_alloc_urb(0, GFP_KERNEL);
				if (!edge_serial->interrupt_read_urb) {
					dev_err(&dev->dev, "out of memory\n");
					/* NOTE(review): edge_serial and the
					 * per-port structures allocated above
					 * are leaked on this path */
					return -ENOMEM;
				}
				edge_serial->interrupt_in_buffer =
					kmalloc(buffer_size, GFP_KERNEL);
				if (!edge_serial->interrupt_in_buffer) {
					dev_err(&dev->dev, "out of memory\n");
					usb_free_urb(edge_serial->interrupt_read_urb);
					return -ENOMEM;
				}
				edge_serial->interrupt_in_endpoint =
						endpoint->bEndpointAddress;

				/* set up our interrupt urb */
				usb_fill_int_urb(
					edge_serial->interrupt_read_urb,
					dev,
					usb_rcvintpipe(dev,
						endpoint->bEndpointAddress),
					edge_serial->interrupt_in_buffer,
					buffer_size,
					edge_interrupt_callback,
					edge_serial,
					endpoint->bInterval);

				interrupt_in_found = true;
			}

			if (!bulk_in_found &&
			    (usb_endpoint_is_bulk_in(endpoint))) {
				/* we found a bulk in endpoint */
				dbg("found bulk in");

				/* not set up yet, so do it now */
				edge_serial->read_urb =
						usb_alloc_urb(0, GFP_KERNEL);
				if (!edge_serial->read_urb) {
					dev_err(&dev->dev, "out of memory\n");
					/* NOTE(review): interrupt urb/buffer
					 * allocated above are leaked here */
					return -ENOMEM;
				}
				edge_serial->bulk_in_buffer =
					kmalloc(buffer_size, GFP_KERNEL);
				if (!edge_serial->bulk_in_buffer) {
					dev_err(&dev->dev, "out of memory\n");
					usb_free_urb(edge_serial->read_urb);
					return -ENOMEM;
				}
				edge_serial->bulk_in_endpoint =
						endpoint->bEndpointAddress;

				/* set up our bulk in urb */
				usb_fill_bulk_urb(edge_serial->read_urb, dev,
					usb_rcvbulkpipe(dev,
						endpoint->bEndpointAddress),
					edge_serial->bulk_in_buffer,
					usb_endpoint_maxp(endpoint),
					edge_bulk_in_callback,
					edge_serial);
				bulk_in_found = true;
			}

			if (!bulk_out_found &&
			    (usb_endpoint_is_bulk_out(endpoint))) {
				/* we found a bulk out endpoint */
				dbg("found bulk out");
				edge_serial->bulk_out_endpoint =
						endpoint->bEndpointAddress;
				bulk_out_found = true;
			}
		}

		if (!interrupt_in_found || !bulk_in_found || !bulk_out_found) {
			dev_err(&dev->dev, "Error - the proper endpoints "
				"were not found!\n");
			return -ENODEV;
		}

		/* start interrupt read for this edgeport this interrupt will
		 * continue as long as the edgeport is connected */
		response = usb_submit_urb(edge_serial->interrupt_read_urb,
								GFP_KERNEL);
		if (response)
			dev_err(&dev->dev,
				"%s - Error %d submitting control urb\n",
				__func__, response);
	}
	return response;
}


/****************************************************************************
 * edge_disconnect
 *	This function is called whenever the device is removed from the usb bus.
 ****************************************************************************/
static void edge_disconnect(struct usb_serial *serial)
{
	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);

	dbg("%s", __func__);

	/* stop reads and writes on all ports */
	/* free up our endpoint stuff */
	/* the interrupt/read machinery only exists for EPiC devices (set up
	 * in edge_startup); non-EPiC devices have nothing to tear down here */
	if (edge_serial->is_epic) {
		usb_kill_urb(edge_serial->interrupt_read_urb);
		usb_free_urb(edge_serial->interrupt_read_urb);
		kfree(edge_serial->interrupt_in_buffer);

		usb_kill_urb(edge_serial->read_urb);
		usb_free_urb(edge_serial->read_urb);
		kfree(edge_serial->bulk_in_buffer);
	}
}


/****************************************************************************
 * edge_release
 *	This function is called when the device structure is deallocated.
 ****************************************************************************/
static void edge_release(struct usb_serial *serial)
{
	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
	int i;

	dbg("%s", __func__);

	/* free the per-port private data, then the shared serial state */
	for (i = 0; i < serial->num_ports; ++i)
		kfree(usb_get_serial_port_data(serial->port[i]));

	kfree(edge_serial);
}

module_usb_serial_driver(io_driver, serial_drivers);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("edgeport/boot.fw");
MODULE_FIRMWARE("edgeport/boot2.fw");
MODULE_FIRMWARE("edgeport/down.fw");
MODULE_FIRMWARE("edgeport/down2.fw");

module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
gpl-2.0
championswimmer/android_kernel_sony_msm8930
drivers/net/wireless/ath/ath6kl/usb.c
4761
10422
/* * Copyright (c) 2007-2011 Atheros Communications Inc. * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/module.h> #include <linux/usb.h> #include "debug.h" #include "core.h" /* usb device object */ struct ath6kl_usb { struct usb_device *udev; struct usb_interface *interface; u8 *diag_cmd_buffer; u8 *diag_resp_buffer; struct ath6kl *ar; }; /* diagnostic command defnitions */ #define ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD 1 #define ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP 2 #define ATH6KL_USB_CONTROL_REQ_DIAG_CMD 3 #define ATH6KL_USB_CONTROL_REQ_DIAG_RESP 4 #define ATH6KL_USB_CTRL_DIAG_CC_READ 0 #define ATH6KL_USB_CTRL_DIAG_CC_WRITE 1 struct ath6kl_usb_ctrl_diag_cmd_write { __le32 cmd; __le32 address; __le32 value; __le32 _pad[1]; } __packed; struct ath6kl_usb_ctrl_diag_cmd_read { __le32 cmd; __le32 address; } __packed; struct ath6kl_usb_ctrl_diag_resp_read { __le32 value; } __packed; #define ATH6KL_USB_MAX_DIAG_CMD (sizeof(struct ath6kl_usb_ctrl_diag_cmd_write)) #define ATH6KL_USB_MAX_DIAG_RESP (sizeof(struct ath6kl_usb_ctrl_diag_resp_read)) static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb) { usb_set_intfdata(ar_usb->interface, NULL); kfree(ar_usb->diag_cmd_buffer); kfree(ar_usb->diag_resp_buffer); kfree(ar_usb); } static struct 
ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface) { struct ath6kl_usb *ar_usb = NULL; struct usb_device *dev = interface_to_usbdev(interface); int status = 0; ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL); if (ar_usb == NULL) goto fail_ath6kl_usb_create; memset(ar_usb, 0, sizeof(struct ath6kl_usb)); usb_set_intfdata(interface, ar_usb); ar_usb->udev = dev; ar_usb->interface = interface; ar_usb->diag_cmd_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_CMD, GFP_KERNEL); if (ar_usb->diag_cmd_buffer == NULL) { status = -ENOMEM; goto fail_ath6kl_usb_create; } ar_usb->diag_resp_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_RESP, GFP_KERNEL); if (ar_usb->diag_resp_buffer == NULL) { status = -ENOMEM; goto fail_ath6kl_usb_create; } fail_ath6kl_usb_create: if (status != 0) { ath6kl_usb_destroy(ar_usb); ar_usb = NULL; } return ar_usb; } static void ath6kl_usb_device_detached(struct usb_interface *interface) { struct ath6kl_usb *ar_usb; ar_usb = usb_get_intfdata(interface); if (ar_usb == NULL) return; ath6kl_stop_txrx(ar_usb->ar); ath6kl_core_cleanup(ar_usb->ar); ath6kl_usb_destroy(ar_usb); } static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb, u8 req, u16 value, u16 index, void *data, u32 size) { u8 *buf = NULL; int ret; if (size > 0) { buf = kmalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; memcpy(buf, data, size); } /* note: if successful returns number of bytes transfered */ ret = usb_control_msg(ar_usb->udev, usb_sndctrlpipe(ar_usb->udev, 0), req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, buf, size, 1000); if (ret < 0) { ath6kl_dbg(ATH6KL_DBG_USB, "%s failed,result = %d\n", __func__, ret); } kfree(buf); return 0; } static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb, u8 req, u16 value, u16 index, void *data, u32 size) { u8 *buf = NULL; int ret; if (size > 0) { buf = kmalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; } /* note: if successful returns number of bytes transfered */ ret = 
usb_control_msg(ar_usb->udev, usb_rcvctrlpipe(ar_usb->udev, 0), req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, buf, size, 2 * HZ); if (ret < 0) { ath6kl_dbg(ATH6KL_DBG_USB, "%s failed,result = %d\n", __func__, ret); } memcpy((u8 *) data, buf, size); kfree(buf); return 0; } static int ath6kl_usb_ctrl_msg_exchange(struct ath6kl_usb *ar_usb, u8 req_val, u8 *req_buf, u32 req_len, u8 resp_val, u8 *resp_buf, u32 *resp_len) { int ret; /* send command */ ret = ath6kl_usb_submit_ctrl_out(ar_usb, req_val, 0, 0, req_buf, req_len); if (ret != 0) return ret; if (resp_buf == NULL) { /* no expected response */ return ret; } /* get response */ ret = ath6kl_usb_submit_ctrl_in(ar_usb, resp_val, 0, 0, resp_buf, *resp_len); return ret; } static int ath6kl_usb_diag_read32(struct ath6kl *ar, u32 address, u32 *data) { struct ath6kl_usb *ar_usb = ar->hif_priv; struct ath6kl_usb_ctrl_diag_resp_read *resp; struct ath6kl_usb_ctrl_diag_cmd_read *cmd; u32 resp_len; int ret; cmd = (struct ath6kl_usb_ctrl_diag_cmd_read *) ar_usb->diag_cmd_buffer; memset(cmd, 0, sizeof(*cmd)); cmd->cmd = ATH6KL_USB_CTRL_DIAG_CC_READ; cmd->address = cpu_to_le32(address); resp_len = sizeof(*resp); ret = ath6kl_usb_ctrl_msg_exchange(ar_usb, ATH6KL_USB_CONTROL_REQ_DIAG_CMD, (u8 *) cmd, sizeof(struct ath6kl_usb_ctrl_diag_cmd_write), ATH6KL_USB_CONTROL_REQ_DIAG_RESP, ar_usb->diag_resp_buffer, &resp_len); if (ret) return ret; resp = (struct ath6kl_usb_ctrl_diag_resp_read *) ar_usb->diag_resp_buffer; *data = le32_to_cpu(resp->value); return ret; } static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data) { struct ath6kl_usb *ar_usb = ar->hif_priv; struct ath6kl_usb_ctrl_diag_cmd_write *cmd; cmd = (struct ath6kl_usb_ctrl_diag_cmd_write *) ar_usb->diag_cmd_buffer; memset(cmd, 0, sizeof(struct ath6kl_usb_ctrl_diag_cmd_write)); cmd->cmd = cpu_to_le32(ATH6KL_USB_CTRL_DIAG_CC_WRITE); cmd->address = cpu_to_le32(address); cmd->value = data; return ath6kl_usb_ctrl_msg_exchange(ar_usb, 
ATH6KL_USB_CONTROL_REQ_DIAG_CMD, (u8 *) cmd, sizeof(*cmd), 0, NULL, NULL); } static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) { struct ath6kl_usb *ar_usb = ar->hif_priv; int ret; /* get response */ ret = ath6kl_usb_submit_ctrl_in(ar_usb, ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP, 0, 0, buf, len); if (ret != 0) { ath6kl_err("Unable to read the bmi data from the device: %d\n", ret); return ret; } return 0; } static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) { struct ath6kl_usb *ar_usb = ar->hif_priv; int ret; /* send command */ ret = ath6kl_usb_submit_ctrl_out(ar_usb, ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD, 0, 0, buf, len); if (ret != 0) { ath6kl_err("unable to send the bmi data to the device: %d\n", ret); return ret; } return 0; } static int ath6kl_usb_power_on(struct ath6kl *ar) { return 0; } static int ath6kl_usb_power_off(struct ath6kl *ar) { return 0; } static const struct ath6kl_hif_ops ath6kl_usb_ops = { .diag_read32 = ath6kl_usb_diag_read32, .diag_write32 = ath6kl_usb_diag_write32, .bmi_read = ath6kl_usb_bmi_read, .bmi_write = ath6kl_usb_bmi_write, .power_on = ath6kl_usb_power_on, .power_off = ath6kl_usb_power_off, }; /* ath6kl usb driver registered functions */ static int ath6kl_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(interface); struct ath6kl *ar; struct ath6kl_usb *ar_usb = NULL; int vendor_id, product_id; int ret = 0; usb_get_dev(dev); vendor_id = le16_to_cpu(dev->descriptor.idVendor); product_id = le16_to_cpu(dev->descriptor.idProduct); ath6kl_dbg(ATH6KL_DBG_USB, "vendor_id = %04x\n", vendor_id); ath6kl_dbg(ATH6KL_DBG_USB, "product_id = %04x\n", product_id); if (interface->cur_altsetting) ath6kl_dbg(ATH6KL_DBG_USB, "USB Interface %d\n", interface->cur_altsetting->desc.bInterfaceNumber); if (dev->speed == USB_SPEED_HIGH) ath6kl_dbg(ATH6KL_DBG_USB, "USB 2.0 Host\n"); else ath6kl_dbg(ATH6KL_DBG_USB, "USB 1.1 Host\n"); ar_usb = 
ath6kl_usb_create(interface); if (ar_usb == NULL) { ret = -ENOMEM; goto err_usb_put; } ar = ath6kl_core_create(&ar_usb->udev->dev); if (ar == NULL) { ath6kl_err("Failed to alloc ath6kl core\n"); ret = -ENOMEM; goto err_usb_destroy; } ar->hif_priv = ar_usb; ar->hif_type = ATH6KL_HIF_TYPE_USB; ar->hif_ops = &ath6kl_usb_ops; ar->mbox_info.block_size = 16; ar->bmi.max_data_size = 252; ar_usb->ar = ar; ret = ath6kl_core_init(ar); if (ret) { ath6kl_err("Failed to init ath6kl core: %d\n", ret); goto err_core_free; } return ret; err_core_free: ath6kl_core_destroy(ar); err_usb_destroy: ath6kl_usb_destroy(ar_usb); err_usb_put: usb_put_dev(dev); return ret; } static void ath6kl_usb_remove(struct usb_interface *interface) { usb_put_dev(interface_to_usbdev(interface)); ath6kl_usb_device_detached(interface); } /* table of devices that work with this driver */ static struct usb_device_id ath6kl_usb_ids[] = { {USB_DEVICE(0x0cf3, 0x9374)}, { /* Terminating entry */ }, }; MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids); static struct usb_driver ath6kl_usb_driver = { .name = "ath6kl_usb", .probe = ath6kl_usb_probe, .disconnect = ath6kl_usb_remove, .id_table = ath6kl_usb_ids, }; static int ath6kl_usb_init(void) { usb_register(&ath6kl_usb_driver); return 0; } static void ath6kl_usb_exit(void) { usb_deregister(&ath6kl_usb_driver); } module_init(ath6kl_usb_init); module_exit(ath6kl_usb_exit); MODULE_AUTHOR("Atheros Communications, Inc."); MODULE_DESCRIPTION("Driver support for Atheros AR600x USB devices"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_FIRMWARE(AR6004_HW_1_0_FIRMWARE_FILE); MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE); MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE); MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE); MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE); MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
gpl-2.0
jcsullins/kernel-tenderloin-3.0
arch/score/mm/init.c
7577
4337
/* * arch/score/mm/init.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Lennox Wu <lennox.wu@sunplusct.com> * Chen Liqin <liqin.chen@sunplusct.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/errno.h> #include <linux/bootmem.h> #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/initrd.h> #include <asm/sections.h> #include <asm/tlb.h> unsigned long empty_zero_page; EXPORT_SYMBOL_GPL(empty_zero_page); static struct kcore_list kcore_mem, kcore_vmalloc; static unsigned long setup_zero_page(void) { struct page *page; empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0); if (!empty_zero_page) panic("Oh boy, that early out of memory?"); page = virt_to_page((void *) empty_zero_page); SetPageReserved(page); return 1UL; } #ifndef CONFIG_NEED_MULTIPLE_NODES int page_is_ram(unsigned long pagenr) { if (pagenr >= min_low_pfn && pagenr < max_low_pfn) return 1; else return 0; } void __init paging_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; unsigned long lastpfn; pagetable_init(); max_zone_pfns[ZONE_NORMAL] = max_low_pfn; lastpfn = max_low_pfn; 
free_area_init_nodes(max_zone_pfns); } void __init mem_init(void) { unsigned long codesize, reservedpages, datasize, initsize; unsigned long tmp, ram = 0; high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); totalram_pages += free_all_bootmem(); totalram_pages -= setup_zero_page(); /* Setup zeroed pages. */ reservedpages = 0; for (tmp = 0; tmp < max_low_pfn; tmp++) if (page_is_ram(tmp)) { ram++; if (PageReserved(pfn_to_page(tmp))) reservedpages++; } num_physpages = ram; codesize = (unsigned long) &_etext - (unsigned long) &_text; datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), ram << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), datasize >> 10, initsize >> 10, totalhigh_pages << (PAGE_SHIFT-10)); } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ static void free_init_pages(const char *what, unsigned long begin, unsigned long end) { unsigned long pfn; for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) { struct page *page = pfn_to_page(pfn); void *addr = phys_to_virt(PFN_PHYS(pfn)); ClearPageReserved(page); init_page_count(page); memset(addr, POISON_FREE_INITMEM, PAGE_SIZE); __free_page(page); totalram_pages++; } printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { free_init_pages("initrd memory", virt_to_phys((void *) start), virt_to_phys((void *) end)); } #endif void __init_refok free_initmem(void) { free_init_pages("unused kernel memory", __pa(&__init_begin), __pa(&__init_end)); } unsigned long pgd_current; #define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order))) /* * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER * are 
constants. So we use the variants from asm-offset.h until that gcc * will officially be retired. */ pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PTE_ORDER); pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
gpl-2.0
codename13/kernel_samsung_kylessopen
arch/blackfin/mm/maccess.c
11929
2155
/* * safe read and write memory routines callable while atomic * * Copyright 2005-2008 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/uaccess.h> #include <asm/dma.h> static int validate_memory_access_address(unsigned long addr, int size) { if (size < 0 || addr == 0) return -EFAULT; return bfin_mem_access_type(addr, size); } long probe_kernel_read(void *dst, const void *src, size_t size) { unsigned long lsrc = (unsigned long)src; int mem_type; mem_type = validate_memory_access_address(lsrc, size); if (mem_type < 0) return mem_type; if (lsrc >= SYSMMR_BASE) { if (size == 2 && lsrc % 2 == 0) { u16 mmr = bfin_read16(src); memcpy(dst, &mmr, sizeof(mmr)); return 0; } else if (size == 4 && lsrc % 4 == 0) { u32 mmr = bfin_read32(src); memcpy(dst, &mmr, sizeof(mmr)); return 0; } } else { switch (mem_type) { case BFIN_MEM_ACCESS_CORE: case BFIN_MEM_ACCESS_CORE_ONLY: return __probe_kernel_read(dst, src, size); /* XXX: should support IDMA here with SMP */ case BFIN_MEM_ACCESS_DMA: if (dma_memcpy(dst, src, size)) return 0; break; case BFIN_MEM_ACCESS_ITEST: if (isram_memcpy(dst, src, size)) return 0; break; } } return -EFAULT; } long probe_kernel_write(void *dst, const void *src, size_t size) { unsigned long ldst = (unsigned long)dst; int mem_type; mem_type = validate_memory_access_address(ldst, size); if (mem_type < 0) return mem_type; if (ldst >= SYSMMR_BASE) { if (size == 2 && ldst % 2 == 0) { u16 mmr; memcpy(&mmr, src, sizeof(mmr)); bfin_write16(dst, mmr); return 0; } else if (size == 4 && ldst % 4 == 0) { u32 mmr; memcpy(&mmr, src, sizeof(mmr)); bfin_write32(dst, mmr); return 0; } } else { switch (mem_type) { case BFIN_MEM_ACCESS_CORE: case BFIN_MEM_ACCESS_CORE_ONLY: return __probe_kernel_write(dst, src, size); /* XXX: should support IDMA here with SMP */ case BFIN_MEM_ACCESS_DMA: if (dma_memcpy(dst, src, size)) return 0; break; case BFIN_MEM_ACCESS_ITEST: if (isram_memcpy(dst, src, size)) return 0; break; } } return -EFAULT; }
gpl-2.0
barrettcolin/Quake-III-Arena
libs/jpeg6/jdmarker.cpp
154
30622
/* * jdmarker.c * * Copyright (C) 1991-1995, Thomas G. Lane. * This file is part of the Independent JPEG Group's software. * For conditions of distribution and use, see the accompanying README file. * * This file contains routines to decode JPEG datastream markers. * Most of the complexity arises from our desire to support input * suspension: if not all of the data for a marker is available, * we must exit back to the application. On resumption, we reprocess * the marker. */ #define JPEG_INTERNALS #include "jinclude.h" #include "jpeglib.h" typedef enum { /* JPEG marker codes */ M_SOF0 = 0xc0, M_SOF1 = 0xc1, M_SOF2 = 0xc2, M_SOF3 = 0xc3, M_SOF5 = 0xc5, M_SOF6 = 0xc6, M_SOF7 = 0xc7, M_JPG = 0xc8, M_SOF9 = 0xc9, M_SOF10 = 0xca, M_SOF11 = 0xcb, M_SOF13 = 0xcd, M_SOF14 = 0xce, M_SOF15 = 0xcf, M_DHT = 0xc4, M_DAC = 0xcc, M_RST0 = 0xd0, M_RST1 = 0xd1, M_RST2 = 0xd2, M_RST3 = 0xd3, M_RST4 = 0xd4, M_RST5 = 0xd5, M_RST6 = 0xd6, M_RST7 = 0xd7, M_SOI = 0xd8, M_EOI = 0xd9, M_SOS = 0xda, M_DQT = 0xdb, M_DNL = 0xdc, M_DRI = 0xdd, M_DHP = 0xde, M_EXP = 0xdf, M_APP0 = 0xe0, M_APP1 = 0xe1, M_APP2 = 0xe2, M_APP3 = 0xe3, M_APP4 = 0xe4, M_APP5 = 0xe5, M_APP6 = 0xe6, M_APP7 = 0xe7, M_APP8 = 0xe8, M_APP9 = 0xe9, M_APP10 = 0xea, M_APP11 = 0xeb, M_APP12 = 0xec, M_APP13 = 0xed, M_APP14 = 0xee, M_APP15 = 0xef, M_JPG0 = 0xf0, M_JPG13 = 0xfd, M_COM = 0xfe, M_TEM = 0x01, M_ERROR = 0x100 } JPEG_MARKER; /* * Macros for fetching data from the data source module. * * At all times, cinfo->src->next_input_byte and ->bytes_in_buffer reflect * the current restart point; we update them only when we have reached a * suitable place to restart if a suspension occurs. 
*/ /* Declare and initialize local copies of input pointer/count */ #define INPUT_VARS(cinfo) \ struct jpeg_source_mgr * datasrc = (cinfo)->src; \ const JOCTET * next_input_byte = datasrc->next_input_byte; \ size_t bytes_in_buffer = datasrc->bytes_in_buffer /* Unload the local copies --- do this only at a restart boundary */ #define INPUT_SYNC(cinfo) \ ( datasrc->next_input_byte = next_input_byte, \ datasrc->bytes_in_buffer = bytes_in_buffer ) /* Reload the local copies --- seldom used except in MAKE_BYTE_AVAIL */ #define INPUT_RELOAD(cinfo) \ ( next_input_byte = datasrc->next_input_byte, \ bytes_in_buffer = datasrc->bytes_in_buffer ) /* Internal macro for INPUT_BYTE and INPUT_2BYTES: make a byte available. * Note we do *not* do INPUT_SYNC before calling fill_input_buffer, * but we must reload the local copies after a successful fill. */ #define MAKE_BYTE_AVAIL(cinfo,action) \ if (bytes_in_buffer == 0) { \ if (! (*datasrc->fill_input_buffer) (cinfo)) \ { action; } \ INPUT_RELOAD(cinfo); \ } \ bytes_in_buffer-- /* Read a byte into variable V. * If must suspend, take the specified action (typically "return FALSE"). */ #define INPUT_BYTE(cinfo,V,action) \ MAKESTMT( MAKE_BYTE_AVAIL(cinfo,action); \ V = GETJOCTET(*next_input_byte++); ) /* As above, but read two bytes interpreted as an unsigned 16-bit integer. * V should be declared unsigned int or perhaps INT32. */ #define INPUT_2BYTES(cinfo,V,action) \ MAKESTMT( MAKE_BYTE_AVAIL(cinfo,action); \ V = ((unsigned int) GETJOCTET(*next_input_byte++)) << 8; \ MAKE_BYTE_AVAIL(cinfo,action); \ V += GETJOCTET(*next_input_byte++); ) /* * Routines to process JPEG markers. * * Entry condition: JPEG marker itself has been read and its code saved * in cinfo->unread_marker; input restart point is just after the marker. * * Exit: if return TRUE, have read and processed any parameters, and have * updated the restart point to point after the parameters. 
* If return FALSE, was forced to suspend before reaching end of * marker parameters; restart point has not been moved. Same routine * will be called again after application supplies more input data. * * This approach to suspension assumes that all of a marker's parameters can * fit into a single input bufferload. This should hold for "normal" * markers. Some COM/APPn markers might have large parameter segments, * but we use skip_input_data to get past those, and thereby put the problem * on the source manager's shoulders. * * Note that we don't bother to avoid duplicate trace messages if a * suspension occurs within marker parameters. Other side effects * require more care. */ LOCAL boolean get_soi (j_decompress_ptr cinfo) /* Process an SOI marker */ { int i; TRACEMS(cinfo, 1, JTRC_SOI); if (cinfo->marker->saw_SOI) ERREXIT(cinfo, JERR_SOI_DUPLICATE); /* Reset all parameters that are defined to be reset by SOI */ for (i = 0; i < NUM_ARITH_TBLS; i++) { cinfo->arith_dc_L[i] = 0; cinfo->arith_dc_U[i] = 1; cinfo->arith_ac_K[i] = 5; } cinfo->restart_interval = 0; /* Set initial assumptions for colorspace etc */ cinfo->jpeg_color_space = JCS_UNKNOWN; cinfo->CCIR601_sampling = FALSE; /* Assume non-CCIR sampling??? 
*/ cinfo->saw_JFIF_marker = FALSE; cinfo->density_unit = 0; /* set default JFIF APP0 values */ cinfo->X_density = 1; cinfo->Y_density = 1; cinfo->saw_Adobe_marker = FALSE; cinfo->Adobe_transform = 0; cinfo->marker->saw_SOI = TRUE; return TRUE; } LOCAL boolean get_sof (j_decompress_ptr cinfo, boolean is_prog, boolean is_arith) /* Process a SOFn marker */ { INT32 length; int c, ci; jpeg_component_info * compptr; INPUT_VARS(cinfo); cinfo->progressive_mode = is_prog; cinfo->arith_code = is_arith; INPUT_2BYTES(cinfo, length, return FALSE); INPUT_BYTE(cinfo, cinfo->data_precision, return FALSE); INPUT_2BYTES(cinfo, cinfo->image_height, return FALSE); INPUT_2BYTES(cinfo, cinfo->image_width, return FALSE); INPUT_BYTE(cinfo, cinfo->num_components, return FALSE); length -= 8; TRACEMS4(cinfo, 1, JTRC_SOF, cinfo->unread_marker, (int) cinfo->image_width, (int) cinfo->image_height, cinfo->num_components); if (cinfo->marker->saw_SOF) ERREXIT(cinfo, JERR_SOF_DUPLICATE); /* We don't support files in which the image height is initially specified */ /* as 0 and is later redefined by DNL. As long as we have to check that, */ /* might as well have a general sanity check. 
*/ if (cinfo->image_height <= 0 || cinfo->image_width <= 0 || cinfo->num_components <= 0) ERREXIT(cinfo, JERR_EMPTY_IMAGE); if (length != (cinfo->num_components * 3)) ERREXIT(cinfo, JERR_BAD_LENGTH); if (cinfo->comp_info == NULL) /* do only once, even if suspend */ cinfo->comp_info = (jpeg_component_info *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, cinfo->num_components * SIZEOF(jpeg_component_info)); for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; ci++, compptr++) { compptr->component_index = ci; INPUT_BYTE(cinfo, compptr->component_id, return FALSE); INPUT_BYTE(cinfo, c, return FALSE); compptr->h_samp_factor = (c >> 4) & 15; compptr->v_samp_factor = (c ) & 15; INPUT_BYTE(cinfo, compptr->quant_tbl_no, return FALSE); TRACEMS4(cinfo, 1, JTRC_SOF_COMPONENT, compptr->component_id, compptr->h_samp_factor, compptr->v_samp_factor, compptr->quant_tbl_no); } cinfo->marker->saw_SOF = TRUE; INPUT_SYNC(cinfo); return TRUE; } LOCAL boolean get_sos (j_decompress_ptr cinfo) /* Process a SOS marker */ { INT32 length; int i, ci, n, c, cc; jpeg_component_info * compptr; INPUT_VARS(cinfo); if (! 
cinfo->marker->saw_SOF) ERREXIT(cinfo, JERR_SOS_NO_SOF); INPUT_2BYTES(cinfo, length, return FALSE); INPUT_BYTE(cinfo, n, return FALSE); /* Number of components */ if (length != (n * 2 + 6) || n < 1 || n > MAX_COMPS_IN_SCAN) ERREXIT(cinfo, JERR_BAD_LENGTH); TRACEMS1(cinfo, 1, JTRC_SOS, n); cinfo->comps_in_scan = n; /* Collect the component-spec parameters */ for (i = 0; i < n; i++) { INPUT_BYTE(cinfo, cc, return FALSE); INPUT_BYTE(cinfo, c, return FALSE); for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; ci++, compptr++) { if (cc == compptr->component_id) goto id_found; } ERREXIT1(cinfo, JERR_BAD_COMPONENT_ID, cc); id_found: cinfo->cur_comp_info[i] = compptr; compptr->dc_tbl_no = (c >> 4) & 15; compptr->ac_tbl_no = (c ) & 15; TRACEMS3(cinfo, 1, JTRC_SOS_COMPONENT, cc, compptr->dc_tbl_no, compptr->ac_tbl_no); } /* Collect the additional scan parameters Ss, Se, Ah/Al. */ INPUT_BYTE(cinfo, c, return FALSE); cinfo->Ss = c; INPUT_BYTE(cinfo, c, return FALSE); cinfo->Se = c; INPUT_BYTE(cinfo, c, return FALSE); cinfo->Ah = (c >> 4) & 15; cinfo->Al = (c ) & 15; TRACEMS4(cinfo, 1, JTRC_SOS_PARAMS, cinfo->Ss, cinfo->Se, cinfo->Ah, cinfo->Al); /* Prepare to scan data & restart markers */ cinfo->marker->next_restart_num = 0; /* Count another SOS marker */ cinfo->input_scan_number++; INPUT_SYNC(cinfo); return TRUE; } METHODDEF boolean get_app0 (j_decompress_ptr cinfo) /* Process an APP0 marker */ { #define JFIF_LEN 14 INT32 length; UINT8 b[JFIF_LEN]; int buffp; INPUT_VARS(cinfo); INPUT_2BYTES(cinfo, length, return FALSE); length -= 2; /* See if a JFIF APP0 marker is present */ if (length >= JFIF_LEN) { for (buffp = 0; buffp < JFIF_LEN; buffp++) INPUT_BYTE(cinfo, b[buffp], return FALSE); length -= JFIF_LEN; if (b[0]==0x4A && b[1]==0x46 && b[2]==0x49 && b[3]==0x46 && b[4]==0) { /* Found JFIF APP0 marker: check version */ /* Major version must be 1, anything else signals an incompatible change. 
* We used to treat this as an error, but now it's a nonfatal warning, * because some bozo at Hijaak couldn't read the spec. * Minor version should be 0..2, but process anyway if newer. */ if (b[5] != 1) WARNMS2(cinfo, JWRN_JFIF_MAJOR, b[5], b[6]); else if (b[6] > 2) TRACEMS2(cinfo, 1, JTRC_JFIF_MINOR, b[5], b[6]); /* Save info */ cinfo->saw_JFIF_marker = TRUE; cinfo->density_unit = b[7]; cinfo->X_density = (b[8] << 8) + b[9]; cinfo->Y_density = (b[10] << 8) + b[11]; TRACEMS3(cinfo, 1, JTRC_JFIF, cinfo->X_density, cinfo->Y_density, cinfo->density_unit); if (b[12] | b[13]) TRACEMS2(cinfo, 1, JTRC_JFIF_THUMBNAIL, b[12], b[13]); if (length != ((INT32) b[12] * (INT32) b[13] * (INT32) 3)) TRACEMS1(cinfo, 1, JTRC_JFIF_BADTHUMBNAILSIZE, (int) length); } else { /* Start of APP0 does not match "JFIF" */ TRACEMS1(cinfo, 1, JTRC_APP0, (int) length + JFIF_LEN); } } else { /* Too short to be JFIF marker */ TRACEMS1(cinfo, 1, JTRC_APP0, (int) length); } INPUT_SYNC(cinfo); if (length > 0) /* skip any remaining data -- could be lots */ (*cinfo->src->skip_input_data) (cinfo, (long) length); return TRUE; } METHODDEF boolean get_app14 (j_decompress_ptr cinfo) /* Process an APP14 marker */ { #define ADOBE_LEN 12 INT32 length; UINT8 b[ADOBE_LEN]; int buffp; unsigned int version, flags0, flags1, transform; INPUT_VARS(cinfo); INPUT_2BYTES(cinfo, length, return FALSE); length -= 2; /* See if an Adobe APP14 marker is present */ if (length >= ADOBE_LEN) { for (buffp = 0; buffp < ADOBE_LEN; buffp++) INPUT_BYTE(cinfo, b[buffp], return FALSE); length -= ADOBE_LEN; if (b[0]==0x41 && b[1]==0x64 && b[2]==0x6F && b[3]==0x62 && b[4]==0x65) { /* Found Adobe APP14 marker */ version = (b[5] << 8) + b[6]; flags0 = (b[7] << 8) + b[8]; flags1 = (b[9] << 8) + b[10]; transform = b[11]; TRACEMS4(cinfo, 1, JTRC_ADOBE, version, flags0, flags1, transform); cinfo->saw_Adobe_marker = TRUE; cinfo->Adobe_transform = (UINT8) transform; } else { /* Start of APP14 does not match "Adobe" */ TRACEMS1(cinfo, 1, 
JTRC_APP14, (int) length + ADOBE_LEN); } } else { /* Too short to be Adobe marker */ TRACEMS1(cinfo, 1, JTRC_APP14, (int) length); } INPUT_SYNC(cinfo); if (length > 0) /* skip any remaining data -- could be lots */ (*cinfo->src->skip_input_data) (cinfo, (long) length); return TRUE; } LOCAL boolean get_dac (j_decompress_ptr cinfo) /* Process a DAC marker */ { INT32 length; int index, val; INPUT_VARS(cinfo); INPUT_2BYTES(cinfo, length, return FALSE); length -= 2; while (length > 0) { INPUT_BYTE(cinfo, index, return FALSE); INPUT_BYTE(cinfo, val, return FALSE); length -= 2; TRACEMS2(cinfo, 1, JTRC_DAC, index, val); if (index < 0 || index >= (2*NUM_ARITH_TBLS)) ERREXIT1(cinfo, JERR_DAC_INDEX, index); if (index >= NUM_ARITH_TBLS) { /* define AC table */ cinfo->arith_ac_K[index-NUM_ARITH_TBLS] = (UINT8) val; } else { /* define DC table */ cinfo->arith_dc_L[index] = (UINT8) (val & 0x0F); cinfo->arith_dc_U[index] = (UINT8) (val >> 4); if (cinfo->arith_dc_L[index] > cinfo->arith_dc_U[index]) ERREXIT1(cinfo, JERR_DAC_VALUE, val); } } INPUT_SYNC(cinfo); return TRUE; } LOCAL boolean get_dht (j_decompress_ptr cinfo) /* Process a DHT marker */ { INT32 length; UINT8 bits[17]; UINT8 huffval[256]; int i, index, count; JHUFF_TBL **htblptr; INPUT_VARS(cinfo); INPUT_2BYTES(cinfo, length, return FALSE); length -= 2; while (length > 0) { INPUT_BYTE(cinfo, index, return FALSE); TRACEMS1(cinfo, 1, JTRC_DHT, index); bits[0] = 0; count = 0; for (i = 1; i <= 16; i++) { INPUT_BYTE(cinfo, bits[i], return FALSE); count += bits[i]; } length -= 1 + 16; TRACEMS8(cinfo, 2, JTRC_HUFFBITS, bits[1], bits[2], bits[3], bits[4], bits[5], bits[6], bits[7], bits[8]); TRACEMS8(cinfo, 2, JTRC_HUFFBITS, bits[9], bits[10], bits[11], bits[12], bits[13], bits[14], bits[15], bits[16]); if (count > 256 || ((INT32) count) > length) ERREXIT(cinfo, JERR_DHT_COUNTS); for (i = 0; i < count; i++) INPUT_BYTE(cinfo, huffval[i], return FALSE); length -= count; if (index & 0x10) { /* AC table definition */ index -= 0x10; 
htblptr = &cinfo->ac_huff_tbl_ptrs[index]; } else { /* DC table definition */ htblptr = &cinfo->dc_huff_tbl_ptrs[index]; } if (index < 0 || index >= NUM_HUFF_TBLS) ERREXIT1(cinfo, JERR_DHT_INDEX, index); if (*htblptr == NULL) *htblptr = jpeg_alloc_huff_table((j_common_ptr) cinfo); MEMCOPY((*htblptr)->bits, bits, SIZEOF((*htblptr)->bits)); MEMCOPY((*htblptr)->huffval, huffval, SIZEOF((*htblptr)->huffval)); } INPUT_SYNC(cinfo); return TRUE; } LOCAL boolean get_dqt (j_decompress_ptr cinfo) /* Process a DQT marker */ { INT32 length; int n, i, prec; unsigned int tmp; JQUANT_TBL *quant_ptr; INPUT_VARS(cinfo); INPUT_2BYTES(cinfo, length, return FALSE); length -= 2; while (length > 0) { INPUT_BYTE(cinfo, n, return FALSE); prec = n >> 4; n &= 0x0F; TRACEMS2(cinfo, 1, JTRC_DQT, n, prec); if (n >= NUM_QUANT_TBLS) ERREXIT1(cinfo, JERR_DQT_INDEX, n); if (cinfo->quant_tbl_ptrs[n] == NULL) cinfo->quant_tbl_ptrs[n] = jpeg_alloc_quant_table((j_common_ptr) cinfo); quant_ptr = cinfo->quant_tbl_ptrs[n]; for (i = 0; i < DCTSIZE2; i++) { if (prec) INPUT_2BYTES(cinfo, tmp, return FALSE); else INPUT_BYTE(cinfo, tmp, return FALSE); quant_ptr->quantval[i] = (UINT16) tmp; } for (i = 0; i < DCTSIZE2; i += 8) { TRACEMS8(cinfo, 2, JTRC_QUANTVALS, quant_ptr->quantval[i ], quant_ptr->quantval[i+1], quant_ptr->quantval[i+2], quant_ptr->quantval[i+3], quant_ptr->quantval[i+4], quant_ptr->quantval[i+5], quant_ptr->quantval[i+6], quant_ptr->quantval[i+7]); } length -= DCTSIZE2+1; if (prec) length -= DCTSIZE2; } INPUT_SYNC(cinfo); return TRUE; } LOCAL boolean get_dri (j_decompress_ptr cinfo) /* Process a DRI marker */ { INT32 length; unsigned int tmp; INPUT_VARS(cinfo); INPUT_2BYTES(cinfo, length, return FALSE); if (length != 4) ERREXIT(cinfo, JERR_BAD_LENGTH); INPUT_2BYTES(cinfo, tmp, return FALSE); TRACEMS1(cinfo, 1, JTRC_DRI, tmp); cinfo->restart_interval = tmp; INPUT_SYNC(cinfo); return TRUE; } METHODDEF boolean skip_variable (j_decompress_ptr cinfo) /* Skip over an unknown or uninteresting 
variable-length marker */ { INT32 length; INPUT_VARS(cinfo); INPUT_2BYTES(cinfo, length, return FALSE); TRACEMS2(cinfo, 1, JTRC_MISC_MARKER, cinfo->unread_marker, (int) length); INPUT_SYNC(cinfo); /* do before skip_input_data */ (*cinfo->src->skip_input_data) (cinfo, (long) length - 2L); return TRUE; } /* * Find the next JPEG marker, save it in cinfo->unread_marker. * Returns FALSE if had to suspend before reaching a marker; * in that case cinfo->unread_marker is unchanged. * * Note that the result might not be a valid marker code, * but it will never be 0 or FF. */ LOCAL boolean next_marker (j_decompress_ptr cinfo) { int c; INPUT_VARS(cinfo); for (;;) { INPUT_BYTE(cinfo, c, return FALSE); /* Skip any non-FF bytes. * This may look a bit inefficient, but it will not occur in a valid file. * We sync after each discarded byte so that a suspending data source * can discard the byte from its buffer. */ while (c != 0xFF) { cinfo->marker->discarded_bytes++; INPUT_SYNC(cinfo); INPUT_BYTE(cinfo, c, return FALSE); } /* This loop swallows any duplicate FF bytes. Extra FFs are legal as * pad bytes, so don't count them in discarded_bytes. We assume there * will not be so many consecutive FF bytes as to overflow a suspending * data source's input buffer. */ do { INPUT_BYTE(cinfo, c, return FALSE); } while (c == 0xFF); if (c != 0) break; /* found a valid marker, exit loop */ /* Reach here if we found a stuffed-zero data sequence (FF/00). * Discard it and loop back to try again. */ cinfo->marker->discarded_bytes += 2; INPUT_SYNC(cinfo); } if (cinfo->marker->discarded_bytes != 0) { WARNMS2(cinfo, JWRN_EXTRANEOUS_DATA, cinfo->marker->discarded_bytes, c); cinfo->marker->discarded_bytes = 0; } cinfo->unread_marker = c; INPUT_SYNC(cinfo); return TRUE; } LOCAL boolean first_marker (j_decompress_ptr cinfo) /* Like next_marker, but used to obtain the initial SOI marker. 
*/ /* For this marker, we do not allow preceding garbage or fill; otherwise, * we might well scan an entire input file before realizing it ain't JPEG. * If an application wants to process non-JFIF files, it must seek to the * SOI before calling the JPEG library. */ { int c, c2; INPUT_VARS(cinfo); INPUT_BYTE(cinfo, c, return FALSE); INPUT_BYTE(cinfo, c2, return FALSE); if (c != 0xFF || c2 != (int) M_SOI) ERREXIT2(cinfo, JERR_NO_SOI, c, c2); cinfo->unread_marker = c2; INPUT_SYNC(cinfo); return TRUE; } /* * Read markers until SOS or EOI. * * Returns same codes as are defined for jpeg_consume_input: * JPEG_SUSPENDED, JPEG_REACHED_SOS, or JPEG_REACHED_EOI. */ METHODDEF int read_markers (j_decompress_ptr cinfo) { /* Outer loop repeats once for each marker. */ for (;;) { /* Collect the marker proper, unless we already did. */ /* NB: first_marker() enforces the requirement that SOI appear first. */ if (cinfo->unread_marker == 0) { if (! cinfo->marker->saw_SOI) { if (! first_marker(cinfo)) return JPEG_SUSPENDED; } else { if (! next_marker(cinfo)) return JPEG_SUSPENDED; } } /* At this point cinfo->unread_marker contains the marker code and the * input point is just past the marker proper, but before any parameters. * A suspension will cause us to return with this state still true. */ switch (cinfo->unread_marker) { case M_SOI: if (! get_soi(cinfo)) return JPEG_SUSPENDED; break; case M_SOF0: /* Baseline */ case M_SOF1: /* Extended sequential, Huffman */ if (! get_sof(cinfo, FALSE, FALSE)) return JPEG_SUSPENDED; break; case M_SOF2: /* Progressive, Huffman */ if (! get_sof(cinfo, TRUE, FALSE)) return JPEG_SUSPENDED; break; case M_SOF9: /* Extended sequential, arithmetic */ if (! get_sof(cinfo, FALSE, TRUE)) return JPEG_SUSPENDED; break; case M_SOF10: /* Progressive, arithmetic */ if (! 
get_sof(cinfo, TRUE, TRUE)) return JPEG_SUSPENDED; break; /* Currently unsupported SOFn types */ case M_SOF3: /* Lossless, Huffman */ case M_SOF5: /* Differential sequential, Huffman */ case M_SOF6: /* Differential progressive, Huffman */ case M_SOF7: /* Differential lossless, Huffman */ case M_JPG: /* Reserved for JPEG extensions */ case M_SOF11: /* Lossless, arithmetic */ case M_SOF13: /* Differential sequential, arithmetic */ case M_SOF14: /* Differential progressive, arithmetic */ case M_SOF15: /* Differential lossless, arithmetic */ ERREXIT1(cinfo, JERR_SOF_UNSUPPORTED, cinfo->unread_marker); break; case M_SOS: if (! get_sos(cinfo)) return JPEG_SUSPENDED; cinfo->unread_marker = 0; /* processed the marker */ return JPEG_REACHED_SOS; case M_EOI: TRACEMS(cinfo, 1, JTRC_EOI); cinfo->unread_marker = 0; /* processed the marker */ return JPEG_REACHED_EOI; case M_DAC: if (! get_dac(cinfo)) return JPEG_SUSPENDED; break; case M_DHT: if (! get_dht(cinfo)) return JPEG_SUSPENDED; break; case M_DQT: if (! get_dqt(cinfo)) return JPEG_SUSPENDED; break; case M_DRI: if (! get_dri(cinfo)) return JPEG_SUSPENDED; break; case M_APP0: case M_APP1: case M_APP2: case M_APP3: case M_APP4: case M_APP5: case M_APP6: case M_APP7: case M_APP8: case M_APP9: case M_APP10: case M_APP11: case M_APP12: case M_APP13: case M_APP14: case M_APP15: if (! (*cinfo->marker->process_APPn[cinfo->unread_marker - (int) M_APP0]) (cinfo)) return JPEG_SUSPENDED; break; case M_COM: if (! (*cinfo->marker->process_COM) (cinfo)) return JPEG_SUSPENDED; break; case M_RST0: /* these are all parameterless */ case M_RST1: case M_RST2: case M_RST3: case M_RST4: case M_RST5: case M_RST6: case M_RST7: case M_TEM: TRACEMS1(cinfo, 1, JTRC_PARMLESS_MARKER, cinfo->unread_marker); break; case M_DNL: /* Ignore DNL ... perhaps the wrong thing */ if (! 
skip_variable(cinfo)) return JPEG_SUSPENDED; break; default: /* must be DHP, EXP, JPGn, or RESn */ /* For now, we treat the reserved markers as fatal errors since they are * likely to be used to signal incompatible JPEG Part 3 extensions. * Once the JPEG 3 version-number marker is well defined, this code * ought to change! */ ERREXIT1(cinfo, JERR_UNKNOWN_MARKER, cinfo->unread_marker); break; } /* Successfully processed marker, so reset state variable */ cinfo->unread_marker = 0; } /* end loop */ } /* * Read a restart marker, which is expected to appear next in the datastream; * if the marker is not there, take appropriate recovery action. * Returns FALSE if suspension is required. * * This is called by the entropy decoder after it has read an appropriate * number of MCUs. cinfo->unread_marker may be nonzero if the entropy decoder * has already read a marker from the data source. Under normal conditions * cinfo->unread_marker will be reset to 0 before returning; if not reset, * it holds a marker which the decoder will be unable to read past. */ METHODDEF boolean read_restart_marker (j_decompress_ptr cinfo) { /* Obtain a marker unless we already did. */ /* Note that next_marker will complain if it skips any data. */ if (cinfo->unread_marker == 0) { if (! next_marker(cinfo)) return FALSE; } if (cinfo->unread_marker == ((int) M_RST0 + cinfo->marker->next_restart_num)) { /* Normal case --- swallow the marker and let entropy decoder continue */ TRACEMS1(cinfo, 2, JTRC_RST, cinfo->marker->next_restart_num); cinfo->unread_marker = 0; } else { /* Uh-oh, the restart markers have been messed up. */ /* Let the data source manager determine how to resync. */ if (! 
(*cinfo->src->resync_to_restart) (cinfo, cinfo->marker->next_restart_num)) return FALSE; } /* Update next-restart state */ cinfo->marker->next_restart_num = (cinfo->marker->next_restart_num + 1) & 7; return TRUE; } /* * This is the default resync_to_restart method for data source managers * to use if they don't have any better approach. Some data source managers * may be able to back up, or may have additional knowledge about the data * which permits a more intelligent recovery strategy; such managers would * presumably supply their own resync method. * * read_restart_marker calls resync_to_restart if it finds a marker other than * the restart marker it was expecting. (This code is *not* used unless * a nonzero restart interval has been declared.) cinfo->unread_marker is * the marker code actually found (might be anything, except 0 or FF). * The desired restart marker number (0..7) is passed as a parameter. * This routine is supposed to apply whatever error recovery strategy seems * appropriate in order to position the input stream to the next data segment. * Note that cinfo->unread_marker is treated as a marker appearing before * the current data-source input point; usually it should be reset to zero * before returning. * Returns FALSE if suspension is required. * * This implementation is substantially constrained by wanting to treat the * input as a data stream; this means we can't back up. Therefore, we have * only the following actions to work with: * 1. Simply discard the marker and let the entropy decoder resume at next * byte of file. * 2. Read forward until we find another marker, discarding intervening * data. (In theory we could look ahead within the current bufferload, * without having to discard data if we don't find the desired marker. * This idea is not implemented here, in part because it makes behavior * dependent on buffer size and chance buffer-boundary positions.) * 3. Leave the marker unread (by failing to zero cinfo->unread_marker). 
* This will cause the entropy decoder to process an empty data segment, * inserting dummy zeroes, and then we will reprocess the marker. * * #2 is appropriate if we think the desired marker lies ahead, while #3 is * appropriate if the found marker is a future restart marker (indicating * that we have missed the desired restart marker, probably because it got * corrupted). * We apply #2 or #3 if the found marker is a restart marker no more than * two counts behind or ahead of the expected one. We also apply #2 if the * found marker is not a legal JPEG marker code (it's certainly bogus data). * If the found marker is a restart marker more than 2 counts away, we do #1 * (too much risk that the marker is erroneous; with luck we will be able to * resync at some future point). * For any valid non-restart JPEG marker, we apply #3. This keeps us from * overrunning the end of a scan. An implementation limited to single-scan * files might find it better to apply #2 for markers other than EOI, since * any other marker would have to be bogus data in that case. */ GLOBAL boolean jpeg_resync_to_restart (j_decompress_ptr cinfo, int desired) { int marker = cinfo->unread_marker; int action = 1; /* Always put up a warning. */ WARNMS2(cinfo, JWRN_MUST_RESYNC, marker, desired); /* Outer loop handles repeated decision after scanning forward. 
*/ for (;;) { if (marker < (int) M_SOF0) action = 2; /* invalid marker */ else if (marker < (int) M_RST0 || marker > (int) M_RST7) action = 3; /* valid non-restart marker */ else { if (marker == ((int) M_RST0 + ((desired+1) & 7)) || marker == ((int) M_RST0 + ((desired+2) & 7))) action = 3; /* one of the next two expected restarts */ else if (marker == ((int) M_RST0 + ((desired-1) & 7)) || marker == ((int) M_RST0 + ((desired-2) & 7))) action = 2; /* a prior restart, so advance */ else action = 1; /* desired restart or too far away */ } TRACEMS2(cinfo, 4, JTRC_RECOVERY_ACTION, marker, action); switch (action) { case 1: /* Discard marker and let entropy decoder resume processing. */ cinfo->unread_marker = 0; return TRUE; case 2: /* Scan to the next marker, and repeat the decision loop. */ if (! next_marker(cinfo)) return FALSE; marker = cinfo->unread_marker; break; case 3: /* Return without advancing past this marker. */ /* Entropy decoder will be forced to process an empty segment. */ return TRUE; } } /* end loop */ } /* * Reset marker processing state to begin a fresh datastream. */ METHODDEF void reset_marker_reader (j_decompress_ptr cinfo) { cinfo->comp_info = NULL; /* until allocated by get_sof */ cinfo->input_scan_number = 0; /* no SOS seen yet */ cinfo->unread_marker = 0; /* no pending marker */ cinfo->marker->saw_SOI = FALSE; /* set internal state too */ cinfo->marker->saw_SOF = FALSE; cinfo->marker->discarded_bytes = 0; } /* * Initialize the marker reader module. * This is called only once, when the decompression object is created. 
*/ GLOBAL void jinit_marker_reader (j_decompress_ptr cinfo) { int i; /* Create subobject in permanent pool */ cinfo->marker = (struct jpeg_marker_reader *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT, SIZEOF(struct jpeg_marker_reader)); /* Initialize method pointers */ cinfo->marker->reset_marker_reader = reset_marker_reader; cinfo->marker->read_markers = read_markers; cinfo->marker->read_restart_marker = read_restart_marker; cinfo->marker->process_COM = skip_variable; for (i = 0; i < 16; i++) cinfo->marker->process_APPn[i] = skip_variable; cinfo->marker->process_APPn[0] = get_app0; cinfo->marker->process_APPn[14] = get_app14; /* Reset marker processing state */ reset_marker_reader(cinfo); }
gpl-2.0
sourabgu/linux
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
154
4300
/* * Copyright 2013 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs <bskeggs@redhat.com> */ #include "gf100.h" #include "ctxgf100.h" #include <nvif/class.h> /******************************************************************************* * PGRAPH register lists ******************************************************************************/ static const struct gf100_gr_init gf117_gr_init_pe_0[] = { { 0x41980c, 1, 0x04, 0x00000010 }, { 0x419844, 1, 0x04, 0x00000000 }, { 0x41984c, 1, 0x04, 0x00005bc8 }, { 0x419850, 3, 0x04, 0x00000000 }, {} }; const struct gf100_gr_init gf117_gr_init_pes_0[] = { { 0x41be04, 1, 0x04, 0x00000000 }, { 0x41be08, 1, 0x04, 0x00000004 }, { 0x41be0c, 1, 0x04, 0x00000000 }, { 0x41be10, 1, 0x04, 0x003b8bc7 }, { 0x41be14, 2, 0x04, 0x00000000 }, {} }; const struct gf100_gr_init gf117_gr_init_wwdx_0[] = { { 0x41bfd4, 1, 0x04, 0x00800000 }, { 0x41bfdc, 1, 0x04, 0x00000000 }, { 0x41bff8, 2, 0x04, 0x00000000 }, {} }; const struct gf100_gr_init gf117_gr_init_cbm_0[] = { { 0x41becc, 1, 0x04, 0x00000000 }, { 0x41bee8, 2, 0x04, 0x00000000 }, {} }; static const struct gf100_gr_pack gf117_gr_pack_mmio[] = { { gf100_gr_init_main_0 }, { gf100_gr_init_fe_0 }, { gf100_gr_init_pri_0 }, { gf100_gr_init_rstr2d_0 }, { gf119_gr_init_pd_0 }, { gf119_gr_init_ds_0 }, { gf100_gr_init_scc_0 }, { gf119_gr_init_prop_0 }, { gf108_gr_init_gpc_unk_0 }, { gf100_gr_init_setup_0 }, { gf100_gr_init_crstr_0 }, { gf108_gr_init_setup_1 }, { gf100_gr_init_zcull_0 }, { gf119_gr_init_gpm_0 }, { gf119_gr_init_gpc_unk_1 }, { gf100_gr_init_gcc_0 }, { gf100_gr_init_tpccs_0 }, { gf119_gr_init_tex_0 }, { gf117_gr_init_pe_0 }, { gf100_gr_init_l1c_0 }, { gf100_gr_init_mpc_0 }, { gf119_gr_init_sm_0 }, { gf117_gr_init_pes_0 }, { gf117_gr_init_wwdx_0 }, { gf117_gr_init_cbm_0 }, { gf100_gr_init_be_0 }, { gf119_gr_init_fe_1 }, {} }; /******************************************************************************* * PGRAPH engine/subdev functions ******************************************************************************/ #include 
"fuc/hubgf117.fuc3.h" static struct gf100_gr_ucode gf117_gr_fecs_ucode = { .code.data = gf117_grhub_code, .code.size = sizeof(gf117_grhub_code), .data.data = gf117_grhub_data, .data.size = sizeof(gf117_grhub_data), }; #include "fuc/gpcgf117.fuc3.h" static struct gf100_gr_ucode gf117_gr_gpccs_ucode = { .code.data = gf117_grgpc_code, .code.size = sizeof(gf117_grgpc_code), .data.data = gf117_grgpc_data, .data.size = sizeof(gf117_grgpc_data), }; static const struct gf100_gr_func gf117_gr = { .init = gf100_gr_init, .mmio = gf117_gr_pack_mmio, .fecs.ucode = &gf117_gr_fecs_ucode, .gpccs.ucode = &gf117_gr_gpccs_ucode, .rops = gf100_gr_rops, .ppc_nr = 1, .grctx = &gf117_grctx, .sclass = { { -1, -1, FERMI_TWOD_A }, { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A }, { -1, -1, FERMI_A, &gf100_fermi }, { -1, -1, FERMI_B, &gf100_fermi }, { -1, -1, FERMI_C, &gf100_fermi }, { -1, -1, FERMI_COMPUTE_A }, { -1, -1, FERMI_COMPUTE_B }, {} } }; int gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { return gf100_gr_new_(&gf117_gr, device, index, pgr); }
gpl-2.0
heptalium/rpi-sources-3.2
arch/ia64/kernel/smp.c
666
7519
/* * SMP Support * * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com> * * Lots of stuff stolen from arch/alpha/kernel/smp.c * * 01/05/16 Rohit Seth <rohit.seth@intel.com> IA64-SMP functions. Reorganized * the existing code (on the lines of x86 port). * 00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_jiffy * calibration on each CPU. * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> fixed logical processor id * 00/03/31 Rohit Seth <rohit.seth@intel.com> Fixes for Bootstrap Processor * & cpu_online_map now gets done here (instead of setup.c) * 99/10/05 davidm Update to bring it in sync with new command-line processing * scheme. * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and * smp_call_function_single to resend IPI on timeouts */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/smp.h> #include <linux/kernel_stat.h> #include <linux/mm.h> #include <linux/cache.h> #include <linux/delay.h> #include <linux/efi.h> #include <linux/bitops.h> #include <linux/kexec.h> #include <linux/atomic.h> #include <asm/current.h> #include <asm/delay.h> #include <asm/machvec.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/sal.h> #include <asm/system.h> #include <asm/tlbflush.h> #include <asm/unistd.h> #include <asm/mca.h> /* * Note: alignment of 4 entries/cacheline was empirically determined * to be a good tradeoff between hot cachelines & spreading the array * across too many cacheline. 
*/ static struct local_tlb_flush_counts { unsigned int count; } __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS]; static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS], shadow_flush_counts); #define IPI_CALL_FUNC 0 #define IPI_CPU_STOP 1 #define IPI_CALL_FUNC_SINGLE 2 #define IPI_KDUMP_CPU_STOP 3 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */ static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_operation); extern void cpu_halt (void); static void stop_this_cpu(void) { /* * Remove this CPU: */ cpu_clear(smp_processor_id(), cpu_online_map); max_xtp(); local_irq_disable(); cpu_halt(); } void cpu_die(void) { max_xtp(); local_irq_disable(); cpu_halt(); /* Should never be here */ BUG(); for (;;); } irqreturn_t handle_IPI (int irq, void *dev_id) { int this_cpu = get_cpu(); unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation); unsigned long ops; mb(); /* Order interrupt and bit testing. */ while ((ops = xchg(pending_ipis, 0)) != 0) { mb(); /* Order bit clearing and data access. */ do { unsigned long which; which = ffz(~ops); ops &= ~(1 << which); switch (which) { case IPI_CPU_STOP: stop_this_cpu(); break; case IPI_CALL_FUNC: generic_smp_call_function_interrupt(); break; case IPI_CALL_FUNC_SINGLE: generic_smp_call_function_single_interrupt(); break; #ifdef CONFIG_KEXEC case IPI_KDUMP_CPU_STOP: unw_init_running(kdump_cpu_freeze, NULL); break; #endif default: printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which); break; } } while (ops); mb(); /* Order data access and bit testing. */ } put_cpu(); return IRQ_HANDLED; } /* * Called with preemption disabled. */ static inline void send_IPI_single (int dest_cpu, int op) { set_bit(op, &per_cpu(ipi_operation, dest_cpu)); platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0); } /* * Called with preemption disabled. 
*/ static inline void send_IPI_allbutself (int op) { unsigned int i; for_each_online_cpu(i) { if (i != smp_processor_id()) send_IPI_single(i, op); } } /* * Called with preemption disabled. */ static inline void send_IPI_mask(const struct cpumask *mask, int op) { unsigned int cpu; for_each_cpu(cpu, mask) { send_IPI_single(cpu, op); } } /* * Called with preemption disabled. */ static inline void send_IPI_all (int op) { int i; for_each_online_cpu(i) { send_IPI_single(i, op); } } /* * Called with preemption disabled. */ static inline void send_IPI_self (int op) { send_IPI_single(smp_processor_id(), op); } #ifdef CONFIG_KEXEC void kdump_smp_send_stop(void) { send_IPI_allbutself(IPI_KDUMP_CPU_STOP); } void kdump_smp_send_init(void) { unsigned int cpu, self_cpu; self_cpu = smp_processor_id(); for_each_online_cpu(cpu) { if (cpu != self_cpu) { if(kdump_status[cpu] == 0) platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0); } } } #endif /* * Called with preemption disabled. */ void smp_send_reschedule (int cpu) { platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0); } EXPORT_SYMBOL_GPL(smp_send_reschedule); /* * Called with preemption disabled. */ static void smp_send_local_flush_tlb (int cpu) { platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0); } void smp_local_flush_tlb(void) { /* * Use atomic ops. Otherwise, the load/increment/store sequence from * a "++" operation can have the line stolen between the load & store. * The overhead of the atomic op in negligible in this case & offers * significant benefit for the brief periods where lots of cpus * are simultaneously flushing TLBs. 
*/ ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq); local_flush_tlb_all(); } #define FLUSH_DELAY 5 /* Usec backoff to eliminate excessive cacheline bouncing */ void smp_flush_tlb_cpumask(cpumask_t xcpumask) { unsigned short *counts = __ia64_per_cpu_var(shadow_flush_counts); cpumask_t cpumask = xcpumask; int mycpu, cpu, flush_mycpu = 0; preempt_disable(); mycpu = smp_processor_id(); for_each_cpu_mask(cpu, cpumask) counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff; mb(); for_each_cpu_mask(cpu, cpumask) { if (cpu == mycpu) flush_mycpu = 1; else smp_send_local_flush_tlb(cpu); } if (flush_mycpu) smp_local_flush_tlb(); for_each_cpu_mask(cpu, cpumask) while(counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff)) udelay(FLUSH_DELAY); preempt_enable(); } void smp_flush_tlb_all (void) { on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1); } void smp_flush_tlb_mm (struct mm_struct *mm) { cpumask_var_t cpus; preempt_disable(); /* this happens for the common case of a single-threaded fork(): */ if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1)) { local_finish_flush_tlb_mm(mm); preempt_enable(); return; } if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) { smp_call_function((void (*)(void *))local_finish_flush_tlb_mm, mm, 1); } else { cpumask_copy(cpus, mm_cpumask(mm)); smp_call_function_many(cpus, (void (*)(void *))local_finish_flush_tlb_mm, mm, 1); free_cpumask_var(cpus); } local_irq_disable(); local_finish_flush_tlb_mm(mm); local_irq_enable(); preempt_enable(); } void arch_send_call_function_single_ipi(int cpu) { send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); } void arch_send_call_function_ipi_mask(const struct cpumask *mask) { send_IPI_mask(mask, IPI_CALL_FUNC); } /* * this function calls the 'stop' function on all other CPUs in the system. */ void smp_send_stop (void) { send_IPI_allbutself(IPI_CPU_STOP); } int setup_profiling_timer (unsigned int multiplier) { return -EINVAL; }
gpl-2.0
XirXes/pyramid-3.4.10
drivers/net/ethernet/micrel/ks8851.c
666
40753
/* drivers/net/ethernet/micrel/ks8851.c * * Copyright 2009 Simtec Electronics * http://www.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DEBUG #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/cache.h> #include <linux/crc32.h> #include <linux/mii.h> #include <linux/eeprom_93cx6.h> #include <linux/spi/spi.h> #include "ks8851.h" /** * struct ks8851_rxctrl - KS8851 driver rx control * @mchash: Multicast hash-table data. * @rxcr1: KS_RXCR1 register setting * @rxcr2: KS_RXCR2 register setting * * Representation of the settings needs to control the receive filtering * such as the multicast hash-filter and the receive register settings. This * is used to make the job of working out if the receive settings change and * then issuing the new settings to the worker that will send the necessary * commands. */ struct ks8851_rxctrl { u16 mchash[4]; u16 rxcr1; u16 rxcr2; }; /** * union ks8851_tx_hdr - tx header data * @txb: The header as bytes * @txw: The header as 16bit, little-endian words * * A dual representation of the tx header data to allow * access to individual bytes, and to allow 16bit accesses * with 16bit alignment. */ union ks8851_tx_hdr { u8 txb[6]; __le16 txw[3]; }; /** * struct ks8851_net - KS8851 driver private data * @netdev: The network device we're bound to * @spidev: The spi device we're bound to. * @lock: Lock to ensure that the device is not accessed when busy. * @statelock: Lock on this structure for tx list. * @mii: The MII state information for the mii calls. * @rxctrl: RX settings for @rxctrl_work. 
* @tx_work: Work queue for tx packets * @irq_work: Work queue for servicing interrupts * @rxctrl_work: Work queue for updating RX mode and multicast lists * @txq: Queue of packets for transmission. * @spi_msg1: pre-setup SPI transfer with one message, @spi_xfer1. * @spi_msg2: pre-setup SPI transfer with two messages, @spi_xfer2. * @txh: Space for generating packet TX header in DMA-able data * @rxd: Space for receiving SPI data, in DMA-able space. * @txd: Space for transmitting SPI data, in DMA-able space. * @msg_enable: The message flags controlling driver output (see ethtool). * @fid: Incrementing frame id tag. * @rc_ier: Cached copy of KS_IER. * @rc_ccr: Cached copy of KS_CCR. * @rc_rxqcr: Cached copy of KS_RXQCR. * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. * * The @lock ensures that the chip is protected when certain operations are * in progress. When the read or write packet transfer is in progress, most * of the chip registers are not ccessible until the transfer is finished and * the DMA has been de-asserted. * * The @statelock is used to protect information in the structure which may * need to be accessed via several sources, such as the network driver layer * or one of the work queues. * * We align the buffers we may use for rx/tx to ensure that if the SPI driver * wants to DMA map them, it will not have any problems with data the driver * modifies. 
*/ struct ks8851_net { struct net_device *netdev; struct spi_device *spidev; struct mutex lock; spinlock_t statelock; union ks8851_tx_hdr txh ____cacheline_aligned; u8 rxd[8]; u8 txd[8]; u32 msg_enable ____cacheline_aligned; u16 tx_space; u8 fid; u16 rc_ier; u16 rc_rxqcr; u16 rc_ccr; u16 eeprom_size; struct mii_if_info mii; struct ks8851_rxctrl rxctrl; struct work_struct tx_work; struct work_struct irq_work; struct work_struct rxctrl_work; struct sk_buff_head txq; struct spi_message spi_msg1; struct spi_message spi_msg2; struct spi_transfer spi_xfer1; struct spi_transfer spi_xfer2[2]; struct eeprom_93cx6 eeprom; }; static int msg_enable; /* shift for byte-enable data */ #define BYTE_EN(_x) ((_x) << 2) /* turn register number and byte-enable mask into data for start of packet */ #define MK_OP(_byteen, _reg) (BYTE_EN(_byteen) | (_reg) << (8+2) | (_reg) >> 6) /* SPI register read/write calls. * * All these calls issue SPI transactions to access the chip's registers. They * all require that the necessary lock is held to prevent accesses when the * chip is busy transferring packet data (RX/TX FIFO accesses). */ /** * ks8851_wrreg16 - write 16bit register value to chip * @ks: The chip state * @reg: The register address * @val: The value to write * * Issue a write to put the value @val into the register specified in @reg. */ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val) { struct spi_transfer *xfer = &ks->spi_xfer1; struct spi_message *msg = &ks->spi_msg1; __le16 txb[2]; int ret; txb[0] = cpu_to_le16(MK_OP(reg & 2 ? 0xC : 0x03, reg) | KS_SPIOP_WR); txb[1] = cpu_to_le16(val); xfer->tx_buf = txb; xfer->rx_buf = NULL; xfer->len = 4; ret = spi_sync(ks->spidev, msg); if (ret < 0) netdev_err(ks->netdev, "spi_sync() failed\n"); } /** * ks8851_wrreg8 - write 8bit register value to chip * @ks: The chip state * @reg: The register address * @val: The value to write * * Issue a write to put the value @val into the register specified in @reg. 
*/ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val) { struct spi_transfer *xfer = &ks->spi_xfer1; struct spi_message *msg = &ks->spi_msg1; __le16 txb[2]; int ret; int bit; bit = 1 << (reg & 3); txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR); txb[1] = val; xfer->tx_buf = txb; xfer->rx_buf = NULL; xfer->len = 3; ret = spi_sync(ks->spidev, msg); if (ret < 0) netdev_err(ks->netdev, "spi_sync() failed\n"); } /** * ks8851_rx_1msg - select whether to use one or two messages for spi read * @ks: The device structure * * Return whether to generate a single message with a tx and rx buffer * supplied to spi_sync(), or alternatively send the tx and rx buffers * as separate messages. * * Depending on the hardware in use, a single message may be more efficient * on interrupts or work done by the driver. * * This currently always returns true until we add some per-device data passed * from the platform code to specify which mode is better. */ static inline bool ks8851_rx_1msg(struct ks8851_net *ks) { return true; } /** * ks8851_rdreg - issue read register command and return the data * @ks: The device state * @op: The register address and byte enables in message format. * @rxb: The RX buffer to return the result into * @rxl: The length of data expected. * * This is the low level read call that issues the necessary spi message(s) * to read data from the register specified in @op. 
*/ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op, u8 *rxb, unsigned rxl) { struct spi_transfer *xfer; struct spi_message *msg; __le16 *txb = (__le16 *)ks->txd; u8 *trx = ks->rxd; int ret; txb[0] = cpu_to_le16(op | KS_SPIOP_RD); if (ks8851_rx_1msg(ks)) { msg = &ks->spi_msg1; xfer = &ks->spi_xfer1; xfer->tx_buf = txb; xfer->rx_buf = trx; xfer->len = rxl + 2; } else { msg = &ks->spi_msg2; xfer = ks->spi_xfer2; xfer->tx_buf = txb; xfer->rx_buf = NULL; xfer->len = 2; xfer++; xfer->tx_buf = NULL; xfer->rx_buf = trx; xfer->len = rxl; } ret = spi_sync(ks->spidev, msg); if (ret < 0) netdev_err(ks->netdev, "read: spi_sync() failed\n"); else if (ks8851_rx_1msg(ks)) memcpy(rxb, trx + 2, rxl); else memcpy(rxb, trx, rxl); } /** * ks8851_rdreg8 - read 8 bit register from device * @ks: The chip information * @reg: The register address * * Read a 8bit register from the chip, returning the result */ static unsigned ks8851_rdreg8(struct ks8851_net *ks, unsigned reg) { u8 rxb[1]; ks8851_rdreg(ks, MK_OP(1 << (reg & 3), reg), rxb, 1); return rxb[0]; } /** * ks8851_rdreg16 - read 16 bit register from device * @ks: The chip information * @reg: The register address * * Read a 16bit register from the chip, returning the result */ static unsigned ks8851_rdreg16(struct ks8851_net *ks, unsigned reg) { __le16 rx = 0; ks8851_rdreg(ks, MK_OP(reg & 2 ? 0xC : 0x3, reg), (u8 *)&rx, 2); return le16_to_cpu(rx); } /** * ks8851_rdreg32 - read 32 bit register from device * @ks: The chip information * @reg: The register address * * Read a 32bit register from the chip. * * Note, this read requires the address be aligned to 4 bytes. */ static unsigned ks8851_rdreg32(struct ks8851_net *ks, unsigned reg) { __le32 rx = 0; WARN_ON(reg & 3); ks8851_rdreg(ks, MK_OP(0xf, reg), (u8 *)&rx, 4); return le32_to_cpu(rx); } /** * ks8851_soft_reset - issue one of the soft reset to the device * @ks: The device state. 
* @op: The bit(s) to set in the GRR * * Issue the relevant soft-reset command to the device's GRR register * specified by @op. * * Note, the delays are in there as a caution to ensure that the reset * has time to take effect and then complete. Since the datasheet does * not currently specify the exact sequence, we have chosen something * that seems to work with our device. */ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op) { ks8851_wrreg16(ks, KS_GRR, op); mdelay(1); /* wait a short time to effect reset */ ks8851_wrreg16(ks, KS_GRR, 0); mdelay(1); /* wait for condition to clear */ } /** * ks8851_set_powermode - set power mode of the device * @ks: The device state * @pwrmode: The power mode value to write to KS_PMECR. * * Change the power mode of the chip. */ static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode) { unsigned pmecr; netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode); pmecr = ks8851_rdreg16(ks, KS_PMECR); pmecr &= ~PMECR_PM_MASK; pmecr |= pwrmode; ks8851_wrreg16(ks, KS_PMECR, pmecr); } /** * ks8851_write_mac_addr - write mac address to device registers * @dev: The network device * * Update the KS8851 MAC address registers from the address in @dev. * * This call assumes that the chip is not running, so there is no need to * shutdown the RXQ process whilst setting this. */ static int ks8851_write_mac_addr(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); int i; mutex_lock(&ks->lock); /* * Wake up chip in case it was powered off when stopped; otherwise, * the first write to the MAC address does not take effect. 
*/ ks8851_set_powermode(ks, PMECR_PM_NORMAL); for (i = 0; i < ETH_ALEN; i++) ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]); if (!netif_running(dev)) ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); mutex_unlock(&ks->lock); return 0; } /** * ks8851_read_mac_addr - read mac address from device registers * @dev: The network device * * Update our copy of the KS8851 MAC address from the registers of @dev. */ static void ks8851_read_mac_addr(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); int i; mutex_lock(&ks->lock); for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = ks8851_rdreg8(ks, KS_MAR(i)); mutex_unlock(&ks->lock); } /** * ks8851_init_mac - initialise the mac address * @ks: The device structure * * Get or create the initial mac address for the device and then set that * into the station address register. If there is an EEPROM present, then * we try that. If no valid mac address is found we use random_ether_addr() * to create a new one. */ static void ks8851_init_mac(struct ks8851_net *ks) { struct net_device *dev = ks->netdev; /* first, try reading what we've got already */ if (ks->rc_ccr & CCR_EEPROM) { ks8851_read_mac_addr(dev); if (is_valid_ether_addr(dev->dev_addr)) return; netdev_err(ks->netdev, "invalid mac address read %pM\n", dev->dev_addr); } eth_hw_addr_random(dev); ks8851_write_mac_addr(dev); } /** * ks8851_irq - device interrupt handler * @irq: Interrupt number passed from the IRQ handler. * @pw: The private word passed to register_irq(), our struct ks8851_net. * * Disable the interrupt from happening again until we've processed the * current status by scheduling ks8851_irq_work(). */ static irqreturn_t ks8851_irq(int irq, void *pw) { struct ks8851_net *ks = pw; disable_irq_nosync(irq); schedule_work(&ks->irq_work); return IRQ_HANDLED; } /** * ks8851_rdfifo - read data from the receive fifo * @ks: The device state. 
* @buff: The buffer address * @len: The length of the data to read * * Issue an RXQ FIFO read command and read the @len amount of data from * the FIFO into the buffer specified by @buff. */ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len) { struct spi_transfer *xfer = ks->spi_xfer2; struct spi_message *msg = &ks->spi_msg2; u8 txb[1]; int ret; netif_dbg(ks, rx_status, ks->netdev, "%s: %d@%p\n", __func__, len, buff); /* set the operation we're issuing */ txb[0] = KS_SPIOP_RXFIFO; xfer->tx_buf = txb; xfer->rx_buf = NULL; xfer->len = 1; xfer++; xfer->rx_buf = buff; xfer->tx_buf = NULL; xfer->len = len; ret = spi_sync(ks->spidev, msg); if (ret < 0) netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__); } /** * ks8851_dbg_dumpkkt - dump initial packet contents to debug * @ks: The device state * @rxpkt: The data for the received packet * * Dump the initial data from the packet to dev_dbg(). */ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt) { netdev_dbg(ks->netdev, "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7], rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11], rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]); } /** * ks8851_rx_pkts - receive packets from the host * @ks: The device information. * * This is called from the IRQ work queue when the system detects that there * are packets in the receive queue. Find out how many packets there are and * read them from the FIFO. */ static void ks8851_rx_pkts(struct ks8851_net *ks) { struct sk_buff *skb; unsigned rxfc; unsigned rxlen; unsigned rxstat; u32 rxh; u8 *rxpkt; rxfc = ks8851_rdreg8(ks, KS_RXFC); netif_dbg(ks, rx_status, ks->netdev, "%s: %d packets\n", __func__, rxfc); /* Currently we're issuing a read per packet, but we could possibly * improve the code by issuing a single read, getting the receive * header, allocating the packet and then reading the packet data * out in one go. 
* * This form of operation would require us to hold the SPI bus' * chipselect low during the entie transaction to avoid any * reset to the data stream coming from the chip. */ for (; rxfc != 0; rxfc--) { rxh = ks8851_rdreg32(ks, KS_RXFHSR); rxstat = rxh & 0xffff; rxlen = rxh >> 16; netif_dbg(ks, rx_status, ks->netdev, "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); /* the length of the packet includes the 32bit CRC */ /* set dma read address */ ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00); /* start the packet dma process, and set auto-dequeue rx */ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); if (rxlen > 4) { unsigned int rxalign; rxlen -= 4; rxalign = ALIGN(rxlen, 4); skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign); if (skb) { /* 4 bytes of status header + 4 bytes of * garbage: we put them before ethernet * header, so that they are copied, * but ignored. */ rxpkt = skb_put(skb, rxlen) - 8; ks8851_rdfifo(ks, rxpkt, rxalign + 8); if (netif_msg_pktdata(ks)) ks8851_dbg_dumpkkt(ks, rxpkt); skb->protocol = eth_type_trans(skb, ks->netdev); netif_rx_ni(skb); ks->netdev->stats.rx_packets++; ks->netdev->stats.rx_bytes += rxlen; } } ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); } } /** * ks8851_irq_work - work queue handler for dealing with interrupt requests * @work: The work structure that was scheduled by schedule_work() * * This is the handler invoked when the ks8851_irq() is called to find out * what happened, as we cannot allow ourselves to sleep whilst waiting for * anything other process has the chip's lock. * * Read the interrupt status, work out what needs to be done and then clear * any of the interrupts that are not needed. 
*/ static void ks8851_irq_work(struct work_struct *work) { struct ks8851_net *ks = container_of(work, struct ks8851_net, irq_work); unsigned status; unsigned handled = 0; mutex_lock(&ks->lock); status = ks8851_rdreg16(ks, KS_ISR); netif_dbg(ks, intr, ks->netdev, "%s: status 0x%04x\n", __func__, status); if (status & IRQ_LCI) handled |= IRQ_LCI; if (status & IRQ_LDI) { u16 pmecr = ks8851_rdreg16(ks, KS_PMECR); pmecr &= ~PMECR_WKEVT_MASK; ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK); handled |= IRQ_LDI; } if (status & IRQ_RXPSI) handled |= IRQ_RXPSI; if (status & IRQ_TXI) { handled |= IRQ_TXI; /* no lock here, tx queue should have been stopped */ /* update our idea of how much tx space is available to the * system */ ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); netif_dbg(ks, intr, ks->netdev, "%s: txspace %d\n", __func__, ks->tx_space); } if (status & IRQ_RXI) handled |= IRQ_RXI; if (status & IRQ_SPIBEI) { dev_err(&ks->spidev->dev, "%s: spi bus error\n", __func__); handled |= IRQ_SPIBEI; } ks8851_wrreg16(ks, KS_ISR, handled); if (status & IRQ_RXI) { /* the datasheet says to disable the rx interrupt during * packet read-out, however we're masking the interrupt * from the device so do not bother masking just the RX * from the device. */ ks8851_rx_pkts(ks); } /* if something stopped the rx process, probably due to wanting * to change the rx settings, then do something about restarting * it. 
*/ if (status & IRQ_RXPSI) { struct ks8851_rxctrl *rxc = &ks->rxctrl; /* update the multicast hash table */ ks8851_wrreg16(ks, KS_MAHTR0, rxc->mchash[0]); ks8851_wrreg16(ks, KS_MAHTR1, rxc->mchash[1]); ks8851_wrreg16(ks, KS_MAHTR2, rxc->mchash[2]); ks8851_wrreg16(ks, KS_MAHTR3, rxc->mchash[3]); ks8851_wrreg16(ks, KS_RXCR2, rxc->rxcr2); ks8851_wrreg16(ks, KS_RXCR1, rxc->rxcr1); } mutex_unlock(&ks->lock); if (status & IRQ_LCI) mii_check_link(&ks->mii); if (status & IRQ_TXI) netif_wake_queue(ks->netdev); enable_irq(ks->netdev->irq); } /** * calc_txlen - calculate size of message to send packet * @len: Length of data * * Returns the size of the TXFIFO message needed to send * this packet. */ static inline unsigned calc_txlen(unsigned len) { return ALIGN(len + 4, 4); } /** * ks8851_wrpkt - write packet to TX FIFO * @ks: The device state. * @txp: The sk_buff to transmit. * @irq: IRQ on completion of the packet. * * Send the @txp to the chip. This means creating the relevant packet header * specifying the length of the packet and the other information the chip * needs, such as IRQ on completion. Send the header and the packet data to * the device. 
*/ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq) { struct spi_transfer *xfer = ks->spi_xfer2; struct spi_message *msg = &ks->spi_msg2; unsigned fid = 0; int ret; netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n", __func__, txp, txp->len, txp->data, irq); fid = ks->fid++; fid &= TXFR_TXFID_MASK; if (irq) fid |= TXFR_TXIC; /* irq on completion */ /* start header at txb[1] to align txw entries */ ks->txh.txb[1] = KS_SPIOP_TXFIFO; ks->txh.txw[1] = cpu_to_le16(fid); ks->txh.txw[2] = cpu_to_le16(txp->len); xfer->tx_buf = &ks->txh.txb[1]; xfer->rx_buf = NULL; xfer->len = 5; xfer++; xfer->tx_buf = txp->data; xfer->rx_buf = NULL; xfer->len = ALIGN(txp->len, 4); ret = spi_sync(ks->spidev, msg); if (ret < 0) netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__); } /** * ks8851_done_tx - update and then free skbuff after transmitting * @ks: The device state * @txb: The buffer transmitted */ static void ks8851_done_tx(struct ks8851_net *ks, struct sk_buff *txb) { struct net_device *dev = ks->netdev; dev->stats.tx_bytes += txb->len; dev->stats.tx_packets++; dev_kfree_skb(txb); } /** * ks8851_tx_work - process tx packet(s) * @work: The work strucutre what was scheduled. * * This is called when a number of packets have been scheduled for * transmission and need to be sent to the device. */ static void ks8851_tx_work(struct work_struct *work) { struct ks8851_net *ks = container_of(work, struct ks8851_net, tx_work); struct sk_buff *txb; bool last = skb_queue_empty(&ks->txq); mutex_lock(&ks->lock); while (!last) { txb = skb_dequeue(&ks->txq); last = skb_queue_empty(&ks->txq); if (txb != NULL) { ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); ks8851_wrpkt(ks, txb, last); ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE); ks8851_done_tx(ks, txb); } } mutex_unlock(&ks->lock); } /** * ks8851_net_open - open network device * @dev: The network device being opened. 
* * Called when the network device is marked active, such as a user executing * 'ifconfig up' on the device. */ static int ks8851_net_open(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); /* lock the card, even if we may not actually be doing anything * else at the moment */ mutex_lock(&ks->lock); netif_dbg(ks, ifup, ks->netdev, "opening\n"); /* bring chip out of any power saving mode it was in */ ks8851_set_powermode(ks, PMECR_PM_NORMAL); /* issue a soft reset to the RX/TX QMU to put it into a known * state. */ ks8851_soft_reset(ks, GRR_QMU); /* setup transmission parameters */ ks8851_wrreg16(ks, KS_TXCR, (TXCR_TXE | /* enable transmit process */ TXCR_TXPE | /* pad to min length */ TXCR_TXCRC | /* add CRC */ TXCR_TXFCE)); /* enable flow control */ /* auto-increment tx data, reset tx pointer */ ks8851_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI); /* setup receiver control */ ks8851_wrreg16(ks, KS_RXCR1, (RXCR1_RXPAFMA | /* from mac filter */ RXCR1_RXFCE | /* enable flow control */ RXCR1_RXBE | /* broadcast enable */ RXCR1_RXUE | /* unicast enable */ RXCR1_RXE)); /* enable rx block */ /* transfer entire frames out in one go */ ks8851_wrreg16(ks, KS_RXCR2, RXCR2_SRDBL_FRAME); /* set receive counter timeouts */ ks8851_wrreg16(ks, KS_RXDTTR, 1000); /* 1ms after first frame to IRQ */ ks8851_wrreg16(ks, KS_RXDBCTR, 4096); /* >4Kbytes in buffer to IRQ */ ks8851_wrreg16(ks, KS_RXFCTR, 10); /* 10 frames to IRQ */ ks->rc_rxqcr = (RXQCR_RXFCTE | /* IRQ on frame count exceeded */ RXQCR_RXDBCTE | /* IRQ on byte count exceeded */ RXQCR_RXDTTE); /* IRQ on time exceeded */ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); /* clear then enable interrupts */ #define STD_IRQ (IRQ_LCI | /* Link Change */ \ IRQ_TXI | /* TX done */ \ IRQ_RXI | /* RX done */ \ IRQ_SPIBEI | /* SPI bus error */ \ IRQ_TXPSI | /* TX process stop */ \ IRQ_RXPSI) /* RX process stop */ ks->rc_ier = STD_IRQ; ks8851_wrreg16(ks, KS_ISR, STD_IRQ); ks8851_wrreg16(ks, KS_IER, STD_IRQ); 
netif_start_queue(ks->netdev); netif_dbg(ks, ifup, ks->netdev, "network device up\n"); mutex_unlock(&ks->lock); return 0; } /** * ks8851_net_stop - close network device * @dev: The device being closed. * * Called to close down a network device which has been active. Cancell any * work, shutdown the RX and TX process and then place the chip into a low * power state whilst it is not being used. */ static int ks8851_net_stop(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); netif_info(ks, ifdown, dev, "shutting down\n"); netif_stop_queue(dev); mutex_lock(&ks->lock); /* turn off the IRQs and ack any outstanding */ ks8851_wrreg16(ks, KS_IER, 0x0000); ks8851_wrreg16(ks, KS_ISR, 0xffff); mutex_unlock(&ks->lock); /* stop any outstanding work */ flush_work(&ks->irq_work); flush_work(&ks->tx_work); flush_work(&ks->rxctrl_work); mutex_lock(&ks->lock); /* shutdown RX process */ ks8851_wrreg16(ks, KS_RXCR1, 0x0000); /* shutdown TX process */ ks8851_wrreg16(ks, KS_TXCR, 0x0000); /* set powermode to soft power down to save power */ ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); mutex_unlock(&ks->lock); /* ensure any queued tx buffers are dumped */ while (!skb_queue_empty(&ks->txq)) { struct sk_buff *txb = skb_dequeue(&ks->txq); netif_dbg(ks, ifdown, ks->netdev, "%s: freeing txb %p\n", __func__, txb); dev_kfree_skb(txb); } return 0; } /** * ks8851_start_xmit - transmit packet * @skb: The buffer to transmit * @dev: The device used to transmit the packet. * * Called by the network layer to transmit the @skb. Queue the packet for * the device and schedule the necessary work to transmit the packet when * it is free. * * We do this to firstly avoid sleeping with the network device locked, * and secondly so we can round up more than one packet to transmit which * means we can try and avoid generating too many transmit done interrupts. 
*/ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); unsigned needed = calc_txlen(skb->len); netdev_tx_t ret = NETDEV_TX_OK; netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data); spin_lock(&ks->statelock); if (needed > ks->tx_space) { netif_stop_queue(dev); ret = NETDEV_TX_BUSY; } else { ks->tx_space -= needed; skb_queue_tail(&ks->txq, skb); } spin_unlock(&ks->statelock); schedule_work(&ks->tx_work); return ret; } /** * ks8851_rxctrl_work - work handler to change rx mode * @work: The work structure this belongs to. * * Lock the device and issue the necessary changes to the receive mode from * the network device layer. This is done so that we can do this without * having to sleep whilst holding the network device lock. * * Since the recommendation from Micrel is that the RXQ is shutdown whilst the * receive parameters are programmed, we issue a write to disable the RXQ and * then wait for the interrupt handler to be triggered once the RXQ shutdown is * complete. The interrupt handler then writes the new values into the chip. 
*/ static void ks8851_rxctrl_work(struct work_struct *work) { struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work); mutex_lock(&ks->lock); /* need to shutdown RXQ before modifying filter parameters */ ks8851_wrreg16(ks, KS_RXCR1, 0x00); mutex_unlock(&ks->lock); } static void ks8851_set_rx_mode(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); struct ks8851_rxctrl rxctrl; memset(&rxctrl, 0, sizeof(rxctrl)); if (dev->flags & IFF_PROMISC) { /* interface to receive everything */ rxctrl.rxcr1 = RXCR1_RXAE | RXCR1_RXINVF; } else if (dev->flags & IFF_ALLMULTI) { /* accept all multicast packets */ rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA | RXCR1_RXMAFMA); } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) { struct netdev_hw_addr *ha; u32 crc; /* accept some multicast */ netdev_for_each_mc_addr(ha, dev) { crc = ether_crc(ETH_ALEN, ha->addr); crc >>= (32 - 6); /* get top six bits */ rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf)); } rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA; } else { /* just accept broadcast / unicast */ rxctrl.rxcr1 = RXCR1_RXPAFMA; } rxctrl.rxcr1 |= (RXCR1_RXUE | /* unicast enable */ RXCR1_RXBE | /* broadcast enable */ RXCR1_RXE | /* RX process enable */ RXCR1_RXFCE); /* enable flow control */ rxctrl.rxcr2 |= RXCR2_SRDBL_FRAME; /* schedule work to do the actual set of the data if needed */ spin_lock(&ks->statelock); if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) { memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl)); schedule_work(&ks->rxctrl_work); } spin_unlock(&ks->statelock); } static int ks8851_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sa = addr; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; dev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); return ks8851_write_mac_addr(dev); } static int ks8851_net_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { 
struct ks8851_net *ks = netdev_priv(dev); if (!netif_running(dev)) return -EINVAL; return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL); } static const struct net_device_ops ks8851_netdev_ops = { .ndo_open = ks8851_net_open, .ndo_stop = ks8851_net_stop, .ndo_do_ioctl = ks8851_net_ioctl, .ndo_start_xmit = ks8851_start_xmit, .ndo_set_mac_address = ks8851_set_mac_address, .ndo_set_rx_mode = ks8851_set_rx_mode, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; /* ethtool support */ static void ks8851_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *di) { strlcpy(di->driver, "KS8851", sizeof(di->driver)); strlcpy(di->version, "1.00", sizeof(di->version)); strlcpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info)); } static u32 ks8851_get_msglevel(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); return ks->msg_enable; } static void ks8851_set_msglevel(struct net_device *dev, u32 to) { struct ks8851_net *ks = netdev_priv(dev); ks->msg_enable = to; } static int ks8851_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct ks8851_net *ks = netdev_priv(dev); return mii_ethtool_gset(&ks->mii, cmd); } static int ks8851_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct ks8851_net *ks = netdev_priv(dev); return mii_ethtool_sset(&ks->mii, cmd); } static u32 ks8851_get_link(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); return mii_link_ok(&ks->mii); } static int ks8851_nway_reset(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); return mii_nway_restart(&ks->mii); } /* EEPROM support */ static void ks8851_eeprom_regread(struct eeprom_93cx6 *ee) { struct ks8851_net *ks = ee->data; unsigned val; val = ks8851_rdreg16(ks, KS_EEPCR); ee->reg_data_out = (val & EEPCR_EESB) ? 1 : 0; ee->reg_data_clock = (val & EEPCR_EESCK) ? 1 : 0; ee->reg_chip_select = (val & EEPCR_EECS) ? 
1 : 0; } static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee) { struct ks8851_net *ks = ee->data; unsigned val = EEPCR_EESA; /* default - eeprom access on */ if (ee->drive_data) val |= EEPCR_EESRWA; if (ee->reg_data_in) val |= EEPCR_EEDO; if (ee->reg_data_clock) val |= EEPCR_EESCK; if (ee->reg_chip_select) val |= EEPCR_EECS; ks8851_wrreg16(ks, KS_EEPCR, val); } /** * ks8851_eeprom_claim - claim device EEPROM and activate the interface * @ks: The network device state. * * Check for the presence of an EEPROM, and then activate software access * to the device. */ static int ks8851_eeprom_claim(struct ks8851_net *ks) { if (!(ks->rc_ccr & CCR_EEPROM)) return -ENOENT; mutex_lock(&ks->lock); /* start with clock low, cs high */ ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS); return 0; } /** * ks8851_eeprom_release - release the EEPROM interface * @ks: The device state * * Release the software access to the device EEPROM */ static void ks8851_eeprom_release(struct ks8851_net *ks) { unsigned val = ks8851_rdreg16(ks, KS_EEPCR); ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA); mutex_unlock(&ks->lock); } #define KS_EEPROM_MAGIC (0x00008851) static int ks8851_set_eeprom(struct net_device *dev, struct ethtool_eeprom *ee, u8 *data) { struct ks8851_net *ks = netdev_priv(dev); int offset = ee->offset; int len = ee->len; u16 tmp; /* currently only support byte writing */ if (len != 1) return -EINVAL; if (ee->magic != KS_EEPROM_MAGIC) return -EINVAL; if (ks8851_eeprom_claim(ks)) return -ENOENT; eeprom_93cx6_wren(&ks->eeprom, true); /* ethtool currently only supports writing bytes, which means * we have to read/modify/write our 16bit EEPROMs */ eeprom_93cx6_read(&ks->eeprom, offset/2, &tmp); if (offset & 1) { tmp &= 0xff; tmp |= *data << 8; } else { tmp &= 0xff00; tmp |= *data; } eeprom_93cx6_write(&ks->eeprom, offset/2, tmp); eeprom_93cx6_wren(&ks->eeprom, false); ks8851_eeprom_release(ks); return 0; } static int ks8851_get_eeprom(struct net_device *dev, struct 
ethtool_eeprom *ee, u8 *data) { struct ks8851_net *ks = netdev_priv(dev); int offset = ee->offset; int len = ee->len; /* must be 2 byte aligned */ if (len & 1 || offset & 1) return -EINVAL; if (ks8851_eeprom_claim(ks)) return -ENOENT; ee->magic = KS_EEPROM_MAGIC; eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2); ks8851_eeprom_release(ks); return 0; } static int ks8851_get_eeprom_len(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); /* currently, we assume it is an 93C46 attached, so return 128 */ return ks->rc_ccr & CCR_EEPROM ? 128 : 0; } static const struct ethtool_ops ks8851_ethtool_ops = { .get_drvinfo = ks8851_get_drvinfo, .get_msglevel = ks8851_get_msglevel, .set_msglevel = ks8851_set_msglevel, .get_settings = ks8851_get_settings, .set_settings = ks8851_set_settings, .get_link = ks8851_get_link, .nway_reset = ks8851_nway_reset, .get_eeprom_len = ks8851_get_eeprom_len, .get_eeprom = ks8851_get_eeprom, .set_eeprom = ks8851_set_eeprom, }; /* MII interface controls */ /** * ks8851_phy_reg - convert MII register into a KS8851 register * @reg: MII register number. * * Return the KS8851 register number for the corresponding MII PHY register * if possible. Return zero if the MII register has no direct mapping to the * KS8851 register set. */ static int ks8851_phy_reg(int reg) { switch (reg) { case MII_BMCR: return KS_P1MBCR; case MII_BMSR: return KS_P1MBSR; case MII_PHYSID1: return KS_PHY1ILR; case MII_PHYSID2: return KS_PHY1IHR; case MII_ADVERTISE: return KS_P1ANAR; case MII_LPA: return KS_P1ANLPR; } return 0x0; } /** * ks8851_phy_read - MII interface PHY register read. * @dev: The network device the PHY is on. * @phy_addr: Address of PHY (ignored as we only have one) * @reg: The register to read. * * This call reads data from the PHY register specified in @reg. Since the * device does not support all the MII registers, the non-existent values * are always returned as zero. 
* * We return zero for unsupported registers as the MII code does not check * the value returned for any error status, and simply returns it to the * caller. The mii-tool that the driver was tested with takes any -ve error * as real PHY capabilities, thus displaying incorrect data to the user. */ static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg) { struct ks8851_net *ks = netdev_priv(dev); int ksreg; int result; ksreg = ks8851_phy_reg(reg); if (!ksreg) return 0x0; /* no error return allowed, so use zero */ mutex_lock(&ks->lock); result = ks8851_rdreg16(ks, ksreg); mutex_unlock(&ks->lock); return result; } static void ks8851_phy_write(struct net_device *dev, int phy, int reg, int value) { struct ks8851_net *ks = netdev_priv(dev); int ksreg; ksreg = ks8851_phy_reg(reg); if (ksreg) { mutex_lock(&ks->lock); ks8851_wrreg16(ks, ksreg, value); mutex_unlock(&ks->lock); } } /** * ks8851_read_selftest - read the selftest memory info. * @ks: The device state * * Read and check the TX/RX memory selftest information. 
*/ static int ks8851_read_selftest(struct ks8851_net *ks) { unsigned both_done = MBIR_TXMBF | MBIR_RXMBF; int ret = 0; unsigned rd; rd = ks8851_rdreg16(ks, KS_MBIR); if ((rd & both_done) != both_done) { netdev_warn(ks->netdev, "Memory selftest not finished\n"); return 0; } if (rd & MBIR_TXMBFA) { netdev_err(ks->netdev, "TX memory selftest fail\n"); ret |= 1; } if (rd & MBIR_RXMBFA) { netdev_err(ks->netdev, "RX memory selftest fail\n"); ret |= 2; } return 0; } /* driver bus management functions */ #ifdef CONFIG_PM static int ks8851_suspend(struct spi_device *spi, pm_message_t state) { struct ks8851_net *ks = dev_get_drvdata(&spi->dev); struct net_device *dev = ks->netdev; if (netif_running(dev)) { netif_device_detach(dev); ks8851_net_stop(dev); } return 0; } static int ks8851_resume(struct spi_device *spi) { struct ks8851_net *ks = dev_get_drvdata(&spi->dev); struct net_device *dev = ks->netdev; if (netif_running(dev)) { ks8851_net_open(dev); netif_device_attach(dev); } return 0; } #else #define ks8851_suspend NULL #define ks8851_resume NULL #endif static int __devinit ks8851_probe(struct spi_device *spi) { struct net_device *ndev; struct ks8851_net *ks; int ret; unsigned cider; ndev = alloc_etherdev(sizeof(struct ks8851_net)); if (!ndev) return -ENOMEM; spi->bits_per_word = 8; ks = netdev_priv(ndev); ks->netdev = ndev; ks->spidev = spi; ks->tx_space = 6144; mutex_init(&ks->lock); spin_lock_init(&ks->statelock); INIT_WORK(&ks->tx_work, ks8851_tx_work); INIT_WORK(&ks->irq_work, ks8851_irq_work); INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work); /* initialise pre-made spi transfer messages */ spi_message_init(&ks->spi_msg1); spi_message_add_tail(&ks->spi_xfer1, &ks->spi_msg1); spi_message_init(&ks->spi_msg2); spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2); spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2); /* setup EEPROM state */ ks->eeprom.data = ks; ks->eeprom.width = PCI_EEPROM_WIDTH_93C46; ks->eeprom.register_read = ks8851_eeprom_regread; 
ks->eeprom.register_write = ks8851_eeprom_regwrite; /* setup mii state */ ks->mii.dev = ndev; ks->mii.phy_id = 1, ks->mii.phy_id_mask = 1; ks->mii.reg_num_mask = 0xf; ks->mii.mdio_read = ks8851_phy_read; ks->mii.mdio_write = ks8851_phy_write; dev_info(&spi->dev, "message enable is %d\n", msg_enable); /* set the default message enable */ ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)); skb_queue_head_init(&ks->txq); SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops); SET_NETDEV_DEV(ndev, &spi->dev); dev_set_drvdata(&spi->dev, ks); ndev->if_port = IF_PORT_100BASET; ndev->netdev_ops = &ks8851_netdev_ops; ndev->irq = spi->irq; /* issue a global soft reset to reset the device. */ ks8851_soft_reset(ks, GRR_GSR); /* simple check for a valid chip being connected to the bus */ cider = ks8851_rdreg16(ks, KS_CIDER); if ((cider & ~CIDER_REV_MASK) != CIDER_ID) { dev_err(&spi->dev, "failed to read device ID\n"); ret = -ENODEV; goto err_id; } /* cache the contents of the CCR register for EEPROM, etc. */ ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR); if (ks->rc_ccr & CCR_EEPROM) ks->eeprom_size = 128; else ks->eeprom_size = 0; ks8851_read_selftest(ks); ks8851_init_mac(ks); ret = request_irq(spi->irq, ks8851_irq, IRQF_TRIGGER_LOW, ndev->name, ks); if (ret < 0) { dev_err(&spi->dev, "failed to get irq\n"); goto err_irq; } ret = register_netdev(ndev); if (ret) { dev_err(&spi->dev, "failed to register network device\n"); goto err_netdev; } netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n", CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq, ks->rc_ccr & CCR_EEPROM ? 
"has" : "no"); return 0; err_netdev: free_irq(ndev->irq, ks); err_id: err_irq: free_netdev(ndev); return ret; } static int __devexit ks8851_remove(struct spi_device *spi) { struct ks8851_net *priv = dev_get_drvdata(&spi->dev); if (netif_msg_drv(priv)) dev_info(&spi->dev, "remove\n"); unregister_netdev(priv->netdev); free_irq(spi->irq, priv); free_netdev(priv->netdev); return 0; } static struct spi_driver ks8851_driver = { .driver = { .name = "ks8851", .owner = THIS_MODULE, }, .probe = ks8851_probe, .remove = __devexit_p(ks8851_remove), .suspend = ks8851_suspend, .resume = ks8851_resume, }; static int __init ks8851_init(void) { return spi_register_driver(&ks8851_driver); } static void __exit ks8851_exit(void) { spi_unregister_driver(&ks8851_driver); } module_init(ks8851_init); module_exit(ks8851_exit); MODULE_DESCRIPTION("KS8851 Network driver"); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_LICENSE("GPL"); module_param_named(message, msg_enable, int, 0); MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)"); MODULE_ALIAS("spi:ks8851");
gpl-2.0
lamassu/kernel
drivers/hid/usbhid/hid-quirks.c
666
11425
/* * USB HID quirks support for Linux * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2007 Paul Walmsley */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/hid.h> #include <linux/slab.h> #include "../hid-ids.h" /* * Alphabetically sorted blacklist by quirk type. */ static const struct hid_blacklist { __u16 idVendor; __u16 idProduct; __u32 quirks; } hid_blacklist[] = { { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD, HID_QUIRK_BADPAD }, { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD }, { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD }, { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD }, { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET }, { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_NATSU, USB_DEVICE_ID_NATSU_GAMEPAD, HID_QUIRK_BADPAD }, { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD }, { USB_VENDOR_ID_NEXTWINDOW, USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN, HID_QUIRK_MULTI_INPUT}, { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD }, { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD }, { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL }, { 
USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET }, { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, 
HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE }, { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS }, { 0, 0 } }; /* Dynamic HID quirks list - specified at runtime */ struct quirks_list_struct { struct hid_blacklist hid_bl_item; struct list_head node; }; static LIST_HEAD(dquirks_list); static DECLARE_RWSEM(dquirks_rwsem); /* Runtime ("dynamic") quirks manipulation functions */ /** * usbhid_exists_dquirk: find any dynamic quirks for a USB HID device * @idVendor: the 16-bit USB vendor ID, in native byteorder * @idProduct: the 16-bit USB product ID, in native byteorder * * Description: * Scans dquirks_list for a matching dynamic 
quirk and returns * the pointer to the relevant struct hid_blacklist if found. * Must be called with a read lock held on dquirks_rwsem. * * Returns: NULL if no quirk found, struct hid_blacklist * if found. */ static struct hid_blacklist *usbhid_exists_dquirk(const u16 idVendor, const u16 idProduct) { struct quirks_list_struct *q; struct hid_blacklist *bl_entry = NULL; list_for_each_entry(q, &dquirks_list, node) { if (q->hid_bl_item.idVendor == idVendor && q->hid_bl_item.idProduct == idProduct) { bl_entry = &q->hid_bl_item; break; } } if (bl_entry != NULL) dbg_hid("Found dynamic quirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n", bl_entry->quirks, bl_entry->idVendor, bl_entry->idProduct); return bl_entry; } /** * usbhid_modify_dquirk: add/replace a HID quirk * @idVendor: the 16-bit USB vendor ID, in native byteorder * @idProduct: the 16-bit USB product ID, in native byteorder * @quirks: the u32 quirks value to add/replace * * Description: * If an dynamic quirk exists in memory for this (idVendor, * idProduct) pair, replace its quirks value with what was * provided. Otherwise, add the quirk to the dynamic quirks list. * * Returns: 0 OK, -error on failure. 
*/ static int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct, const u32 quirks) { struct quirks_list_struct *q_new, *q; int list_edited = 0; if (!idVendor) { dbg_hid("Cannot add a quirk with idVendor = 0\n"); return -EINVAL; } q_new = kmalloc(sizeof(struct quirks_list_struct), GFP_KERNEL); if (!q_new) { dbg_hid("Could not allocate quirks_list_struct\n"); return -ENOMEM; } q_new->hid_bl_item.idVendor = idVendor; q_new->hid_bl_item.idProduct = idProduct; q_new->hid_bl_item.quirks = quirks; down_write(&dquirks_rwsem); list_for_each_entry(q, &dquirks_list, node) { if (q->hid_bl_item.idVendor == idVendor && q->hid_bl_item.idProduct == idProduct) { list_replace(&q->node, &q_new->node); kfree(q); list_edited = 1; break; } } if (!list_edited) list_add_tail(&q_new->node, &dquirks_list); up_write(&dquirks_rwsem); return 0; } /** * usbhid_remove_all_dquirks: remove all runtime HID quirks from memory * * Description: * Free all memory associated with dynamic quirks - called before * module unload. * */ static void usbhid_remove_all_dquirks(void) { struct quirks_list_struct *q, *temp; down_write(&dquirks_rwsem); list_for_each_entry_safe(q, temp, &dquirks_list, node) { list_del(&q->node); kfree(q); } up_write(&dquirks_rwsem); } /** * usbhid_quirks_init: apply USB HID quirks specified at module load time */ int usbhid_quirks_init(char **quirks_param) { u16 idVendor, idProduct; u32 quirks; int n = 0, m; for (; n < MAX_USBHID_BOOT_QUIRKS && quirks_param[n]; n++) { m = sscanf(quirks_param[n], "0x%hx:0x%hx:0x%x", &idVendor, &idProduct, &quirks); if (m != 3 || usbhid_modify_dquirk(idVendor, idProduct, quirks) != 0) { printk(KERN_WARNING "Could not parse HID quirk module param %s\n", quirks_param[n]); } } return 0; } /** * usbhid_quirks_exit: release memory associated with dynamic_quirks * * Description: * Release all memory associated with dynamic quirks. Called upon * module unload. 
* * Returns: nothing */ void usbhid_quirks_exit(void) { usbhid_remove_all_dquirks(); } /** * usbhid_exists_squirk: return any static quirks for a USB HID device * @idVendor: the 16-bit USB vendor ID, in native byteorder * @idProduct: the 16-bit USB product ID, in native byteorder * * Description: * Given a USB vendor ID and product ID, return a pointer to * the hid_blacklist entry associated with that device. * * Returns: pointer if quirk found, or NULL if no quirks found. */ static const struct hid_blacklist *usbhid_exists_squirk(const u16 idVendor, const u16 idProduct) { const struct hid_blacklist *bl_entry = NULL; int n = 0; for (; hid_blacklist[n].idVendor; n++) if (hid_blacklist[n].idVendor == idVendor && hid_blacklist[n].idProduct == idProduct) bl_entry = &hid_blacklist[n]; if (bl_entry != NULL) dbg_hid("Found squirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n", bl_entry->quirks, bl_entry->idVendor, bl_entry->idProduct); return bl_entry; } /** * usbhid_lookup_quirk: return any quirks associated with a USB HID device * @idVendor: the 16-bit USB vendor ID, in native byteorder * @idProduct: the 16-bit USB product ID, in native byteorder * * Description: * Given a USB vendor ID and product ID, return any quirks associated * with that device. * * Returns: a u32 quirks value. */ u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct) { u32 quirks = 0; const struct hid_blacklist *bl_entry = NULL; /* NCR devices must not be queried for reports */ if (idVendor == USB_VENDOR_ID_NCR && idProduct >= USB_DEVICE_ID_NCR_FIRST && idProduct <= USB_DEVICE_ID_NCR_LAST) return HID_QUIRK_NO_INIT_REPORTS; down_read(&dquirks_rwsem); bl_entry = usbhid_exists_dquirk(idVendor, idProduct); if (!bl_entry) bl_entry = usbhid_exists_squirk(idVendor, idProduct); if (bl_entry) quirks = bl_entry->quirks; up_read(&dquirks_rwsem); return quirks; } EXPORT_SYMBOL_GPL(usbhid_lookup_quirk);
gpl-2.0
androidbftab1/bf-kernel
arch/mips/pmcs-msp71xx/msp_irq_cic.c
922
5018
/* * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c * * This file define the irq handler for MSP CIC subsystem interrupts. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/irq.h> #include <asm/mipsregs.h> #include <msp_cic_int.h> #include <msp_regs.h> /* * External API */ extern void msp_per_irq_init(void); extern void msp_per_irq_dispatch(void); /* * Convenience Macro. Should be somewhere generic. */ #define get_current_vpe() \ ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE) #ifdef CONFIG_SMP #define LOCK_VPE(flags, mtflags) \ do { \ local_irq_save(flags); \ mtflags = dmt(); \ } while (0) #define UNLOCK_VPE(flags, mtflags) \ do { \ emt(mtflags); \ local_irq_restore(flags);\ } while (0) #define LOCK_CORE(flags, mtflags) \ do { \ local_irq_save(flags); \ mtflags = dvpe(); \ } while (0) #define UNLOCK_CORE(flags, mtflags) \ do { \ evpe(mtflags); \ local_irq_restore(flags);\ } while (0) #else #define LOCK_VPE(flags, mtflags) #define UNLOCK_VPE(flags, mtflags) #endif /* ensure writes to cic are completed */ static inline void cic_wmb(void) { const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG; volatile u32 dummy_read; wmb(); dummy_read = __raw_readl(cic_mem); dummy_read++; } static void unmask_cic_irq(struct irq_data *d) { volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG; int vpe; #ifdef CONFIG_SMP unsigned int mtflags; unsigned long flags; /* * Make sure we have IRQ affinity. It may have changed while * we were processing the IRQ. 
*/ if (!cpumask_test_cpu(smp_processor_id(), d->affinity)) return; #endif vpe = get_current_vpe(); LOCK_VPE(flags, mtflags); cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE)); UNLOCK_VPE(flags, mtflags); cic_wmb(); } static void mask_cic_irq(struct irq_data *d) { volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG; int vpe = get_current_vpe(); #ifdef CONFIG_SMP unsigned long flags, mtflags; #endif LOCK_VPE(flags, mtflags); cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE)); UNLOCK_VPE(flags, mtflags); cic_wmb(); } static void msp_cic_irq_ack(struct irq_data *d) { mask_cic_irq(d); /* * Only really necessary for 18, 16-14 and sometimes 3:0 * (since these can be edge sensitive) but it doesn't * hurt for the others */ *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE)); } /* Note: Limiting to VSMP. */ #ifdef CONFIG_MIPS_MT_SMP static int msp_cic_irq_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) { int cpu; unsigned long flags; unsigned int mtflags; unsigned long imask = (1 << (d->irq - MSP_CIC_INTBASE)); volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG; /* timer balancing should be disabled in kernel code */ BUG_ON(d->irq == MSP_INT_VPE0_TIMER || d->irq == MSP_INT_VPE1_TIMER); LOCK_CORE(flags, mtflags); /* enable if any of each VPE's TCs require this IRQ */ for_each_online_cpu(cpu) { if (cpumask_test_cpu(cpu, cpumask)) cic_mask[cpu] |= imask; else cic_mask[cpu] &= ~imask; } UNLOCK_CORE(flags, mtflags); return 0; } #endif static struct irq_chip msp_cic_irq_controller = { .name = "MSP_CIC", .irq_mask = mask_cic_irq, .irq_mask_ack = msp_cic_irq_ack, .irq_unmask = unmask_cic_irq, .irq_ack = msp_cic_irq_ack, #ifdef CONFIG_MIPS_MT_SMP .irq_set_affinity = msp_cic_irq_set_affinity, #endif }; void __init msp_cic_irq_init(void) { int i; /* Mask/clear interrupts. */ *CIC_VPE0_MSK_REG = 0x00000000; *CIC_VPE1_MSK_REG = 0x00000000; *CIC_STS_REG = 0xFFFFFFFF; /* * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI. 
* These inputs map to EXT_INT_POL[6:4] inside the CIC. * They are to be active low, level sensitive. */ *CIC_EXT_CFG_REG &= 0xFFFF8F8F; /* initialize all the IRQ descriptors */ for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) { irq_set_chip_and_handler(i, &msp_cic_irq_controller, handle_level_irq); } /* Initialize the PER interrupt sub-system */ msp_per_irq_init(); } /* CIC masked by CIC vector processing before dispatch called */ void msp_cic_irq_dispatch(void) { volatile u32 *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG; u32 cic_mask; u32 pending; int cic_status = *CIC_STS_REG; cic_mask = cic_msk_reg[get_current_vpe()]; pending = cic_status & cic_mask; if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) { do_IRQ(MSP_INT_VPE0_TIMER); } else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) { do_IRQ(MSP_INT_VPE1_TIMER); } else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) { msp_per_irq_dispatch(); } else if (pending) { do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1); } else{ spurious_interrupt(); } }
gpl-2.0
matlo/beagleboard-usbsniffer-kernel
fs/openpromfs/inode.c
1690
9707
/* inode.c: /proc/openprom handling routines * * Copyright (C) 1996-1999 Jakub Jelinek (jakub@redhat.com) * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) */ #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/magic.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/prom.h> #include <asm/uaccess.h> static DEFINE_MUTEX(op_mutex); #define OPENPROM_ROOT_INO 0 enum op_inode_type { op_inode_node, op_inode_prop, }; union op_inode_data { struct device_node *node; struct property *prop; }; struct op_inode_info { struct inode vfs_inode; enum op_inode_type type; union op_inode_data u; }; static struct inode *openprom_iget(struct super_block *sb, ino_t ino); static inline struct op_inode_info *OP_I(struct inode *inode) { return container_of(inode, struct op_inode_info, vfs_inode); } static int is_string(unsigned char *p, int len) { int i; for (i = 0; i < len; i++) { unsigned char val = p[i]; if ((i && !val) || (val >= ' ' && val <= '~')) continue; return 0; } return 1; } static int property_show(struct seq_file *f, void *v) { struct property *prop = f->private; void *pval; int len; len = prop->length; pval = prop->value; if (is_string(pval, len)) { while (len > 0) { int n = strlen(pval); seq_printf(f, "%s", (char *) pval); /* Skip over the NULL byte too. 
*/ pval += n + 1; len -= n + 1; if (len > 0) seq_printf(f, " + "); } } else { if (len & 3) { while (len) { len--; if (len) seq_printf(f, "%02x.", *(unsigned char *) pval); else seq_printf(f, "%02x", *(unsigned char *) pval); pval++; } } else { while (len >= 4) { len -= 4; if (len) seq_printf(f, "%08x.", *(unsigned int *) pval); else seq_printf(f, "%08x", *(unsigned int *) pval); pval += 4; } } } seq_printf(f, "\n"); return 0; } static void *property_start(struct seq_file *f, loff_t *pos) { if (*pos == 0) return pos; return NULL; } static void *property_next(struct seq_file *f, void *v, loff_t *pos) { (*pos)++; return NULL; } static void property_stop(struct seq_file *f, void *v) { /* Nothing to do */ } static const struct seq_operations property_op = { .start = property_start, .next = property_next, .stop = property_stop, .show = property_show }; static int property_open(struct inode *inode, struct file *file) { struct op_inode_info *oi = OP_I(inode); int ret; BUG_ON(oi->type != op_inode_prop); ret = seq_open(file, &property_op); if (!ret) { struct seq_file *m = file->private_data; m->private = oi->u.prop; } return ret; } static const struct file_operations openpromfs_prop_ops = { .open = property_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int openpromfs_readdir(struct file *, void *, filldir_t); static const struct file_operations openprom_operations = { .read = generic_read_dir, .readdir = openpromfs_readdir, .llseek = generic_file_llseek, }; static struct dentry *openpromfs_lookup(struct inode *, struct dentry *, struct nameidata *); static const struct inode_operations openprom_inode_operations = { .lookup = openpromfs_lookup, }; static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct op_inode_info *ent_oi, *oi = OP_I(dir); struct device_node *dp, *child; struct property *prop; enum op_inode_type ent_type; union op_inode_data ent_data; const char *name; struct inode 
*inode; unsigned int ino; int len; BUG_ON(oi->type != op_inode_node); dp = oi->u.node; name = dentry->d_name.name; len = dentry->d_name.len; mutex_lock(&op_mutex); child = dp->child; while (child) { int n = strlen(child->path_component_name); if (len == n && !strncmp(child->path_component_name, name, len)) { ent_type = op_inode_node; ent_data.node = child; ino = child->unique_id; goto found; } child = child->sibling; } prop = dp->properties; while (prop) { int n = strlen(prop->name); if (len == n && !strncmp(prop->name, name, len)) { ent_type = op_inode_prop; ent_data.prop = prop; ino = prop->unique_id; goto found; } prop = prop->next; } mutex_unlock(&op_mutex); return ERR_PTR(-ENOENT); found: inode = openprom_iget(dir->i_sb, ino); mutex_unlock(&op_mutex); if (IS_ERR(inode)) return ERR_CAST(inode); ent_oi = OP_I(inode); ent_oi->type = ent_type; ent_oi->u = ent_data; switch (ent_type) { case op_inode_node: inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; inode->i_op = &openprom_inode_operations; inode->i_fop = &openprom_operations; inode->i_nlink = 2; break; case op_inode_prop: if (!strcmp(dp->name, "options") && (len == 17) && !strncmp (name, "security-password", 17)) inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR; else inode->i_mode = S_IFREG | S_IRUGO; inode->i_fop = &openpromfs_prop_ops; inode->i_nlink = 1; inode->i_size = ent_oi->u.prop->length; break; } d_add(dentry, inode); return NULL; } static int openpromfs_readdir(struct file * filp, void * dirent, filldir_t filldir) { struct inode *inode = filp->f_path.dentry->d_inode; struct op_inode_info *oi = OP_I(inode); struct device_node *dp = oi->u.node; struct device_node *child; struct property *prop; unsigned int ino; int i; mutex_lock(&op_mutex); ino = inode->i_ino; i = filp->f_pos; switch (i) { case 0: if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) goto out; i++; filp->f_pos++; /* fall thru */ case 1: if (filldir(dirent, "..", 2, i, (dp->parent == NULL ? 
OPENPROM_ROOT_INO : dp->parent->unique_id), DT_DIR) < 0) goto out; i++; filp->f_pos++; /* fall thru */ default: i -= 2; /* First, the children nodes as directories. */ child = dp->child; while (i && child) { child = child->sibling; i--; } while (child) { if (filldir(dirent, child->path_component_name, strlen(child->path_component_name), filp->f_pos, child->unique_id, DT_DIR) < 0) goto out; filp->f_pos++; child = child->sibling; } /* Next, the properties as files. */ prop = dp->properties; while (i && prop) { prop = prop->next; i--; } while (prop) { if (filldir(dirent, prop->name, strlen(prop->name), filp->f_pos, prop->unique_id, DT_REG) < 0) goto out; filp->f_pos++; prop = prop->next; } } out: mutex_unlock(&op_mutex); return 0; } static struct kmem_cache *op_inode_cachep; static struct inode *openprom_alloc_inode(struct super_block *sb) { struct op_inode_info *oi; oi = kmem_cache_alloc(op_inode_cachep, GFP_KERNEL); if (!oi) return NULL; return &oi->vfs_inode; } static void openprom_destroy_inode(struct inode *inode) { kmem_cache_free(op_inode_cachep, OP_I(inode)); } static struct inode *openprom_iget(struct super_block *sb, ino_t ino) { struct inode *inode; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (inode->i_state & I_NEW) { inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; if (inode->i_ino == OPENPROM_ROOT_INO) { inode->i_op = &openprom_inode_operations; inode->i_fop = &openprom_operations; inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; } unlock_new_inode(inode); } return inode; } static int openprom_remount(struct super_block *sb, int *flags, char *data) { *flags |= MS_NOATIME; return 0; } static const struct super_operations openprom_sops = { .alloc_inode = openprom_alloc_inode, .destroy_inode = openprom_destroy_inode, .statfs = simple_statfs, .remount_fs = openprom_remount, }; static int openprom_fill_super(struct super_block *s, void *data, int silent) { struct inode *root_inode; struct op_inode_info *oi; int ret; 
s->s_flags |= MS_NOATIME; s->s_blocksize = 1024; s->s_blocksize_bits = 10; s->s_magic = OPENPROM_SUPER_MAGIC; s->s_op = &openprom_sops; s->s_time_gran = 1; root_inode = openprom_iget(s, OPENPROM_ROOT_INO); if (IS_ERR(root_inode)) { ret = PTR_ERR(root_inode); goto out_no_root; } oi = OP_I(root_inode); oi->type = op_inode_node; oi->u.node = of_find_node_by_path("/"); s->s_root = d_alloc_root(root_inode); if (!s->s_root) goto out_no_root_dentry; return 0; out_no_root_dentry: iput(root_inode); ret = -ENOMEM; out_no_root: printk("openprom_fill_super: get root inode failed\n"); return ret; } static int openprom_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { return get_sb_single(fs_type, flags, data, openprom_fill_super, mnt); } static struct file_system_type openprom_fs_type = { .owner = THIS_MODULE, .name = "openpromfs", .get_sb = openprom_get_sb, .kill_sb = kill_anon_super, }; static void op_inode_init_once(void *data) { struct op_inode_info *oi = (struct op_inode_info *) data; inode_init_once(&oi->vfs_inode); } static int __init init_openprom_fs(void) { int err; op_inode_cachep = kmem_cache_create("op_inode_cache", sizeof(struct op_inode_info), 0, (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD), op_inode_init_once); if (!op_inode_cachep) return -ENOMEM; err = register_filesystem(&openprom_fs_type); if (err) kmem_cache_destroy(op_inode_cachep); return err; } static void __exit exit_openprom_fs(void) { unregister_filesystem(&openprom_fs_type); kmem_cache_destroy(op_inode_cachep); } module_init(init_openprom_fs) module_exit(exit_openprom_fs) MODULE_LICENSE("GPL");
gpl-2.0
genehuangtaiwan/common
drivers/clk/clk-fixed-rate.c
2202
2928
/* * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Fixed rate clock implementation */ #include <linux/clk-provider.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/err.h> #include <linux/of.h> /* * DOC: basic fixed-rate clock that cannot gate * * Traits of this clock: * prepare - clk_(un)prepare only ensures parents are prepared * enable - clk_enable only ensures parents are enabled * rate - rate is always a fixed value. No clk_set_rate support * parent - fixed parent. No clk_set_parent support */ #define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw) static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { return to_clk_fixed_rate(hw)->fixed_rate; } const struct clk_ops clk_fixed_rate_ops = { .recalc_rate = clk_fixed_rate_recalc_rate, }; EXPORT_SYMBOL_GPL(clk_fixed_rate_ops); /** * clk_register_fixed_rate - register fixed-rate clock with the clock framework * @dev: device that is registering this clock * @name: name of this clock * @parent_name: name of clock's parent * @flags: framework-specific flags * @fixed_rate: non-adjustable clock rate */ struct clk *clk_register_fixed_rate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned long fixed_rate) { struct clk_fixed_rate *fixed; struct clk *clk; struct clk_init_data init; /* allocate fixed-rate clock */ fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL); if (!fixed) { pr_err("%s: could not allocate fixed clk\n", __func__); return ERR_PTR(-ENOMEM); } init.name = name; init.ops = &clk_fixed_rate_ops; init.flags = flags | CLK_IS_BASIC; init.parent_names = (parent_name ? 
&parent_name: NULL); init.num_parents = (parent_name ? 1 : 0); /* struct clk_fixed_rate assignments */ fixed->fixed_rate = fixed_rate; fixed->hw.init = &init; /* register the clock */ clk = clk_register(dev, &fixed->hw); if (IS_ERR(clk)) kfree(fixed); return clk; } #ifdef CONFIG_OF /** * of_fixed_clk_setup() - Setup function for simple fixed rate clock */ void of_fixed_clk_setup(struct device_node *node) { struct clk *clk; const char *clk_name = node->name; u32 rate; if (of_property_read_u32(node, "clock-frequency", &rate)) return; of_property_read_string(node, "clock-output-names", &clk_name); clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate); if (!IS_ERR(clk)) of_clk_add_provider(node, of_clk_src_simple_get, clk); } EXPORT_SYMBOL_GPL(of_fixed_clk_setup); CLK_OF_DECLARE(fixed_clk, "fixed-clock", of_fixed_clk_setup); #endif
gpl-2.0
UnknownzD/I9103_TW_ICS_Kernel
drivers/acpi/acpica/exstore.c
3226
15287
/****************************************************************************** * * Module Name: exstore - AML Interpreter object store support * *****************************************************************************/ /* * Copyright (C) 2000 - 2011, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acdispat.h" #include "acinterp.h" #include "amlcode.h" #include "acnamesp.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exstore") /* Local prototypes */ static acpi_status acpi_ex_store_object_to_index(union acpi_operand_object *val_desc, union acpi_operand_object *dest_desc, struct acpi_walk_state *walk_state); /******************************************************************************* * * FUNCTION: acpi_ex_store * * PARAMETERS: *source_desc - Value to be stored * *dest_desc - Where to store it. Must be an NS node * or a union acpi_operand_object of type * Reference; * walk_state - Current walk state * * RETURN: Status * * DESCRIPTION: Store the value described by source_desc into the location * described by dest_desc. Called by various interpreter * functions to store the result of an operation into * the destination operand -- not just simply the actual "Store" * ASL operator. 
* ******************************************************************************/ acpi_status acpi_ex_store(union acpi_operand_object *source_desc, union acpi_operand_object *dest_desc, struct acpi_walk_state *walk_state) { acpi_status status = AE_OK; union acpi_operand_object *ref_desc = dest_desc; ACPI_FUNCTION_TRACE_PTR(ex_store, dest_desc); /* Validate parameters */ if (!source_desc || !dest_desc) { ACPI_ERROR((AE_INFO, "Null parameter")); return_ACPI_STATUS(AE_AML_NO_OPERAND); } /* dest_desc can be either a namespace node or an ACPI object */ if (ACPI_GET_DESCRIPTOR_TYPE(dest_desc) == ACPI_DESC_TYPE_NAMED) { /* * Dest is a namespace node, * Storing an object into a Named node. */ status = acpi_ex_store_object_to_node(source_desc, (struct acpi_namespace_node *) dest_desc, walk_state, ACPI_IMPLICIT_CONVERSION); return_ACPI_STATUS(status); } /* Destination object must be a Reference or a Constant object */ switch (dest_desc->common.type) { case ACPI_TYPE_LOCAL_REFERENCE: break; case ACPI_TYPE_INTEGER: /* Allow stores to Constants -- a Noop as per ACPI spec */ if (dest_desc->common.flags & AOPOBJ_AML_CONSTANT) { return_ACPI_STATUS(AE_OK); } /*lint -fallthrough */ default: /* Destination is not a Reference object */ ACPI_ERROR((AE_INFO, "Target is not a Reference or Constant object - %s [%p]", acpi_ut_get_object_type_name(dest_desc), dest_desc)); return_ACPI_STATUS(AE_AML_OPERAND_TYPE); } /* * Examine the Reference class. These cases are handled: * * 1) Store to Name (Change the object associated with a name) * 2) Store to an indexed area of a Buffer or Package * 3) Store to a Method Local or Arg * 4) Store to the debug object */ switch (ref_desc->reference.class) { case ACPI_REFCLASS_REFOF: /* Storing an object into a Name "container" */ status = acpi_ex_store_object_to_node(source_desc, ref_desc->reference. 
object, walk_state, ACPI_IMPLICIT_CONVERSION); break; case ACPI_REFCLASS_INDEX: /* Storing to an Index (pointer into a packager or buffer) */ status = acpi_ex_store_object_to_index(source_desc, ref_desc, walk_state); break; case ACPI_REFCLASS_LOCAL: case ACPI_REFCLASS_ARG: /* Store to a method local/arg */ status = acpi_ds_store_object_to_local(ref_desc->reference.class, ref_desc->reference.value, source_desc, walk_state); break; case ACPI_REFCLASS_DEBUG: /* * Storing to the Debug object causes the value stored to be * displayed and otherwise has no effect -- see ACPI Specification */ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "**** Write to Debug Object: Object %p %s ****:\n\n", source_desc, acpi_ut_get_object_type_name(source_desc))); ACPI_DEBUG_OBJECT(source_desc, 0, 0); break; default: ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X", ref_desc->reference.class)); ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_INFO); status = AE_AML_INTERNAL; break; } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_store_object_to_index * * PARAMETERS: *source_desc - Value to be stored * *dest_desc - Named object to receive the value * walk_state - Current walk state * * RETURN: Status * * DESCRIPTION: Store the object to indexed Buffer or Package element * ******************************************************************************/ static acpi_status acpi_ex_store_object_to_index(union acpi_operand_object *source_desc, union acpi_operand_object *index_desc, struct acpi_walk_state *walk_state) { acpi_status status = AE_OK; union acpi_operand_object *obj_desc; union acpi_operand_object *new_desc; u8 value = 0; u32 i; ACPI_FUNCTION_TRACE(ex_store_object_to_index); /* * Destination must be a reference pointer, and * must point to either a buffer or a package */ switch (index_desc->reference.target_type) { case ACPI_TYPE_PACKAGE: /* * Storing to a package element. 
Copy the object and replace * any existing object with the new object. No implicit * conversion is performed. * * The object at *(index_desc->Reference.Where) is the * element within the package that is to be modified. * The parent package object is at index_desc->Reference.Object */ obj_desc = *(index_desc->reference.where); if (source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE && source_desc->reference.class == ACPI_REFCLASS_TABLE) { /* This is a DDBHandle, just add a reference to it */ acpi_ut_add_reference(source_desc); new_desc = source_desc; } else { /* Normal object, copy it */ status = acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc, walk_state); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } if (obj_desc) { /* Decrement reference count by the ref count of the parent package */ for (i = 0; i < ((union acpi_operand_object *) index_desc->reference.object)->common. reference_count; i++) { acpi_ut_remove_reference(obj_desc); } } *(index_desc->reference.where) = new_desc; /* Increment ref count by the ref count of the parent package-1 */ for (i = 1; i < ((union acpi_operand_object *) index_desc->reference.object)->common. reference_count; i++) { acpi_ut_add_reference(new_desc); } break; case ACPI_TYPE_BUFFER_FIELD: /* * Store into a Buffer or String (not actually a real buffer_field) * at a location defined by an Index. * * The first 8-bit element of the source object is written to the * 8-bit Buffer location defined by the Index destination object, * according to the ACPI 2.0 specification. */ /* * Make sure the target is a Buffer or String. An error should * not happen here, since the reference_object was constructed * by the INDEX_OP code. */ obj_desc = index_desc->reference.object; if ((obj_desc->common.type != ACPI_TYPE_BUFFER) && (obj_desc->common.type != ACPI_TYPE_STRING)) { return_ACPI_STATUS(AE_AML_OPERAND_TYPE); } /* * The assignment of the individual elements will be slightly * different for each source type. 
*/ switch (source_desc->common.type) { case ACPI_TYPE_INTEGER: /* Use the least-significant byte of the integer */ value = (u8) (source_desc->integer.value); break; case ACPI_TYPE_BUFFER: case ACPI_TYPE_STRING: /* Note: Takes advantage of common string/buffer fields */ value = source_desc->buffer.pointer[0]; break; default: /* All other types are invalid */ ACPI_ERROR((AE_INFO, "Source must be Integer/Buffer/String type, not %s", acpi_ut_get_object_type_name(source_desc))); return_ACPI_STATUS(AE_AML_OPERAND_TYPE); } /* Store the source value into the target buffer byte */ obj_desc->buffer.pointer[index_desc->reference.value] = value; break; default: ACPI_ERROR((AE_INFO, "Target is not a Package or BufferField")); status = AE_AML_OPERAND_TYPE; break; } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_store_object_to_node * * PARAMETERS: source_desc - Value to be stored * Node - Named object to receive the value * walk_state - Current walk state * implicit_conversion - Perform implicit conversion (yes/no) * * RETURN: Status * * DESCRIPTION: Store the object to the named object. * * The Assignment of an object to a named object is handled here * The value passed in will replace the current value (if any) * with the input value. * * When storing into an object the data is converted to the * target object type then stored in the object. This means * that the target object type (for an initialized target) will * not be changed by a store operation. * * Assumes parameters are already validated. 
 *
 ******************************************************************************/

acpi_status
acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
			     struct acpi_namespace_node *node,
			     struct acpi_walk_state *walk_state,
			     u8 implicit_conversion)
{
	acpi_status status = AE_OK;
	union acpi_operand_object *target_desc;
	union acpi_operand_object *new_desc;
	acpi_object_type target_type;

	ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_node, source_desc);

	/* Get current type of the node, and object attached to Node */

	target_type = acpi_ns_get_type(node);
	target_desc = acpi_ns_get_attached_object(node);

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p(%s) into node %p(%s)\n",
			  source_desc,
			  acpi_ut_get_object_type_name(source_desc), node,
			  acpi_ut_get_type_name(target_type)));

	/*
	 * Resolve the source object to an actual value
	 * (If it is a reference object)
	 */
	status = acpi_ex_resolve_object(&source_desc, target_type, walk_state);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* If no implicit conversion, drop into the default case below */

	if ((!implicit_conversion) ||
	    ((walk_state->opcode == AML_COPY_OP) &&
	     (target_type != ACPI_TYPE_LOCAL_REGION_FIELD) &&
	     (target_type != ACPI_TYPE_LOCAL_BANK_FIELD) &&
	     (target_type != ACPI_TYPE_LOCAL_INDEX_FIELD))) {
		/*
		 * Force execution of default (no implicit conversion). Note:
		 * copy_object does not perform an implicit conversion, as per the ACPI
		 * spec -- except in case of region/bank/index fields -- because these
		 * objects must retain their original type permanently.
		 */
		target_type = ACPI_TYPE_ANY;
	}

	/* Do the actual store operation */

	switch (target_type) {
	case ACPI_TYPE_BUFFER_FIELD:
	case ACPI_TYPE_LOCAL_REGION_FIELD:
	case ACPI_TYPE_LOCAL_BANK_FIELD:
	case ACPI_TYPE_LOCAL_INDEX_FIELD:

		/* For fields, copy the source data to the target field. */

		status = acpi_ex_write_data_to_field(source_desc, target_desc,
						     &walk_state->result_obj);
		break;

	case ACPI_TYPE_INTEGER:
	case ACPI_TYPE_STRING:
	case ACPI_TYPE_BUFFER:

		/*
		 * These target types are all of type Integer/String/Buffer, and
		 * therefore support implicit conversion before the store.
		 *
		 * Copy and/or convert the source object to a new target object
		 */
		status =
		    acpi_ex_store_object_to_object(source_desc, target_desc,
						   &new_desc, walk_state);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

		if (new_desc != target_desc) {
			/*
			 * Store the new new_desc as the new value of the Name, and set
			 * the Name's type to that of the value being stored in it.
			 * source_desc reference count is incremented by attach_object.
			 *
			 * Note: This may change the type of the node if an explicit store
			 * has been performed such that the node/object type has been
			 * changed.
			 */
			status = acpi_ns_attach_object(node, new_desc,
						       new_desc->common.type);

			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
					  "Store %s into %s via Convert/Attach\n",
					  acpi_ut_get_object_type_name
					  (source_desc),
					  acpi_ut_get_object_type_name
					  (new_desc)));
		}
		break;

	default:

		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "Storing %s (%p) directly into node (%p) with no implicit conversion\n",
				  acpi_ut_get_object_type_name(source_desc),
				  source_desc, node));

		/* No conversions for all other types. Just attach the source object */

		status = acpi_ns_attach_object(node, source_desc,
					       source_desc->common.type);
		break;
	}

	return_ACPI_STATUS(status);
}
gpl-2.0
wangxingchao/s3c6410
arch/arm/kernel/xscale-cp0.c
4250
4029
/*
 *  linux/arch/arm/kernel/xscale-cp0.c
 *
 *  XScale DSP and iWMMXt coprocessor context switching and handling
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/thread_notify.h>

/* Save the 64-bit DSP accumulator acc0 into state[0..1] through CP0 */
static inline void dsp_save_state(u32 *state)
{
	__asm__ __volatile__ (
		"mrrc p0, 0, %0, %1, c0\n"
		: "=r" (state[0]), "=r" (state[1]));
}

/* Restore the 64-bit DSP accumulator acc0 from state[0..1] through CP0 */
static inline void dsp_load_state(u32 *state)
{
	__asm__ __volatile__ (
		"mcrr p0, 0, %0, %1, c0\n"
		: : "r" (state[0]), "r" (state[1]));
}

/*
 * Thread notifier for the DSP accumulator: clear the saved state when a
 * thread is flushed, and eagerly save/restore acc0 on every context
 * switch (the DSP context is small, so no lazy switching is used).
 */
static int dsp_do(struct notifier_block *self, unsigned long cmd, void *t)
{
	struct thread_info *thread = t;

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		thread->cpu_context.extra[0] = 0;
		thread->cpu_context.extra[1] = 0;
		break;

	case THREAD_NOTIFY_SWITCH:
		dsp_save_state(current_thread_info()->cpu_context.extra);
		dsp_load_state(thread->cpu_context.extra);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block dsp_notifier_block = {
	.notifier_call	= dsp_do,
};


#ifdef CONFIG_IWMMXT
/*
 * Thread notifier for iWMMXt: release the lazily-owned iWMMXt context
 * on flush/exit and hand it over on context switch.
 */
static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
{
	struct thread_info *thread = t;

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		/*
		 * flush_thread() zeroes thread->fpstate, so no need
		 * to do anything here.
		 *
		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
		 * initialised state information on the first fault.
		 */
	case THREAD_NOTIFY_EXIT:
		iwmmxt_task_release(thread);
		break;

	case THREAD_NOTIFY_SWITCH:
		iwmmxt_task_switch(thread);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block iwmmxt_notifier_block = {
	.notifier_call	= iwmmxt_do,
};
#endif


/* Read the CP15 coprocessor access register */
static u32 __init xscale_cp_access_read(void)
{
	u32 value;

	__asm__ __volatile__ (
		"mrc p15, 0, %0, c15, c1, 0\n\t"
		: "=r" (value));

	return value;
}

/*
 * Write the CP15 coprocessor access register.  The read-back, mov and
 * "sub pc, pc, #4" sequence serialises the pipeline so the new access
 * rights are in effect before the next coprocessor instruction.
 */
static void __init xscale_cp_access_write(u32 value)
{
	u32 temp;

	__asm__ __volatile__ (
		"mcr p15, 0, %1, c15, c1, 0\n\t"
		"mrc p15, 0, %0, c15, c1, 0\n\t"
		"mov %0, %0\n\t"
		"sub pc, pc, #4\n\t"
		: "=r" (temp) : "r" (value));
}

/*
 * Detect whether we have a MAC coprocessor (40 bit register) or an
 * iWMMXt coprocessor (64 bit registers) by loading 00000100:00000000
 * into a coprocessor register and reading it back, and checking
 * whether the upper word survived intact.
 */
static int __init cpu_has_iwmmxt(void)
{
	u32 lo;
	u32 hi;

	/*
	 * This sequence is interpreted by the DSP coprocessor as:
	 *	mar	acc0, %2, %3
	 *	mra	%0, %1, acc0
	 *
	 * And by the iWMMXt coprocessor as:
	 *	tmcrr	wR0, %2, %3
	 *	tmrrc	%0, %1, wR0
	 */
	__asm__ __volatile__ (
		"mcrr p0, 0, %2, %3, c0\n"
		"mrrc p0, 0, %0, %1, c0\n"
		: "=r" (lo), "=r" (hi)
		: "r" (0), "r" (0x100));

	return !!hi;
}

/*
 * If we detect that the CPU has iWMMXt (and CONFIG_IWMMXT=y), we
 * disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
 * switch code handle iWMMXt context switching.  If on the other
 * hand the CPU has a DSP coprocessor, we keep access to CP0 enabled
 * all the time, and save/restore acc0 on context switch in non-lazy
 * fashion.
*/ static int __init xscale_cp0_init(void) { u32 cp_access; cp_access = xscale_cp_access_read() & ~3; xscale_cp_access_write(cp_access | 1); if (cpu_has_iwmmxt()) { #ifndef CONFIG_IWMMXT printk(KERN_WARNING "CAUTION: XScale iWMMXt coprocessor " "detected, but kernel support is missing.\n"); #else printk(KERN_INFO "XScale iWMMXt coprocessor detected.\n"); elf_hwcap |= HWCAP_IWMMXT; thread_register_notifier(&iwmmxt_notifier_block); #endif } else { printk(KERN_INFO "XScale DSP coprocessor detected.\n"); thread_register_notifier(&dsp_notifier_block); cp_access |= 1; } xscale_cp_access_write(cp_access); return 0; } late_initcall(xscale_cp0_init);
gpl-2.0
iamjy/beaglebone-kernel
arch/arm/mach-omap1/sram-init.c
4762
1919
/* * OMAP SRAM detection and management * * Copyright (C) 2005 Nokia Corporation * Written by Tony Lindgren <tony@atomide.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <asm/fncpy.h> #include <asm/tlb.h> #include <asm/cacheflush.h> #include <asm/mach/map.h> #include "soc.h" #include "sram.h" #define OMAP1_SRAM_PA 0x20000000 #define SRAM_BOOTLOADER_SZ 0x80 /* * The amount of SRAM depends on the core type. * Note that we cannot try to test for SRAM here because writes * to secure SRAM will hang the system. Also the SRAM is not * yet mapped at this point. */ static void __init omap_detect_and_map_sram(void) { unsigned long omap_sram_skip = SRAM_BOOTLOADER_SZ; unsigned long omap_sram_start = OMAP1_SRAM_PA; unsigned long omap_sram_size; if (cpu_is_omap7xx()) omap_sram_size = 0x32000; /* 200K */ else if (cpu_is_omap15xx()) omap_sram_size = 0x30000; /* 192K */ else if (cpu_is_omap1610() || cpu_is_omap1611() || cpu_is_omap1621() || cpu_is_omap1710()) omap_sram_size = 0x4000; /* 16K */ else { pr_err("Could not detect SRAM size\n"); omap_sram_size = 0x4000; } omap_map_sram(omap_sram_start, omap_sram_size, omap_sram_skip, 1); } static void (*_omap_sram_reprogram_clock)(u32 dpllctl, u32 ckctl); void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl) { BUG_ON(!_omap_sram_reprogram_clock); /* On 730, bit 13 must always be 1 */ if (cpu_is_omap7xx()) ckctl |= 0x2000; _omap_sram_reprogram_clock(dpllctl, ckctl); } int __init omap_sram_init(void) { omap_detect_and_map_sram(); _omap_sram_reprogram_clock = omap_sram_push(omap1_sram_reprogram_clock, omap1_sram_reprogram_clock_sz); return 0; }
gpl-2.0
omega-roms/I9505_Omega_Kernel_LL
fs/notify/fanotify/fanotify.c
5786
6217
#include <linux/fanotify.h> #include <linux/fdtable.h> #include <linux/fsnotify_backend.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/kernel.h> /* UINT_MAX */ #include <linux/mount.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/wait.h> static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new) { pr_debug("%s: old=%p new=%p\n", __func__, old, new); if (old->to_tell == new->to_tell && old->data_type == new->data_type && old->tgid == new->tgid) { switch (old->data_type) { case (FSNOTIFY_EVENT_PATH): if ((old->path.mnt == new->path.mnt) && (old->path.dentry == new->path.dentry)) return true; case (FSNOTIFY_EVENT_NONE): return true; default: BUG(); }; } return false; } /* and the list better be locked by something too! */ static struct fsnotify_event *fanotify_merge(struct list_head *list, struct fsnotify_event *event) { struct fsnotify_event_holder *test_holder; struct fsnotify_event *test_event = NULL; struct fsnotify_event *new_event; pr_debug("%s: list=%p event=%p\n", __func__, list, event); list_for_each_entry_reverse(test_holder, list, event_list) { if (should_merge(test_holder->event, event)) { test_event = test_holder->event; break; } } if (!test_event) return NULL; fsnotify_get_event(test_event); /* if they are exactly the same we are done */ if (test_event->mask == event->mask) return test_event; /* * if the refcnt == 2 this is the only queue * for this event and so we can update the mask * in place. 
*/ if (atomic_read(&test_event->refcnt) == 2) { test_event->mask |= event->mask; return test_event; } new_event = fsnotify_clone_event(test_event); /* done with test_event */ fsnotify_put_event(test_event); /* couldn't allocate memory, merge was not possible */ if (unlikely(!new_event)) return ERR_PTR(-ENOMEM); /* build new event and replace it on the list */ new_event->mask = (test_event->mask | event->mask); fsnotify_replace_event(test_holder, new_event); /* we hold a reference on new_event from clone_event */ return new_event; } #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS static int fanotify_get_response_from_access(struct fsnotify_group *group, struct fsnotify_event *event) { int ret; pr_debug("%s: group=%p event=%p\n", __func__, group, event); wait_event(group->fanotify_data.access_waitq, event->response || atomic_read(&group->fanotify_data.bypass_perm)); if (!event->response) /* bypass_perm set */ return 0; /* userspace responded, convert to something usable */ spin_lock(&event->lock); switch (event->response) { case FAN_ALLOW: ret = 0; break; case FAN_DENY: default: ret = -EPERM; } event->response = 0; spin_unlock(&event->lock); pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__, group, event, ret); return ret; } #endif static int fanotify_handle_event(struct fsnotify_group *group, struct fsnotify_mark *inode_mark, struct fsnotify_mark *fanotify_mark, struct fsnotify_event *event) { int ret = 0; struct fsnotify_event *notify_event = NULL; BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS); BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY); BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE); BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE); BUILD_BUG_ON(FAN_OPEN != FS_OPEN); BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD); BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW); BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM); BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM); BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR); pr_debug("%s: group=%p event=%p\n", __func__, group, event); 
notify_event = fsnotify_add_notify_event(group, event, NULL, fanotify_merge); if (IS_ERR(notify_event)) return PTR_ERR(notify_event); #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS if (event->mask & FAN_ALL_PERM_EVENTS) { /* if we merged we need to wait on the new event */ if (notify_event) event = notify_event; ret = fanotify_get_response_from_access(group, event); } #endif if (notify_event) fsnotify_put_event(notify_event); return ret; } static bool fanotify_should_send_event(struct fsnotify_group *group, struct inode *to_tell, struct fsnotify_mark *inode_mark, struct fsnotify_mark *vfsmnt_mark, __u32 event_mask, void *data, int data_type) { __u32 marks_mask, marks_ignored_mask; struct path *path = data; pr_debug("%s: group=%p to_tell=%p inode_mark=%p vfsmnt_mark=%p " "mask=%x data=%p data_type=%d\n", __func__, group, to_tell, inode_mark, vfsmnt_mark, event_mask, data, data_type); /* if we don't have enough info to send an event to userspace say no */ if (data_type != FSNOTIFY_EVENT_PATH) return false; /* sorry, fanotify only gives a damn about files and dirs */ if (!S_ISREG(path->dentry->d_inode->i_mode) && !S_ISDIR(path->dentry->d_inode->i_mode)) return false; if (inode_mark && vfsmnt_mark) { marks_mask = (vfsmnt_mark->mask | inode_mark->mask); marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask); } else if (inode_mark) { /* * if the event is for a child and this inode doesn't care about * events on the child, don't send it! 
*/ if ((event_mask & FS_EVENT_ON_CHILD) && !(inode_mark->mask & FS_EVENT_ON_CHILD)) return false; marks_mask = inode_mark->mask; marks_ignored_mask = inode_mark->ignored_mask; } else if (vfsmnt_mark) { marks_mask = vfsmnt_mark->mask; marks_ignored_mask = vfsmnt_mark->ignored_mask; } else { BUG(); } if (S_ISDIR(path->dentry->d_inode->i_mode) && (marks_ignored_mask & FS_ISDIR)) return false; if (event_mask & marks_mask & ~marks_ignored_mask) return true; return false; } static void fanotify_free_group_priv(struct fsnotify_group *group) { struct user_struct *user; user = group->fanotify_data.user; atomic_dec(&user->fanotify_listeners); free_uid(user); } const struct fsnotify_ops fanotify_fsnotify_ops = { .handle_event = fanotify_handle_event, .should_send_event = fanotify_should_send_event, .free_group_priv = fanotify_free_group_priv, .free_event_priv = NULL, .freeing_mark = NULL, };
gpl-2.0
xXminiWHOOPERxX/dlxpul-kernel-final
fs/ubifs/gc.c
7834
28578
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Adrian Hunter * Artem Bityutskiy (Битюцкий Артём) */ /* * This file implements garbage collection. The procedure for garbage collection * is different depending on whether a LEB as an index LEB (contains index * nodes) or not. For non-index LEBs, garbage collection finds a LEB which * contains a lot of dirty space (obsolete nodes), and copies the non-obsolete * nodes to the journal, at which point the garbage-collected LEB is free to be * reused. For index LEBs, garbage collection marks the non-obsolete index nodes * dirty in the TNC, and after the next commit, the garbage-collected LEB is * to be reused. Garbage collection will cause the number of dirty index nodes * to grow, however sufficient space is reserved for the index to ensure the * commit will never run out of space. * * Notes about dead watermark. At current UBIFS implementation we assume that * LEBs which have less than @c->dead_wm bytes of free + dirty space are full * and not worth garbage-collecting. The dead watermark is one min. I/O unit * size, or min. UBIFS node size, depending on what is greater. Indeed, UBIFS * Garbage Collector has to synchronize the GC head's write buffer before * returning, so this is about wasting one min. I/O unit. 
However, UBIFS GC can * actually reclaim even very small pieces of dirty space by garbage collecting * enough dirty LEBs, but we do not bother doing this at this implementation. * * Notes about dark watermark. The results of GC work depends on how big are * the UBIFS nodes GC deals with. Large nodes make GC waste more space. Indeed, * if GC move data from LEB A to LEB B and nodes in LEB A are large, GC would * have to waste large pieces of free space at the end of LEB B, because nodes * from LEB A would not fit. And the worst situation is when all nodes are of * maximum size. So dark watermark is the amount of free + dirty space in LEB * which are guaranteed to be reclaimable. If LEB has less space, the GC might * be unable to reclaim it. So, LEBs with free + dirty greater than dark * watermark are "good" LEBs from GC's point of few. The other LEBs are not so * good, and GC takes extra care when moving them. */ #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/list_sort.h> #include "ubifs.h" /* * GC may need to move more than one LEB to make progress. The below constants * define "soft" and "hard" limits on the number of LEBs the garbage collector * may move. */ #define SOFT_LEBS_LIMIT 4 #define HARD_LEBS_LIMIT 32 /** * switch_gc_head - switch the garbage collection journal head. * @c: UBIFS file-system description object * @buf: buffer to write * @len: length of the buffer to write * @lnum: LEB number written is returned here * @offs: offset written is returned here * * This function switch the GC head to the next LEB which is reserved in * @c->gc_lnum. Returns %0 in case of success, %-EAGAIN if commit is required, * and other negative error code in case of failures. 
*/ static int switch_gc_head(struct ubifs_info *c) { int err, gc_lnum = c->gc_lnum; struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf; ubifs_assert(gc_lnum != -1); dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)", wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum, c->leb_size - wbuf->offs - wbuf->used); err = ubifs_wbuf_sync_nolock(wbuf); if (err) return err; /* * The GC write-buffer was synchronized, we may safely unmap * 'c->gc_lnum'. */ err = ubifs_leb_unmap(c, gc_lnum); if (err) return err; err = ubifs_wbuf_sync_nolock(wbuf); if (err) return err; err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0); if (err) return err; c->gc_lnum = -1; err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0, UBI_LONGTERM); return err; } /** * data_nodes_cmp - compare 2 data nodes. * @priv: UBIFS file-system description object * @a: first data node * @a: second data node * * This function compares data nodes @a and @b. Returns %1 if @a has greater * inode or block number, and %-1 otherwise. */ static int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b) { ino_t inuma, inumb; struct ubifs_info *c = priv; struct ubifs_scan_node *sa, *sb; cond_resched(); if (a == b) return 0; sa = list_entry(a, struct ubifs_scan_node, list); sb = list_entry(b, struct ubifs_scan_node, list); ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY); ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY); ubifs_assert(sa->type == UBIFS_DATA_NODE); ubifs_assert(sb->type == UBIFS_DATA_NODE); inuma = key_inum(c, &sa->key); inumb = key_inum(c, &sb->key); if (inuma == inumb) { unsigned int blka = key_block(c, &sa->key); unsigned int blkb = key_block(c, &sb->key); if (blka <= blkb) return -1; } else if (inuma <= inumb) return -1; return 1; } /* * nondata_nodes_cmp - compare 2 non-data nodes. * @priv: UBIFS file-system description object * @a: first node * @a: second node * * This function compares nodes @a and @b. It makes sure that inode nodes go * first and sorted by length in descending order. 
Directory entry nodes go * after inode nodes and are sorted in ascending hash valuer order. */ static int nondata_nodes_cmp(void *priv, struct list_head *a, struct list_head *b) { ino_t inuma, inumb; struct ubifs_info *c = priv; struct ubifs_scan_node *sa, *sb; cond_resched(); if (a == b) return 0; sa = list_entry(a, struct ubifs_scan_node, list); sb = list_entry(b, struct ubifs_scan_node, list); ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY && key_type(c, &sb->key) != UBIFS_DATA_KEY); ubifs_assert(sa->type != UBIFS_DATA_NODE && sb->type != UBIFS_DATA_NODE); /* Inodes go before directory entries */ if (sa->type == UBIFS_INO_NODE) { if (sb->type == UBIFS_INO_NODE) return sb->len - sa->len; return -1; } if (sb->type == UBIFS_INO_NODE) return 1; ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY || key_type(c, &sa->key) == UBIFS_XENT_KEY); ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY || key_type(c, &sb->key) == UBIFS_XENT_KEY); ubifs_assert(sa->type == UBIFS_DENT_NODE || sa->type == UBIFS_XENT_NODE); ubifs_assert(sb->type == UBIFS_DENT_NODE || sb->type == UBIFS_XENT_NODE); inuma = key_inum(c, &sa->key); inumb = key_inum(c, &sb->key); if (inuma == inumb) { uint32_t hasha = key_hash(c, &sa->key); uint32_t hashb = key_hash(c, &sb->key); if (hasha <= hashb) return -1; } else if (inuma <= inumb) return -1; return 1; } /** * sort_nodes - sort nodes for GC. * @c: UBIFS file-system description object * @sleb: describes nodes to sort and contains the result on exit * @nondata: contains non-data nodes on exit * @min: minimum node size is returned here * * This function sorts the list of inodes to garbage collect. First of all, it * kills obsolete nodes and separates data and non-data nodes to the * @sleb->nodes and @nondata lists correspondingly. 
 *
 * Data nodes are then sorted in block number order - this is important for
 * bulk-read; data nodes with lower inode number go before data nodes with
 * higher inode number, and data nodes with lower block number go before data
 * nodes with higher block number;
 *
 * Non-data nodes are sorted as follows.
 *   o First go inode nodes - they are sorted in descending length order.
 *   o Then go directory entry nodes - they are sorted in hash order, which
 *     should supposedly optimize 'readdir()'. Direntry nodes with lower parent
 *     inode number go before direntry nodes with higher parent inode number,
 *     and direntry nodes with lower name hash values go before direntry nodes
 *     with higher name hash values.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		      struct list_head *nondata, int *min)
{
	int err;
	struct ubifs_scan_node *snod, *tmp;

	*min = INT_MAX;

	/* Separate data nodes and non-data nodes */
	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
		ubifs_assert(snod->type == UBIFS_INO_NODE ||
			     snod->type == UBIFS_DATA_NODE ||
			     snod->type == UBIFS_DENT_NODE ||
			     snod->type == UBIFS_XENT_NODE ||
			     snod->type == UBIFS_TRUN_NODE);

		if (snod->type != UBIFS_INO_NODE &&
		    snod->type != UBIFS_DATA_NODE &&
		    snod->type != UBIFS_DENT_NODE &&
		    snod->type != UBIFS_XENT_NODE) {
			/* Probably truncation node, zap it */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY ||
			     key_type(c, &snod->key) == UBIFS_INO_KEY ||
			     key_type(c, &snod->key) == UBIFS_DENT_KEY ||
			     key_type(c, &snod->key) == UBIFS_XENT_KEY);

		/*
		 * Check whether the index still references this node; if
		 * not, it is obsolete and does not have to be moved.
		 */
		err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
					 snod->offs, 0);
		if (err < 0)
			return err;

		if (!err) {
			/* The node is obsolete, remove it from the list */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		if (snod->len < *min)
			*min = snod->len;

		if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
			list_move_tail(&snod->list, nondata);
	}

	/* Sort data and non-data nodes */
	list_sort(c, &sleb->nodes, &data_nodes_cmp);
	list_sort(c, nondata, &nondata_nodes_cmp);

	err = dbg_check_data_nodes_order(c, &sleb->nodes);
	if (err)
		return err;
	err = dbg_check_nondata_nodes_order(c, nondata);
	if (err)
		return err;
	return 0;
}

/**
 * move_node - move a node.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 * @snod: the node to move
 * @wbuf: write-buffer to move node to
 *
 * This function moves node @snod to @wbuf, changes TNC correspondingly, and
 * destroys @snod. Returns zero in case of success and a negative error code in
 * case of failure.
 */
static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		     struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
{
	int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;

	cond_resched();
	err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
	if (err)
		return err;

	err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
				snod->offs, new_lnum, new_offs,
				snod->len);
	list_del(&snod->list);
	kfree(snod);
	return err;
}

/**
 * move_nodes - move nodes.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 *
 * This function moves valid nodes from data LEB described by @sleb to the GC
 * journal head. This function returns zero in case of success, %-EAGAIN if
 * commit is required, and other negative error codes in case of other
 * failures.
 */
static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
{
	int err, min;
	LIST_HEAD(nondata);
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	if (wbuf->lnum == -1) {
		/*
		 * The GC journal head is not set, because it is the first GC
		 * invocation since mount.
		 */
		err = switch_gc_head(c);
		if (err)
			return err;
	}

	err = sort_nodes(c, sleb, &nondata, &min);
	if (err)
		goto out;

	/* Write nodes to their new location. Use the first-fit strategy */
	while (1) {
		int avail;
		struct ubifs_scan_node *snod, *tmp;

		/* Move data nodes */
		list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (snod->len > avail)
				/*
				 * Do not skip data nodes in order to optimize
				 * bulk-read.
				 */
				break;

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		/* Move non-data nodes */
		list_for_each_entry_safe(snod, tmp, &nondata, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (avail < min)
				break;

			if (snod->len > avail) {
				/*
				 * Keep going only if this is an inode with
				 * some data. Otherwise stop and switch the GC
				 * head. IOW, we assume that data-less inode
				 * nodes and direntry nodes are roughly of the
				 * same size.
				 */
				if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
				    snod->len == UBIFS_INO_NODE_SZ)
					break;
				continue;
			}

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		if (list_empty(&sleb->nodes) && list_empty(&nondata))
			break;

		/*
		 * Waste the rest of the space in the LEB and switch to the
		 * next LEB.
		 */
		err = switch_gc_head(c);
		if (err)
			goto out;
	}

	return 0;

out:
	list_splice_tail(&nondata, &sleb->nodes);
	return err;
}

/**
 * gc_sync_wbufs - sync write-buffers for GC.
 * @c: UBIFS file-system description object
 *
 * We must guarantee that obsoleting nodes are on flash. Unfortunately they may
 * be in a write-buffer instead. That is, a node could be written to a
 * write-buffer, obsoleting another node in a LEB that is GC'd. If that LEB is
 * erased before the write-buffer is sync'd and then there is an unclean
 * unmount, then an existing node is lost. To avoid this, we sync all
 * write-buffers.
 *
 * This function returns %0 on success or a negative error code on failure.
 */
static int gc_sync_wbufs(struct ubifs_info *c)
{
	int err, i;

	for (i = 0; i < c->jhead_cnt; i++) {
		if (i == GCHD)
			continue;
		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
		if (err)
			return err;
	}
	return 0;
}

/**
 * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lp: describes the LEB to garbage collect
 *
 * This function garbage-collects an LEB and returns one of the @LEB_FREED,
 * @LEB_RETAINED, etc positive codes in case of success, %-EAGAIN if commit is
 * required, and other negative error codes in case of failures.
 */
int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	int err = 0, lnum = lp->lnum;

	ubifs_assert(c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
		     c->need_recovery);
	ubifs_assert(c->gc_lnum != lnum);
	ubifs_assert(wbuf->lnum != lnum);

	if (lp->free + lp->dirty == c->leb_size) {
		/* Special case - a free LEB */
		dbg_gc("LEB %d is free, return it", lp->lnum);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));

		if (lp->free != c->leb_size) {
			/*
			 * Write buffers must be sync'd before unmapping
			 * freeable LEBs, because one of them may contain data
			 * which obsoletes something in 'lp->pnum'.
			 */
			err = gc_sync_wbufs(c);
			if (err)
				return err;
			err = ubifs_change_one_lp(c, lp->lnum, c->leb_size,
						  0, 0, 0, 0);
			if (err)
				return err;
		}
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			return err;

		if (c->gc_lnum == -1) {
			/* No reserved GC LEB yet - keep this one for that */
			c->gc_lnum = lnum;
			return LEB_RETAINED;
		}

		return LEB_FREED;
	}

	/*
	 * We scan the entire LEB even though we only really need to scan up to
	 * (c->leb_size - lp->free).
	 */
	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);

	ubifs_assert(!list_empty(&sleb->nodes));
	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);

	if (snod->type == UBIFS_IDX_NODE) {
		/* An index LEB: mark its live index nodes dirty in the TNC */
		struct ubifs_gced_idx_leb *idx_gc;

		dbg_gc("indexing LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);
		list_for_each_entry(snod, &sleb->nodes, list) {
			struct ubifs_idx_node *idx = snod->node;
			int level = le16_to_cpu(idx->level);

			ubifs_assert(snod->type == UBIFS_IDX_NODE);
			key_read(c, ubifs_idx_key(c, idx), &snod->key);
			err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
						   snod->offs);
			if (err)
				goto out;
		}

		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}

		idx_gc->lnum = lnum;
		idx_gc->unmap = 0;
		list_add(&idx_gc->list, &c->idx_gc);

		/*
		 * Don't release the LEB until after the next commit, because
		 * it may contain data which is needed for recovery. So
		 * although we freed this LEB, it will become usable only after
		 * the commit.
		 */
		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
					  LPROPS_INDEX, 1);
		if (err)
			goto out;
		err = LEB_FREED_IDX;
	} else {
		/* A data LEB: move the live nodes to the GC journal head */
		dbg_gc("data LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);

		err = move_nodes(c, sleb);
		if (err)
			goto out_inc_seq;

		err = gc_sync_wbufs(c);
		if (err)
			goto out_inc_seq;

		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
		if (err)
			goto out_inc_seq;

		/* Allow for races with TNC */
		c->gced_lnum = lnum;
		smp_wmb();
		c->gc_seq += 1;
		smp_wmb();

		if (c->gc_lnum == -1) {
			c->gc_lnum = lnum;
			err = LEB_RETAINED;
		} else {
			err = ubifs_wbuf_sync_nolock(wbuf);
			if (err)
				goto out;

			err = ubifs_leb_unmap(c, lnum);
			if (err)
				goto out;

			err = LEB_FREED;
		}
	}

out:
	ubifs_scan_destroy(sleb);
	return err;

out_inc_seq:
	/* We may have moved at least some nodes so allow for races with TNC */
	c->gced_lnum = lnum;
	smp_wmb();
	c->gc_seq += 1;
	smp_wmb();
	goto out;
}

/**
 * ubifs_garbage_collect - UBIFS garbage collector.
* @c: UBIFS file-system description object * @anyway: do GC even if there are free LEBs * * This function does out-of-place garbage collection. The return codes are: * o positive LEB number if the LEB has been freed and may be used; * o %-EAGAIN if the caller has to run commit; * o %-ENOSPC if GC failed to make any progress; * o other negative error codes in case of other errors. * * Garbage collector writes data to the journal when GC'ing data LEBs, and just * marking indexing nodes dirty when GC'ing indexing LEBs. Thus, at some point * commit may be required. But commit cannot be run from inside GC, because the * caller might be holding the commit lock, so %-EAGAIN is returned instead; * And this error code means that the caller has to run commit, and re-run GC * if there is still no free space. * * There are many reasons why this function may return %-EAGAIN: * o the log is full and there is no space to write an LEB reference for * @c->gc_lnum; * o the journal is too large and exceeds size limitations; * o GC moved indexing LEBs, but they can be used only after the commit; * o the shrinker fails to find clean znodes to free and requests the commit; * o etc. * * Note, if the file-system is close to be full, this function may return * %-EAGAIN infinitely, so the caller has to limit amount of re-invocations of * the function. E.g., this happens if the limits on the journal size are too * tough and GC writes too much to the journal before an LEB is freed. This * might also mean that the journal is too large, and the TNC becomes to big, * so that the shrinker is constantly called, finds not clean znodes to free, * and requests commit. Well, this may also happen if the journal is all right, * but another kernel process consumes too much memory. Anyway, infinite * %-EAGAIN may happen, but in some extreme/misconfiguration cases. 
*/
int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
{
	int i, err, ret, min_space = c->dead_wm;
	struct ubifs_lprops lp;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert_cmt_locked(c);
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (ubifs_gc_should_commit(c))
		return -EAGAIN;

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		ret = -EROFS;
		goto out_unlock;
	}

	/* We expect the write-buffer to be empty on entry */
	ubifs_assert(!wbuf->used);

	for (i = 0; ; i++) {
		int space_before = c->leb_size - wbuf->offs - wbuf->used;
		int space_after;

		cond_resched();

		/* Give the commit an opportunity to run */
		if (ubifs_gc_should_commit(c)) {
			ret = -EAGAIN;
			break;
		}

		if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
			/*
			 * We've done enough iterations. Indexing LEBs were
			 * moved and will be available after the commit.
			 */
			dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
			ubifs_commit_required(c);
			ret = -EAGAIN;
			break;
		}

		if (i > HARD_LEBS_LIMIT) {
			/*
			 * We've moved too many LEBs and have not made
			 * progress, give up.
			 */
			dbg_gc("hard limit, -ENOSPC");
			ret = -ENOSPC;
			break;
		}

		/*
		 * Empty and freeable LEBs can turn up while we waited for
		 * the wbuf lock, or while we have been running GC. In that
		 * case, we should just return one of those instead of
		 * continuing to GC dirty LEBs. Hence we request
		 * 'ubifs_find_dirty_leb()' to return an empty LEB if it can.
		 */
		ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
		if (ret) {
			if (ret == -ENOSPC)
				dbg_gc("no more dirty LEBs");
			break;
		}

		dbg_gc("found LEB %d: free %d, dirty %d, sum %d "
		       "(min. space %d)", lp.lnum, lp.free, lp.dirty,
		       lp.free + lp.dirty, min_space);

		/* Free space in the GC head before this GC cycle starts */
		space_before = c->leb_size - wbuf->offs - wbuf->used;
		if (wbuf->lnum == -1)
			space_before = 0;

		ret = ubifs_garbage_collect_leb(c, &lp);
		if (ret < 0) {
			if (ret == -EAGAIN) {
				/*
				 * This is not error, so we have to return the
				 * LEB to lprops. But if 'ubifs_return_leb()'
				 * fails, its failure code is propagated to the
				 * caller instead of the original '-EAGAIN'.
				 */
				err = ubifs_return_leb(c, lp.lnum);
				if (err)
					ret = err;
				break;
			}
			goto out;
		}

		if (ret == LEB_FREED) {
			/* An LEB has been freed and is ready for use */
			dbg_gc("LEB %d freed, return", lp.lnum);
			ret = lp.lnum;
			break;
		}

		if (ret == LEB_FREED_IDX) {
			/*
			 * This was an indexing LEB and it cannot be
			 * immediately used. And instead of requesting the
			 * commit straight away, we try to garbage collect some
			 * more.
			 */
			dbg_gc("indexing LEB %d freed, continue", lp.lnum);
			continue;
		}

		ubifs_assert(ret == LEB_RETAINED);
		space_after = c->leb_size - wbuf->offs - wbuf->used;
		dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
		       space_after - space_before);

		if (space_after > space_before) {
			/* GC makes progress, keep working */
			min_space >>= 1;
			if (min_space < c->dead_wm)
				min_space = c->dead_wm;
			continue;
		}

		dbg_gc("did not make progress");

		/*
		 * GC moved an LEB but has not made any progress. This means
		 * that the previous GC head LEB contained too little free
		 * space and the LEB which was GC'ed contained only large
		 * nodes which did not fit that space.
		 *
		 * We can do 2 things:
		 * 1. pick another LEB in a hope it'll contain a small node
		 *    which will fit the space we have at the end of current GC
		 *    head LEB, but there is no guarantee, so we try this out
		 *    unless we have already been working for too long;
		 * 2. request an LEB with more dirty space, which will force
		 *    'ubifs_find_dirty_leb()' to start scanning the lprops
		 *    table, instead of just picking one from the heap
		 *    (previously it already picked the dirtiest LEB).
		 */
		if (i < SOFT_LEBS_LIMIT) {
			dbg_gc("try again");
			continue;
		}

		min_space <<= 1;
		if (min_space > c->dark_wm)
			min_space = c->dark_wm;
		dbg_gc("set min. space to %d", min_space);
	}

	if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
		dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
		ubifs_commit_required(c);
		ret = -EAGAIN;
	}

	/* Sync the GC head and unmap the reserved GC LEB before returning */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (!err)
		err = ubifs_leb_unmap(c, c->gc_lnum);
	if (err) {
		ret = err;
		goto out;
	}
out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return ret;

out:
	/* A fatal error happened: switch to read-only mode */
	ubifs_assert(ret < 0);
	ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
	ubifs_wbuf_sync_nolock(wbuf);
	ubifs_ro_mode(c, ret);
	mutex_unlock(&wbuf->io_mutex);
	ubifs_return_leb(c, lp.lnum);
	return ret;
}

/**
 * ubifs_gc_start_commit - garbage collection at start of commit.
 * @c: UBIFS file-system description object
 *
 * If a LEB has only dirty and free space, then we may safely unmap it and make
 * it free. Note, we cannot do this with indexing LEBs because dirty space may
 * correspond to index nodes that are required for recovery. In that case, the
 * LEB cannot be unmapped until after the next commit.
 *
 * This function returns %0 upon success and a negative error code upon failure.
 */
int ubifs_gc_start_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	const struct ubifs_lprops *lp;
	int err = 0, flags;

	ubifs_get_lprops(c);

	/*
	 * Unmap (non-index) freeable LEBs. Note that recovery requires that all
	 * wbufs are sync'd before this, which is done in 'do_commit()'.
	 */
	while (1) {
		lp = ubifs_fast_find_freeable(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			goto out;
		/* Mark the freed LEB fully free in the lprops table */
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
	}

	/* Mark GC'd index LEBs OK to unmap after this commit finishes */
	list_for_each_entry(idx_gc, &c->idx_gc, list)
		idx_gc->unmap = 1;

	/* Record index freeable LEBs for unmapping after commit */
	while (1) {
		lp = ubifs_fast_find_frdi_idx(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(lp->flags & LPROPS_INDEX);
		/* Don't release the LEB until after the next commit */
		flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			kfree(idx_gc);
			goto out;
		}
		ubifs_assert(lp->flags & LPROPS_TAKEN);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		idx_gc->lnum = lp->lnum;
		idx_gc->unmap = 1;
		list_add(&idx_gc->list, &c->idx_gc);
	}
out:
	ubifs_release_lprops(c);
	return err;
}

/**
 * ubifs_gc_end_commit - garbage collection at end of commit.
 * @c: UBIFS file-system description object
 *
 * This function completes out-of-place garbage collection of index LEBs.
 */
int ubifs_gc_end_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc, *tmp;
	struct ubifs_wbuf *wbuf;
	int err = 0;

	wbuf = &c->jheads[GCHD].wbuf;
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	/* Unmap the index LEBs that were flagged at start of commit */
	list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
		if (idx_gc->unmap) {
			dbg_gc("LEB %d", idx_gc->lnum);
			err = ubifs_leb_unmap(c, idx_gc->lnum);
			if (err)
				goto out;
			err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
					  LPROPS_NC, 0, LPROPS_TAKEN, -1);
			if (err)
				goto out;
			list_del(&idx_gc->list);
			kfree(idx_gc);
		}
out:
	mutex_unlock(&wbuf->io_mutex);
	return err;
}

/**
 * ubifs_destroy_idx_gc - destroy idx_gc list.
 * @c: UBIFS file-system description object
 *
 * This function destroys the @c->idx_gc list. It is called when unmounting
 * so locks are not needed.
 */
void ubifs_destroy_idx_gc(struct ubifs_info *c)
{
	while (!list_empty(&c->idx_gc)) {
		struct ubifs_gced_idx_leb *idx_gc;

		idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
				    list);
		c->idx_gc_cnt -= 1;
		list_del(&idx_gc->list);
		kfree(idx_gc);
	}
}

/**
 * ubifs_get_idx_gc_leb - get a LEB from GC'd index LEB list.
 * @c: UBIFS file-system description object
 *
 * Called during start commit so locks are not needed. Returns the LEB
 * number, or %-ENOSPC if the list is empty.
 */
int ubifs_get_idx_gc_leb(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	int lnum;

	if (list_empty(&c->idx_gc))
		return -ENOSPC;
	idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
	lnum = idx_gc->lnum;
	/* c->idx_gc_cnt is updated by the caller when lprops are updated */
	list_del(&idx_gc->list);
	kfree(idx_gc);
	return lnum;
}
gpl-2.0
erikcas/android_kernel_sony_msm
sound/synth/emux/emux_effect.c
14746
9584
/*
 * Midi synth routines for the Emu8k/Emu10k1
 *
 * Copyright (C) 1999 Steve Ratcliffe
 * Copyright (c) 1999-2000 Takashi Iwai <tiwai@suse.de>
 *
 * Contains code based on awe_wave.c by Takashi Iwai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include "emux_voice.h"
#include <linux/slab.h>

#ifdef SNDRV_EMUX_USE_RAW_EFFECT

/*
 * effects table
 */

/* byte offset of a member within struct soundfont_voice_parm */
#define xoffsetof(type,tag)	((long)(&((type)NULL)->tag) - (long)(NULL))

#define parm_offset(tag)	xoffsetof(struct soundfont_voice_parm *, tag)

#define PARM_IS_BYTE		(1 << 0)
#define PARM_IS_WORD		(1 << 1)
#define PARM_IS_ALIGNED		(3 << 2)
#define PARM_IS_ALIGN_HI	(1 << 2)
#define PARM_IS_ALIGN_LO	(2 << 2)
#define PARM_IS_SIGNED		(1 << 4)

#define PARM_WORD	(PARM_IS_WORD)
#define PARM_BYTE_LO	(PARM_IS_BYTE|PARM_IS_ALIGN_LO)
#define PARM_BYTE_HI	(PARM_IS_BYTE|PARM_IS_ALIGN_HI)
#define PARM_BYTE	(PARM_IS_BYTE)
#define PARM_SIGN_LO	(PARM_IS_BYTE|PARM_IS_ALIGN_LO|PARM_IS_SIGNED)
#define PARM_SIGN_HI	(PARM_IS_BYTE|PARM_IS_ALIGN_HI|PARM_IS_SIGNED)

/* One descriptor per EMUX_FX_* effect: access width, legal value range,
 * where it lives in the voice parameter record, and which hardware update
 * (if any) must be triggered when it changes in realtime. */
static struct emux_parm_defs {
	int type;	/* byte or word */
	int low, high;	/* value range */
	long offset;	/* offset in parameter record (-1 = not written) */
	int update;	/* flags for real-time update */
} parm_defs[EMUX_NUM_EFFECTS] = {
	{PARM_WORD, 0, 0x8000, parm_offset(moddelay), 0},	/* env1 delay */
	{PARM_BYTE_LO, 1, 0x80, parm_offset(modatkhld), 0},	/* env1 attack */
	{PARM_BYTE_HI, 0, 0x7e, parm_offset(modatkhld), 0},	/* env1 hold */
	{PARM_BYTE_LO, 1, 0x7f, parm_offset(moddcysus), 0},	/* env1 decay */
	{PARM_BYTE_LO, 1, 0x7f, parm_offset(modrelease), 0},	/* env1 release */
	{PARM_BYTE_HI, 0, 0x7f, parm_offset(moddcysus), 0},	/* env1 sustain */
	{PARM_BYTE_HI, 0, 0xff, parm_offset(pefe), 0},	/* env1 pitch */
	{PARM_BYTE_LO, 0, 0xff, parm_offset(pefe), 0},	/* env1 fc */

	{PARM_WORD, 0, 0x8000, parm_offset(voldelay), 0},	/* env2 delay */
	{PARM_BYTE_LO, 1, 0x80, parm_offset(volatkhld), 0},	/* env2 attack */
	{PARM_BYTE_HI, 0, 0x7e, parm_offset(volatkhld), 0},	/* env2 hold */
	{PARM_BYTE_LO, 1, 0x7f, parm_offset(voldcysus), 0},	/* env2 decay */
	{PARM_BYTE_LO, 1, 0x7f, parm_offset(volrelease), 0},	/* env2 release */
	{PARM_BYTE_HI, 0, 0x7f, parm_offset(voldcysus), 0},	/* env2 sustain */

	{PARM_WORD, 0, 0x8000, parm_offset(lfo1delay), 0},	/* lfo1 delay */
	{PARM_BYTE_LO, 0, 0xff, parm_offset(tremfrq), SNDRV_EMUX_UPDATE_TREMFREQ},	/* lfo1 freq */
	{PARM_SIGN_HI, -128, 127, parm_offset(tremfrq), SNDRV_EMUX_UPDATE_TREMFREQ},	/* lfo1 vol */
	{PARM_SIGN_HI, -128, 127, parm_offset(fmmod), SNDRV_EMUX_UPDATE_FMMOD},	/* lfo1 pitch */
	{PARM_BYTE_LO, 0, 0xff, parm_offset(fmmod), SNDRV_EMUX_UPDATE_FMMOD},	/* lfo1 cutoff */

	{PARM_WORD, 0, 0x8000, parm_offset(lfo2delay), 0},	/* lfo2 delay */
	{PARM_BYTE_LO, 0, 0xff, parm_offset(fm2frq2), SNDRV_EMUX_UPDATE_FM2FRQ2},	/* lfo2 freq */
	{PARM_SIGN_HI, -128, 127, parm_offset(fm2frq2), SNDRV_EMUX_UPDATE_FM2FRQ2},	/* lfo2 pitch */

	{PARM_WORD, 0, 0xffff, -1, SNDRV_EMUX_UPDATE_PITCH},	/* initial pitch */
	{PARM_BYTE, 0, 0xff, parm_offset(chorus), 0},	/* chorus */
	{PARM_BYTE, 0, 0xff, parm_offset(reverb), 0},	/* reverb */
	{PARM_BYTE, 0, 0xff, parm_offset(cutoff), SNDRV_EMUX_UPDATE_VOLUME},	/* cutoff */
	{PARM_BYTE, 0, 15, parm_offset(filterQ), SNDRV_EMUX_UPDATE_Q},	/* resonance */

	{PARM_WORD, 0, 0xffff, -1, 0},	/* sample start */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* loop start */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* loop end */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* coarse sample start */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* coarse loop start */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* coarse loop end */
	{PARM_BYTE, 0, 0xff, -1, SNDRV_EMUX_UPDATE_VOLUME},	/* initial attenuation */
};

/* Apply the channel's effect (set or additive) to a byte-wide register
 * value at *valp, clamping to the legal range for this effect type. */
static void
effect_set_byte(unsigned char *valp, struct snd_midi_channel *chan, int type)
{
	short effect;
	struct snd_emux_effect_table *fx = chan->private;

	effect = fx->val[type];
	if (fx->flag[type] == EMUX_FX_FLAG_ADD) {
		if (parm_defs[type].type & PARM_IS_SIGNED)
			effect += *(char *)valp;
		else
			effect += *valp;
	}
	if (effect < parm_defs[type].low)
		effect = parm_defs[type].low;
	else if (effect > parm_defs[type].high)
		effect = parm_defs[type].high;
	*valp = (unsigned char)effect;
}

/* Apply the channel's effect (set or additive) to a word-wide register
 * value at *valp, clamping to the legal range for this effect type. */
static void
effect_set_word(unsigned short *valp, struct snd_midi_channel *chan, int type)
{
	int effect;
	struct snd_emux_effect_table *fx = chan->private;

	effect = *(unsigned short *)&fx->val[type];
	if (fx->flag[type] == EMUX_FX_FLAG_ADD)
		effect += *valp;
	if (effect < parm_defs[type].low)
		effect = parm_defs[type].low;
	else if (effect > parm_defs[type].high)
		effect = parm_defs[type].high;
	*valp = (unsigned short)effect;
}

/* Combine the coarse (hi, in 32k-sample units) and fine (lo) offset effects
 * into a single sample address offset; halved for 16-bit samples. */
static int
effect_get_offset(struct snd_midi_channel *chan, int lo, int hi, int mode)
{
	int addr = 0;
	struct snd_emux_effect_table *fx = chan->private;

	if (fx->flag[hi])
		addr = (short)fx->val[hi];
	addr = addr << 15;
	if (fx->flag[lo])
		addr += (short)fx->val[lo];
	if (!(mode & SNDRV_SFNT_SAMPLE_8BITS))
		addr /= 2;
	return addr;
}

#ifdef CONFIG_SND_SEQUENCER_OSS
/* change effects - for OSS sequencer compatibility */
void
snd_emux_send_effect_oss(struct snd_emux_port *port,
			 struct snd_midi_channel *chan, int type, int val)
{
	int mode;

	/* the OSS encoding packs the mode into the high bits of 'type' */
	if (type & 0x40)
		mode = EMUX_FX_FLAG_OFF;
	else if (type & 0x80)
		mode = EMUX_FX_FLAG_ADD;
	else
		mode = EMUX_FX_FLAG_SET;
	type &= 0x3f;

	snd_emux_send_effect(port, chan, type, val, mode);
}
#endif

/*
 * Modify the effect value.
 * The new value is recorded in the channel's effect table; if the effect
 * needs realtime update, the corresponding register of every voice that is
 * currently playing on the channel is rewritten as well.
 *
 * @port: the emux port
 * @chan: MIDI channel whose effect is changed
 * @type: effect index (EMUX_FX_*)
 * @val: new raw value
 * @mode: EMUX_FX_FLAG_OFF / EMUX_FX_FLAG_SET / EMUX_FX_FLAG_ADD
 */
void
snd_emux_send_effect(struct snd_emux_port *port, struct snd_midi_channel *chan,
		     int type, int val, int mode)
{
	int i;
	int offset;
	unsigned char *srcp, *origp;
	struct snd_emux *emu;
	struct snd_emux_effect_table *fx;
	unsigned long flags;

	emu = port->emu;
	fx = chan->private;
	if (emu == NULL || fx == NULL)
		return;
	if (type < 0 || type >= EMUX_NUM_EFFECTS)
		return;

	fx->val[type] = val;
	fx->flag[type] = mode;

	/* do we need to modify the register in realtime ? */
	if (!parm_defs[type].update || (offset = parm_defs[type].offset) < 0)
		return;

#ifdef SNDRV_LITTLE_ENDIAN
	if (parm_defs[type].type & PARM_IS_ALIGN_HI)
		offset++;
#else
	if (parm_defs[type].type & PARM_IS_ALIGN_LO)
		offset++;
#endif
	/* modify the register values on all active voices of this channel */
	spin_lock_irqsave(&emu->voice_lock, flags);
	for (i = 0; i < emu->max_voices; i++) {
		struct snd_emux_voice *vp = &emu->voices[i];
		if (!STATE_IS_PLAYING(vp->state) || vp->chan != chan)
			continue;
		srcp = (unsigned char *)&vp->reg.parm + offset;
		origp = (unsigned char *)&vp->zone->v.parm + offset;
		/*
		 * Bug fix: the access width is a property of the effect
		 * 'type', not of the voice index 'i'.  The old code read
		 * parm_defs[i], which indexes past the table for voice
		 * numbers >= EMUX_NUM_EFFECTS and can pick the wrong width.
		 */
		if (parm_defs[type].type & PARM_IS_BYTE) {
			*srcp = *origp;
			effect_set_byte(srcp, chan, type);
		} else {
			*(unsigned short *)srcp = *(unsigned short *)origp;
			effect_set_word((unsigned short *)srcp, chan, type);
		}
	}
	spin_unlock_irqrestore(&emu->voice_lock, flags);

	/* activate them */
	snd_emux_update_channel(port, chan, parm_defs[type].update);
}

/* copy wavetable registers to voice table */
void
snd_emux_setup_effect(struct snd_emux_voice *vp)
{
	struct snd_midi_channel *chan = vp->chan;
	struct snd_emux_effect_table *fx;
	unsigned char *srcp;
	int i;

	if (!(fx = chan->private))
		return;

	/* modify the register values via effect table */
	for (i = 0; i < EMUX_FX_END; i++) {
		int offset;
		if (!fx->flag[i] || (offset = parm_defs[i].offset) < 0)
			continue;
#ifdef SNDRV_LITTLE_ENDIAN
		if (parm_defs[i].type & PARM_IS_ALIGN_HI)
			offset++;
#else
		if (parm_defs[i].type & PARM_IS_ALIGN_LO)
			offset++;
#endif
		srcp = (unsigned char *)&vp->reg.parm + offset;
		if (parm_defs[i].type & PARM_IS_BYTE)
			effect_set_byte(srcp, chan, i);
		else
			effect_set_word((unsigned short *)srcp, chan, i);
	}

	/* correct sample and loop points */
	vp->reg.start += effect_get_offset(chan, EMUX_FX_SAMPLE_START,
					   EMUX_FX_COARSE_SAMPLE_START,
					   vp->reg.sample_mode);

	vp->reg.loopstart += effect_get_offset(chan, EMUX_FX_LOOP_START,
					       EMUX_FX_COARSE_LOOP_START,
					       vp->reg.sample_mode);

	vp->reg.loopend += effect_get_offset(chan, EMUX_FX_LOOP_END,
					     EMUX_FX_COARSE_LOOP_END,
					     vp->reg.sample_mode);
}

/*
 * Allocate one effect table per channel of the port; on allocation failure
 * every channel's private pointer is cleared so effects are simply disabled.
 */
void
snd_emux_create_effect(struct snd_emux_port *p)
{
	int i;
	p->effect = kcalloc(p->chset.max_channels,
			    sizeof(struct snd_emux_effect_table), GFP_KERNEL);
	if (p->effect) {
		for (i = 0; i < p->chset.max_channels; i++)
			p->chset.channels[i].private = p->effect + i;
	} else {
		for (i = 0; i < p->chset.max_channels; i++)
			p->chset.channels[i].private = NULL;
	}
}

/* free the per-port effect tables */
void
snd_emux_delete_effect(struct snd_emux_port *p)
{
	kfree(p->effect);
	p->effect = NULL;
}

/* reset all effect values of all channels to zero */
void
snd_emux_clear_effect(struct snd_emux_port *p)
{
	if (p->effect) {
		memset(p->effect, 0, sizeof(struct snd_emux_effect_table) *
		       p->chset.max_channels);
	}
}

#endif /* SNDRV_EMUX_USE_RAW_EFFECT */
gpl-2.0
adhi1419/MSM7627A
sound/synth/emux/emux_effect.c
14746
9584
/*
 * Midi synth routines for the Emu8k/Emu10k1
 *
 * Copyright (C) 1999 Steve Ratcliffe
 * Copyright (c) 1999-2000 Takashi Iwai <tiwai@suse.de>
 *
 * Contains code based on awe_wave.c by Takashi Iwai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include "emux_voice.h"
#include <linux/slab.h>

#ifdef SNDRV_EMUX_USE_RAW_EFFECT

/*
 * effects table
 */

/* byte offset of a member within struct soundfont_voice_parm */
#define xoffsetof(type,tag)	((long)(&((type)NULL)->tag) - (long)(NULL))

#define parm_offset(tag)	xoffsetof(struct soundfont_voice_parm *, tag)

#define PARM_IS_BYTE		(1 << 0)
#define PARM_IS_WORD		(1 << 1)
#define PARM_IS_ALIGNED		(3 << 2)
#define PARM_IS_ALIGN_HI	(1 << 2)
#define PARM_IS_ALIGN_LO	(2 << 2)
#define PARM_IS_SIGNED		(1 << 4)

#define PARM_WORD	(PARM_IS_WORD)
#define PARM_BYTE_LO	(PARM_IS_BYTE|PARM_IS_ALIGN_LO)
#define PARM_BYTE_HI	(PARM_IS_BYTE|PARM_IS_ALIGN_HI)
#define PARM_BYTE	(PARM_IS_BYTE)
#define PARM_SIGN_LO	(PARM_IS_BYTE|PARM_IS_ALIGN_LO|PARM_IS_SIGNED)
#define PARM_SIGN_HI	(PARM_IS_BYTE|PARM_IS_ALIGN_HI|PARM_IS_SIGNED)

/* One descriptor per EMUX_FX_* effect: access width, legal value range,
 * where it lives in the voice parameter record, and which hardware update
 * (if any) must be triggered when it changes in realtime. */
static struct emux_parm_defs {
	int type;	/* byte or word */
	int low, high;	/* value range */
	long offset;	/* offset in parameter record (-1 = not written) */
	int update;	/* flags for real-time update */
} parm_defs[EMUX_NUM_EFFECTS] = {
	{PARM_WORD, 0, 0x8000, parm_offset(moddelay), 0},	/* env1 delay */
	{PARM_BYTE_LO, 1, 0x80, parm_offset(modatkhld), 0},	/* env1 attack */
	{PARM_BYTE_HI, 0, 0x7e, parm_offset(modatkhld), 0},	/* env1 hold */
	{PARM_BYTE_LO, 1, 0x7f, parm_offset(moddcysus), 0},	/* env1 decay */
	{PARM_BYTE_LO, 1, 0x7f, parm_offset(modrelease), 0},	/* env1 release */
	{PARM_BYTE_HI, 0, 0x7f, parm_offset(moddcysus), 0},	/* env1 sustain */
	{PARM_BYTE_HI, 0, 0xff, parm_offset(pefe), 0},	/* env1 pitch */
	{PARM_BYTE_LO, 0, 0xff, parm_offset(pefe), 0},	/* env1 fc */

	{PARM_WORD, 0, 0x8000, parm_offset(voldelay), 0},	/* env2 delay */
	{PARM_BYTE_LO, 1, 0x80, parm_offset(volatkhld), 0},	/* env2 attack */
	{PARM_BYTE_HI, 0, 0x7e, parm_offset(volatkhld), 0},	/* env2 hold */
	{PARM_BYTE_LO, 1, 0x7f, parm_offset(voldcysus), 0},	/* env2 decay */
	{PARM_BYTE_LO, 1, 0x7f, parm_offset(volrelease), 0},	/* env2 release */
	{PARM_BYTE_HI, 0, 0x7f, parm_offset(voldcysus), 0},	/* env2 sustain */

	{PARM_WORD, 0, 0x8000, parm_offset(lfo1delay), 0},	/* lfo1 delay */
	{PARM_BYTE_LO, 0, 0xff, parm_offset(tremfrq), SNDRV_EMUX_UPDATE_TREMFREQ},	/* lfo1 freq */
	{PARM_SIGN_HI, -128, 127, parm_offset(tremfrq), SNDRV_EMUX_UPDATE_TREMFREQ},	/* lfo1 vol */
	{PARM_SIGN_HI, -128, 127, parm_offset(fmmod), SNDRV_EMUX_UPDATE_FMMOD},	/* lfo1 pitch */
	{PARM_BYTE_LO, 0, 0xff, parm_offset(fmmod), SNDRV_EMUX_UPDATE_FMMOD},	/* lfo1 cutoff */

	{PARM_WORD, 0, 0x8000, parm_offset(lfo2delay), 0},	/* lfo2 delay */
	{PARM_BYTE_LO, 0, 0xff, parm_offset(fm2frq2), SNDRV_EMUX_UPDATE_FM2FRQ2},	/* lfo2 freq */
	{PARM_SIGN_HI, -128, 127, parm_offset(fm2frq2), SNDRV_EMUX_UPDATE_FM2FRQ2},	/* lfo2 pitch */

	{PARM_WORD, 0, 0xffff, -1, SNDRV_EMUX_UPDATE_PITCH},	/* initial pitch */
	{PARM_BYTE, 0, 0xff, parm_offset(chorus), 0},	/* chorus */
	{PARM_BYTE, 0, 0xff, parm_offset(reverb), 0},	/* reverb */
	{PARM_BYTE, 0, 0xff, parm_offset(cutoff), SNDRV_EMUX_UPDATE_VOLUME},	/* cutoff */
	{PARM_BYTE, 0, 15, parm_offset(filterQ), SNDRV_EMUX_UPDATE_Q},	/* resonance */

	{PARM_WORD, 0, 0xffff, -1, 0},	/* sample start */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* loop start */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* loop end */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* coarse sample start */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* coarse loop start */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* coarse loop end */
	{PARM_BYTE, 0, 0xff, -1, SNDRV_EMUX_UPDATE_VOLUME},	/* initial attenuation */
};

/* Apply the channel's effect (set or additive) to a byte-wide register
 * value at *valp, clamping to the legal range for this effect type. */
static void
effect_set_byte(unsigned char *valp, struct snd_midi_channel *chan, int type)
{
	short effect;
	struct snd_emux_effect_table *fx = chan->private;

	effect = fx->val[type];
	if (fx->flag[type] == EMUX_FX_FLAG_ADD) {
		if (parm_defs[type].type & PARM_IS_SIGNED)
			effect += *(char *)valp;
		else
			effect += *valp;
	}
	if (effect < parm_defs[type].low)
		effect = parm_defs[type].low;
	else if (effect > parm_defs[type].high)
		effect = parm_defs[type].high;
	*valp = (unsigned char)effect;
}

/* Apply the channel's effect (set or additive) to a word-wide register
 * value at *valp, clamping to the legal range for this effect type. */
static void
effect_set_word(unsigned short *valp, struct snd_midi_channel *chan, int type)
{
	int effect;
	struct snd_emux_effect_table *fx = chan->private;

	effect = *(unsigned short *)&fx->val[type];
	if (fx->flag[type] == EMUX_FX_FLAG_ADD)
		effect += *valp;
	if (effect < parm_defs[type].low)
		effect = parm_defs[type].low;
	else if (effect > parm_defs[type].high)
		effect = parm_defs[type].high;
	*valp = (unsigned short)effect;
}

/* Combine the coarse (hi, in 32k-sample units) and fine (lo) offset effects
 * into a single sample address offset; halved for 16-bit samples. */
static int
effect_get_offset(struct snd_midi_channel *chan, int lo, int hi, int mode)
{
	int addr = 0;
	struct snd_emux_effect_table *fx = chan->private;

	if (fx->flag[hi])
		addr = (short)fx->val[hi];
	addr = addr << 15;
	if (fx->flag[lo])
		addr += (short)fx->val[lo];
	if (!(mode & SNDRV_SFNT_SAMPLE_8BITS))
		addr /= 2;
	return addr;
}

#ifdef CONFIG_SND_SEQUENCER_OSS
/* change effects - for OSS sequencer compatibility */
void
snd_emux_send_effect_oss(struct snd_emux_port *port,
			 struct snd_midi_channel *chan, int type, int val)
{
	int mode;

	/* the OSS encoding packs the mode into the high bits of 'type' */
	if (type & 0x40)
		mode = EMUX_FX_FLAG_OFF;
	else if (type & 0x80)
		mode = EMUX_FX_FLAG_ADD;
	else
		mode = EMUX_FX_FLAG_SET;
	type &= 0x3f;

	snd_emux_send_effect(port, chan, type, val, mode);
}
#endif

/*
 * Modify the effect value.
 * The new value is recorded in the channel's effect table; if the effect
 * needs realtime update, the corresponding register of every voice that is
 * currently playing on the channel is rewritten as well.
 *
 * @port: the emux port
 * @chan: MIDI channel whose effect is changed
 * @type: effect index (EMUX_FX_*)
 * @val: new raw value
 * @mode: EMUX_FX_FLAG_OFF / EMUX_FX_FLAG_SET / EMUX_FX_FLAG_ADD
 */
void
snd_emux_send_effect(struct snd_emux_port *port, struct snd_midi_channel *chan,
		     int type, int val, int mode)
{
	int i;
	int offset;
	unsigned char *srcp, *origp;
	struct snd_emux *emu;
	struct snd_emux_effect_table *fx;
	unsigned long flags;

	emu = port->emu;
	fx = chan->private;
	if (emu == NULL || fx == NULL)
		return;
	if (type < 0 || type >= EMUX_NUM_EFFECTS)
		return;

	fx->val[type] = val;
	fx->flag[type] = mode;

	/* do we need to modify the register in realtime ? */
	if (!parm_defs[type].update || (offset = parm_defs[type].offset) < 0)
		return;

#ifdef SNDRV_LITTLE_ENDIAN
	if (parm_defs[type].type & PARM_IS_ALIGN_HI)
		offset++;
#else
	if (parm_defs[type].type & PARM_IS_ALIGN_LO)
		offset++;
#endif
	/* modify the register values on all active voices of this channel */
	spin_lock_irqsave(&emu->voice_lock, flags);
	for (i = 0; i < emu->max_voices; i++) {
		struct snd_emux_voice *vp = &emu->voices[i];
		if (!STATE_IS_PLAYING(vp->state) || vp->chan != chan)
			continue;
		srcp = (unsigned char *)&vp->reg.parm + offset;
		origp = (unsigned char *)&vp->zone->v.parm + offset;
		/*
		 * Bug fix: the access width is a property of the effect
		 * 'type', not of the voice index 'i'.  The old code read
		 * parm_defs[i], which indexes past the table for voice
		 * numbers >= EMUX_NUM_EFFECTS and can pick the wrong width.
		 */
		if (parm_defs[type].type & PARM_IS_BYTE) {
			*srcp = *origp;
			effect_set_byte(srcp, chan, type);
		} else {
			*(unsigned short *)srcp = *(unsigned short *)origp;
			effect_set_word((unsigned short *)srcp, chan, type);
		}
	}
	spin_unlock_irqrestore(&emu->voice_lock, flags);

	/* activate them */
	snd_emux_update_channel(port, chan, parm_defs[type].update);
}

/* copy wavetable registers to voice table */
void
snd_emux_setup_effect(struct snd_emux_voice *vp)
{
	struct snd_midi_channel *chan = vp->chan;
	struct snd_emux_effect_table *fx;
	unsigned char *srcp;
	int i;

	if (!(fx = chan->private))
		return;

	/* modify the register values via effect table */
	for (i = 0; i < EMUX_FX_END; i++) {
		int offset;
		if (!fx->flag[i] || (offset = parm_defs[i].offset) < 0)
			continue;
#ifdef SNDRV_LITTLE_ENDIAN
		if (parm_defs[i].type & PARM_IS_ALIGN_HI)
			offset++;
#else
		if (parm_defs[i].type & PARM_IS_ALIGN_LO)
			offset++;
#endif
		srcp = (unsigned char *)&vp->reg.parm + offset;
		if (parm_defs[i].type & PARM_IS_BYTE)
			effect_set_byte(srcp, chan, i);
		else
			effect_set_word((unsigned short *)srcp, chan, i);
	}

	/* correct sample and loop points */
	vp->reg.start += effect_get_offset(chan, EMUX_FX_SAMPLE_START,
					   EMUX_FX_COARSE_SAMPLE_START,
					   vp->reg.sample_mode);

	vp->reg.loopstart += effect_get_offset(chan, EMUX_FX_LOOP_START,
					       EMUX_FX_COARSE_LOOP_START,
					       vp->reg.sample_mode);

	vp->reg.loopend += effect_get_offset(chan, EMUX_FX_LOOP_END,
					     EMUX_FX_COARSE_LOOP_END,
					     vp->reg.sample_mode);
}

/*
 * Allocate one effect table per channel of the port; on allocation failure
 * every channel's private pointer is cleared so effects are simply disabled.
 */
void
snd_emux_create_effect(struct snd_emux_port *p)
{
	int i;
	p->effect = kcalloc(p->chset.max_channels,
			    sizeof(struct snd_emux_effect_table), GFP_KERNEL);
	if (p->effect) {
		for (i = 0; i < p->chset.max_channels; i++)
			p->chset.channels[i].private = p->effect + i;
	} else {
		for (i = 0; i < p->chset.max_channels; i++)
			p->chset.channels[i].private = NULL;
	}
}

/* free the per-port effect tables */
void
snd_emux_delete_effect(struct snd_emux_port *p)
{
	kfree(p->effect);
	p->effect = NULL;
}

/* reset all effect values of all channels to zero */
void
snd_emux_clear_effect(struct snd_emux_port *p)
{
	if (p->effect) {
		memset(p->effect, 0, sizeof(struct snd_emux_effect_table) *
		       p->chset.max_channels);
	}
}

#endif /* SNDRV_EMUX_USE_RAW_EFFECT */
gpl-2.0
seem-sky/rt-thread
components/net/lwip-1.3.2/src/core/ipv4/ip.c
155
24982
/** * @file * This is the IPv4 layer implementation for incoming and outgoing IP traffic. * * @see ip_frag.c * */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. 
 *
 * Author: Adam Dunkels <adam@sics.se>
 *
 */

#include "lwip/opt.h"
#include "lwip/ip.h"
#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/ip_frag.h"
#include "lwip/inet.h"
#include "lwip/inet_chksum.h"
#include "lwip/netif.h"
#include "lwip/icmp.h"
#include "lwip/igmp.h"
#include "lwip/raw.h"
#include "lwip/udp.h"
#include "lwip/tcp.h"
#include "lwip/snmp.h"
#include "lwip/dhcp.h"
#include "lwip/stats.h"
#include "arch/perf.h"

#include <string.h>

/**
 * The interface that provided the packet for the current callback
 * invocation.
 */
struct netif *current_netif;

/**
 * Header of the input packet currently being processed.
 */
const struct ip_hdr *current_header;

/**
 * Finds the appropriate network interface for a given IP address. It
 * searches the list of network interfaces linearly. A match is found
 * if the masked IP address of the network interface equals the masked
 * IP address given to the function.
 *
 * @param dest the destination IP address for which to find the route
 * @return the netif on which to send to reach dest, or NULL (with the
 *         route-error counters bumped) if neither a matching netif nor a
 *         usable default netif exists
 */
struct netif *
ip_route(struct ip_addr *dest)
{
  struct netif *netif;

  /* iterate through netifs */
  for(netif = netif_list; netif != NULL; netif = netif->next) {
    /* only consider interfaces that are up */
    if (netif_is_up(netif)) {
      /* network mask matches? */
      if (ip_addr_netcmp(dest, &(netif->ip_addr), &(netif->netmask))) {
        /* return netif on which to forward IP packet */
        return netif;
      }
    }
  }
  if ((netif_default == NULL) || (!netif_is_up(netif_default))) {
    LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip_route: No route to 0x%"X32_F"\n", dest->addr));
    IP_STATS_INC(ip.rterr);
    snmp_inc_ipoutnoroutes();
    return NULL;
  }
  /* no matching netif found, use default netif */
  return netif_default;
}

#if IP_FORWARD
/**
 * Forwards an IP packet. It finds an appropriate route for the
 * packet, decrements the TTL value of the packet, adjusts the
 * checksum and outputs the packet on the appropriate interface.
 *
 * @param p the packet to forward (p->payload points to IP header)
 * @param iphdr the IP header of the input packet
 * @param inp the netif on which this packet was received
 * @return the netif on which the packet was sent (NULL if it wasn't sent)
 */
static struct netif *
ip_forward(struct pbuf *p, struct ip_hdr *iphdr, struct netif *inp)
{
  struct netif *netif;

  PERF_START;
  /* Find network interface where to forward this IP packet to. */
  netif = ip_route((struct ip_addr *)&(iphdr->dest));
  if (netif == NULL) {
    LWIP_DEBUGF(IP_DEBUG, ("ip_forward: no forwarding route for 0x%"X32_F" found\n",
                      iphdr->dest.addr));
    snmp_inc_ipoutnoroutes();
    return (struct netif *)NULL;
  }
  /* Do not forward packets onto the same network interface on which
   * they arrived. */
  if (netif == inp) {
    LWIP_DEBUGF(IP_DEBUG, ("ip_forward: not bouncing packets back on incoming interface.\n"));
    snmp_inc_ipoutnoroutes();
    return (struct netif *)NULL;
  }

  /* decrement TTL */
  IPH_TTL_SET(iphdr, IPH_TTL(iphdr) - 1);
  /* send ICMP if TTL == 0 */
  if (IPH_TTL(iphdr) == 0) {
    snmp_inc_ipinhdrerrors();
#if LWIP_ICMP
    /* Don't send ICMP messages in response to ICMP messages */
    if (IPH_PROTO(iphdr) != IP_PROTO_ICMP) {
      icmp_time_exceeded(p, ICMP_TE_TTL);
    }
#endif /* LWIP_ICMP */
    return (struct netif *)NULL;
  }

  /* Incrementally update the IP checksum for the TTL decrement
   * (one's-complement adjustment in the style of RFC 1624). */
  if (IPH_CHKSUM(iphdr) >= htons(0xffff - 0x100)) {
    IPH_CHKSUM_SET(iphdr, IPH_CHKSUM(iphdr) + htons(0x100) + 1);
  } else {
    IPH_CHKSUM_SET(iphdr, IPH_CHKSUM(iphdr) + htons(0x100));
  }

  LWIP_DEBUGF(IP_DEBUG, ("ip_forward: forwarding packet to 0x%"X32_F"\n",
                    iphdr->dest.addr));

  IP_STATS_INC(ip.fw);
  IP_STATS_INC(ip.xmit);
  snmp_inc_ipforwdatagrams();

  PERF_STOP("ip_forward");
  /* transmit pbuf on chosen interface */
  netif->output(netif, p, (struct ip_addr *)&(iphdr->dest));
  return netif;
}
#endif /* IP_FORWARD */

/**
 * This function is called by the network interface device driver when
 * an IP packet is received.
The function does the basic checks of the * IP header such as packet size being at least larger than the header * size etc. If the packet was not destined for us, the packet is * forwarded (using ip_forward). The IP checksum is always checked. * * Finally, the packet is sent to the upper layer protocol input function. * * @param p the received IP packet (p->payload points to IP header) * @param inp the netif on which this packet was received * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't * processed, but currently always returns ERR_OK) */ err_t ip_input(struct pbuf *p, struct netif *inp) { struct ip_hdr *iphdr; struct netif *netif; u16_t iphdr_hlen; u16_t iphdr_len; #if LWIP_DHCP int check_ip_src=1; #endif /* LWIP_DHCP */ IP_STATS_INC(ip.recv); snmp_inc_ipinreceives(); /* identify the IP header */ iphdr = p->payload; if (IPH_V(iphdr) != 4) { LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IP packet dropped due to bad version number %"U16_F"\n", IPH_V(iphdr))); ip_debug_print(p); pbuf_free(p); IP_STATS_INC(ip.err); IP_STATS_INC(ip.drop); snmp_inc_ipinhdrerrors(); return ERR_OK; } /* obtain IP header length in number of 32-bit words */ iphdr_hlen = IPH_HL(iphdr); /* calculate IP header length in bytes */ iphdr_hlen *= 4; /* obtain ip length in bytes */ iphdr_len = ntohs(IPH_LEN(iphdr)); /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? 
*/ if ((iphdr_hlen > p->len) || (iphdr_len > p->tot_len)) { if (iphdr_hlen > p->len) { LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", iphdr_hlen, p->len)); } if (iphdr_len > p->tot_len) { LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP (len %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", iphdr_len, p->tot_len)); } /* free (drop) packet pbufs */ pbuf_free(p); IP_STATS_INC(ip.lenerr); IP_STATS_INC(ip.drop); snmp_inc_ipindiscards(); return ERR_OK; } /* verify checksum */ #if CHECKSUM_CHECK_IP if (inet_chksum(iphdr, iphdr_hlen) != 0) { LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Checksum (0x%"X16_F") failed, IP packet dropped.\n", inet_chksum(iphdr, iphdr_hlen))); ip_debug_print(p); pbuf_free(p); IP_STATS_INC(ip.chkerr); IP_STATS_INC(ip.drop); snmp_inc_ipinhdrerrors(); return ERR_OK; } #endif /* Trim pbuf. This should have been done at the netif layer, * but we'll do it anyway just to be sure that its done. */ pbuf_realloc(p, iphdr_len); /* match packet against an interface, i.e. is this packet for us? */ #if LWIP_IGMP if (ip_addr_ismulticast(&(iphdr->dest))) { if ((inp->flags & NETIF_FLAG_IGMP) && (igmp_lookfor_group(inp, &(iphdr->dest)))) { netif = inp; } else { netif = NULL; } } else #endif /* LWIP_IGMP */ { /* start trying with inp. if that's not acceptable, start walking the list of configured netifs. 'first' is used as a boolean to mark whether we started walking the list */ int first = 1; netif = inp; do { LWIP_DEBUGF(IP_DEBUG, ("ip_input: iphdr->dest 0x%"X32_F" netif->ip_addr 0x%"X32_F" (0x%"X32_F", 0x%"X32_F", 0x%"X32_F")\n", iphdr->dest.addr, netif->ip_addr.addr, iphdr->dest.addr & netif->netmask.addr, netif->ip_addr.addr & netif->netmask.addr, iphdr->dest.addr & ~(netif->netmask.addr))); /* interface is up and configured? */ if ((netif_is_up(netif)) && (!ip_addr_isany(&(netif->ip_addr)))) { /* unicast to this interface address? 
*/ if (ip_addr_cmp(&(iphdr->dest), &(netif->ip_addr)) || /* or broadcast on this interface network address? */ ip_addr_isbroadcast(&(iphdr->dest), netif)) { LWIP_DEBUGF(IP_DEBUG, ("ip_input: packet accepted on interface %c%c\n", netif->name[0], netif->name[1])); /* break out of for loop */ break; } } if (first) { first = 0; netif = netif_list; } else { netif = netif->next; } if (netif == inp) { netif = netif->next; } } while(netif != NULL); } #if LWIP_DHCP /* Pass DHCP messages regardless of destination address. DHCP traffic is addressed * using link layer addressing (such as Ethernet MAC) so we must not filter on IP. * According to RFC 1542 section 3.1.1, referred by RFC 2131). */ if (netif == NULL) { /* remote port is DHCP server? */ if (IPH_PROTO(iphdr) == IP_PROTO_UDP) { LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip_input: UDP packet to DHCP client port %"U16_F"\n", ntohs(((struct udp_hdr *)((u8_t *)iphdr + iphdr_hlen))->dest))); if (ntohs(((struct udp_hdr *)((u8_t *)iphdr + iphdr_hlen))->dest) == DHCP_CLIENT_PORT) { LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip_input: DHCP packet accepted.\n")); netif = inp; check_ip_src = 0; } } } #endif /* LWIP_DHCP */ /* broadcast or multicast packet source address? Compliant with RFC 1122: 3.2.1.3 */ #if LWIP_DHCP /* DHCP servers need 0.0.0.0 to be allowed as source address (RFC 1.1.2.2: 3.2.1.3/a) */ if (check_ip_src && (iphdr->src.addr != 0)) #endif /* LWIP_DHCP */ { if ((ip_addr_isbroadcast(&(iphdr->src), inp)) || (ip_addr_ismulticast(&(iphdr->src)))) { /* packet source is not valid */ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("ip_input: packet source is not valid.\n")); /* free (drop) packet pbufs */ pbuf_free(p); IP_STATS_INC(ip.drop); snmp_inc_ipinaddrerrors(); snmp_inc_ipindiscards(); return ERR_OK; } } /* packet not for us? 
*/ if (netif == NULL) { /* packet not for us, route or discard */ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip_input: packet not for us.\n")); #if IP_FORWARD /* non-broadcast packet? */ if (!ip_addr_isbroadcast(&(iphdr->dest), inp)) { /* try to forward IP packet on (other) interfaces */ ip_forward(p, iphdr, inp); } else #endif /* IP_FORWARD */ { snmp_inc_ipinaddrerrors(); snmp_inc_ipindiscards(); } pbuf_free(p); return ERR_OK; } /* packet consists of multiple fragments? */ if ((IPH_OFFSET(iphdr) & htons(IP_OFFMASK | IP_MF)) != 0) { #if IP_REASSEMBLY /* packet fragment reassembly code present? */ LWIP_DEBUGF(IP_DEBUG, ("IP packet is a fragment (id=0x%04"X16_F" tot_len=%"U16_F" len=%"U16_F" MF=%"U16_F" offset=%"U16_F"), calling ip_reass()\n", ntohs(IPH_ID(iphdr)), p->tot_len, ntohs(IPH_LEN(iphdr)), !!(IPH_OFFSET(iphdr) & htons(IP_MF)), (ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK)*8)); /* reassemble the packet*/ p = ip_reass(p); /* packet not fully reassembled yet? */ if (p == NULL) { return ERR_OK; } iphdr = p->payload; #else /* IP_REASSEMBLY == 0, no packet fragment reassembly code present */ pbuf_free(p); LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since it was fragmented (0x%"X16_F") (while IP_REASSEMBLY == 0).\n", ntohs(IPH_OFFSET(iphdr)))); IP_STATS_INC(ip.opterr); IP_STATS_INC(ip.drop); /* unsupported protocol feature */ snmp_inc_ipinunknownprotos(); return ERR_OK; #endif /* IP_REASSEMBLY */ } #if IP_OPTIONS_ALLOWED == 0 /* no support for IP options in the IP header? 
*/ #if LWIP_IGMP /* there is an extra "router alert" option in IGMP messages which we allow for but do not police */ if((iphdr_hlen > IP_HLEN && (IPH_PROTO(iphdr) != IP_PROTO_IGMP)) { #else if (iphdr_hlen > IP_HLEN) { #endif /* LWIP_IGMP */ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since there were IP options (while IP_OPTIONS_ALLOWED == 0).\n")); pbuf_free(p); IP_STATS_INC(ip.opterr); IP_STATS_INC(ip.drop); /* unsupported protocol feature */ snmp_inc_ipinunknownprotos(); return ERR_OK; } #endif /* IP_OPTIONS_ALLOWED == 0 */ /* send to upper layers */ LWIP_DEBUGF(IP_DEBUG, ("ip_input: \n")); ip_debug_print(p); LWIP_DEBUGF(IP_DEBUG, ("ip_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len)); current_netif = inp; current_header = iphdr; #if LWIP_RAW /* raw input did not eat the packet? */ if (raw_input(p, inp) == 0) #endif /* LWIP_RAW */ { switch (IPH_PROTO(iphdr)) { #if LWIP_UDP case IP_PROTO_UDP: #if LWIP_UDPLITE case IP_PROTO_UDPLITE: #endif /* LWIP_UDPLITE */ snmp_inc_ipindelivers(); udp_input(p, inp); break; #endif /* LWIP_UDP */ #if LWIP_TCP case IP_PROTO_TCP: snmp_inc_ipindelivers(); tcp_input(p, inp); break; #endif /* LWIP_TCP */ #if LWIP_ICMP case IP_PROTO_ICMP: snmp_inc_ipindelivers(); icmp_input(p, inp); break; #endif /* LWIP_ICMP */ #if LWIP_IGMP case IP_PROTO_IGMP: igmp_input(p,inp,&(iphdr->dest)); break; #endif /* LWIP_IGMP */ default: #if LWIP_ICMP /* send ICMP destination protocol unreachable unless is was a broadcast */ if (!ip_addr_isbroadcast(&(iphdr->dest), inp) && !ip_addr_ismulticast(&(iphdr->dest))) { p->payload = iphdr; icmp_dest_unreach(p, ICMP_DUR_PROTO); } #endif /* LWIP_ICMP */ pbuf_free(p); LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Unsupported transport protocol %"U16_F"\n", IPH_PROTO(iphdr))); IP_STATS_INC(ip.proterr); IP_STATS_INC(ip.drop); snmp_inc_ipinunknownprotos(); } } current_netif = NULL; current_header = NULL; return ERR_OK; } /** * Sends an IP packet on a network interface. 
This function constructs
 * the IP header and calculates the IP header checksum. If the source
 * IP address is NULL, the IP address of the outgoing network
 * interface is filled in as source address.
 * If the destination IP address is IP_HDRINCL, p is assumed to already
 * include an IP header and p->payload points to it instead of the data.
 *
 * @param p the packet to send (p->payload points to the data, e.g. next
 *          protocol header; if dest == IP_HDRINCL, p already includes an
 *          IP header and p->payload points to that IP header)
 * @param src the source IP address to send from (if src == IP_ADDR_ANY, the
 *         IP address of the netif used to send is used as source address)
 * @param dest the destination IP address to send the packet to
 * @param ttl the TTL value to be set in the IP header
 * @param tos the TOS value to be set in the IP header
 * @param proto the PROTOCOL to be set in the IP header
 * @param netif the netif on which to send this packet
 * @return ERR_OK if the packet was sent OK
 *         ERR_BUF if p doesn't have enough space for IP/LINK headers
 *         returns errors returned by netif->output
 *
 * @note ip_id: RFC791 "some host may be able to simply use
 *       unique identifiers independent of destination"
 */
err_t
ip_output_if(struct pbuf *p, struct ip_addr *src, struct ip_addr *dest,
             u8_t ttl, u8_t tos,
             u8_t proto, struct netif *netif)
{
#if IP_OPTIONS_SEND
  /* when options support is compiled in, ip_output_if is a thin wrapper
   * around ip_output_if_opt with no options; otherwise the body below
   * belongs directly to ip_output_if (note the #if/#endif brace splice) */
  return ip_output_if_opt(p, src, dest, ttl, tos, proto, netif, NULL, 0);
}

/**
 * Same as ip_output_if() but with the possibility to include IP options:
 *
 * @ param ip_options pointer to the IP options, copied into the IP header
 * @ param optlen length of ip_options
 */
err_t ip_output_if_opt(struct pbuf *p, struct ip_addr *src, struct ip_addr *dest,
       u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options,
       u16_t optlen)
{
#endif /* IP_OPTIONS_SEND */
  struct ip_hdr *iphdr;
  static u16_t ip_id = 0; /* datagram identification counter, shared by all senders */

  snmp_inc_ipoutrequests();

  /* Should the IP header be generated or is it already included in p? */
  if (dest != IP_HDRINCL) {
    u16_t ip_hlen = IP_HLEN;
#if IP_OPTIONS_SEND
    u16_t optlen_aligned = 0;
    if (optlen != 0) {
      /* round up to a multiple of 4 */
      optlen_aligned = ((optlen + 3) & ~3);
      ip_hlen += optlen_aligned;
      /* First write in the IP options */
      if (pbuf_header(p, optlen_aligned)) {
        LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip_output_if_opt: not enough room for IP options in pbuf\n"));
        IP_STATS_INC(ip.err);
        snmp_inc_ipoutdiscards();
        return ERR_BUF;
      }
      MEMCPY(p->payload, ip_options, optlen);
      if (optlen < optlen_aligned) {
        /* zero the remaining bytes */
        memset(((char*)p->payload) + optlen, 0, optlen_aligned - optlen);
      }
    }
#endif /* IP_OPTIONS_SEND */
    /* generate IP header */
    if (pbuf_header(p, IP_HLEN)) {
      LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip_output: not enough room for IP header in pbuf\n"));

      IP_STATS_INC(ip.err);
      snmp_inc_ipoutdiscards();
      return ERR_BUF;
    }

    iphdr = p->payload;
    LWIP_ASSERT("check that first pbuf can hold struct ip_hdr",
               (p->len >= sizeof(struct ip_hdr)));

    IPH_TTL_SET(iphdr, ttl);
    IPH_PROTO_SET(iphdr, proto);

    ip_addr_set(&(iphdr->dest), dest);

    IPH_VHLTOS_SET(iphdr, 4, ip_hlen / 4, tos);
    IPH_LEN_SET(iphdr, htons(p->tot_len));
    IPH_OFFSET_SET(iphdr, 0);
    IPH_ID_SET(iphdr, htons(ip_id));
    ++ip_id;

    /* fill in the source address: the outgoing netif's address if caller
     * passed "any", otherwise the caller-supplied one */
    if (ip_addr_isany(src)) {
      ip_addr_set(&(iphdr->src), &(netif->ip_addr));
    } else {
      ip_addr_set(&(iphdr->src), src);
    }

    IPH_CHKSUM_SET(iphdr, 0);
#if CHECKSUM_GEN_IP
    IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, ip_hlen));
#endif
  } else {
    /* IP header already included in p */
    iphdr = p->payload;
    dest = &(iphdr->dest);
  }

  IP_STATS_INC(ip.xmit);

  LWIP_DEBUGF(IP_DEBUG, ("ip_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], netif->num));
  ip_debug_print(p);

#if ENABLE_LOOPBACK
  if (ip_addr_cmp(dest, &netif->ip_addr)) {
    /* Packet to self, enqueue it for loopback */
    LWIP_DEBUGF(IP_DEBUG, ("netif_loop_output()"));
    return netif_loop_output(netif, p, dest);
  }
#endif /* ENABLE_LOOPBACK */
#if IP_FRAG
  /* don't fragment if interface has mtu set to 0 [loopif] */
  if (netif->mtu && (p->tot_len > netif->mtu)) {
    return ip_frag(p,netif,dest);
  }
#endif

  LWIP_DEBUGF(IP_DEBUG, ("netif->output()"));
  return netif->output(netif, p, dest);
}

/**
 * Simple interface to ip_output_if. It finds the outgoing network
 * interface and calls upon ip_output_if to do the actual work.
 *
 * @param p the packet to send (p->payload points to the data, e.g. next
 *          protocol header; if dest == IP_HDRINCL, p already includes an
 *          IP header and p->payload points to that IP header)
 * @param src the source IP address to send from (if src == IP_ADDR_ANY, the
 *         IP address of the netif used to send is used as source address)
 * @param dest the destination IP address to send the packet to
 * @param ttl the TTL value to be set in the IP header
 * @param tos the TOS value to be set in the IP header
 * @param proto the PROTOCOL to be set in the IP header
 *
 * @return ERR_RTE if no route is found
 *         see ip_output_if() for more return values
 */
err_t
ip_output(struct pbuf *p, struct ip_addr *src, struct ip_addr *dest,
          u8_t ttl, u8_t tos, u8_t proto)
{
  struct netif *netif;

  if ((netif = ip_route(dest)) == NULL) {
    LWIP_DEBUGF(IP_DEBUG, ("ip_output: No route to 0x%"X32_F"\n", dest->addr));
    IP_STATS_INC(ip.rterr);
    return ERR_RTE;
  }

  return ip_output_if(p, src, dest, ttl, tos, proto, netif);
}

#if LWIP_NETIF_HWADDRHINT
/** Like ip_output, but takes and addr_hint pointer that is passed on to netif->addr_hint
 *  before calling ip_output_if.
 *
 * @param p the packet to send (p->payload points to the data, e.g.
next protocol header; if dest == IP_HDRINCL, p already includes an IP header and p->payload points to that IP header)
 * @param src the source IP address to send from (if src == IP_ADDR_ANY, the
 *         IP address of the netif used to send is used as source address)
 * @param dest the destination IP address to send the packet to
 * @param ttl the TTL value to be set in the IP header
 * @param tos the TOS value to be set in the IP header
 * @param proto the PROTOCOL to be set in the IP header
 * @param addr_hint address hint pointer set to netif->addr_hint before
 *        calling ip_output_if()
 *
 * @return ERR_RTE if no route is found
 *         see ip_output_if() for more return values
 */
err_t
ip_output_hinted(struct pbuf *p, struct ip_addr *src, struct ip_addr *dest,
          u8_t ttl, u8_t tos, u8_t proto, u8_t *addr_hint)
{
  struct netif *netif;
  err_t err;

  if ((netif = ip_route(dest)) == NULL) {
    LWIP_DEBUGF(IP_DEBUG, ("ip_output: No route to 0x%"X32_F"\n", dest->addr));
    IP_STATS_INC(ip.rterr);
    return ERR_RTE;
  }

  /* publish the hint on the netif only for the duration of this send,
   * then clear it again */
  netif->addr_hint = addr_hint;
  err = ip_output_if(p, src, dest, ttl, tos, proto, netif);
  netif->addr_hint = NULL;

  return err;
}
#endif /* LWIP_NETIF_HWADDRHINT*/

#if IP_DEBUG
/* Print an IP header by using LWIP_DEBUGF
 * @param p an IP packet, p->payload pointing to the IP header
 */
void
ip_debug_print(struct pbuf *p)
{
  struct ip_hdr *iphdr = p->payload;
  u8_t *payload;

  /* NOTE(review): 'payload' is computed but not used below */
  payload = (u8_t *)iphdr + IP_HLEN;

  LWIP_DEBUGF(IP_DEBUG, ("IP header:\n"));
  LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
  LWIP_DEBUGF(IP_DEBUG, ("|%2"S16_F" |%2"S16_F" | 0x%02"X16_F" | %5"U16_F" | (v, hl, tos, len)\n",
                    IPH_V(iphdr),
                    IPH_HL(iphdr),
                    IPH_TOS(iphdr),
                    ntohs(IPH_LEN(iphdr))));
  LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
  LWIP_DEBUGF(IP_DEBUG, ("| %5"U16_F" |%"U16_F"%"U16_F"%"U16_F"| %4"U16_F" | (id, flags, offset)\n",
                    ntohs(IPH_ID(iphdr)),
                    ntohs(IPH_OFFSET(iphdr)) >> 15 & 1,
                    ntohs(IPH_OFFSET(iphdr)) >> 14 & 1,
                    ntohs(IPH_OFFSET(iphdr)) >> 13 & 1,
                    ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK));
  LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
  LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | 0x%04"X16_F" | (ttl, proto, chksum)\n",
                    IPH_TTL(iphdr),
                    IPH_PROTO(iphdr),
                    ntohs(IPH_CHKSUM(iphdr))));
  LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
  LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (src)\n",
                    ip4_addr1(&iphdr->src),
                    ip4_addr2(&iphdr->src),
                    ip4_addr3(&iphdr->src),
                    ip4_addr4(&iphdr->src)));
  LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
  LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (dest)\n",
                    ip4_addr1(&iphdr->dest),
                    ip4_addr2(&iphdr->dest),
                    ip4_addr3(&iphdr->dest),
                    ip4_addr4(&iphdr->dest)));
  LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
}
#endif /* IP_DEBUG */
gpl-2.0
1HLtd/linux
drivers/md/bitmap.c
155
60953
/* * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003 * * bitmap_create - sets up the bitmap structure * bitmap_destroy - destroys the bitmap structure * * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.: * - added disk storage for bitmap * - changes to allow various bitmap chunk sizes */ /* * Still to do: * * flush after percent set rather than just time based. (maybe both). */ #include <linux/blkdev.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/sched.h> #include <linux/list.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/buffer_head.h> #include <linux/seq_file.h> #include "md.h" #include "bitmap.h" static inline char *bmname(struct bitmap *bitmap) { return bitmap->mddev ? mdname(bitmap->mddev) : "mdX"; } /* * check a page and, if necessary, allocate it (or hijack it if the alloc fails) * * 1) check to see if this page is allocated, if it's not then try to alloc * 2) if the alloc fails, set the page's hijacked flag so we'll use the * page pointer directly as a counter * * if we find our page, we increment the page's refcount so that it stays * allocated while we're using it */ static int bitmap_checkpage(struct bitmap_counts *bitmap, unsigned long page, int create) __releases(bitmap->lock) __acquires(bitmap->lock) { unsigned char *mappage; if (page >= bitmap->pages) { /* This can happen if bitmap_start_sync goes beyond * End-of-device while looking for a whole page. * It is harmless. 
*/ return -EINVAL; } if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */ return 0; if (bitmap->bp[page].map) /* page is already allocated, just return */ return 0; if (!create) return -ENOENT; /* this page has not been allocated yet */ spin_unlock_irq(&bitmap->lock); mappage = kzalloc(PAGE_SIZE, GFP_NOIO); spin_lock_irq(&bitmap->lock); if (mappage == NULL) { pr_debug("md/bitmap: map page allocation failed, hijacking\n"); /* failed - set the hijacked flag so that we can use the * pointer as a counter */ if (!bitmap->bp[page].map) bitmap->bp[page].hijacked = 1; } else if (bitmap->bp[page].map || bitmap->bp[page].hijacked) { /* somebody beat us to getting the page */ kfree(mappage); return 0; } else { /* no page was in place and we have one, so install it */ bitmap->bp[page].map = mappage; bitmap->missing_pages--; } return 0; } /* if page is completely empty, put it back on the free list, or dealloc it */ /* if page was hijacked, unmark the flag so it might get alloced next time */ /* Note: lock should be held when calling this */ static void bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page) { char *ptr; if (bitmap->bp[page].count) /* page is still busy */ return; /* page is no longer in use, it can be released */ if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */ bitmap->bp[page].hijacked = 0; bitmap->bp[page].map = NULL; } else { /* normal case, free the page */ ptr = bitmap->bp[page].map; bitmap->bp[page].map = NULL; bitmap->missing_pages++; kfree(ptr); } } /* * bitmap file handling - read and write the bitmap file and its superblock */ /* * basic page I/O operations */ /* IO operations when bitmap is stored near all superblocks */ static int read_sb_page(struct mddev *mddev, loff_t offset, struct page *page, unsigned long index, int size) { /* choose a good rdev and read the page from there */ struct md_rdev *rdev; sector_t target; rdev_for_each(rdev, mddev) { if (! 
test_bit(In_sync, &rdev->flags) || test_bit(Faulty, &rdev->flags)) continue; target = offset + index * (PAGE_SIZE/512); if (sync_page_io(rdev, target, roundup(size, bdev_logical_block_size(rdev->bdev)), page, READ, true)) { page->index = index; return 0; } } return -EIO; } static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev) { /* Iterate the disks of an mddev, using rcu to protect access to the * linked list, and raising the refcount of devices we return to ensure * they don't disappear while in use. * As devices are only added or removed when raid_disk is < 0 and * nr_pending is 0 and In_sync is clear, the entries we return will * still be in the same position on the list when we re-enter * list_for_each_entry_continue_rcu. */ rcu_read_lock(); if (rdev == NULL) /* start at the beginning */ rdev = list_entry_rcu(&mddev->disks, struct md_rdev, same_set); else { /* release the previous rdev and start from there. */ rdev_dec_pending(rdev, mddev); } list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) { if (rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags)) { /* this is a usable devices */ atomic_inc(&rdev->nr_pending); rcu_read_unlock(); return rdev; } } rcu_read_unlock(); return NULL; } static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) { struct md_rdev *rdev = NULL; struct block_device *bdev; struct mddev *mddev = bitmap->mddev; struct bitmap_storage *store = &bitmap->storage; while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { int size = PAGE_SIZE; loff_t offset = mddev->bitmap_info.offset; bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev; if (page->index == store->file_pages-1) { int last_page_size = store->bytes & (PAGE_SIZE-1); if (last_page_size == 0) last_page_size = PAGE_SIZE; size = roundup(last_page_size, bdev_logical_block_size(bdev)); } /* Just make sure we aren't corrupting data or * metadata */ if (mddev->external) { /* Bitmap could be anywhere. 
*/ if (rdev->sb_start + offset + (page->index * (PAGE_SIZE/512)) > rdev->data_offset && rdev->sb_start + offset < (rdev->data_offset + mddev->dev_sectors + (PAGE_SIZE/512))) goto bad_alignment; } else if (offset < 0) { /* DATA BITMAP METADATA */ if (offset + (long)(page->index * (PAGE_SIZE/512)) + size/512 > 0) /* bitmap runs in to metadata */ goto bad_alignment; if (rdev->data_offset + mddev->dev_sectors > rdev->sb_start + offset) /* data runs in to bitmap */ goto bad_alignment; } else if (rdev->sb_start < rdev->data_offset) { /* METADATA BITMAP DATA */ if (rdev->sb_start + offset + page->index*(PAGE_SIZE/512) + size/512 > rdev->data_offset) /* bitmap runs in to data */ goto bad_alignment; } else { /* DATA METADATA BITMAP - no problems */ } md_super_write(mddev, rdev, rdev->sb_start + offset + page->index * (PAGE_SIZE/512), size, page); } if (wait) md_super_wait(mddev); return 0; bad_alignment: return -EINVAL; } static void bitmap_file_kick(struct bitmap *bitmap); /* * write out a page to a file */ static void write_page(struct bitmap *bitmap, struct page *page, int wait) { struct buffer_head *bh; if (bitmap->storage.file == NULL) { switch (write_sb_page(bitmap, page, wait)) { case -EINVAL: set_bit(BITMAP_WRITE_ERROR, &bitmap->flags); } } else { bh = page_buffers(page); while (bh && bh->b_blocknr) { atomic_inc(&bitmap->pending_writes); set_buffer_locked(bh); set_buffer_mapped(bh); submit_bh(WRITE | REQ_SYNC, bh); bh = bh->b_this_page; } if (wait) wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes)==0); } if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) bitmap_file_kick(bitmap); } static void end_bitmap_write(struct buffer_head *bh, int uptodate) { struct bitmap *bitmap = bh->b_private; if (!uptodate) set_bit(BITMAP_WRITE_ERROR, &bitmap->flags); if (atomic_dec_and_test(&bitmap->pending_writes)) wake_up(&bitmap->write_wait); } /* copied from buffer.c */ static void __clear_page_buffers(struct page *page) { ClearPagePrivate(page); 
set_page_private(page, 0); page_cache_release(page); } static void free_buffers(struct page *page) { struct buffer_head *bh; if (!PagePrivate(page)) return; bh = page_buffers(page); while (bh) { struct buffer_head *next = bh->b_this_page; free_buffer_head(bh); bh = next; } __clear_page_buffers(page); put_page(page); } /* read a page from a file. * We both read the page, and attach buffers to the page to record the * address of each block (using bmap). These addresses will be used * to write the block later, completely bypassing the filesystem. * This usage is similar to how swap files are handled, and allows us * to write to a file with no concerns of memory allocation failing. */ static int read_page(struct file *file, unsigned long index, struct bitmap *bitmap, unsigned long count, struct page *page) { int ret = 0; struct inode *inode = file_inode(file); struct buffer_head *bh; sector_t block; pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE, (unsigned long long)index << PAGE_SHIFT); bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0); if (!bh) { ret = -ENOMEM; goto out; } attach_page_buffers(page, bh); block = index << (PAGE_SHIFT - inode->i_blkbits); while (bh) { if (count == 0) bh->b_blocknr = 0; else { bh->b_blocknr = bmap(inode, block); if (bh->b_blocknr == 0) { /* Cannot use this file! 
*/ ret = -EINVAL; goto out; } bh->b_bdev = inode->i_sb->s_bdev; if (count < (1<<inode->i_blkbits)) count = 0; else count -= (1<<inode->i_blkbits); bh->b_end_io = end_bitmap_write; bh->b_private = bitmap; atomic_inc(&bitmap->pending_writes); set_buffer_locked(bh); set_buffer_mapped(bh); submit_bh(READ, bh); } block++; bh = bh->b_this_page; } page->index = index; wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes)==0); if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) ret = -EIO; out: if (ret) printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %d\n", (int)PAGE_SIZE, (unsigned long long)index << PAGE_SHIFT, ret); return ret; } /* * bitmap file superblock operations */ /* update the event counter and sync the superblock to disk */ void bitmap_update_sb(struct bitmap *bitmap) { bitmap_super_t *sb; if (!bitmap || !bitmap->mddev) /* no bitmap for this array */ return; if (bitmap->mddev->bitmap_info.external) return; if (!bitmap->storage.sb_page) /* no superblock */ return; sb = kmap_atomic(bitmap->storage.sb_page); sb->events = cpu_to_le64(bitmap->mddev->events); if (bitmap->mddev->events < bitmap->events_cleared) /* rocking back to read-only */ bitmap->events_cleared = bitmap->mddev->events; sb->events_cleared = cpu_to_le64(bitmap->events_cleared); sb->state = cpu_to_le32(bitmap->flags); /* Just in case these have been changed via sysfs: */ sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ); sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind); /* This might have been changed by a reshape */ sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize); sb->sectors_reserved = cpu_to_le32(bitmap->mddev-> bitmap_info.space); kunmap_atomic(sb); write_page(bitmap, bitmap->storage.sb_page, 1); } /* print out the bitmap file superblock */ void bitmap_print_sb(struct bitmap *bitmap) { bitmap_super_t *sb; if (!bitmap || !bitmap->storage.sb_page) 
return; sb = kmap_atomic(bitmap->storage.sb_page); printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap)); printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic)); printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version)); printk(KERN_DEBUG " uuid: %08x.%08x.%08x.%08x\n", *(__u32 *)(sb->uuid+0), *(__u32 *)(sb->uuid+4), *(__u32 *)(sb->uuid+8), *(__u32 *)(sb->uuid+12)); printk(KERN_DEBUG " events: %llu\n", (unsigned long long) le64_to_cpu(sb->events)); printk(KERN_DEBUG "events cleared: %llu\n", (unsigned long long) le64_to_cpu(sb->events_cleared)); printk(KERN_DEBUG " state: %08x\n", le32_to_cpu(sb->state)); printk(KERN_DEBUG " chunksize: %d B\n", le32_to_cpu(sb->chunksize)); printk(KERN_DEBUG " daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep)); printk(KERN_DEBUG " sync size: %llu KB\n", (unsigned long long)le64_to_cpu(sb->sync_size)/2); printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind)); kunmap_atomic(sb); } /* * bitmap_new_disk_sb * @bitmap * * This function is somewhat the reverse of bitmap_read_sb. bitmap_read_sb * reads and verifies the on-disk bitmap superblock and populates bitmap_info. * This function verifies 'bitmap_info' and populates the on-disk bitmap * structure, which is to be written to disk. 
* * Returns: 0 on success, -Exxx on error */ static int bitmap_new_disk_sb(struct bitmap *bitmap) { bitmap_super_t *sb; unsigned long chunksize, daemon_sleep, write_behind; bitmap->storage.sb_page = alloc_page(GFP_KERNEL); if (bitmap->storage.sb_page == NULL) return -ENOMEM; bitmap->storage.sb_page->index = 0; sb = kmap_atomic(bitmap->storage.sb_page); sb->magic = cpu_to_le32(BITMAP_MAGIC); sb->version = cpu_to_le32(BITMAP_MAJOR_HI); chunksize = bitmap->mddev->bitmap_info.chunksize; BUG_ON(!chunksize); if (!is_power_of_2(chunksize)) { kunmap_atomic(sb); printk(KERN_ERR "bitmap chunksize not a power of 2\n"); return -EINVAL; } sb->chunksize = cpu_to_le32(chunksize); daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep; if (!daemon_sleep || (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) { printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n"); daemon_sleep = 5 * HZ; } sb->daemon_sleep = cpu_to_le32(daemon_sleep); bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; /* * FIXME: write_behind for RAID1. If not specified, what * is a good choice? We choose COUNTER_MAX / 2 arbitrarily. 
*/ write_behind = bitmap->mddev->bitmap_info.max_write_behind; if (write_behind > COUNTER_MAX) write_behind = COUNTER_MAX / 2; sb->write_behind = cpu_to_le32(write_behind); bitmap->mddev->bitmap_info.max_write_behind = write_behind; /* keep the array size field of the bitmap superblock up to date */ sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); memcpy(sb->uuid, bitmap->mddev->uuid, 16); set_bit(BITMAP_STALE, &bitmap->flags); sb->state = cpu_to_le32(bitmap->flags); bitmap->events_cleared = bitmap->mddev->events; sb->events_cleared = cpu_to_le64(bitmap->mddev->events); kunmap_atomic(sb); return 0; } /* read the superblock from the bitmap file and initialize some bitmap fields */ static int bitmap_read_sb(struct bitmap *bitmap) { char *reason = NULL; bitmap_super_t *sb; unsigned long chunksize, daemon_sleep, write_behind; unsigned long long events; unsigned long sectors_reserved = 0; int err = -EINVAL; struct page *sb_page; if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { chunksize = 128 * 1024 * 1024; daemon_sleep = 5 * HZ; write_behind = 0; set_bit(BITMAP_STALE, &bitmap->flags); err = 0; goto out_no_sb; } /* page 0 is the superblock, read it... */ sb_page = alloc_page(GFP_KERNEL); if (!sb_page) return -ENOMEM; bitmap->storage.sb_page = sb_page; if (bitmap->storage.file) { loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host); int bytes = isize > PAGE_SIZE ? 
PAGE_SIZE : isize; err = read_page(bitmap->storage.file, 0, bitmap, bytes, sb_page); } else { err = read_sb_page(bitmap->mddev, bitmap->mddev->bitmap_info.offset, sb_page, 0, sizeof(bitmap_super_t)); } if (err) return err; sb = kmap_atomic(sb_page); chunksize = le32_to_cpu(sb->chunksize); daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; write_behind = le32_to_cpu(sb->write_behind); sectors_reserved = le32_to_cpu(sb->sectors_reserved); /* verify that the bitmap-specific fields are valid */ if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) reason = "bad magic"; else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO || le32_to_cpu(sb->version) > BITMAP_MAJOR_HI) reason = "unrecognized superblock version"; else if (chunksize < 512) reason = "bitmap chunksize too small"; else if (!is_power_of_2(chunksize)) reason = "bitmap chunksize not a power of 2"; else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT) reason = "daemon sleep period out of range"; else if (write_behind > COUNTER_MAX) reason = "write-behind limit out of range (0 - 16383)"; if (reason) { printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n", bmname(bitmap), reason); goto out; } /* keep the array size field of the bitmap superblock up to date */ sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); if (bitmap->mddev->persistent) { /* * We have a persistent array superblock, so compare the * bitmap's UUID and event counter to the mddev's */ if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) { printk(KERN_INFO "%s: bitmap superblock UUID mismatch\n", bmname(bitmap)); goto out; } events = le64_to_cpu(sb->events); if (events < bitmap->mddev->events) { printk(KERN_INFO "%s: bitmap file is out of date (%llu < %llu) " "-- forcing full recovery\n", bmname(bitmap), events, (unsigned long long) bitmap->mddev->events); set_bit(BITMAP_STALE, &bitmap->flags); } } /* assign fields using values from superblock */ bitmap->flags |= le32_to_cpu(sb->state); if (le32_to_cpu(sb->version) == 
BITMAP_MAJOR_HOSTENDIAN) set_bit(BITMAP_HOSTENDIAN, &bitmap->flags); bitmap->events_cleared = le64_to_cpu(sb->events_cleared); err = 0; out: kunmap_atomic(sb); out_no_sb: if (test_bit(BITMAP_STALE, &bitmap->flags)) bitmap->events_cleared = bitmap->mddev->events; bitmap->mddev->bitmap_info.chunksize = chunksize; bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; bitmap->mddev->bitmap_info.max_write_behind = write_behind; if (bitmap->mddev->bitmap_info.space == 0 || bitmap->mddev->bitmap_info.space > sectors_reserved) bitmap->mddev->bitmap_info.space = sectors_reserved; if (err) bitmap_print_sb(bitmap); return err; } /* * general bitmap file operations */ /* * on-disk bitmap: * * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap * file a page at a time. There's a superblock at the start of the file. */ /* calculate the index of the page that contains this bit */ static inline unsigned long file_page_index(struct bitmap_storage *store, unsigned long chunk) { if (store->sb_page) chunk += sizeof(bitmap_super_t) << 3; return chunk >> PAGE_BIT_SHIFT; } /* calculate the (bit) offset of this bit within a page */ static inline unsigned long file_page_offset(struct bitmap_storage *store, unsigned long chunk) { if (store->sb_page) chunk += sizeof(bitmap_super_t) << 3; return chunk & (PAGE_BITS - 1); } /* * return a pointer to the page in the filemap that contains the given bit * * this lookup is complicated by the fact that the bitmap sb might be exactly * 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page * 0 or page 1 */ static inline struct page *filemap_get_page(struct bitmap_storage *store, unsigned long chunk) { if (file_page_index(store, chunk) >= store->file_pages) return NULL; return store->filemap[file_page_index(store, chunk) - file_page_index(store, 0)]; } static int bitmap_storage_alloc(struct bitmap_storage *store, unsigned long chunks, int with_super) { int pnum; unsigned long num_pages; unsigned long bytes; 
bytes = DIV_ROUND_UP(chunks, 8); if (with_super) bytes += sizeof(bitmap_super_t); num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE); store->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL); if (!store->filemap) return -ENOMEM; if (with_super && !store->sb_page) { store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO); if (store->sb_page == NULL) return -ENOMEM; store->sb_page->index = 0; } pnum = 0; if (store->sb_page) { store->filemap[0] = store->sb_page; pnum = 1; } for ( ; pnum < num_pages; pnum++) { store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO); if (!store->filemap[pnum]) { store->file_pages = pnum; return -ENOMEM; } store->filemap[pnum]->index = pnum; } store->file_pages = pnum; /* We need 4 bits per page, rounded up to a multiple * of sizeof(unsigned long) */ store->filemap_attr = kzalloc( roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)), GFP_KERNEL); if (!store->filemap_attr) return -ENOMEM; store->bytes = bytes; return 0; } static void bitmap_file_unmap(struct bitmap_storage *store) { struct page **map, *sb_page; int pages; struct file *file; file = store->file; map = store->filemap; pages = store->file_pages; sb_page = store->sb_page; while (pages--) if (map[pages] != sb_page) /* 0 is sb_page, release it below */ free_buffers(map[pages]); kfree(map); kfree(store->filemap_attr); if (sb_page) free_buffers(sb_page); if (file) { struct inode *inode = file_inode(file); invalidate_mapping_pages(inode->i_mapping, 0, -1); fput(file); } } /* * bitmap_file_kick - if an error occurs while manipulating the bitmap file * then it is no longer reliable, so we stop using it and we mark the file * as failed in the superblock */ static void bitmap_file_kick(struct bitmap *bitmap) { char *path, *ptr = NULL; if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) { bitmap_update_sb(bitmap); if (bitmap->storage.file) { path = kmalloc(PAGE_SIZE, GFP_KERNEL); if (path) ptr = d_path(&bitmap->storage.file->f_path, path, PAGE_SIZE); printk(KERN_ALERT "%s: 
kicking failed bitmap file %s from array!\n", bmname(bitmap), IS_ERR(ptr) ? "" : ptr); kfree(path); } else printk(KERN_ALERT "%s: disabling internal bitmap due to errors\n", bmname(bitmap)); } } enum bitmap_page_attr { BITMAP_PAGE_DIRTY = 0, /* there are set bits that need to be synced */ BITMAP_PAGE_PENDING = 1, /* there are bits that are being cleaned. * i.e. counter is 1 or 2. */ BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */ }; static inline void set_page_attr(struct bitmap *bitmap, int pnum, enum bitmap_page_attr attr) { set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); } static inline void clear_page_attr(struct bitmap *bitmap, int pnum, enum bitmap_page_attr attr) { clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); } static inline int test_page_attr(struct bitmap *bitmap, int pnum, enum bitmap_page_attr attr) { return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); } static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum, enum bitmap_page_attr attr) { return test_and_clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); } /* * bitmap_file_set_bit -- called before performing a write to the md device * to set (and eventually sync) a particular bit in the bitmap file * * we set the bit immediately, then we record the page number so that * when an unplug occurs, we can flush the dirty pages out to disk */ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) { unsigned long bit; struct page *page; void *kaddr; unsigned long chunk = block >> bitmap->counts.chunkshift; page = filemap_get_page(&bitmap->storage, chunk); if (!page) return; bit = file_page_offset(&bitmap->storage, chunk); /* set the bit */ kaddr = kmap_atomic(page); if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) set_bit(bit, kaddr); else set_bit_le(bit, kaddr); kunmap_atomic(kaddr); pr_debug("set file bit %lu page %lu\n", bit, page->index); /* record page number so it gets flushed to disk when unplug 
occurs */ set_page_attr(bitmap, page->index, BITMAP_PAGE_DIRTY); } static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block) { unsigned long bit; struct page *page; void *paddr; unsigned long chunk = block >> bitmap->counts.chunkshift; page = filemap_get_page(&bitmap->storage, chunk); if (!page) return; bit = file_page_offset(&bitmap->storage, chunk); paddr = kmap_atomic(page); if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) clear_bit(bit, paddr); else clear_bit_le(bit, paddr); kunmap_atomic(paddr); if (!test_page_attr(bitmap, page->index, BITMAP_PAGE_NEEDWRITE)) { set_page_attr(bitmap, page->index, BITMAP_PAGE_PENDING); bitmap->allclean = 0; } } /* this gets called when the md device is ready to unplug its underlying * (slave) device queues -- before we let any writes go down, we need to * sync the dirty pages of the bitmap file to disk */ void bitmap_unplug(struct bitmap *bitmap) { unsigned long i; int dirty, need_write; int wait = 0; if (!bitmap || !bitmap->storage.filemap || test_bit(BITMAP_STALE, &bitmap->flags)) return; /* look at each page to see if there are any set bits that need to be * flushed out to disk */ for (i = 0; i < bitmap->storage.file_pages; i++) { if (!bitmap->storage.filemap) return; dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY); need_write = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); if (dirty || need_write) { clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING); write_page(bitmap, bitmap->storage.filemap[i], 0); } if (dirty) wait = 1; } if (wait) { /* if any writes were performed, we need to wait on them */ if (bitmap->storage.file) wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes)==0); else md_super_wait(bitmap->mddev); } if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) bitmap_file_kick(bitmap); } EXPORT_SYMBOL(bitmap_unplug); static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed); /* * bitmap_init_from_disk -- called at bitmap_create time to 
initialize * the in-memory bitmap from the on-disk bitmap -- also, sets up the * memory mapping of the bitmap file * Special cases: * if there's no bitmap file, or if the bitmap file had been * previously kicked from the array, we mark all the bits as * 1's in order to cause a full resync. * * We ignore all bits for sectors that end earlier than 'start'. * This is used when reading an out-of-date bitmap... */ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) { unsigned long i, chunks, index, oldindex, bit; struct page *page = NULL; unsigned long bit_cnt = 0; struct file *file; unsigned long offset; int outofdate; int ret = -ENOSPC; void *paddr; struct bitmap_storage *store = &bitmap->storage; chunks = bitmap->counts.chunks; file = store->file; if (!file && !bitmap->mddev->bitmap_info.offset) { /* No permanent bitmap - fill with '1s'. */ store->filemap = NULL; store->file_pages = 0; for (i = 0; i < chunks ; i++) { /* if the disk bit is set, set the memory bit */ int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift) >= start); bitmap_set_memory_bits(bitmap, (sector_t)i << bitmap->counts.chunkshift, needed); } return 0; } outofdate = test_bit(BITMAP_STALE, &bitmap->flags); if (outofdate) printk(KERN_INFO "%s: bitmap file is out of date, doing full " "recovery\n", bmname(bitmap)); if (file && i_size_read(file->f_mapping->host) < store->bytes) { printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n", bmname(bitmap), (unsigned long) i_size_read(file->f_mapping->host), store->bytes); goto err; } oldindex = ~0L; offset = 0; if (!bitmap->mddev->bitmap_info.external) offset = sizeof(bitmap_super_t); for (i = 0; i < chunks; i++) { int b; index = file_page_index(&bitmap->storage, i); bit = file_page_offset(&bitmap->storage, i); if (index != oldindex) { /* this is a new page, read it in */ int count; /* unmap the old page, we're done with it */ if (index == store->file_pages-1) count = store->bytes - index * PAGE_SIZE; else count = PAGE_SIZE; page 
= store->filemap[index]; if (file) ret = read_page(file, index, bitmap, count, page); else ret = read_sb_page( bitmap->mddev, bitmap->mddev->bitmap_info.offset, page, index, count); if (ret) goto err; oldindex = index; if (outofdate) { /* * if bitmap is out of date, dirty the * whole page and write it out */ paddr = kmap_atomic(page); memset(paddr + offset, 0xff, PAGE_SIZE - offset); kunmap_atomic(paddr); write_page(bitmap, page, 1); ret = -EIO; if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) goto err; } } paddr = kmap_atomic(page); if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) b = test_bit(bit, paddr); else b = test_bit_le(bit, paddr); kunmap_atomic(paddr); if (b) { /* if the disk bit is set, set the memory bit */ int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift >= start); bitmap_set_memory_bits(bitmap, (sector_t)i << bitmap->counts.chunkshift, needed); bit_cnt++; } offset = 0; } printk(KERN_INFO "%s: bitmap initialized from disk: " "read %lu pages, set %lu of %lu bits\n", bmname(bitmap), store->file_pages, bit_cnt, chunks); return 0; err: printk(KERN_INFO "%s: bitmap initialisation failed: %d\n", bmname(bitmap), ret); return ret; } void bitmap_write_all(struct bitmap *bitmap) { /* We don't actually write all bitmap blocks here, * just flag them as needing to be written */ int i; if (!bitmap || !bitmap->storage.filemap) return; if (bitmap->storage.file) /* Only one copy, so nothing needed */ return; for (i = 0; i < bitmap->storage.file_pages; i++) set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); bitmap->allclean = 0; } static void bitmap_count_page(struct bitmap_counts *bitmap, sector_t offset, int inc) { sector_t chunk = offset >> bitmap->chunkshift; unsigned long page = chunk >> PAGE_COUNTER_SHIFT; bitmap->bp[page].count += inc; bitmap_checkfree(bitmap, page); } static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset) { sector_t chunk = offset >> bitmap->chunkshift; unsigned long page = chunk >> PAGE_COUNTER_SHIFT; struct 
bitmap_page *bp = &bitmap->bp[page]; if (!bp->pending) bp->pending = 1; } static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap, sector_t offset, sector_t *blocks, int create); /* * bitmap daemon -- periodically wakes up to clean bits and flush pages * out to disk */ void bitmap_daemon_work(struct mddev *mddev) { struct bitmap *bitmap; unsigned long j; unsigned long nextpage; sector_t blocks; struct bitmap_counts *counts; /* Use a mutex to guard daemon_work against * bitmap_destroy. */ mutex_lock(&mddev->bitmap_info.mutex); bitmap = mddev->bitmap; if (bitmap == NULL) { mutex_unlock(&mddev->bitmap_info.mutex); return; } if (time_before(jiffies, bitmap->daemon_lastrun + mddev->bitmap_info.daemon_sleep)) goto done; bitmap->daemon_lastrun = jiffies; if (bitmap->allclean) { mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; goto done; } bitmap->allclean = 1; /* Any file-page which is PENDING now needs to be written. * So set NEEDWRITE now, then after we make any last-minute changes * we will write it. */ for (j = 0; j < bitmap->storage.file_pages; j++) if (test_and_clear_page_attr(bitmap, j, BITMAP_PAGE_PENDING)) set_page_attr(bitmap, j, BITMAP_PAGE_NEEDWRITE); if (bitmap->need_sync && mddev->bitmap_info.external == 0) { /* Arrange for superblock update as well as * other changes */ bitmap_super_t *sb; bitmap->need_sync = 0; if (bitmap->storage.filemap) { sb = kmap_atomic(bitmap->storage.sb_page); sb->events_cleared = cpu_to_le64(bitmap->events_cleared); kunmap_atomic(sb); set_page_attr(bitmap, 0, BITMAP_PAGE_NEEDWRITE); } } /* Now look at the bitmap counters and if any are '2' or '1', * decrement and handle accordingly. 
*/ counts = &bitmap->counts; spin_lock_irq(&counts->lock); nextpage = 0; for (j = 0; j < counts->chunks; j++) { bitmap_counter_t *bmc; sector_t block = (sector_t)j << counts->chunkshift; if (j == nextpage) { nextpage += PAGE_COUNTER_RATIO; if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) { j |= PAGE_COUNTER_MASK; continue; } counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0; } bmc = bitmap_get_counter(counts, block, &blocks, 0); if (!bmc) { j |= PAGE_COUNTER_MASK; continue; } if (*bmc == 1 && !bitmap->need_sync) { /* We can clear the bit */ *bmc = 0; bitmap_count_page(counts, block, -1); bitmap_file_clear_bit(bitmap, block); } else if (*bmc && *bmc <= 2) { *bmc = 1; bitmap_set_pending(counts, block); bitmap->allclean = 0; } } spin_unlock_irq(&counts->lock); /* Now start writeout on any page in NEEDWRITE that isn't DIRTY. * DIRTY pages need to be written by bitmap_unplug so it can wait * for them. * If we find any DIRTY page we stop there and let bitmap_unplug * handle all the rest. This is important in the case where * the first blocking holds the superblock and it has been updated. * We mustn't write any other blocks before the superblock. */ for (j = 0; j < bitmap->storage.file_pages && !test_bit(BITMAP_STALE, &bitmap->flags); j++) { if (test_page_attr(bitmap, j, BITMAP_PAGE_DIRTY)) /* bitmap_unplug will handle the rest */ break; if (test_and_clear_page_attr(bitmap, j, BITMAP_PAGE_NEEDWRITE)) { write_page(bitmap, bitmap->storage.filemap[j], 0); } } done: if (bitmap->allclean == 0) mddev->thread->timeout = mddev->bitmap_info.daemon_sleep; mutex_unlock(&mddev->bitmap_info.mutex); } static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap, sector_t offset, sector_t *blocks, int create) __releases(bitmap->lock) __acquires(bitmap->lock) { /* If 'create', we might release the lock and reclaim it. * The lock must have been taken with interrupts enabled. * If !create, we don't release the lock. 
*/ sector_t chunk = offset >> bitmap->chunkshift; unsigned long page = chunk >> PAGE_COUNTER_SHIFT; unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT; sector_t csize; int err; err = bitmap_checkpage(bitmap, page, create); if (bitmap->bp[page].hijacked || bitmap->bp[page].map == NULL) csize = ((sector_t)1) << (bitmap->chunkshift + PAGE_COUNTER_SHIFT - 1); else csize = ((sector_t)1) << bitmap->chunkshift; *blocks = csize - (offset & (csize - 1)); if (err < 0) return NULL; /* now locked ... */ if (bitmap->bp[page].hijacked) { /* hijacked pointer */ /* should we use the first or second counter field * of the hijacked pointer? */ int hi = (pageoff > PAGE_COUNTER_MASK); return &((bitmap_counter_t *) &bitmap->bp[page].map)[hi]; } else /* page is allocated */ return (bitmap_counter_t *) &(bitmap->bp[page].map[pageoff]); } int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind) { if (!bitmap) return 0; if (behind) { int bw; atomic_inc(&bitmap->behind_writes); bw = atomic_read(&bitmap->behind_writes); if (bw > bitmap->behind_writes_used) bitmap->behind_writes_used = bw; pr_debug("inc write-behind count %d/%lu\n", bw, bitmap->mddev->bitmap_info.max_write_behind); } while (sectors) { sector_t blocks; bitmap_counter_t *bmc; spin_lock_irq(&bitmap->counts.lock); bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 1); if (!bmc) { spin_unlock_irq(&bitmap->counts.lock); return 0; } if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) { DEFINE_WAIT(__wait); /* note that it is safe to do the prepare_to_wait * after the test as long as we do it before dropping * the spinlock. 
*/ prepare_to_wait(&bitmap->overflow_wait, &__wait, TASK_UNINTERRUPTIBLE); spin_unlock_irq(&bitmap->counts.lock); schedule(); finish_wait(&bitmap->overflow_wait, &__wait); continue; } switch (*bmc) { case 0: bitmap_file_set_bit(bitmap, offset); bitmap_count_page(&bitmap->counts, offset, 1); /* fall through */ case 1: *bmc = 2; } (*bmc)++; spin_unlock_irq(&bitmap->counts.lock); offset += blocks; if (sectors > blocks) sectors -= blocks; else sectors = 0; } return 0; } EXPORT_SYMBOL(bitmap_startwrite); void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int success, int behind) { if (!bitmap) return; if (behind) { if (atomic_dec_and_test(&bitmap->behind_writes)) wake_up(&bitmap->behind_wait); pr_debug("dec write-behind count %d/%lu\n", atomic_read(&bitmap->behind_writes), bitmap->mddev->bitmap_info.max_write_behind); } while (sectors) { sector_t blocks; unsigned long flags; bitmap_counter_t *bmc; spin_lock_irqsave(&bitmap->counts.lock, flags); bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 0); if (!bmc) { spin_unlock_irqrestore(&bitmap->counts.lock, flags); return; } if (success && !bitmap->mddev->degraded && bitmap->events_cleared < bitmap->mddev->events) { bitmap->events_cleared = bitmap->mddev->events; bitmap->need_sync = 1; sysfs_notify_dirent_safe(bitmap->sysfs_can_clear); } if (!success && !NEEDED(*bmc)) *bmc |= NEEDED_MASK; if (COUNTER(*bmc) == COUNTER_MAX) wake_up(&bitmap->overflow_wait); (*bmc)--; if (*bmc <= 2) { bitmap_set_pending(&bitmap->counts, offset); bitmap->allclean = 0; } spin_unlock_irqrestore(&bitmap->counts.lock, flags); offset += blocks; if (sectors > blocks) sectors -= blocks; else sectors = 0; } } EXPORT_SYMBOL(bitmap_endwrite); static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded) { bitmap_counter_t *bmc; int rv; if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */ *blocks = 1024; return 1; /* always resync if no bitmap */ } 
spin_lock_irq(&bitmap->counts.lock); bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0); rv = 0; if (bmc) { /* locked */ if (RESYNC(*bmc)) rv = 1; else if (NEEDED(*bmc)) { rv = 1; if (!degraded) { /* don't set/clear bits if degraded */ *bmc |= RESYNC_MASK; *bmc &= ~NEEDED_MASK; } } } spin_unlock_irq(&bitmap->counts.lock); return rv; } int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded) { /* bitmap_start_sync must always report on multiples of whole * pages, otherwise resync (which is very PAGE_SIZE based) will * get confused. * So call __bitmap_start_sync repeatedly (if needed) until * At least PAGE_SIZE>>9 blocks are covered. * Return the 'or' of the result. */ int rv = 0; sector_t blocks1; *blocks = 0; while (*blocks < (PAGE_SIZE>>9)) { rv |= __bitmap_start_sync(bitmap, offset, &blocks1, degraded); offset += blocks1; *blocks += blocks1; } return rv; } EXPORT_SYMBOL(bitmap_start_sync); void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted) { bitmap_counter_t *bmc; unsigned long flags; if (bitmap == NULL) { *blocks = 1024; return; } spin_lock_irqsave(&bitmap->counts.lock, flags); bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0); if (bmc == NULL) goto unlock; /* locked */ if (RESYNC(*bmc)) { *bmc &= ~RESYNC_MASK; if (!NEEDED(*bmc) && aborted) *bmc |= NEEDED_MASK; else { if (*bmc <= 2) { bitmap_set_pending(&bitmap->counts, offset); bitmap->allclean = 0; } } } unlock: spin_unlock_irqrestore(&bitmap->counts.lock, flags); } EXPORT_SYMBOL(bitmap_end_sync); void bitmap_close_sync(struct bitmap *bitmap) { /* Sync has finished, and any bitmap chunks that weren't synced * properly have been aborted. 
It remains to us to clear the * RESYNC bit wherever it is still on */ sector_t sector = 0; sector_t blocks; if (!bitmap) return; while (sector < bitmap->mddev->resync_max_sectors) { bitmap_end_sync(bitmap, sector, &blocks, 0); sector += blocks; } } EXPORT_SYMBOL(bitmap_close_sync); void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) { sector_t s = 0; sector_t blocks; if (!bitmap) return; if (sector == 0) { bitmap->last_end_sync = jiffies; return; } if (time_before(jiffies, (bitmap->last_end_sync + bitmap->mddev->bitmap_info.daemon_sleep))) return; wait_event(bitmap->mddev->recovery_wait, atomic_read(&bitmap->mddev->recovery_active) == 0); bitmap->mddev->curr_resync_completed = sector; set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags); sector &= ~((1ULL << bitmap->counts.chunkshift) - 1); s = 0; while (s < sector && s < bitmap->mddev->resync_max_sectors) { bitmap_end_sync(bitmap, s, &blocks, 0); s += blocks; } bitmap->last_end_sync = jiffies; sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed"); } EXPORT_SYMBOL(bitmap_cond_end_sync); static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed) { /* For each chunk covered by any of these sectors, set the * counter to 2 and possibly set resync_needed. They should all * be 0 at this point */ sector_t secs; bitmap_counter_t *bmc; spin_lock_irq(&bitmap->counts.lock); bmc = bitmap_get_counter(&bitmap->counts, offset, &secs, 1); if (!bmc) { spin_unlock_irq(&bitmap->counts.lock); return; } if (!*bmc) { *bmc = 2 | (needed ? 
NEEDED_MASK : 0); bitmap_count_page(&bitmap->counts, offset, 1); bitmap_set_pending(&bitmap->counts, offset); bitmap->allclean = 0; } spin_unlock_irq(&bitmap->counts.lock); } /* dirty the memory and file bits for bitmap chunks "s" to "e" */ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e) { unsigned long chunk; for (chunk = s; chunk <= e; chunk++) { sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift; bitmap_set_memory_bits(bitmap, sec, 1); bitmap_file_set_bit(bitmap, sec); if (sec < bitmap->mddev->recovery_cp) /* We are asserting that the array is dirty, * so move the recovery_cp address back so * that it is obvious that it is dirty */ bitmap->mddev->recovery_cp = sec; } } /* * flush out any pending updates */ void bitmap_flush(struct mddev *mddev) { struct bitmap *bitmap = mddev->bitmap; long sleep; if (!bitmap) /* there was no bitmap */ return; /* run the daemon_work three time to ensure everything is flushed * that can be */ sleep = mddev->bitmap_info.daemon_sleep * 2; bitmap->daemon_lastrun -= sleep; bitmap_daemon_work(mddev); bitmap->daemon_lastrun -= sleep; bitmap_daemon_work(mddev); bitmap->daemon_lastrun -= sleep; bitmap_daemon_work(mddev); bitmap_update_sb(bitmap); } /* * free memory that was allocated */ static void bitmap_free(struct bitmap *bitmap) { unsigned long k, pages; struct bitmap_page *bp; if (!bitmap) /* there was no bitmap */ return; /* Shouldn't be needed - but just in case.... 
*/ wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes) == 0); /* release the bitmap file */ bitmap_file_unmap(&bitmap->storage); bp = bitmap->counts.bp; pages = bitmap->counts.pages; /* free all allocated memory */ if (bp) /* deallocate the page memory */ for (k = 0; k < pages; k++) if (bp[k].map && !bp[k].hijacked) kfree(bp[k].map); kfree(bp); kfree(bitmap); } void bitmap_destroy(struct mddev *mddev) { struct bitmap *bitmap = mddev->bitmap; if (!bitmap) /* there was no bitmap */ return; mutex_lock(&mddev->bitmap_info.mutex); mddev->bitmap = NULL; /* disconnect from the md device */ mutex_unlock(&mddev->bitmap_info.mutex); if (mddev->thread) mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; if (bitmap->sysfs_can_clear) sysfs_put(bitmap->sysfs_can_clear); bitmap_free(bitmap); } /* * initialize the bitmap structure * if this returns an error, bitmap_destroy must be called to do clean up */ int bitmap_create(struct mddev *mddev) { struct bitmap *bitmap; sector_t blocks = mddev->resync_max_sectors; struct file *file = mddev->bitmap_info.file; int err; struct kernfs_node *bm = NULL; BUILD_BUG_ON(sizeof(bitmap_super_t) != 256); BUG_ON(file && mddev->bitmap_info.offset); bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); if (!bitmap) return -ENOMEM; spin_lock_init(&bitmap->counts.lock); atomic_set(&bitmap->pending_writes, 0); init_waitqueue_head(&bitmap->write_wait); init_waitqueue_head(&bitmap->overflow_wait); init_waitqueue_head(&bitmap->behind_wait); bitmap->mddev = mddev; if (mddev->kobj.sd) bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap"); if (bm) { bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear"); sysfs_put(bm); } else bitmap->sysfs_can_clear = NULL; bitmap->storage.file = file; if (file) { get_file(file); /* As future accesses to this file will use bmap, * and bypass the page cache, we must sync the file * first. 
*/ vfs_fsync(file, 1); } /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */ if (!mddev->bitmap_info.external) { /* * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is * instructing us to create a new on-disk bitmap instance. */ if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags)) err = bitmap_new_disk_sb(bitmap); else err = bitmap_read_sb(bitmap); } else { err = 0; if (mddev->bitmap_info.chunksize == 0 || mddev->bitmap_info.daemon_sleep == 0) /* chunksize and time_base need to be * set first. */ err = -EINVAL; } if (err) goto error; bitmap->daemon_lastrun = jiffies; err = bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1); if (err) goto error; printk(KERN_INFO "created bitmap (%lu pages) for device %s\n", bitmap->counts.pages, bmname(bitmap)); mddev->bitmap = bitmap; return test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0; error: bitmap_free(bitmap); return err; } int bitmap_load(struct mddev *mddev) { int err = 0; sector_t start = 0; sector_t sector = 0; struct bitmap *bitmap = mddev->bitmap; if (!bitmap) goto out; /* Clear out old bitmap info first: Either there is none, or we * are resuming after someone else has possibly changed things, * so we should forget old cached info. * All chunks should be clean, but some might need_sync. 
*/ while (sector < mddev->resync_max_sectors) { sector_t blocks; bitmap_start_sync(bitmap, sector, &blocks, 0); sector += blocks; } bitmap_close_sync(bitmap); if (mddev->degraded == 0 || bitmap->events_cleared == mddev->events) /* no need to keep dirty bits to optimise a * re-add of a missing device */ start = mddev->recovery_cp; mutex_lock(&mddev->bitmap_info.mutex); err = bitmap_init_from_disk(bitmap, start); mutex_unlock(&mddev->bitmap_info.mutex); if (err) goto out; clear_bit(BITMAP_STALE, &bitmap->flags); /* Kick recovery in case any bits were set */ set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery); mddev->thread->timeout = mddev->bitmap_info.daemon_sleep; md_wakeup_thread(mddev->thread); bitmap_update_sb(bitmap); if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) err = -EIO; out: return err; } EXPORT_SYMBOL_GPL(bitmap_load); void bitmap_status(struct seq_file *seq, struct bitmap *bitmap) { unsigned long chunk_kb; struct bitmap_counts *counts; if (!bitmap) return; counts = &bitmap->counts; chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10; seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " "%lu%s chunk", counts->pages - counts->missing_pages, counts->pages, (counts->pages - counts->missing_pages) << (PAGE_SHIFT - 10), chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize, chunk_kb ? "KB" : "B"); if (bitmap->storage.file) { seq_printf(seq, ", file: "); seq_path(seq, &bitmap->storage.file->f_path, " \t\n"); } seq_printf(seq, "\n"); } int bitmap_resize(struct bitmap *bitmap, sector_t blocks, int chunksize, int init) { /* If chunk_size is 0, choose an appropriate chunk size. * Then possibly allocate new storage space. * Then quiesce, copy bits, replace bitmap, and re-start * * This function is called both to set up the initial bitmap * and to resize the bitmap while the array is active. * If this happens as a result of the array being resized, * chunksize will be zero, and we need to choose a suitable * chunksize, otherwise we use what we are given. 
*/ struct bitmap_storage store; struct bitmap_counts old_counts; unsigned long chunks; sector_t block; sector_t old_blocks, new_blocks; int chunkshift; int ret = 0; long pages; struct bitmap_page *new_bp; if (chunksize == 0) { /* If there is enough space, leave the chunk size unchanged, * else increase by factor of two until there is enough space. */ long bytes; long space = bitmap->mddev->bitmap_info.space; if (space == 0) { /* We don't know how much space there is, so limit * to current size - in sectors. */ bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8); if (!bitmap->mddev->bitmap_info.external) bytes += sizeof(bitmap_super_t); space = DIV_ROUND_UP(bytes, 512); bitmap->mddev->bitmap_info.space = space; } chunkshift = bitmap->counts.chunkshift; chunkshift--; do { /* 'chunkshift' is shift from block size to chunk size */ chunkshift++; chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift); bytes = DIV_ROUND_UP(chunks, 8); if (!bitmap->mddev->bitmap_info.external) bytes += sizeof(bitmap_super_t); } while (bytes > (space << 9)); } else chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT; chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift); memset(&store, 0, sizeof(store)); if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) ret = bitmap_storage_alloc(&store, chunks, !bitmap->mddev->bitmap_info.external); if (ret) goto err; pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO); new_bp = kzalloc(pages * sizeof(*new_bp), GFP_KERNEL); ret = -ENOMEM; if (!new_bp) { bitmap_file_unmap(&store); goto err; } if (!init) bitmap->mddev->pers->quiesce(bitmap->mddev, 1); store.file = bitmap->storage.file; bitmap->storage.file = NULL; if (store.sb_page && bitmap->storage.sb_page) memcpy(page_address(store.sb_page), page_address(bitmap->storage.sb_page), sizeof(bitmap_super_t)); bitmap_file_unmap(&bitmap->storage); bitmap->storage = store; old_counts = bitmap->counts; bitmap->counts.bp = new_bp; bitmap->counts.pages = pages; bitmap->counts.missing_pages = pages; 
bitmap->counts.chunkshift = chunkshift; bitmap->counts.chunks = chunks; bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift + BITMAP_BLOCK_SHIFT); blocks = min(old_counts.chunks << old_counts.chunkshift, chunks << chunkshift); spin_lock_irq(&bitmap->counts.lock); for (block = 0; block < blocks; ) { bitmap_counter_t *bmc_old, *bmc_new; int set; bmc_old = bitmap_get_counter(&old_counts, block, &old_blocks, 0); set = bmc_old && NEEDED(*bmc_old); if (set) { bmc_new = bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1); if (*bmc_new == 0) { /* need to set on-disk bits too. */ sector_t end = block + new_blocks; sector_t start = block >> chunkshift; start <<= chunkshift; while (start < end) { bitmap_file_set_bit(bitmap, block); start += 1 << chunkshift; } *bmc_new = 2; bitmap_count_page(&bitmap->counts, block, 1); bitmap_set_pending(&bitmap->counts, block); } *bmc_new |= NEEDED_MASK; if (new_blocks < old_blocks) old_blocks = new_blocks; } block += old_blocks; } if (!init) { int i; while (block < (chunks << chunkshift)) { bitmap_counter_t *bmc; bmc = bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1); if (bmc) { /* new space. It needs to be resynced, so * we set NEEDED_MASK. 
*/ if (*bmc == 0) { *bmc = NEEDED_MASK | 2; bitmap_count_page(&bitmap->counts, block, 1); bitmap_set_pending(&bitmap->counts, block); } } block += new_blocks; } for (i = 0; i < bitmap->storage.file_pages; i++) set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY); } spin_unlock_irq(&bitmap->counts.lock); if (!init) { bitmap_unplug(bitmap); bitmap->mddev->pers->quiesce(bitmap->mddev, 0); } ret = 0; err: return ret; } EXPORT_SYMBOL_GPL(bitmap_resize); static ssize_t location_show(struct mddev *mddev, char *page) { ssize_t len; if (mddev->bitmap_info.file) len = sprintf(page, "file"); else if (mddev->bitmap_info.offset) len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset); else len = sprintf(page, "none"); len += sprintf(page+len, "\n"); return len; } static ssize_t location_store(struct mddev *mddev, const char *buf, size_t len) { if (mddev->pers) { if (!mddev->pers->quiesce) return -EBUSY; if (mddev->recovery || mddev->sync_thread) return -EBUSY; } if (mddev->bitmap || mddev->bitmap_info.file || mddev->bitmap_info.offset) { /* bitmap already configured. 
Only option is to clear it */ if (strncmp(buf, "none", 4) != 0) return -EBUSY; if (mddev->pers) { mddev->pers->quiesce(mddev, 1); bitmap_destroy(mddev); mddev->pers->quiesce(mddev, 0); } mddev->bitmap_info.offset = 0; if (mddev->bitmap_info.file) { struct file *f = mddev->bitmap_info.file; mddev->bitmap_info.file = NULL; restore_bitmap_write_access(f); fput(f); } } else { /* No bitmap, OK to set a location */ long long offset; if (strncmp(buf, "none", 4) == 0) /* nothing to be done */; else if (strncmp(buf, "file:", 5) == 0) { /* Not supported yet */ return -EINVAL; } else { int rv; if (buf[0] == '+') rv = kstrtoll(buf+1, 10, &offset); else rv = kstrtoll(buf, 10, &offset); if (rv) return rv; if (offset == 0) return -EINVAL; if (mddev->bitmap_info.external == 0 && mddev->major_version == 0 && offset != mddev->bitmap_info.default_offset) return -EINVAL; mddev->bitmap_info.offset = offset; if (mddev->pers) { mddev->pers->quiesce(mddev, 1); rv = bitmap_create(mddev); if (!rv) rv = bitmap_load(mddev); if (rv) { bitmap_destroy(mddev); mddev->bitmap_info.offset = 0; } mddev->pers->quiesce(mddev, 0); if (rv) return rv; } } } if (!mddev->external) { /* Ensure new bitmap info is stored in * metadata promptly. */ set_bit(MD_CHANGE_DEVS, &mddev->flags); md_wakeup_thread(mddev->thread); } return len; } static struct md_sysfs_entry bitmap_location = __ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store); /* 'bitmap/space' is the space available at 'location' for the * bitmap. This allows the kernel to know when it is safe to * resize the bitmap to match a resized array. 
*/ static ssize_t space_show(struct mddev *mddev, char *page) { return sprintf(page, "%lu\n", mddev->bitmap_info.space); } static ssize_t space_store(struct mddev *mddev, const char *buf, size_t len) { unsigned long sectors; int rv; rv = kstrtoul(buf, 10, &sectors); if (rv) return rv; if (sectors == 0) return -EINVAL; if (mddev->bitmap && sectors < (mddev->bitmap->storage.bytes + 511) >> 9) return -EFBIG; /* Bitmap is too big for this small space */ /* could make sure it isn't too big, but that isn't really * needed - user-space should be careful. */ mddev->bitmap_info.space = sectors; return len; } static struct md_sysfs_entry bitmap_space = __ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store); static ssize_t timeout_show(struct mddev *mddev, char *page) { ssize_t len; unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ; unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ; len = sprintf(page, "%lu", secs); if (jifs) len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs)); len += sprintf(page+len, "\n"); return len; } static ssize_t timeout_store(struct mddev *mddev, const char *buf, size_t len) { /* timeout can be set at any time */ unsigned long timeout; int rv = strict_strtoul_scaled(buf, &timeout, 4); if (rv) return rv; /* just to make sure we don't overflow... 
*/ if (timeout >= LONG_MAX / HZ) return -EINVAL; timeout = timeout * HZ / 10000; if (timeout >= MAX_SCHEDULE_TIMEOUT) timeout = MAX_SCHEDULE_TIMEOUT-1; if (timeout < 1) timeout = 1; mddev->bitmap_info.daemon_sleep = timeout; if (mddev->thread) { /* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then * the bitmap is all clean and we don't need to * adjust the timeout right now */ if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) { mddev->thread->timeout = timeout; md_wakeup_thread(mddev->thread); } } return len; } static struct md_sysfs_entry bitmap_timeout = __ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store); static ssize_t backlog_show(struct mddev *mddev, char *page) { return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind); } static ssize_t backlog_store(struct mddev *mddev, const char *buf, size_t len) { unsigned long backlog; int rv = kstrtoul(buf, 10, &backlog); if (rv) return rv; if (backlog > COUNTER_MAX) return -EINVAL; mddev->bitmap_info.max_write_behind = backlog; return len; } static struct md_sysfs_entry bitmap_backlog = __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store); static ssize_t chunksize_show(struct mddev *mddev, char *page) { return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize); } static ssize_t chunksize_store(struct mddev *mddev, const char *buf, size_t len) { /* Can only be changed when no bitmap is active */ int rv; unsigned long csize; if (mddev->bitmap) return -EBUSY; rv = kstrtoul(buf, 10, &csize); if (rv) return rv; if (csize < 512 || !is_power_of_2(csize)) return -EINVAL; mddev->bitmap_info.chunksize = csize; return len; } static struct md_sysfs_entry bitmap_chunksize = __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store); static ssize_t metadata_show(struct mddev *mddev, char *page) { return sprintf(page, "%s\n", (mddev->bitmap_info.external ? 
"external" : "internal")); } static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len) { if (mddev->bitmap || mddev->bitmap_info.file || mddev->bitmap_info.offset) return -EBUSY; if (strncmp(buf, "external", 8) == 0) mddev->bitmap_info.external = 1; else if (strncmp(buf, "internal", 8) == 0) mddev->bitmap_info.external = 0; else return -EINVAL; return len; } static struct md_sysfs_entry bitmap_metadata = __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store); static ssize_t can_clear_show(struct mddev *mddev, char *page) { int len; if (mddev->bitmap) len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ? "false" : "true")); else len = sprintf(page, "\n"); return len; } static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len) { if (mddev->bitmap == NULL) return -ENOENT; if (strncmp(buf, "false", 5) == 0) mddev->bitmap->need_sync = 1; else if (strncmp(buf, "true", 4) == 0) { if (mddev->degraded) return -EBUSY; mddev->bitmap->need_sync = 0; } else return -EINVAL; return len; } static struct md_sysfs_entry bitmap_can_clear = __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store); static ssize_t behind_writes_used_show(struct mddev *mddev, char *page) { if (mddev->bitmap == NULL) return sprintf(page, "0\n"); return sprintf(page, "%lu\n", mddev->bitmap->behind_writes_used); } static ssize_t behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len) { if (mddev->bitmap) mddev->bitmap->behind_writes_used = 0; return len; } static struct md_sysfs_entry max_backlog_used = __ATTR(max_backlog_used, S_IRUGO | S_IWUSR, behind_writes_used_show, behind_writes_used_reset); static struct attribute *md_bitmap_attrs[] = { &bitmap_location.attr, &bitmap_space.attr, &bitmap_timeout.attr, &bitmap_backlog.attr, &bitmap_chunksize.attr, &bitmap_metadata.attr, &bitmap_can_clear.attr, &max_backlog_used.attr, NULL }; struct attribute_group md_bitmap_group = { .name = "bitmap", .attrs = md_bitmap_attrs, };
gpl-2.0
nmacs/lm3s-uclinux
lib/libssl/openssl-1.0.1e/ssl/d1_srvr.c
411
44866
/* ssl/d1_srvr.c */ /* * DTLS implementation written by Nagendra Modadugu * (nagendra@cs.stanford.edu) for the OpenSSL project 2005. */ /* ==================================================================== * Copyright (c) 1999-2007 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * openssl-core@OpenSSL.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). * */ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] 
*/ #include <stdio.h> #include "ssl_locl.h" #include <openssl/buffer.h> #include <openssl/rand.h> #include <openssl/objects.h> #include <openssl/evp.h> #include <openssl/x509.h> #include <openssl/md5.h> #include <openssl/bn.h> #ifndef OPENSSL_NO_DH #include <openssl/dh.h> #endif static const SSL_METHOD *dtls1_get_server_method(int ver); static int dtls1_send_hello_verify_request(SSL *s); static const SSL_METHOD *dtls1_get_server_method(int ver) { if (ver == DTLS1_VERSION) return(DTLSv1_server_method()); else return(NULL); } IMPLEMENT_dtls1_meth_func(DTLSv1_server_method, dtls1_accept, ssl_undefined_function, dtls1_get_server_method) int dtls1_accept(SSL *s) { BUF_MEM *buf; unsigned long Time=(unsigned long)time(NULL); void (*cb)(const SSL *ssl,int type,int val)=NULL; unsigned long alg_k; int ret= -1; int new_state,state,skip=0; int listen; #ifndef OPENSSL_NO_SCTP unsigned char sctpauthkey[64]; char labelbuffer[sizeof(DTLS1_SCTP_AUTH_LABEL)]; #endif RAND_add(&Time,sizeof(Time),0); ERR_clear_error(); clear_sys_error(); if (s->info_callback != NULL) cb=s->info_callback; else if (s->ctx->info_callback != NULL) cb=s->ctx->info_callback; listen = s->d1->listen; /* init things to blank */ s->in_handshake++; if (!SSL_in_init(s) || SSL_in_before(s)) SSL_clear(s); s->d1->listen = listen; #ifndef OPENSSL_NO_SCTP /* Notify SCTP BIO socket to enter handshake * mode and prevent stream identifier other * than 0. Will be ignored if no SCTP is used. */ BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_SCTP_SET_IN_HANDSHAKE, s->in_handshake, NULL); #endif if (s->cert == NULL) { SSLerr(SSL_F_DTLS1_ACCEPT,SSL_R_NO_CERTIFICATE_SET); return(-1); } #ifndef OPENSSL_NO_HEARTBEATS /* If we're awaiting a HeartbeatResponse, pretend we * already got and don't await it anymore, because * Heartbeats don't make sense during handshakes anyway. 
*/ if (s->tlsext_hb_pending) { dtls1_stop_timer(s); s->tlsext_hb_pending = 0; s->tlsext_hb_seq++; } #endif for (;;) { state=s->state; switch (s->state) { case SSL_ST_RENEGOTIATE: s->renegotiate=1; /* s->state=SSL_ST_ACCEPT; */ case SSL_ST_BEFORE: case SSL_ST_ACCEPT: case SSL_ST_BEFORE|SSL_ST_ACCEPT: case SSL_ST_OK|SSL_ST_ACCEPT: s->server=1; if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_START,1); if ((s->version & 0xff00) != (DTLS1_VERSION & 0xff00)) { SSLerr(SSL_F_DTLS1_ACCEPT, ERR_R_INTERNAL_ERROR); return -1; } s->type=SSL_ST_ACCEPT; if (s->init_buf == NULL) { if ((buf=BUF_MEM_new()) == NULL) { ret= -1; goto end; } if (!BUF_MEM_grow(buf,SSL3_RT_MAX_PLAIN_LENGTH)) { ret= -1; goto end; } s->init_buf=buf; } if (!ssl3_setup_buffers(s)) { ret= -1; goto end; } s->init_num=0; if (s->state != SSL_ST_RENEGOTIATE) { /* Ok, we now need to push on a buffering BIO so that * the output is sent in a way that TCP likes :-) * ...but not with SCTP :-) */ #ifndef OPENSSL_NO_SCTP if (!BIO_dgram_is_sctp(SSL_get_wbio(s))) #endif if (!ssl_init_wbio_buffer(s,1)) { ret= -1; goto end; } ssl3_init_finished_mac(s); s->state=SSL3_ST_SR_CLNT_HELLO_A; s->ctx->stats.sess_accept++; } else { /* s->state == SSL_ST_RENEGOTIATE, * we will just send a HelloRequest */ s->ctx->stats.sess_accept_renegotiate++; s->state=SSL3_ST_SW_HELLO_REQ_A; } break; case SSL3_ST_SW_HELLO_REQ_A: case SSL3_ST_SW_HELLO_REQ_B: s->shutdown=0; dtls1_start_timer(s); ret=dtls1_send_hello_request(s); if (ret <= 0) goto end; s->s3->tmp.next_state=SSL3_ST_SW_HELLO_REQ_C; s->state=SSL3_ST_SW_FLUSH; s->init_num=0; ssl3_init_finished_mac(s); break; case SSL3_ST_SW_HELLO_REQ_C: s->state=SSL_ST_OK; break; case SSL3_ST_SR_CLNT_HELLO_A: case SSL3_ST_SR_CLNT_HELLO_B: case SSL3_ST_SR_CLNT_HELLO_C: s->shutdown=0; ret=ssl3_get_client_hello(s); if (ret <= 0) goto end; dtls1_stop_timer(s); if (ret == 1 && (SSL_get_options(s) & SSL_OP_COOKIE_EXCHANGE)) s->state = DTLS1_ST_SW_HELLO_VERIFY_REQUEST_A; else s->state = SSL3_ST_SW_SRVR_HELLO_A; 
s->init_num=0; /* Reflect ClientHello sequence to remain stateless while listening */ if (listen) { memcpy(s->s3->write_sequence, s->s3->read_sequence, sizeof(s->s3->write_sequence)); } /* If we're just listening, stop here */ if (listen && s->state == SSL3_ST_SW_SRVR_HELLO_A) { ret = 2; s->d1->listen = 0; /* Set expected sequence numbers * to continue the handshake. */ s->d1->handshake_read_seq = 2; s->d1->handshake_write_seq = 1; s->d1->next_handshake_write_seq = 1; goto end; } break; case DTLS1_ST_SW_HELLO_VERIFY_REQUEST_A: case DTLS1_ST_SW_HELLO_VERIFY_REQUEST_B: ret = dtls1_send_hello_verify_request(s); if ( ret <= 0) goto end; s->state=SSL3_ST_SW_FLUSH; s->s3->tmp.next_state=SSL3_ST_SR_CLNT_HELLO_A; /* HelloVerifyRequest resets Finished MAC */ if (s->version != DTLS1_BAD_VER) ssl3_init_finished_mac(s); break; #ifndef OPENSSL_NO_SCTP case DTLS1_SCTP_ST_SR_READ_SOCK: if (BIO_dgram_sctp_msg_waiting(SSL_get_rbio(s))) { s->s3->in_read_app_data=2; s->rwstate=SSL_READING; BIO_clear_retry_flags(SSL_get_rbio(s)); BIO_set_retry_read(SSL_get_rbio(s)); ret = -1; goto end; } s->state=SSL3_ST_SR_FINISHED_A; break; case DTLS1_SCTP_ST_SW_WRITE_SOCK: ret = BIO_dgram_sctp_wait_for_dry(SSL_get_wbio(s)); if (ret < 0) goto end; if (ret == 0) { if (s->d1->next_state != SSL_ST_OK) { s->s3->in_read_app_data=2; s->rwstate=SSL_READING; BIO_clear_retry_flags(SSL_get_rbio(s)); BIO_set_retry_read(SSL_get_rbio(s)); ret = -1; goto end; } } s->state=s->d1->next_state; break; #endif case SSL3_ST_SW_SRVR_HELLO_A: case SSL3_ST_SW_SRVR_HELLO_B: s->renegotiate = 2; dtls1_start_timer(s); ret=dtls1_send_server_hello(s); if (ret <= 0) goto end; if (s->hit) { #ifndef OPENSSL_NO_SCTP /* Add new shared key for SCTP-Auth, * will be ignored if no SCTP used. 
*/ snprintf((char*) labelbuffer, sizeof(DTLS1_SCTP_AUTH_LABEL), DTLS1_SCTP_AUTH_LABEL); SSL_export_keying_material(s, sctpauthkey, sizeof(sctpauthkey), labelbuffer, sizeof(labelbuffer), NULL, 0, 0); BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_SCTP_ADD_AUTH_KEY, sizeof(sctpauthkey), sctpauthkey); #endif #ifndef OPENSSL_NO_TLSEXT if (s->tlsext_ticket_expected) s->state=SSL3_ST_SW_SESSION_TICKET_A; else s->state=SSL3_ST_SW_CHANGE_A; #else s->state=SSL3_ST_SW_CHANGE_A; #endif } else s->state=SSL3_ST_SW_CERT_A; s->init_num=0; break; case SSL3_ST_SW_CERT_A: case SSL3_ST_SW_CERT_B: /* Check if it is anon DH or normal PSK */ if (!(s->s3->tmp.new_cipher->algorithm_auth & SSL_aNULL) && !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)) { dtls1_start_timer(s); ret=dtls1_send_server_certificate(s); if (ret <= 0) goto end; #ifndef OPENSSL_NO_TLSEXT if (s->tlsext_status_expected) s->state=SSL3_ST_SW_CERT_STATUS_A; else s->state=SSL3_ST_SW_KEY_EXCH_A; } else { skip = 1; s->state=SSL3_ST_SW_KEY_EXCH_A; } #else } else skip=1; s->state=SSL3_ST_SW_KEY_EXCH_A; #endif s->init_num=0; break; case SSL3_ST_SW_KEY_EXCH_A: case SSL3_ST_SW_KEY_EXCH_B: alg_k = s->s3->tmp.new_cipher->algorithm_mkey; /* clear this, it may get reset by * send_server_key_exchange */ if ((s->options & SSL_OP_EPHEMERAL_RSA) #ifndef OPENSSL_NO_KRB5 && !(alg_k & SSL_kKRB5) #endif /* OPENSSL_NO_KRB5 */ ) /* option SSL_OP_EPHEMERAL_RSA sends temporary RSA key * even when forbidden by protocol specs * (handshake may fail as clients are not required to * be able to handle this) */ s->s3->tmp.use_rsa_tmp=1; else s->s3->tmp.use_rsa_tmp=0; /* only send if a DH key exchange or * RSA but we have a sign only certificate */ if (s->s3->tmp.use_rsa_tmp /* PSK: send ServerKeyExchange if PSK identity * hint if provided */ #ifndef OPENSSL_NO_PSK || ((alg_k & SSL_kPSK) && s->ctx->psk_identity_hint) #endif || (alg_k & (SSL_kEDH|SSL_kDHr|SSL_kDHd)) || (alg_k & SSL_kEECDH) || ((alg_k & SSL_kRSA) && 
(s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey == NULL || (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher) && EVP_PKEY_size(s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey)*8 > SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher) ) ) ) ) { dtls1_start_timer(s); ret=dtls1_send_server_key_exchange(s); if (ret <= 0) goto end; } else skip=1; s->state=SSL3_ST_SW_CERT_REQ_A; s->init_num=0; break; case SSL3_ST_SW_CERT_REQ_A: case SSL3_ST_SW_CERT_REQ_B: if (/* don't request cert unless asked for it: */ !(s->verify_mode & SSL_VERIFY_PEER) || /* if SSL_VERIFY_CLIENT_ONCE is set, * don't request cert during re-negotiation: */ ((s->session->peer != NULL) && (s->verify_mode & SSL_VERIFY_CLIENT_ONCE)) || /* never request cert in anonymous ciphersuites * (see section "Certificate request" in SSL 3 drafts * and in RFC 2246): */ ((s->s3->tmp.new_cipher->algorithm_auth & SSL_aNULL) && /* ... except when the application insists on verification * (against the specs, but s3_clnt.c accepts this for SSL 3) */ !(s->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT)) || /* never request cert in Kerberos ciphersuites */ (s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5) /* With normal PSK Certificates and * Certificate Requests are omitted */ || (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)) { /* no cert request */ skip=1; s->s3->tmp.cert_request=0; s->state=SSL3_ST_SW_SRVR_DONE_A; #ifndef OPENSSL_NO_SCTP if (BIO_dgram_is_sctp(SSL_get_wbio(s))) { s->d1->next_state = SSL3_ST_SW_SRVR_DONE_A; s->state = DTLS1_SCTP_ST_SW_WRITE_SOCK; } #endif } else { s->s3->tmp.cert_request=1; dtls1_start_timer(s); ret=dtls1_send_certificate_request(s); if (ret <= 0) goto end; #ifndef NETSCAPE_HANG_BUG s->state=SSL3_ST_SW_SRVR_DONE_A; #ifndef OPENSSL_NO_SCTP if (BIO_dgram_is_sctp(SSL_get_wbio(s))) { s->d1->next_state = SSL3_ST_SW_SRVR_DONE_A; s->state = DTLS1_SCTP_ST_SW_WRITE_SOCK; } #endif #else s->state=SSL3_ST_SW_FLUSH; s->s3->tmp.next_state=SSL3_ST_SR_CERT_A; #ifndef OPENSSL_NO_SCTP if (BIO_dgram_is_sctp(SSL_get_wbio(s))) { 
s->d1->next_state = s->s3->tmp.next_state; s->s3->tmp.next_state=DTLS1_SCTP_ST_SW_WRITE_SOCK; } #endif #endif s->init_num=0; } break; case SSL3_ST_SW_SRVR_DONE_A: case SSL3_ST_SW_SRVR_DONE_B: dtls1_start_timer(s); ret=dtls1_send_server_done(s); if (ret <= 0) goto end; s->s3->tmp.next_state=SSL3_ST_SR_CERT_A; s->state=SSL3_ST_SW_FLUSH; s->init_num=0; break; case SSL3_ST_SW_FLUSH: s->rwstate=SSL_WRITING; if (BIO_flush(s->wbio) <= 0) { /* If the write error was fatal, stop trying */ if (!BIO_should_retry(s->wbio)) { s->rwstate=SSL_NOTHING; s->state=s->s3->tmp.next_state; } ret= -1; goto end; } s->rwstate=SSL_NOTHING; s->state=s->s3->tmp.next_state; break; case SSL3_ST_SR_CERT_A: case SSL3_ST_SR_CERT_B: /* Check for second client hello (MS SGC) */ ret = ssl3_check_client_hello(s); if (ret <= 0) goto end; if (ret == 2) { dtls1_stop_timer(s); s->state = SSL3_ST_SR_CLNT_HELLO_C; } else { /* could be sent for a DH cert, even if we * have not asked for it :-) */ ret=ssl3_get_client_certificate(s); if (ret <= 0) goto end; s->init_num=0; s->state=SSL3_ST_SR_KEY_EXCH_A; } break; case SSL3_ST_SR_KEY_EXCH_A: case SSL3_ST_SR_KEY_EXCH_B: ret=ssl3_get_client_key_exchange(s); if (ret <= 0) goto end; #ifndef OPENSSL_NO_SCTP /* Add new shared key for SCTP-Auth, * will be ignored if no SCTP used. */ snprintf((char *) labelbuffer, sizeof(DTLS1_SCTP_AUTH_LABEL), DTLS1_SCTP_AUTH_LABEL); SSL_export_keying_material(s, sctpauthkey, sizeof(sctpauthkey), labelbuffer, sizeof(labelbuffer), NULL, 0, 0); BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_SCTP_ADD_AUTH_KEY, sizeof(sctpauthkey), sctpauthkey); #endif s->state=SSL3_ST_SR_CERT_VRFY_A; s->init_num=0; if (ret == 2) { /* For the ECDH ciphersuites when * the client sends its ECDH pub key in * a certificate, the CertificateVerify * message is not sent. 
*/ s->state=SSL3_ST_SR_FINISHED_A; s->init_num = 0; } else { s->state=SSL3_ST_SR_CERT_VRFY_A; s->init_num=0; /* We need to get hashes here so if there is * a client cert, it can be verified */ s->method->ssl3_enc->cert_verify_mac(s, NID_md5, &(s->s3->tmp.cert_verify_md[0])); s->method->ssl3_enc->cert_verify_mac(s, NID_sha1, &(s->s3->tmp.cert_verify_md[MD5_DIGEST_LENGTH])); } break; case SSL3_ST_SR_CERT_VRFY_A: case SSL3_ST_SR_CERT_VRFY_B: s->d1->change_cipher_spec_ok = 1; /* we should decide if we expected this one */ ret=ssl3_get_cert_verify(s); if (ret <= 0) goto end; #ifndef OPENSSL_NO_SCTP if (BIO_dgram_is_sctp(SSL_get_wbio(s)) && state == SSL_ST_RENEGOTIATE) s->state=DTLS1_SCTP_ST_SR_READ_SOCK; else #endif s->state=SSL3_ST_SR_FINISHED_A; s->init_num=0; break; case SSL3_ST_SR_FINISHED_A: case SSL3_ST_SR_FINISHED_B: s->d1->change_cipher_spec_ok = 1; ret=ssl3_get_finished(s,SSL3_ST_SR_FINISHED_A, SSL3_ST_SR_FINISHED_B); if (ret <= 0) goto end; dtls1_stop_timer(s); if (s->hit) s->state=SSL_ST_OK; #ifndef OPENSSL_NO_TLSEXT else if (s->tlsext_ticket_expected) s->state=SSL3_ST_SW_SESSION_TICKET_A; #endif else s->state=SSL3_ST_SW_CHANGE_A; s->init_num=0; break; #ifndef OPENSSL_NO_TLSEXT case SSL3_ST_SW_SESSION_TICKET_A: case SSL3_ST_SW_SESSION_TICKET_B: ret=dtls1_send_newsession_ticket(s); if (ret <= 0) goto end; s->state=SSL3_ST_SW_CHANGE_A; s->init_num=0; break; case SSL3_ST_SW_CERT_STATUS_A: case SSL3_ST_SW_CERT_STATUS_B: ret=ssl3_send_cert_status(s); if (ret <= 0) goto end; s->state=SSL3_ST_SW_KEY_EXCH_A; s->init_num=0; break; #endif case SSL3_ST_SW_CHANGE_A: case SSL3_ST_SW_CHANGE_B: s->session->cipher=s->s3->tmp.new_cipher; if (!s->method->ssl3_enc->setup_key_block(s)) { ret= -1; goto end; } ret=dtls1_send_change_cipher_spec(s, SSL3_ST_SW_CHANGE_A,SSL3_ST_SW_CHANGE_B); if (ret <= 0) goto end; #ifndef OPENSSL_NO_SCTP /* Change to new shared key of SCTP-Auth, * will be ignored if no SCTP used. 
*/ BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_SCTP_NEXT_AUTH_KEY, 0, NULL); #endif s->state=SSL3_ST_SW_FINISHED_A; s->init_num=0; if (!s->method->ssl3_enc->change_cipher_state(s, SSL3_CHANGE_CIPHER_SERVER_WRITE)) { ret= -1; goto end; } dtls1_reset_seq_numbers(s, SSL3_CC_WRITE); break; case SSL3_ST_SW_FINISHED_A: case SSL3_ST_SW_FINISHED_B: ret=dtls1_send_finished(s, SSL3_ST_SW_FINISHED_A,SSL3_ST_SW_FINISHED_B, s->method->ssl3_enc->server_finished_label, s->method->ssl3_enc->server_finished_label_len); if (ret <= 0) goto end; s->state=SSL3_ST_SW_FLUSH; if (s->hit) s->s3->tmp.next_state=SSL3_ST_SR_FINISHED_A; else { s->s3->tmp.next_state=SSL_ST_OK; #ifndef OPENSSL_NO_SCTP if (BIO_dgram_is_sctp(SSL_get_wbio(s))) { s->d1->next_state = s->s3->tmp.next_state; s->s3->tmp.next_state=DTLS1_SCTP_ST_SW_WRITE_SOCK; } #endif } s->init_num=0; break; case SSL_ST_OK: /* clean a few things up */ ssl3_cleanup_key_block(s); #if 0 BUF_MEM_free(s->init_buf); s->init_buf=NULL; #endif /* remove buffering on output */ ssl_free_wbio_buffer(s); s->init_num=0; if (s->renegotiate == 2) /* skipped if we just sent a HelloRequest */ { s->renegotiate=0; s->new_session=0; ssl_update_cache(s,SSL_SESS_CACHE_SERVER); s->ctx->stats.sess_accept_good++; /* s->server=1; */ s->handshake_func=dtls1_accept; if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_DONE,1); } ret = 1; /* done handshaking, next message is client hello */ s->d1->handshake_read_seq = 0; /* next message is server hello */ s->d1->handshake_write_seq = 0; s->d1->next_handshake_write_seq = 0; goto end; /* break; */ default: SSLerr(SSL_F_DTLS1_ACCEPT,SSL_R_UNKNOWN_STATE); ret= -1; goto end; /* break; */ } if (!s->s3->tmp.reuse_message && !skip) { if (s->debug) { if ((ret=BIO_flush(s->wbio)) <= 0) goto end; } if ((cb != NULL) && (s->state != state)) { new_state=s->state; s->state=state; cb(s,SSL_CB_ACCEPT_LOOP,1); s->state=new_state; } } skip=0; } end: /* BIO_flush(s->wbio); */ s->in_handshake--; #ifndef OPENSSL_NO_SCTP /* Notify SCTP BIO socket to leave 
handshake * mode and prevent stream identifier other * than 0. Will be ignored if no SCTP is used. */ BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_SCTP_SET_IN_HANDSHAKE, s->in_handshake, NULL); #endif if (cb != NULL) cb(s,SSL_CB_ACCEPT_EXIT,ret); return(ret); } int dtls1_send_hello_request(SSL *s) { unsigned char *p; if (s->state == SSL3_ST_SW_HELLO_REQ_A) { p=(unsigned char *)s->init_buf->data; p = dtls1_set_message_header(s, p, SSL3_MT_HELLO_REQUEST, 0, 0, 0); s->state=SSL3_ST_SW_HELLO_REQ_B; /* number of bytes to write */ s->init_num=DTLS1_HM_HEADER_LENGTH; s->init_off=0; /* no need to buffer this message, since there are no retransmit * requests for it */ } /* SSL3_ST_SW_HELLO_REQ_B */ return(dtls1_do_write(s,SSL3_RT_HANDSHAKE)); } int dtls1_send_hello_verify_request(SSL *s) { unsigned int msg_len; unsigned char *msg, *buf, *p; if (s->state == DTLS1_ST_SW_HELLO_VERIFY_REQUEST_A) { buf = (unsigned char *)s->init_buf->data; msg = p = &(buf[DTLS1_HM_HEADER_LENGTH]); *(p++) = s->version >> 8; *(p++) = s->version & 0xFF; if (s->ctx->app_gen_cookie_cb == NULL || s->ctx->app_gen_cookie_cb(s, s->d1->cookie, &(s->d1->cookie_len)) == 0) { SSLerr(SSL_F_DTLS1_SEND_HELLO_VERIFY_REQUEST,ERR_R_INTERNAL_ERROR); return 0; } *(p++) = (unsigned char) s->d1->cookie_len; memcpy(p, s->d1->cookie, s->d1->cookie_len); p += s->d1->cookie_len; msg_len = p - msg; dtls1_set_message_header(s, buf, DTLS1_MT_HELLO_VERIFY_REQUEST, msg_len, 0, msg_len); s->state=DTLS1_ST_SW_HELLO_VERIFY_REQUEST_B; /* number of bytes to write */ s->init_num=p-buf; s->init_off=0; } /* s->state = DTLS1_ST_SW_HELLO_VERIFY_REQUEST_B */ return(dtls1_do_write(s,SSL3_RT_HANDSHAKE)); } int dtls1_send_server_hello(SSL *s) { unsigned char *buf; unsigned char *p,*d; int i; unsigned int sl; unsigned long l,Time; if (s->state == SSL3_ST_SW_SRVR_HELLO_A) { buf=(unsigned char *)s->init_buf->data; p=s->s3->server_random; Time=(unsigned long)time(NULL); /* Time */ l2n(Time,p); RAND_pseudo_bytes(p,SSL3_RANDOM_SIZE-4); /* Do the 
message type and length last */ d=p= &(buf[DTLS1_HM_HEADER_LENGTH]); *(p++)=s->version>>8; *(p++)=s->version&0xff; /* Random stuff */ memcpy(p,s->s3->server_random,SSL3_RANDOM_SIZE); p+=SSL3_RANDOM_SIZE; /* now in theory we have 3 options to sending back the * session id. If it is a re-use, we send back the * old session-id, if it is a new session, we send * back the new session-id or we send back a 0 length * session-id if we want it to be single use. * Currently I will not implement the '0' length session-id * 12-Jan-98 - I'll now support the '0' length stuff. */ if (!(s->ctx->session_cache_mode & SSL_SESS_CACHE_SERVER)) s->session->session_id_length=0; sl=s->session->session_id_length; if (sl > sizeof s->session->session_id) { SSLerr(SSL_F_DTLS1_SEND_SERVER_HELLO, ERR_R_INTERNAL_ERROR); return -1; } *(p++)=sl; memcpy(p,s->session->session_id,sl); p+=sl; /* put the cipher */ if (s->s3->tmp.new_cipher == NULL) return -1; i=ssl3_put_cipher_by_char(s->s3->tmp.new_cipher,p); p+=i; /* put the compression method */ #ifdef OPENSSL_NO_COMP *(p++)=0; #else if (s->s3->tmp.new_compression == NULL) *(p++)=0; else *(p++)=s->s3->tmp.new_compression->id; #endif #ifndef OPENSSL_NO_TLSEXT if ((p = ssl_add_serverhello_tlsext(s, p, buf+SSL3_RT_MAX_PLAIN_LENGTH)) == NULL) { SSLerr(SSL_F_DTLS1_SEND_SERVER_HELLO,ERR_R_INTERNAL_ERROR); return -1; } #endif /* do the header */ l=(p-d); d=buf; d = dtls1_set_message_header(s, d, SSL3_MT_SERVER_HELLO, l, 0, l); s->state=SSL3_ST_SW_SRVR_HELLO_B; /* number of bytes to write */ s->init_num=p-buf; s->init_off=0; /* buffer the message to handle re-xmits */ dtls1_buffer_message(s, 0); } /* SSL3_ST_SW_SRVR_HELLO_B */ return(dtls1_do_write(s,SSL3_RT_HANDSHAKE)); } int dtls1_send_server_done(SSL *s) { unsigned char *p; if (s->state == SSL3_ST_SW_SRVR_DONE_A) { p=(unsigned char *)s->init_buf->data; /* do the header */ p = dtls1_set_message_header(s, p, SSL3_MT_SERVER_DONE, 0, 0, 0); s->state=SSL3_ST_SW_SRVR_DONE_B; /* number of bytes to write */ 
s->init_num=DTLS1_HM_HEADER_LENGTH; s->init_off=0; /* buffer the message to handle re-xmits */ dtls1_buffer_message(s, 0); } /* SSL3_ST_SW_SRVR_DONE_B */ return(dtls1_do_write(s,SSL3_RT_HANDSHAKE)); } int dtls1_send_server_key_exchange(SSL *s) { #ifndef OPENSSL_NO_RSA unsigned char *q; int j,num; RSA *rsa; unsigned char md_buf[MD5_DIGEST_LENGTH+SHA_DIGEST_LENGTH]; unsigned int u; #endif #ifndef OPENSSL_NO_DH DH *dh=NULL,*dhp; #endif #ifndef OPENSSL_NO_ECDH EC_KEY *ecdh=NULL, *ecdhp; unsigned char *encodedPoint = NULL; int encodedlen = 0; int curve_id = 0; BN_CTX *bn_ctx = NULL; #endif EVP_PKEY *pkey; unsigned char *p,*d; int al,i; unsigned long type; int n; CERT *cert; BIGNUM *r[4]; int nr[4],kn; BUF_MEM *buf; EVP_MD_CTX md_ctx; EVP_MD_CTX_init(&md_ctx); if (s->state == SSL3_ST_SW_KEY_EXCH_A) { type=s->s3->tmp.new_cipher->algorithm_mkey; cert=s->cert; buf=s->init_buf; r[0]=r[1]=r[2]=r[3]=NULL; n=0; #ifndef OPENSSL_NO_RSA if (type & SSL_kRSA) { rsa=cert->rsa_tmp; if ((rsa == NULL) && (s->cert->rsa_tmp_cb != NULL)) { rsa=s->cert->rsa_tmp_cb(s, SSL_C_IS_EXPORT(s->s3->tmp.new_cipher), SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher)); if(rsa == NULL) { al=SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,SSL_R_ERROR_GENERATING_TMP_RSA_KEY); goto f_err; } RSA_up_ref(rsa); cert->rsa_tmp=rsa; } if (rsa == NULL) { al=SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,SSL_R_MISSING_TMP_RSA_KEY); goto f_err; } r[0]=rsa->n; r[1]=rsa->e; s->s3->tmp.use_rsa_tmp=1; } else #endif #ifndef OPENSSL_NO_DH if (type & SSL_kEDH) { dhp=cert->dh_tmp; if ((dhp == NULL) && (s->cert->dh_tmp_cb != NULL)) dhp=s->cert->dh_tmp_cb(s, SSL_C_IS_EXPORT(s->s3->tmp.new_cipher), SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher)); if (dhp == NULL) { al=SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,SSL_R_MISSING_TMP_DH_KEY); goto f_err; } if (s->s3->tmp.dh != NULL) { DH_free(dh); SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); 
goto err; } if ((dh=DHparams_dup(dhp)) == NULL) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,ERR_R_DH_LIB); goto err; } s->s3->tmp.dh=dh; if ((dhp->pub_key == NULL || dhp->priv_key == NULL || (s->options & SSL_OP_SINGLE_DH_USE))) { if(!DH_generate_key(dh)) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE, ERR_R_DH_LIB); goto err; } } else { dh->pub_key=BN_dup(dhp->pub_key); dh->priv_key=BN_dup(dhp->priv_key); if ((dh->pub_key == NULL) || (dh->priv_key == NULL)) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,ERR_R_DH_LIB); goto err; } } r[0]=dh->p; r[1]=dh->g; r[2]=dh->pub_key; } else #endif #ifndef OPENSSL_NO_ECDH if (type & SSL_kEECDH) { const EC_GROUP *group; ecdhp=cert->ecdh_tmp; if ((ecdhp == NULL) && (s->cert->ecdh_tmp_cb != NULL)) { ecdhp=s->cert->ecdh_tmp_cb(s, SSL_C_IS_EXPORT(s->s3->tmp.new_cipher), SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher)); } if (ecdhp == NULL) { al=SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,SSL_R_MISSING_TMP_ECDH_KEY); goto f_err; } if (s->s3->tmp.ecdh != NULL) { EC_KEY_free(s->s3->tmp.ecdh); SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; } /* Duplicate the ECDH structure. 
*/ if (ecdhp == NULL) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB); goto err; } if ((ecdh = EC_KEY_dup(ecdhp)) == NULL) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB); goto err; } s->s3->tmp.ecdh=ecdh; if ((EC_KEY_get0_public_key(ecdh) == NULL) || (EC_KEY_get0_private_key(ecdh) == NULL) || (s->options & SSL_OP_SINGLE_ECDH_USE)) { if(!EC_KEY_generate_key(ecdh)) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB); goto err; } } if (((group = EC_KEY_get0_group(ecdh)) == NULL) || (EC_KEY_get0_public_key(ecdh) == NULL) || (EC_KEY_get0_private_key(ecdh) == NULL)) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB); goto err; } if (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher) && (EC_GROUP_get_degree(group) > 163)) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,SSL_R_ECGROUP_TOO_LARGE_FOR_CIPHER); goto err; } /* XXX: For now, we only support ephemeral ECDH * keys over named (not generic) curves. For * supported named curves, curve_id is non-zero. */ if ((curve_id = tls1_ec_nid2curve_id(EC_GROUP_get_curve_name(group))) == 0) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,SSL_R_UNSUPPORTED_ELLIPTIC_CURVE); goto err; } /* Encode the public key. * First check the size of encoding and * allocate memory accordingly. */ encodedlen = EC_POINT_point2oct(group, EC_KEY_get0_public_key(ecdh), POINT_CONVERSION_UNCOMPRESSED, NULL, 0, NULL); encodedPoint = (unsigned char *) OPENSSL_malloc(encodedlen*sizeof(unsigned char)); bn_ctx = BN_CTX_new(); if ((encodedPoint == NULL) || (bn_ctx == NULL)) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE); goto err; } encodedlen = EC_POINT_point2oct(group, EC_KEY_get0_public_key(ecdh), POINT_CONVERSION_UNCOMPRESSED, encodedPoint, encodedlen, bn_ctx); if (encodedlen == 0) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB); goto err; } BN_CTX_free(bn_ctx); bn_ctx=NULL; /* XXX: For now, we only support named (not * generic) curves in ECDH ephemeral key exchanges. 
* In this situation, we need four additional bytes * to encode the entire ServerECDHParams * structure. */ n = 4 + encodedlen; /* We'll generate the serverKeyExchange message * explicitly so we can set these to NULLs */ r[0]=NULL; r[1]=NULL; r[2]=NULL; r[3]=NULL; } else #endif /* !OPENSSL_NO_ECDH */ #ifndef OPENSSL_NO_PSK if (type & SSL_kPSK) { /* reserve size for record length and PSK identity hint*/ n+=2+strlen(s->ctx->psk_identity_hint); } else #endif /* !OPENSSL_NO_PSK */ { al=SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,SSL_R_UNKNOWN_KEY_EXCHANGE_TYPE); goto f_err; } for (i=0; r[i] != NULL; i++) { nr[i]=BN_num_bytes(r[i]); n+=2+nr[i]; } if (!(s->s3->tmp.new_cipher->algorithm_auth & SSL_aNULL) && !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)) { if ((pkey=ssl_get_sign_pkey(s,s->s3->tmp.new_cipher, NULL)) == NULL) { al=SSL_AD_DECODE_ERROR; goto f_err; } kn=EVP_PKEY_size(pkey); } else { pkey=NULL; kn=0; } if (!BUF_MEM_grow_clean(buf,n+DTLS1_HM_HEADER_LENGTH+kn)) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,ERR_LIB_BUF); goto err; } d=(unsigned char *)s->init_buf->data; p= &(d[DTLS1_HM_HEADER_LENGTH]); for (i=0; r[i] != NULL; i++) { s2n(nr[i],p); BN_bn2bin(r[i],p); p+=nr[i]; } #ifndef OPENSSL_NO_ECDH if (type & SSL_kEECDH) { /* XXX: For now, we only support named (not generic) curves. 
* In this situation, the serverKeyExchange message has: * [1 byte CurveType], [2 byte CurveName] * [1 byte length of encoded point], followed by * the actual encoded point itself */ *p = NAMED_CURVE_TYPE; p += 1; *p = 0; p += 1; *p = curve_id; p += 1; *p = encodedlen; p += 1; memcpy((unsigned char*)p, (unsigned char *)encodedPoint, encodedlen); OPENSSL_free(encodedPoint); p += encodedlen; } #endif #ifndef OPENSSL_NO_PSK if (type & SSL_kPSK) { /* copy PSK identity hint */ s2n(strlen(s->ctx->psk_identity_hint), p); strncpy((char *)p, s->ctx->psk_identity_hint, strlen(s->ctx->psk_identity_hint)); p+=strlen(s->ctx->psk_identity_hint); } #endif /* not anonymous */ if (pkey != NULL) { /* n is the length of the params, they start at * &(d[DTLS1_HM_HEADER_LENGTH]) and p points to the space * at the end. */ #ifndef OPENSSL_NO_RSA if (pkey->type == EVP_PKEY_RSA) { q=md_buf; j=0; for (num=2; num > 0; num--) { EVP_DigestInit_ex(&md_ctx,(num == 2) ?s->ctx->md5:s->ctx->sha1, NULL); EVP_DigestUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE); EVP_DigestUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE); EVP_DigestUpdate(&md_ctx,&(d[DTLS1_HM_HEADER_LENGTH]),n); EVP_DigestFinal_ex(&md_ctx,q, (unsigned int *)&i); q+=i; j+=i; } if (RSA_sign(NID_md5_sha1, md_buf, j, &(p[2]), &u, pkey->pkey.rsa) <= 0) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,ERR_LIB_RSA); goto err; } s2n(u,p); n+=u+2; } else #endif #if !defined(OPENSSL_NO_DSA) if (pkey->type == EVP_PKEY_DSA) { /* lets do DSS */ EVP_SignInit_ex(&md_ctx,EVP_dss1(), NULL); EVP_SignUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE); EVP_SignUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE); EVP_SignUpdate(&md_ctx,&(d[DTLS1_HM_HEADER_LENGTH]),n); if (!EVP_SignFinal(&md_ctx,&(p[2]), (unsigned int *)&i,pkey)) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,ERR_LIB_DSA); goto err; } s2n(i,p); n+=i+2; } else #endif #if !defined(OPENSSL_NO_ECDSA) if (pkey->type == EVP_PKEY_EC) { /* let's do ECDSA */ 
EVP_SignInit_ex(&md_ctx,EVP_ecdsa(), NULL); EVP_SignUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE); EVP_SignUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE); EVP_SignUpdate(&md_ctx,&(d[DTLS1_HM_HEADER_LENGTH]),n); if (!EVP_SignFinal(&md_ctx,&(p[2]), (unsigned int *)&i,pkey)) { SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,ERR_LIB_ECDSA); goto err; } s2n(i,p); n+=i+2; } else #endif { /* Is this error check actually needed? */ al=SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE,SSL_R_UNKNOWN_PKEY_TYPE); goto f_err; } } d = dtls1_set_message_header(s, d, SSL3_MT_SERVER_KEY_EXCHANGE, n, 0, n); /* we should now have things packed up, so lets send * it off */ s->init_num=n+DTLS1_HM_HEADER_LENGTH; s->init_off=0; /* buffer the message to handle re-xmits */ dtls1_buffer_message(s, 0); } s->state = SSL3_ST_SW_KEY_EXCH_B; EVP_MD_CTX_cleanup(&md_ctx); return(dtls1_do_write(s,SSL3_RT_HANDSHAKE)); f_err: ssl3_send_alert(s,SSL3_AL_FATAL,al); err: #ifndef OPENSSL_NO_ECDH if (encodedPoint != NULL) OPENSSL_free(encodedPoint); BN_CTX_free(bn_ctx); #endif EVP_MD_CTX_cleanup(&md_ctx); return(-1); } int dtls1_send_certificate_request(SSL *s) { unsigned char *p,*d; int i,j,nl,off,n; STACK_OF(X509_NAME) *sk=NULL; X509_NAME *name; BUF_MEM *buf; unsigned int msg_len; if (s->state == SSL3_ST_SW_CERT_REQ_A) { buf=s->init_buf; d=p=(unsigned char *)&(buf->data[DTLS1_HM_HEADER_LENGTH]); /* get the list of acceptable cert types */ p++; n=ssl3_get_req_cert_type(s,p); d[0]=n; p+=n; n++; off=n; p+=2; n+=2; sk=SSL_get_client_CA_list(s); nl=0; if (sk != NULL) { for (i=0; i<sk_X509_NAME_num(sk); i++) { name=sk_X509_NAME_value(sk,i); j=i2d_X509_NAME(name,NULL); if (!BUF_MEM_grow_clean(buf,DTLS1_HM_HEADER_LENGTH+n+j+2)) { SSLerr(SSL_F_DTLS1_SEND_CERTIFICATE_REQUEST,ERR_R_BUF_LIB); goto err; } p=(unsigned char *)&(buf->data[DTLS1_HM_HEADER_LENGTH+n]); if (!(s->options & SSL_OP_NETSCAPE_CA_DN_BUG)) { s2n(j,p); i2d_X509_NAME(name,&p); n+=2+j; nl+=2+j; } else { d=p; 
i2d_X509_NAME(name,&p); j-=2; s2n(j,d); j+=2; n+=j; nl+=j; } } } /* else no CA names */ p=(unsigned char *)&(buf->data[DTLS1_HM_HEADER_LENGTH+off]); s2n(nl,p); d=(unsigned char *)buf->data; *(d++)=SSL3_MT_CERTIFICATE_REQUEST; l2n3(n,d); s2n(s->d1->handshake_write_seq,d); s->d1->handshake_write_seq++; /* we should now have things packed up, so lets send * it off */ s->init_num=n+DTLS1_HM_HEADER_LENGTH; s->init_off=0; #ifdef NETSCAPE_HANG_BUG /* XXX: what to do about this? */ p=(unsigned char *)s->init_buf->data + s->init_num; /* do the header */ *(p++)=SSL3_MT_SERVER_DONE; *(p++)=0; *(p++)=0; *(p++)=0; s->init_num += 4; #endif /* XDTLS: set message header ? */ msg_len = s->init_num - DTLS1_HM_HEADER_LENGTH; dtls1_set_message_header(s, (void *)s->init_buf->data, SSL3_MT_CERTIFICATE_REQUEST, msg_len, 0, msg_len); /* buffer the message to handle re-xmits */ dtls1_buffer_message(s, 0); s->state = SSL3_ST_SW_CERT_REQ_B; } /* SSL3_ST_SW_CERT_REQ_B */ return(dtls1_do_write(s,SSL3_RT_HANDSHAKE)); err: return(-1); } int dtls1_send_server_certificate(SSL *s) { unsigned long l; X509 *x; if (s->state == SSL3_ST_SW_CERT_A) { x=ssl_get_server_send_cert(s); if (x == NULL) { /* VRS: allow null cert if auth == KRB5 */ if ((s->s3->tmp.new_cipher->algorithm_mkey != SSL_kKRB5) || (s->s3->tmp.new_cipher->algorithm_auth != SSL_aKRB5)) { SSLerr(SSL_F_DTLS1_SEND_SERVER_CERTIFICATE,ERR_R_INTERNAL_ERROR); return(0); } } l=dtls1_output_cert_chain(s,x); s->state=SSL3_ST_SW_CERT_B; s->init_num=(int)l; s->init_off=0; /* buffer the message to handle re-xmits */ dtls1_buffer_message(s, 0); } /* SSL3_ST_SW_CERT_B */ return(dtls1_do_write(s,SSL3_RT_HANDSHAKE)); } #ifndef OPENSSL_NO_TLSEXT int dtls1_send_newsession_ticket(SSL *s) { if (s->state == SSL3_ST_SW_SESSION_TICKET_A) { unsigned char *p, *senc, *macstart; int len, slen; unsigned int hlen, msg_len; EVP_CIPHER_CTX ctx; HMAC_CTX hctx; SSL_CTX *tctx = s->initial_ctx; unsigned char iv[EVP_MAX_IV_LENGTH]; unsigned char key_name[16]; /* get session 
encoding length */ slen = i2d_SSL_SESSION(s->session, NULL); /* Some length values are 16 bits, so forget it if session is * too long */ if (slen > 0xFF00) return -1; /* Grow buffer if need be: the length calculation is as * follows 12 (DTLS handshake message header) + * 4 (ticket lifetime hint) + 2 (ticket length) + * 16 (key name) + max_iv_len (iv length) + * session_length + max_enc_block_size (max encrypted session * length) + max_md_size (HMAC). */ if (!BUF_MEM_grow(s->init_buf, DTLS1_HM_HEADER_LENGTH + 22 + EVP_MAX_IV_LENGTH + EVP_MAX_BLOCK_LENGTH + EVP_MAX_MD_SIZE + slen)) return -1; senc = OPENSSL_malloc(slen); if (!senc) return -1; p = senc; i2d_SSL_SESSION(s->session, &p); p=(unsigned char *)&(s->init_buf->data[DTLS1_HM_HEADER_LENGTH]); EVP_CIPHER_CTX_init(&ctx); HMAC_CTX_init(&hctx); /* Initialize HMAC and cipher contexts. If callback present * it does all the work otherwise use generated values * from parent ctx. */ if (tctx->tlsext_ticket_key_cb) { if (tctx->tlsext_ticket_key_cb(s, key_name, iv, &ctx, &hctx, 1) < 0) { OPENSSL_free(senc); return -1; } } else { RAND_pseudo_bytes(iv, 16); EVP_EncryptInit_ex(&ctx, EVP_aes_128_cbc(), NULL, tctx->tlsext_tick_aes_key, iv); HMAC_Init_ex(&hctx, tctx->tlsext_tick_hmac_key, 16, tlsext_tick_md(), NULL); memcpy(key_name, tctx->tlsext_tick_key_name, 16); } l2n(s->session->tlsext_tick_lifetime_hint, p); /* Skip ticket length for now */ p += 2; /* Output key name */ macstart = p; memcpy(p, key_name, 16); p += 16; /* output IV */ memcpy(p, iv, EVP_CIPHER_CTX_iv_length(&ctx)); p += EVP_CIPHER_CTX_iv_length(&ctx); /* Encrypt session data */ EVP_EncryptUpdate(&ctx, p, &len, senc, slen); p += len; EVP_EncryptFinal(&ctx, p, &len); p += len; EVP_CIPHER_CTX_cleanup(&ctx); HMAC_Update(&hctx, macstart, p - macstart); HMAC_Final(&hctx, p, &hlen); HMAC_CTX_cleanup(&hctx); p += hlen; /* Now write out lengths: p points to end of data written */ /* Total length */ len = p - (unsigned char *)(s->init_buf->data); /* Ticket length */ 
p=(unsigned char *)&(s->init_buf->data[DTLS1_HM_HEADER_LENGTH]) + 4; s2n(len - DTLS1_HM_HEADER_LENGTH - 6, p); /* number of bytes to write */ s->init_num= len; s->state=SSL3_ST_SW_SESSION_TICKET_B; s->init_off=0; OPENSSL_free(senc); /* XDTLS: set message header ? */ msg_len = s->init_num - DTLS1_HM_HEADER_LENGTH; dtls1_set_message_header(s, (void *)s->init_buf->data, SSL3_MT_NEWSESSION_TICKET, msg_len, 0, msg_len); /* buffer the message to handle re-xmits */ dtls1_buffer_message(s, 0); } /* SSL3_ST_SW_SESSION_TICKET_B */ return(dtls1_do_write(s,SSL3_RT_HANDSHAKE)); } #endif
gpl-2.0
dhinesh77/android_kernel_samsung_corsica
drivers/net/sfc/siena.c
1691
18497
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. * Copyright 2006-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/random.h> #include "net_driver.h" #include "bitfield.h" #include "efx.h" #include "nic.h" #include "mac.h" #include "spi.h" #include "regs.h" #include "io.h" #include "phy.h" #include "workarounds.h" #include "mcdi.h" #include "mcdi_pcol.h" /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ static void siena_init_wol(struct efx_nic *efx); static void siena_push_irq_moderation(struct efx_channel *channel) { efx_dword_t timer_cmd; if (channel->irq_moderation) EFX_POPULATE_DWORD_2(timer_cmd, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF, FRF_CZ_TC_TIMER_VAL, channel->irq_moderation - 1); else EFX_POPULATE_DWORD_2(timer_cmd, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS, FRF_CZ_TC_TIMER_VAL, 0); efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, channel->channel); } static void siena_push_multicast_hash(struct efx_nic *efx) { WARN_ON(!mutex_is_locked(&efx->mac_lock)); efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, efx->multicast_hash.byte, sizeof(efx->multicast_hash), NULL, 0, NULL); } static int siena_mdio_write(struct net_device *net_dev, int prtad, int devad, u16 addr, u16 value) { struct efx_nic *efx = netdev_priv(net_dev); uint32_t status; int rc; rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad, addr, value, &status); if (rc) return rc; if (status != MC_CMD_MDIO_STATUS_GOOD) return -EIO; return 0; } static int 
siena_mdio_read(struct net_device *net_dev, int prtad, int devad, u16 addr) { struct efx_nic *efx = netdev_priv(net_dev); uint16_t value; uint32_t status; int rc; rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad, addr, &value, &status); if (rc) return rc; if (status != MC_CMD_MDIO_STATUS_GOOD) return -EIO; return (int)value; } /* This call is responsible for hooking in the MAC and PHY operations */ static int siena_probe_port(struct efx_nic *efx) { int rc; /* Hook in PHY operations table */ efx->phy_op = &efx_mcdi_phy_ops; /* Set up MDIO structure for PHY */ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; efx->mdio.mdio_read = siena_mdio_read; efx->mdio.mdio_write = siena_mdio_write; /* Fill out MDIO structure, loopback modes, and initial link state */ rc = efx->phy_op->probe(efx); if (rc != 0) return rc; /* Allocate buffer for stats */ rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, MC_CMD_MAC_NSTATS * sizeof(u64)); if (rc) return rc; netif_dbg(efx, probe, efx->net_dev, "stats buffer at %llx (virt %p phys %llx)\n", (u64)efx->stats_buffer.dma_addr, efx->stats_buffer.addr, (u64)virt_to_phys(efx->stats_buffer.addr)); efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1); return 0; } static void siena_remove_port(struct efx_nic *efx) { efx->phy_op->remove(efx); efx_nic_free_buffer(efx, &efx->stats_buffer); } static const struct efx_nic_register_test siena_register_tests[] = { { FR_AZ_ADR_REGION, EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, { FR_CZ_USR_EV_CFG, EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) }, { FR_AZ_RX_CFG, EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) }, { FR_AZ_TX_CFG, EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) }, { FR_AZ_TX_RESERVED, EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, { FR_AZ_SRM_TX_DC_CFG, EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, { FR_AZ_RX_DC_CFG, EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 
0x00000000) }, { FR_AZ_RX_DC_PF_WM, EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, { FR_BZ_DP_CTRL, EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, { FR_BZ_RX_RSS_TKEY, EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, { FR_CZ_RX_RSS_IPV6_REG1, EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, { FR_CZ_RX_RSS_IPV6_REG2, EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, { FR_CZ_RX_RSS_IPV6_REG3, EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) }, }; static int siena_test_registers(struct efx_nic *efx) { return efx_nic_test_registers(efx, siena_register_tests, ARRAY_SIZE(siena_register_tests)); } /************************************************************************** * * Device reset * ************************************************************************** */ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method) { int rc; /* Recover from a failed assertion pre-reset */ rc = efx_mcdi_handle_assertion(efx); if (rc) return rc; if (method == RESET_TYPE_WORLD) return efx_mcdi_reset_mc(efx); else return efx_mcdi_reset_port(efx); } static int siena_probe_nvconfig(struct efx_nic *efx) { return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL); } static int siena_probe_nic(struct efx_nic *efx) { struct siena_nic_data *nic_data; bool already_attached = 0; efx_oword_t reg; int rc; /* Allocate storage for hardware specific data */ nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); if (!nic_data) return -ENOMEM; efx->nic_data = nic_data; if (efx_nic_fpga_ver(efx) != 0) { netif_err(efx, probe, efx->net_dev, "Siena FPGA not supported\n"); rc = -ENODEV; goto fail1; } efx_reado(efx, &reg, FR_AZ_CS_DEBUG); efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; efx_mcdi_init(efx); /* Recover from a failed assertion before probing */ rc = efx_mcdi_handle_assertion(efx); if (rc) goto fail1; /* Let the BMC know that the driver is now in charge of link 
and * filter settings. We must do this before we reset the NIC */ rc = efx_mcdi_drv_attach(efx, true, &already_attached); if (rc) { netif_err(efx, probe, efx->net_dev, "Unable to register driver with MCPU\n"); goto fail2; } if (already_attached) /* Not a fatal error */ netif_err(efx, probe, efx->net_dev, "Host already registered with MCPU\n"); /* Now we can reset the NIC */ rc = siena_reset_hw(efx, RESET_TYPE_ALL); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); goto fail3; } siena_init_wol(efx); /* Allocate memory for INT_KER */ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); if (rc) goto fail4; BUG_ON(efx->irq_status.dma_addr & 0x0f); netif_dbg(efx, probe, efx->net_dev, "INT_KER at %llx (virt %p phys %llx)\n", (unsigned long long)efx->irq_status.dma_addr, efx->irq_status.addr, (unsigned long long)virt_to_phys(efx->irq_status.addr)); /* Read in the non-volatile configuration */ rc = siena_probe_nvconfig(efx); if (rc == -EINVAL) { netif_err(efx, probe, efx->net_dev, "NVRAM is invalid therefore using defaults\n"); efx->phy_type = PHY_TYPE_NONE; efx->mdio.prtad = MDIO_PRTAD_NONE; } else if (rc) { goto fail5; } return 0; fail5: efx_nic_free_buffer(efx, &efx->irq_status); fail4: fail3: efx_mcdi_drv_attach(efx, false, NULL); fail2: fail1: kfree(efx->nic_data); return rc; } /* This call performs hardware-specific global initialisation, such as * defining the descriptor cache sizes and number of RSS channels. * It does not set up any buffers, descriptor rings or event queues. 
*/ static int siena_init_nic(struct efx_nic *efx) { efx_oword_t temp; int rc; /* Recover from a failed assertion post-reset */ rc = efx_mcdi_handle_assertion(efx); if (rc) return rc; /* Squash TX of packets of 16 bytes or less */ efx_reado(efx, &temp, FR_AZ_TX_RESERVED); EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 * descriptors (which is bad). */ efx_reado(efx, &temp, FR_AZ_TX_CFG); EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1); efx_writeo(efx, &temp, FR_AZ_TX_CFG); efx_reado(efx, &temp, FR_AZ_RX_CFG); EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); /* Enable hash insertion. This is broken for the 'Falcon' hash * if IPv6 hashing is also enabled, so also select Toeplitz * TCP/IPv4 and IPv4 hashes. */ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); efx_writeo(efx, &temp, FR_AZ_RX_CFG); /* Set hash key for IPv4 */ memcpy(&temp, efx->rx_hash_key, sizeof(temp)); efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); /* Enable IPv6 RSS */ BUILD_BUG_ON(sizeof(efx->rx_hash_key) < 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 || FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0); memcpy(&temp, efx->rx_hash_key, sizeof(temp)); efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp)); efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1, FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1); memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp), FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); /* Enable event logging */ rc = efx_mcdi_log_ctrl(efx, true, false, 0); if (rc) return rc; /* Set destination of both TX and RX 
Flush events */ EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); efx_writeo(efx, &temp, FR_BZ_DP_CTRL); EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); efx_nic_init_common(efx); return 0; } static void siena_remove_nic(struct efx_nic *efx) { efx_nic_free_buffer(efx, &efx->irq_status); siena_reset_hw(efx, RESET_TYPE_ALL); /* Relinquish the device back to the BMC */ if (efx_nic_has_mc(efx)) efx_mcdi_drv_attach(efx, false, NULL); /* Tear down the private nic state */ kfree(efx->nic_data); efx->nic_data = NULL; } #define STATS_GENERATION_INVALID ((u64)(-1)) static int siena_try_update_nic_stats(struct efx_nic *efx) { u64 *dma_stats; struct efx_mac_stats *mac_stats; u64 generation_start; u64 generation_end; mac_stats = &efx->mac_stats; dma_stats = (u64 *)efx->stats_buffer.addr; generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; if (generation_end == STATS_GENERATION_INVALID) return 0; rmb(); #define MAC_STAT(M, D) \ mac_stats->M = dma_stats[MC_CMD_MAC_ ## D] MAC_STAT(tx_bytes, TX_BYTES); MAC_STAT(tx_bad_bytes, TX_BAD_BYTES); mac_stats->tx_good_bytes = (mac_stats->tx_bytes - mac_stats->tx_bad_bytes); MAC_STAT(tx_packets, TX_PKTS); MAC_STAT(tx_bad, TX_BAD_FCS_PKTS); MAC_STAT(tx_pause, TX_PAUSE_PKTS); MAC_STAT(tx_control, TX_CONTROL_PKTS); MAC_STAT(tx_unicast, TX_UNICAST_PKTS); MAC_STAT(tx_multicast, TX_MULTICAST_PKTS); MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS); MAC_STAT(tx_lt64, TX_LT64_PKTS); MAC_STAT(tx_64, TX_64_PKTS); MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS); MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS); MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS); MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS); MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS); MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS); MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS); mac_stats->tx_collision = 0; MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS); MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS); MAC_STAT(tx_excessive_collision, 
TX_EXCESSIVE_COLLISION_PKTS); MAC_STAT(tx_deferred, TX_DEFERRED_PKTS); MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS); mac_stats->tx_collision = (mac_stats->tx_single_collision + mac_stats->tx_multiple_collision + mac_stats->tx_excessive_collision + mac_stats->tx_late_collision); MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS); MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS); MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS); MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS); MAC_STAT(rx_bytes, RX_BYTES); MAC_STAT(rx_bad_bytes, RX_BAD_BYTES); mac_stats->rx_good_bytes = (mac_stats->rx_bytes - mac_stats->rx_bad_bytes); MAC_STAT(rx_packets, RX_PKTS); MAC_STAT(rx_good, RX_GOOD_PKTS); MAC_STAT(rx_bad, RX_BAD_FCS_PKTS); MAC_STAT(rx_pause, RX_PAUSE_PKTS); MAC_STAT(rx_control, RX_CONTROL_PKTS); MAC_STAT(rx_unicast, RX_UNICAST_PKTS); MAC_STAT(rx_multicast, RX_MULTICAST_PKTS); MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS); MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS); MAC_STAT(rx_64, RX_64_PKTS); MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS); MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS); MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS); MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS); MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS); MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS); MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS); mac_stats->rx_bad_lt64 = 0; mac_stats->rx_bad_64_to_15xx = 0; mac_stats->rx_bad_15xx_to_jumbo = 0; MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS); MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS); mac_stats->rx_missed = 0; MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS); MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS); MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS); MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS); MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS); mac_stats->rx_good_lt64 = 0; efx->n_rx_nodesc_drop_cnt = dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]; #undef MAC_STAT rmb(); generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; if (generation_end != generation_start) return 
-EAGAIN; return 0; } static void siena_update_nic_stats(struct efx_nic *efx) { int retry; /* If we're unlucky enough to read statistics wduring the DMA, wait * up to 10ms for it to finish (typically takes <500us) */ for (retry = 0; retry < 100; ++retry) { if (siena_try_update_nic_stats(efx) == 0) return; udelay(100); } /* Use the old values instead */ } static void siena_start_nic_stats(struct efx_nic *efx) { u64 *dma_stats = (u64 *)efx->stats_buffer.addr; dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID; efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0); } static void siena_stop_nic_stats(struct efx_nic *efx) { efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); } /************************************************************************** * * Wake on LAN * ************************************************************************** */ static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) { struct siena_nic_data *nic_data = efx->nic_data; wol->supported = WAKE_MAGIC; if (nic_data->wol_filter_id != -1) wol->wolopts = WAKE_MAGIC; else wol->wolopts = 0; memset(&wol->sopass, 0, sizeof(wol->sopass)); } static int siena_set_wol(struct efx_nic *efx, u32 type) { struct siena_nic_data *nic_data = efx->nic_data; int rc; if (type & ~WAKE_MAGIC) return -EINVAL; if (type & WAKE_MAGIC) { if (nic_data->wol_filter_id != -1) efx_mcdi_wol_filter_remove(efx, nic_data->wol_filter_id); rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr, &nic_data->wol_filter_id); if (rc) goto fail; pci_wake_from_d3(efx->pci_dev, true); } else { rc = efx_mcdi_wol_filter_reset(efx); nic_data->wol_filter_id = -1; pci_wake_from_d3(efx->pci_dev, false); if (rc) goto fail; } return 0; fail: netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n", __func__, type, rc); return rc; } static void siena_init_wol(struct efx_nic *efx) { struct siena_nic_data *nic_data = efx->nic_data; int rc; rc = 
efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id); if (rc != 0) { /* If it failed, attempt to get into a synchronised * state with MC by resetting any set WoL filters */ efx_mcdi_wol_filter_reset(efx); nic_data->wol_filter_id = -1; } else if (nic_data->wol_filter_id != -1) { pci_wake_from_d3(efx->pci_dev, true); } } /************************************************************************** * * Revision-dependent attributes used by efx.c and nic.c * ************************************************************************** */ const struct efx_nic_type siena_a0_nic_type = { .probe = siena_probe_nic, .remove = siena_remove_nic, .init = siena_init_nic, .fini = efx_port_dummy_op_void, .monitor = NULL, .reset = siena_reset_hw, .probe_port = siena_probe_port, .remove_port = siena_remove_port, .prepare_flush = efx_port_dummy_op_void, .update_stats = siena_update_nic_stats, .start_stats = siena_start_nic_stats, .stop_stats = siena_stop_nic_stats, .set_id_led = efx_mcdi_set_id_led, .push_irq_moderation = siena_push_irq_moderation, .push_multicast_hash = siena_push_multicast_hash, .reconfigure_port = efx_mcdi_phy_reconfigure, .get_wol = siena_get_wol, .set_wol = siena_set_wol, .resume_wol = siena_init_wol, .test_registers = siena_test_registers, .test_nvram = efx_mcdi_nvram_test_all, .default_mac_ops = &efx_mcdi_mac_operations, .revision = EFX_REV_SIENA_A0, .mem_map_size = (FR_CZ_MC_TREG_SMEM + FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, .buf_tbl_base = FR_BZ_BUF_FULL_TBL, .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .rx_buffer_hash_size = 0x10, .rx_buffer_padding = 0, .max_interrupt_mode = EFX_INT_MODE_MSIX, .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy * interrupt handler only supports 32 * channels */ .tx_dc_base = 0x88000, .rx_dc_base = 0x68000, 
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE), .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT, };
gpl-2.0
artemh/asuswrt-merlin
release/src-rt-6.x.4708/linux/linux-2.6.36/fs/autofs/inode.c
1691
6827
/* -*- linux-c -*- --------------------------------------------------------- * * * linux/fs/autofs/inode.c * * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved * * This file is part of the Linux kernel and is made available under * the terms of the GNU General Public License, version 2, or at your * option, any later version, incorporated herein by reference. * * ------------------------------------------------------------------------- */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/parser.h> #include <linux/bitops.h> #include <linux/magic.h> #include "autofs_i.h" #include <linux/module.h> void autofs_kill_sb(struct super_block *sb) { struct autofs_sb_info *sbi = autofs_sbi(sb); unsigned int n; /* * In the event of a failure in get_sb_nodev the superblock * info is not present so nothing else has been setup, so * just call kill_anon_super when we are called from * deactivate_super. */ if (!sbi) goto out_kill_sb; if (!sbi->catatonic) autofs_catatonic_mode(sbi); /* Free wait queues, close pipe */ put_pid(sbi->oz_pgrp); autofs_hash_nuke(sbi); for (n = 0; n < AUTOFS_MAX_SYMLINKS; n++) { if (test_bit(n, sbi->symlink_bitmap)) kfree(sbi->symlink[n].data); } kfree(sb->s_fs_info); out_kill_sb: DPRINTK(("autofs: shutting down\n")); kill_anon_super(sb); } static const struct super_operations autofs_sops = { .statfs = simple_statfs, .show_options = generic_show_options, }; enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto}; static const match_table_t autofs_tokens = { {Opt_fd, "fd=%u"}, {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_pgrp, "pgrp=%u"}, {Opt_minproto, "minproto=%u"}, {Opt_maxproto, "maxproto=%u"}, {Opt_err, NULL} }; static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid, pid_t *pgrp, int *minproto, int *maxproto) { char *p; substring_t args[MAX_OPT_ARGS]; int option; *uid = current_uid(); *gid = current_gid(); *pgrp = 
task_pgrp_nr(current); *minproto = *maxproto = AUTOFS_PROTO_VERSION; *pipefd = -1; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) continue; token = match_token(p, autofs_tokens, args); switch (token) { case Opt_fd: if (match_int(&args[0], &option)) return 1; *pipefd = option; break; case Opt_uid: if (match_int(&args[0], &option)) return 1; *uid = option; break; case Opt_gid: if (match_int(&args[0], &option)) return 1; *gid = option; break; case Opt_pgrp: if (match_int(&args[0], &option)) return 1; *pgrp = option; break; case Opt_minproto: if (match_int(&args[0], &option)) return 1; *minproto = option; break; case Opt_maxproto: if (match_int(&args[0], &option)) return 1; *maxproto = option; break; default: return 1; } } return (*pipefd < 0); } int autofs_fill_super(struct super_block *s, void *data, int silent) { struct inode * root_inode; struct dentry * root; struct file * pipe; int pipefd; struct autofs_sb_info *sbi; int minproto, maxproto; pid_t pgid; save_mount_options(s, data); sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) goto fail_unlock; DPRINTK(("autofs: starting up, sbi = %p\n",sbi)); s->s_fs_info = sbi; sbi->magic = AUTOFS_SBI_MAGIC; sbi->pipe = NULL; sbi->catatonic = 1; sbi->exp_timeout = 0; autofs_initialize_hash(&sbi->dirhash); sbi->queues = NULL; memset(sbi->symlink_bitmap, 0, sizeof(long)*AUTOFS_SYMLINK_BITMAP_LEN); sbi->next_dir_ino = AUTOFS_FIRST_DIR_INO; s->s_blocksize = 1024; s->s_blocksize_bits = 10; s->s_magic = AUTOFS_SUPER_MAGIC; s->s_op = &autofs_sops; s->s_time_gran = 1; sbi->sb = s; root_inode = autofs_iget(s, AUTOFS_ROOT_INO); if (IS_ERR(root_inode)) goto fail_free; root = d_alloc_root(root_inode); pipe = NULL; if (!root) goto fail_iput; /* Can this call block? - WTF cares? s is locked. 
*/ if (parse_options(data, &pipefd, &root_inode->i_uid, &root_inode->i_gid, &pgid, &minproto, &maxproto)) { printk("autofs: called with bogus options\n"); goto fail_dput; } /* Couldn't this be tested earlier? */ if (minproto > AUTOFS_PROTO_VERSION || maxproto < AUTOFS_PROTO_VERSION) { printk("autofs: kernel does not match daemon version\n"); goto fail_dput; } DPRINTK(("autofs: pipe fd = %d, pgrp = %u\n", pipefd, pgid)); sbi->oz_pgrp = find_get_pid(pgid); if (!sbi->oz_pgrp) { printk("autofs: could not find process group %d\n", pgid); goto fail_dput; } pipe = fget(pipefd); if (!pipe) { printk("autofs: could not open pipe file descriptor\n"); goto fail_put_pid; } if (!pipe->f_op || !pipe->f_op->write) goto fail_fput; sbi->pipe = pipe; sbi->catatonic = 0; /* * Success! Install the root dentry now to indicate completion. */ s->s_root = root; return 0; fail_fput: printk("autofs: pipe file descriptor does not contain proper ops\n"); fput(pipe); fail_put_pid: put_pid(sbi->oz_pgrp); fail_dput: dput(root); goto fail_free; fail_iput: printk("autofs: get root dentry failed\n"); iput(root_inode); fail_free: kfree(sbi); s->s_fs_info = NULL; fail_unlock: return -EINVAL; } struct inode *autofs_iget(struct super_block *sb, unsigned long ino) { unsigned int n; struct autofs_sb_info *sbi = autofs_sbi(sb); struct inode *inode; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; /* Initialize to the default case (stub directory) */ inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; inode->i_nlink = 2; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; if (ino == AUTOFS_ROOT_INO) { inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR; inode->i_op = &autofs_root_inode_operations; inode->i_fop = &autofs_root_operations; goto done; } inode->i_uid = inode->i_sb->s_root->d_inode->i_uid; inode->i_gid = inode->i_sb->s_root->d_inode->i_gid; if 
(ino >= AUTOFS_FIRST_SYMLINK && ino < AUTOFS_FIRST_DIR_INO) { /* Symlink inode - should be in symlink list */ struct autofs_symlink *sl; n = ino - AUTOFS_FIRST_SYMLINK; if (n >= AUTOFS_MAX_SYMLINKS || !test_bit(n,sbi->symlink_bitmap)) { printk("autofs: Looking for bad symlink inode %u\n", (unsigned int) ino); goto done; } inode->i_op = &autofs_symlink_inode_operations; sl = &sbi->symlink[n]; inode->i_private = sl; inode->i_mode = S_IFLNK | S_IRWXUGO; inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = sl->mtime; inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0; inode->i_size = sl->len; inode->i_nlink = 1; } done: unlock_new_inode(inode); return inode; }
gpl-2.0
mythos234/zerolte-kernel-CM
arch/arm/mach-tegra/tegra2_emc.c
1947
7822
/* * Copyright (C) 2011 Google, Inc. * * Author: * Colin Cross <ccross@android.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/platform_data/tegra_emc.h> #include "tegra2_emc.h" #include "fuse.h" #ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE static bool emc_enable = true; #else static bool emc_enable; #endif module_param(emc_enable, bool, 0644); static struct platform_device *emc_pdev; static void __iomem *emc_regbase; static inline void emc_writel(u32 val, unsigned long addr) { writel(val, emc_regbase + addr); } static inline u32 emc_readl(unsigned long addr) { return readl(emc_regbase + addr); } static const unsigned long emc_reg_addr[TEGRA_EMC_NUM_REGS] = { 0x2c, /* RC */ 0x30, /* RFC */ 0x34, /* RAS */ 0x38, /* RP */ 0x3c, /* R2W */ 0x40, /* W2R */ 0x44, /* R2P */ 0x48, /* W2P */ 0x4c, /* RD_RCD */ 0x50, /* WR_RCD */ 0x54, /* RRD */ 0x58, /* REXT */ 0x5c, /* WDV */ 0x60, /* QUSE */ 0x64, /* QRST */ 0x68, /* QSAFE */ 0x6c, /* RDV */ 0x70, /* REFRESH */ 0x74, /* BURST_REFRESH_NUM */ 0x78, /* PDEX2WR */ 0x7c, /* PDEX2RD */ 0x80, /* PCHG2PDEN */ 0x84, /* ACT2PDEN */ 0x88, /* AR2PDEN */ 0x8c, /* RW2PDEN */ 0x90, /* TXSR */ 0x94, /* TCKE */ 0x98, /* TFAW */ 0x9c, /* TRPAB */ 0xa0, /* TCLKSTABLE */ 0xa4, /* TCLKSTOP */ 0xa8, /* TREFBW */ 0xac, /* QUSE_EXTRA */ 0x114, /* FBIO_CFG6 */ 0xb0, /* ODT_WRITE */ 0xb4, /* ODT_READ */ 0x104, /* FBIO_CFG5 */ 0x2bc, /* 
CFG_DIG_DLL */ 0x2c0, /* DLL_XFORM_DQS */ 0x2c4, /* DLL_XFORM_QUSE */ 0x2e0, /* ZCAL_REF_CNT */ 0x2e4, /* ZCAL_WAIT_CNT */ 0x2a8, /* AUTO_CAL_INTERVAL */ 0x2d0, /* CFG_CLKTRIM_0 */ 0x2d4, /* CFG_CLKTRIM_1 */ 0x2d8, /* CFG_CLKTRIM_2 */ }; /* Select the closest EMC rate that is higher than the requested rate */ long tegra_emc_round_rate(unsigned long rate) { struct tegra_emc_pdata *pdata; int i; int best = -1; unsigned long distance = ULONG_MAX; if (!emc_pdev) return -EINVAL; pdata = emc_pdev->dev.platform_data; pr_debug("%s: %lu\n", __func__, rate); /* * The EMC clock rate is twice the bus rate, and the bus rate is * measured in kHz */ rate = rate / 2 / 1000; for (i = 0; i < pdata->num_tables; i++) { if (pdata->tables[i].rate >= rate && (pdata->tables[i].rate - rate) < distance) { distance = pdata->tables[i].rate - rate; best = i; } } if (best < 0) return -EINVAL; pr_debug("%s: using %lu\n", __func__, pdata->tables[best].rate); return pdata->tables[best].rate * 2 * 1000; } /* * The EMC registers have shadow registers. When the EMC clock is updated * in the clock controller, the shadow registers are copied to the active * registers, allowing glitchless memory bus frequency changes. 
* This function updates the shadow registers for a new clock frequency, * and relies on the clock lock on the emc clock to avoid races between * multiple frequency changes */ int tegra_emc_set_rate(unsigned long rate) { struct tegra_emc_pdata *pdata; int i; int j; if (!emc_pdev) return -EINVAL; pdata = emc_pdev->dev.platform_data; /* * The EMC clock rate is twice the bus rate, and the bus rate is * measured in kHz */ rate = rate / 2 / 1000; for (i = 0; i < pdata->num_tables; i++) if (pdata->tables[i].rate == rate) break; if (i >= pdata->num_tables) return -EINVAL; pr_debug("%s: setting to %lu\n", __func__, rate); for (j = 0; j < TEGRA_EMC_NUM_REGS; j++) emc_writel(pdata->tables[i].regs[j], emc_reg_addr[j]); emc_readl(pdata->tables[i].regs[TEGRA_EMC_NUM_REGS - 1]); return 0; } #ifdef CONFIG_OF static struct device_node *tegra_emc_ramcode_devnode(struct device_node *np) { struct device_node *iter; u32 reg; for_each_child_of_node(np, iter) { if (of_property_read_u32(np, "nvidia,ram-code", &reg)) continue; if (reg == tegra_bct_strapping) return of_node_get(iter); } return NULL; } static struct tegra_emc_pdata *tegra_emc_dt_parse_pdata( struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct device_node *tnp, *iter; struct tegra_emc_pdata *pdata; int ret, i, num_tables; if (!np) return NULL; if (of_find_property(np, "nvidia,use-ram-code", NULL)) { tnp = tegra_emc_ramcode_devnode(np); if (!tnp) dev_warn(&pdev->dev, "can't find emc table for ram-code 0x%02x\n", tegra_bct_strapping); } else tnp = of_node_get(np); if (!tnp) return NULL; num_tables = 0; for_each_child_of_node(tnp, iter) if (of_device_is_compatible(iter, "nvidia,tegra20-emc-table")) num_tables++; if (!num_tables) { pdata = NULL; goto out; } pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); pdata->tables = devm_kzalloc(&pdev->dev, sizeof(*pdata->tables) * num_tables, GFP_KERNEL); i = 0; for_each_child_of_node(tnp, iter) { u32 prop; ret = of_property_read_u32(iter, 
"clock-frequency", &prop); if (ret) { dev_err(&pdev->dev, "no clock-frequency in %s\n", iter->full_name); continue; } pdata->tables[i].rate = prop; ret = of_property_read_u32_array(iter, "nvidia,emc-registers", pdata->tables[i].regs, TEGRA_EMC_NUM_REGS); if (ret) { dev_err(&pdev->dev, "malformed emc-registers property in %s\n", iter->full_name); continue; } i++; } pdata->num_tables = i; out: of_node_put(tnp); return pdata; } #else static struct tegra_emc_pdata *tegra_emc_dt_parse_pdata( struct platform_device *pdev) { return NULL; } #endif static struct tegra_emc_pdata *tegra_emc_fill_pdata(struct platform_device *pdev) { struct clk *c = clk_get_sys(NULL, "emc"); struct tegra_emc_pdata *pdata; unsigned long khz; int i; WARN_ON(pdev->dev.platform_data); BUG_ON(IS_ERR(c)); pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); pdata->tables = devm_kzalloc(&pdev->dev, sizeof(*pdata->tables), GFP_KERNEL); pdata->tables[0].rate = clk_get_rate(c) / 2 / 1000; for (i = 0; i < TEGRA_EMC_NUM_REGS; i++) pdata->tables[0].regs[i] = emc_readl(emc_reg_addr[i]); pdata->num_tables = 1; khz = pdata->tables[0].rate; dev_info(&pdev->dev, "no tables provided, using %ld kHz emc, " "%ld kHz mem\n", khz * 2, khz); return pdata; } static int tegra_emc_probe(struct platform_device *pdev) { struct tegra_emc_pdata *pdata; struct resource *res; if (!emc_enable) { dev_err(&pdev->dev, "disabled per module parameter\n"); return -ENODEV; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); emc_regbase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(emc_regbase)) return PTR_ERR(emc_regbase); pdata = pdev->dev.platform_data; if (!pdata) pdata = tegra_emc_dt_parse_pdata(pdev); if (!pdata) pdata = tegra_emc_fill_pdata(pdev); pdev->dev.platform_data = pdata; emc_pdev = pdev; return 0; } static struct of_device_id tegra_emc_of_match[] = { { .compatible = "nvidia,tegra20-emc", }, { }, }; static struct platform_driver tegra_emc_driver = { .driver = { .name = "tegra-emc", .owner = 
THIS_MODULE, .of_match_table = tegra_emc_of_match, }, .probe = tegra_emc_probe, }; static int __init tegra_emc_init(void) { return platform_driver_register(&tegra_emc_driver); } device_initcall(tegra_emc_init);
gpl-2.0
civato/KK_RUNNER-Note8.0
drivers/media/video/saa7164/saa7164-dvb.c
2203
15812
/* * Driver for the NXP SAA7164 PCIe bridge * * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "saa7164.h" #include "tda10048.h" #include "tda18271.h" #include "s5h1411.h" #define DRIVER_NAME "saa7164" DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); /* addr is in the card struct, get it from there */ static struct tda10048_config hauppauge_hvr2200_1_config = { .demod_address = 0x10 >> 1, .output_mode = TDA10048_SERIAL_OUTPUT, .fwbulkwritelen = TDA10048_BULKWRITE_200, .inversion = TDA10048_INVERSION_ON, .dtv6_if_freq_khz = TDA10048_IF_3300, .dtv7_if_freq_khz = TDA10048_IF_3500, .dtv8_if_freq_khz = TDA10048_IF_4000, .clk_freq_khz = TDA10048_CLK_16000, }; static struct tda10048_config hauppauge_hvr2200_2_config = { .demod_address = 0x12 >> 1, .output_mode = TDA10048_SERIAL_OUTPUT, .fwbulkwritelen = TDA10048_BULKWRITE_200, .inversion = TDA10048_INVERSION_ON, .dtv6_if_freq_khz = TDA10048_IF_3300, .dtv7_if_freq_khz = TDA10048_IF_3500, .dtv8_if_freq_khz = TDA10048_IF_4000, .clk_freq_khz = TDA10048_CLK_16000, }; static struct tda18271_std_map hauppauge_tda18271_std_map = { .atsc_6 = { .if_freq = 3250, .agc_mode = 3, .std = 3, .if_lvl = 6, .rfagc_top = 0x37 }, .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 0, .if_lvl = 6, .rfagc_top = 0x37 }, }; static struct tda18271_config 
hauppauge_hvr22x0_tuner_config = { .std_map = &hauppauge_tda18271_std_map, .gate = TDA18271_GATE_ANALOG, .role = TDA18271_MASTER, }; static struct tda18271_config hauppauge_hvr22x0s_tuner_config = { .std_map = &hauppauge_tda18271_std_map, .gate = TDA18271_GATE_ANALOG, .role = TDA18271_SLAVE, .output_opt = TDA18271_OUTPUT_LT_OFF, .rf_cal_on_startup = 1 }; static struct s5h1411_config hauppauge_s5h1411_config = { .output_mode = S5H1411_SERIAL_OUTPUT, .gpio = S5H1411_GPIO_ON, .qam_if = S5H1411_IF_4000, .vsb_if = S5H1411_IF_3250, .inversion = S5H1411_INVERSION_ON, .status_mode = S5H1411_DEMODLOCKING, .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK, }; static int saa7164_dvb_stop_port(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; int ret; ret = saa7164_api_transition_port(port, SAA_DMASTATE_STOP); if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() stop transition failed, ret = 0x%x\n", __func__, ret); ret = -EIO; } else { dprintk(DBGLVL_DVB, "%s() Stopped\n", __func__); ret = 0; } return ret; } static int saa7164_dvb_acquire_port(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; int ret; ret = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE); if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() acquire transition failed, ret = 0x%x\n", __func__, ret); ret = -EIO; } else { dprintk(DBGLVL_DVB, "%s() Acquired\n", __func__); ret = 0; } return ret; } static int saa7164_dvb_pause_port(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; int ret; ret = saa7164_api_transition_port(port, SAA_DMASTATE_PAUSE); if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() pause transition failed, ret = 0x%x\n", __func__, ret); ret = -EIO; } else { dprintk(DBGLVL_DVB, "%s() Paused\n", __func__); ret = 0; } return ret; } /* Firmware is very windows centric, meaning you have to transition * the part through AVStream / KS Windows stages, 
forwards or backwards. * States are: stopped, acquired (h/w), paused, started. */ static int saa7164_dvb_stop_streaming(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct saa7164_buffer *buf; struct list_head *p, *q; int ret; dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr); ret = saa7164_dvb_pause_port(port); ret = saa7164_dvb_acquire_port(port); ret = saa7164_dvb_stop_port(port); /* Mark the hardware buffers as free */ mutex_lock(&port->dmaqueue_lock); list_for_each_safe(p, q, &port->dmaqueue.list) { buf = list_entry(p, struct saa7164_buffer, list); buf->flags = SAA7164_BUFFER_FREE; } mutex_unlock(&port->dmaqueue_lock); return ret; } static int saa7164_dvb_start_port(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; int ret = 0, result; dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr); saa7164_buffer_cfg_port(port); /* Acquire the hardware */ result = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() acquire transition failed, res = 0x%x\n", __func__, result); /* Stop the hardware, regardless */ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() acquire/forced stop transition " "failed, res = 0x%x\n", __func__, result); } ret = -EIO; goto out; } else dprintk(DBGLVL_DVB, "%s() Acquired\n", __func__); /* Pause the hardware */ result = saa7164_api_transition_port(port, SAA_DMASTATE_PAUSE); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() pause transition failed, res = 0x%x\n", __func__, result); /* Stop the hardware, regardless */ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() pause/forced stop transition " "failed, res = 0x%x\n", __func__, result); } ret = -EIO; goto out; } else 
dprintk(DBGLVL_DVB, "%s() Paused\n", __func__); /* Start the hardware */ result = saa7164_api_transition_port(port, SAA_DMASTATE_RUN); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() run transition failed, result = 0x%x\n", __func__, result); /* Stop the hardware, regardless */ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() run/forced stop transition " "failed, res = 0x%x\n", __func__, result); } ret = -EIO; } else dprintk(DBGLVL_DVB, "%s() Running\n", __func__); out: return ret; } static int saa7164_dvb_start_feed(struct dvb_demux_feed *feed) { struct dvb_demux *demux = feed->demux; struct saa7164_port *port = (struct saa7164_port *) demux->priv; struct saa7164_dvb *dvb = &port->dvb; struct saa7164_dev *dev = port->dev; int ret = 0; dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr); if (!demux->dmx.frontend) return -EINVAL; if (dvb) { mutex_lock(&dvb->lock); if (dvb->feeding++ == 0) { /* Start transport */ ret = saa7164_dvb_start_port(port); } mutex_unlock(&dvb->lock); dprintk(DBGLVL_DVB, "%s(port=%d) now feeding = %d\n", __func__, port->nr, dvb->feeding); } return ret; } static int saa7164_dvb_stop_feed(struct dvb_demux_feed *feed) { struct dvb_demux *demux = feed->demux; struct saa7164_port *port = (struct saa7164_port *) demux->priv; struct saa7164_dvb *dvb = &port->dvb; struct saa7164_dev *dev = port->dev; int ret = 0; dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr); if (dvb) { mutex_lock(&dvb->lock); if (--dvb->feeding == 0) { /* Stop transport */ ret = saa7164_dvb_stop_streaming(port); } mutex_unlock(&dvb->lock); dprintk(DBGLVL_DVB, "%s(port=%d) now feeding = %d\n", __func__, port->nr, dvb->feeding); } return ret; } static int dvb_register(struct saa7164_port *port) { struct saa7164_dvb *dvb = &port->dvb; struct saa7164_dev *dev = port->dev; struct saa7164_buffer *buf; int result, i; dprintk(DBGLVL_DVB, 
"%s(port=%d)\n", __func__, port->nr); if (port->type != SAA7164_MPEG_DVB) BUG(); /* Sanity check that the PCI configuration space is active */ if (port->hwcfg.BARLocation == 0) { result = -ENOMEM; printk(KERN_ERR "%s: dvb_register_adapter failed " "(errno = %d), NO PCI configuration\n", DRIVER_NAME, result); goto fail_adapter; } /* Init and establish defaults */ port->hw_streamingparams.bitspersample = 8; port->hw_streamingparams.samplesperline = 188; port->hw_streamingparams.numberoflines = (SAA7164_TS_NUMBER_OF_LINES * 188) / 188; port->hw_streamingparams.pitch = 188; port->hw_streamingparams.linethreshold = 0; port->hw_streamingparams.pagetablelistvirt = NULL; port->hw_streamingparams.pagetablelistphys = NULL; port->hw_streamingparams.numpagetables = 2 + ((SAA7164_TS_NUMBER_OF_LINES * 188) / PAGE_SIZE); port->hw_streamingparams.numpagetableentries = port->hwcfg.buffercount; /* Allocate the PCI resources */ for (i = 0; i < port->hwcfg.buffercount; i++) { buf = saa7164_buffer_alloc(port, port->hw_streamingparams.numberoflines * port->hw_streamingparams.pitch); if (!buf) { result = -ENOMEM; printk(KERN_ERR "%s: dvb_register_adapter failed " "(errno = %d), unable to allocate buffers\n", DRIVER_NAME, result); goto fail_adapter; } mutex_lock(&port->dmaqueue_lock); list_add_tail(&buf->list, &port->dmaqueue.list); mutex_unlock(&port->dmaqueue_lock); } /* register adapter */ result = dvb_register_adapter(&dvb->adapter, DRIVER_NAME, THIS_MODULE, &dev->pci->dev, adapter_nr); if (result < 0) { printk(KERN_ERR "%s: dvb_register_adapter failed " "(errno = %d)\n", DRIVER_NAME, result); goto fail_adapter; } dvb->adapter.priv = port; /* register frontend */ result = dvb_register_frontend(&dvb->adapter, dvb->frontend); if (result < 0) { printk(KERN_ERR "%s: dvb_register_frontend failed " "(errno = %d)\n", DRIVER_NAME, result); goto fail_frontend; } /* register demux stuff */ dvb->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING | DMX_MEMORY_BASED_FILTERING; 
dvb->demux.priv = port; dvb->demux.filternum = 256; dvb->demux.feednum = 256; dvb->demux.start_feed = saa7164_dvb_start_feed; dvb->demux.stop_feed = saa7164_dvb_stop_feed; result = dvb_dmx_init(&dvb->demux); if (result < 0) { printk(KERN_ERR "%s: dvb_dmx_init failed (errno = %d)\n", DRIVER_NAME, result); goto fail_dmx; } dvb->dmxdev.filternum = 256; dvb->dmxdev.demux = &dvb->demux.dmx; dvb->dmxdev.capabilities = 0; result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter); if (result < 0) { printk(KERN_ERR "%s: dvb_dmxdev_init failed (errno = %d)\n", DRIVER_NAME, result); goto fail_dmxdev; } dvb->fe_hw.source = DMX_FRONTEND_0; result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw); if (result < 0) { printk(KERN_ERR "%s: add_frontend failed " "(DMX_FRONTEND_0, errno = %d)\n", DRIVER_NAME, result); goto fail_fe_hw; } dvb->fe_mem.source = DMX_MEMORY_FE; result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem); if (result < 0) { printk(KERN_ERR "%s: add_frontend failed " "(DMX_MEMORY_FE, errno = %d)\n", DRIVER_NAME, result); goto fail_fe_mem; } result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw); if (result < 0) { printk(KERN_ERR "%s: connect_frontend failed (errno = %d)\n", DRIVER_NAME, result); goto fail_fe_conn; } /* register network adapter */ dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx); return 0; fail_fe_conn: dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem); fail_fe_mem: dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw); fail_fe_hw: dvb_dmxdev_release(&dvb->dmxdev); fail_dmxdev: dvb_dmx_release(&dvb->demux); fail_dmx: dvb_unregister_frontend(dvb->frontend); fail_frontend: dvb_frontend_detach(dvb->frontend); dvb_unregister_adapter(&dvb->adapter); fail_adapter: return result; } int saa7164_dvb_unregister(struct saa7164_port *port) { struct saa7164_dvb *dvb = &port->dvb; struct saa7164_dev *dev = port->dev; struct saa7164_buffer *b; struct list_head *c, *n; dprintk(DBGLVL_DVB, "%s()\n", 
__func__); if (port->type != SAA7164_MPEG_DVB) BUG(); /* Remove any allocated buffers */ mutex_lock(&port->dmaqueue_lock); list_for_each_safe(c, n, &port->dmaqueue.list) { b = list_entry(c, struct saa7164_buffer, list); list_del(c); saa7164_buffer_dealloc(b); } mutex_unlock(&port->dmaqueue_lock); if (dvb->frontend == NULL) return 0; dvb_net_release(&dvb->net); dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem); dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw); dvb_dmxdev_release(&dvb->dmxdev); dvb_dmx_release(&dvb->demux); dvb_unregister_frontend(dvb->frontend); dvb_frontend_detach(dvb->frontend); dvb_unregister_adapter(&dvb->adapter); return 0; } /* All the DVB attach calls go here, this function get's modified * for each new card. */ int saa7164_dvb_register(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct saa7164_dvb *dvb = &port->dvb; struct saa7164_i2c *i2c_bus = NULL; int ret; dprintk(DBGLVL_DVB, "%s()\n", __func__); /* init frontend */ switch (dev->board) { case SAA7164_BOARD_HAUPPAUGE_HVR2200: case SAA7164_BOARD_HAUPPAUGE_HVR2200_2: case SAA7164_BOARD_HAUPPAUGE_HVR2200_3: case SAA7164_BOARD_HAUPPAUGE_HVR2200_4: i2c_bus = &dev->i2c_bus[port->nr + 1]; switch (port->nr) { case 0: port->dvb.frontend = dvb_attach(tda10048_attach, &hauppauge_hvr2200_1_config, &i2c_bus->i2c_adap); if (port->dvb.frontend != NULL) { /* TODO: addr is in the card struct */ dvb_attach(tda18271_attach, port->dvb.frontend, 0xc0 >> 1, &i2c_bus->i2c_adap, &hauppauge_hvr22x0_tuner_config); } break; case 1: port->dvb.frontend = dvb_attach(tda10048_attach, &hauppauge_hvr2200_2_config, &i2c_bus->i2c_adap); if (port->dvb.frontend != NULL) { /* TODO: addr is in the card struct */ dvb_attach(tda18271_attach, port->dvb.frontend, 0xc0 >> 1, &i2c_bus->i2c_adap, &hauppauge_hvr22x0s_tuner_config); } break; } break; case SAA7164_BOARD_HAUPPAUGE_HVR2250: case SAA7164_BOARD_HAUPPAUGE_HVR2250_2: case SAA7164_BOARD_HAUPPAUGE_HVR2250_3: i2c_bus = 
&dev->i2c_bus[port->nr + 1]; port->dvb.frontend = dvb_attach(s5h1411_attach, &hauppauge_s5h1411_config, &i2c_bus->i2c_adap); if (port->dvb.frontend != NULL) { if (port->nr == 0) { /* Master TDA18271 */ /* TODO: addr is in the card struct */ dvb_attach(tda18271_attach, port->dvb.frontend, 0xc0 >> 1, &i2c_bus->i2c_adap, &hauppauge_hvr22x0_tuner_config); } else { /* Slave TDA18271 */ dvb_attach(tda18271_attach, port->dvb.frontend, 0xc0 >> 1, &i2c_bus->i2c_adap, &hauppauge_hvr22x0s_tuner_config); } } break; default: printk(KERN_ERR "%s: The frontend isn't supported\n", dev->name); break; } if (NULL == dvb->frontend) { printk(KERN_ERR "%s() Frontend initialization failed\n", __func__); return -1; } /* register everything */ ret = dvb_register(port); if (ret < 0) { if (dvb->frontend->ops.release) dvb->frontend->ops.release(dvb->frontend); return ret; } return 0; }
gpl-2.0
BlackBox-Kernel/blackbox_sprout_lp
drivers/scsi/qla2xxx/qla_mid.c
2203
21694
/* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" #include "qla_gbl.h" #include "qla_target.h" #include <linux/moduleparam.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/list.h> #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> #include <linux/delay.h> void qla2x00_vp_stop_timer(scsi_qla_host_t *vha) { if (vha->vp_idx && vha->timer_active) { del_timer_sync(&vha->timer); vha->timer_active = 0; } } static uint32_t qla24xx_allocate_vp_id(scsi_qla_host_t *vha) { uint32_t vp_id; struct qla_hw_data *ha = vha->hw; unsigned long flags; /* Find an empty slot and assign an vp_id */ mutex_lock(&ha->vport_lock); vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1); if (vp_id > ha->max_npiv_vports) { ql_dbg(ql_dbg_vport, vha, 0xa000, "vp_id %d is bigger than max-supported %d.\n", vp_id, ha->max_npiv_vports); mutex_unlock(&ha->vport_lock); return vp_id; } set_bit(vp_id, ha->vp_idx_map); ha->num_vhosts++; vha->vp_idx = vp_id; spin_lock_irqsave(&ha->vport_slock, flags); list_add_tail(&vha->list, &ha->vp_list); qlt_update_vp_map(vha, SET_VP_IDX); spin_unlock_irqrestore(&ha->vport_slock, flags); mutex_unlock(&ha->vport_lock); return vp_id; } void qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) { uint16_t vp_id; struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; mutex_lock(&ha->vport_lock); /* * Wait for all pending activities to finish before removing vport from * the list. 
* Lock needs to be held for safe removal from the list (it * ensures no active vp_list traversal while the vport is removed * from the queue) */ spin_lock_irqsave(&ha->vport_slock, flags); while (atomic_read(&vha->vref_count)) { spin_unlock_irqrestore(&ha->vport_slock, flags); msleep(500); spin_lock_irqsave(&ha->vport_slock, flags); } list_del(&vha->list); qlt_update_vp_map(vha, RESET_VP_IDX); spin_unlock_irqrestore(&ha->vport_slock, flags); vp_id = vha->vp_idx; ha->num_vhosts--; clear_bit(vp_id, ha->vp_idx_map); mutex_unlock(&ha->vport_lock); } static scsi_qla_host_t * qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) { scsi_qla_host_t *vha; struct scsi_qla_host *tvha; unsigned long flags; spin_lock_irqsave(&ha->vport_slock, flags); /* Locate matching device in database. */ list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { if (!memcmp(port_name, vha->port_name, WWN_SIZE)) { spin_unlock_irqrestore(&ha->vport_slock, flags); return vha; } } spin_unlock_irqrestore(&ha->vport_slock, flags); return NULL; } /* * qla2x00_mark_vp_devices_dead * Updates fcport state when device goes offline. * * Input: * ha = adapter block pointer. * fcport = port structure pointer. * * Return: * None. * * Context: */ static void qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) { /* * !!! NOTE !!! * This function, if called in contexts other than vp create, disable * or delete, please make sure this is synchronized with the * delete thread. 
*/ fc_port_t *fcport; list_for_each_entry(fcport, &vha->vp_fcports, list) { ql_dbg(ql_dbg_vport, vha, 0xa001, "Marking port dead, loop_id=0x%04x : %x.\n", fcport->loop_id, fcport->vha->vp_idx); qla2x00_mark_device_lost(vha, fcport, 0, 0); qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); } } int qla24xx_disable_vp(scsi_qla_host_t *vha) { unsigned long flags; int ret; ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); /* Remove port id from vp target map */ spin_lock_irqsave(&vha->hw->vport_slock, flags); qlt_update_vp_map(vha, RESET_AL_PA); spin_unlock_irqrestore(&vha->hw->vport_slock, flags); qla2x00_mark_vp_devices_dead(vha); atomic_set(&vha->vp_state, VP_FAILED); vha->flags.management_server_logged_in = 0; if (ret == QLA_SUCCESS) { fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED); } else { fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); return -1; } return 0; } int qla24xx_enable_vp(scsi_qla_host_t *vha) { int ret; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); /* Check if physical ha port is Up */ if (atomic_read(&base_vha->loop_state) == LOOP_DOWN || atomic_read(&base_vha->loop_state) == LOOP_DEAD || !(ha->current_topology & ISP_CFG_F)) { vha->vp_err_state = VP_ERR_PORTDWN; fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN); goto enable_failed; } /* Initialize the new vport unless it is a persistent port */ mutex_lock(&ha->vport_lock); ret = qla24xx_modify_vp_config(vha); mutex_unlock(&ha->vport_lock); if (ret != QLA_SUCCESS) { fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); goto enable_failed; } ql_dbg(ql_dbg_taskm, vha, 0x801a, "Virtual port with id: %d - Enabled.\n", vha->vp_idx); return 0; enable_failed: ql_dbg(ql_dbg_taskm, vha, 0x801b, "Virtual port with id: %d - Disabled.\n", vha->vp_idx); return 1; } static void qla24xx_configure_vp(scsi_qla_host_t *vha) { struct fc_vport *fc_vport; int ret; 
fc_vport = vha->fc_vport; ql_dbg(ql_dbg_vport, vha, 0xa002, "%s: change request #3.\n", __func__); ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx); if (ret != QLA_SUCCESS) { ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable " "receiving of RSCN requests: 0x%x.\n", ret); return; } else { /* Corresponds to SCR enabled */ clear_bit(VP_SCR_NEEDED, &vha->vp_flags); } vha->flags.online = 1; if (qla24xx_configure_vhba(vha)) return; atomic_set(&vha->vp_state, VP_ACTIVE); fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); } void qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) { scsi_qla_host_t *vha; struct qla_hw_data *ha = rsp->hw; int i = 0; unsigned long flags; spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vha, &ha->vp_list, list) { if (vha->vp_idx) { atomic_inc(&vha->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); switch (mb[0]) { case MBA_LIP_OCCURRED: case MBA_LOOP_UP: case MBA_LOOP_DOWN: case MBA_LIP_RESET: case MBA_POINT_TO_POINT: case MBA_CHG_IN_CONNECTION: case MBA_PORT_UPDATE: case MBA_RSCN_UPDATE: ql_dbg(ql_dbg_async, vha, 0x5024, "Async_event for VP[%d], mb=0x%x vha=%p.\n", i, *mb, vha); qla2x00_async_event(vha, rsp, mb); break; } spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vha->vref_count); } i++; } spin_unlock_irqrestore(&ha->vport_slock, flags); } int qla2x00_vp_abort_isp(scsi_qla_host_t *vha) { /* * Physical port will do most of the abort and recovery work. We can * just treat it as a loop down */ if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); qla2x00_mark_all_devices_lost(vha, 0); } else { if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); } /* * To exclusively reset vport, we need to log it out first. Note: this * control_vp can fail if ISP reset is already issued, this is * expected, as the vp would be already logged out due to ISP reset. 
*/ if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); ql_dbg(ql_dbg_taskm, vha, 0x801d, "Scheduling enable of Vport %d.\n", vha->vp_idx); return qla24xx_enable_vp(vha); } static int qla2x00_do_dpc_vp(scsi_qla_host_t *vha) { ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012, "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags); qla2x00_do_work(vha); if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { /* VP acquired. complete port configuration */ ql_dbg(ql_dbg_dpc, vha, 0x4014, "Configure VP scheduled.\n"); qla24xx_configure_vp(vha); ql_dbg(ql_dbg_dpc, vha, 0x4015, "Configure VP end.\n"); return 0; } if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, vha, 0x4016, "FCPort update scheduled.\n"); qla2x00_update_fcports(vha); clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags); ql_dbg(ql_dbg_dpc, vha, 0x4017, "FCPort update end.\n"); } if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) && !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && atomic_read(&vha->loop_state) != LOOP_DOWN) { ql_dbg(ql_dbg_dpc, vha, 0x4018, "Relogin needed scheduled.\n"); qla2x00_relogin(vha); ql_dbg(ql_dbg_dpc, vha, 0x4019, "Relogin needed end.\n"); } if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) { clear_bit(RESET_ACTIVE, &vha->dpc_flags); } if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { ql_dbg(ql_dbg_dpc, vha, 0x401a, "Loop resync scheduled.\n"); qla2x00_loop_resync(vha); clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); ql_dbg(ql_dbg_dpc, vha, 0x401b, "Loop resync end.\n"); } } ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c, "Exiting %s.\n", __func__); return 0; } void qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) { int ret; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *vp; unsigned long flags = 0; if (vha->vp_idx) return; if 
(list_empty(&ha->vp_list)) return; clear_bit(VP_DPC_NEEDED, &vha->dpc_flags); if (!(ha->current_topology & ISP_CFG_F)) return; spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { if (vp->vp_idx) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); ret = qla2x00_do_dpc_vp(vp); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); } } spin_unlock_irqrestore(&ha->vport_slock, flags); } int qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport) { scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); struct qla_hw_data *ha = base_vha->hw; scsi_qla_host_t *vha; uint8_t port_name[WWN_SIZE]; if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR) return VPCERR_UNSUPPORTED; /* Check up the F/W and H/W support NPIV */ if (!ha->flags.npiv_supported) return VPCERR_UNSUPPORTED; /* Check up whether npiv supported switch presented */ if (!(ha->switch_cap & FLOGI_MID_SUPPORT)) return VPCERR_NO_FABRIC_SUPP; /* Check up unique WWPN */ u64_to_wwn(fc_vport->port_name, port_name); if (!memcmp(port_name, base_vha->port_name, WWN_SIZE)) return VPCERR_BAD_WWN; vha = qla24xx_find_vhost_by_name(ha, port_name); if (vha) return VPCERR_BAD_WWN; /* Check up max-npiv-supports */ if (ha->num_vhosts > ha->max_npiv_vports) { ql_dbg(ql_dbg_vport, vha, 0xa004, "num_vhosts %ud is bigger " "than max_npiv_vports %ud.\n", ha->num_vhosts, ha->max_npiv_vports); return VPCERR_UNSUPPORTED; } return 0; } scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *fc_vport) { scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); struct qla_hw_data *ha = base_vha->hw; scsi_qla_host_t *vha; struct scsi_host_template *sht = &qla2xxx_driver_template; struct Scsi_Host *host; vha = qla2x00_create_host(sht, ha); if (!vha) { ql_log(ql_log_warn, vha, 0xa005, "scsi_host_alloc() failed for vport.\n"); return(NULL); } host = vha->host; fc_vport->dd_data = vha; /* New host info */ u64_to_wwn(fc_vport->node_name, vha->node_name); 
u64_to_wwn(fc_vport->port_name, vha->port_name); vha->fc_vport = fc_vport; vha->device_flags = 0; vha->vp_idx = qla24xx_allocate_vp_id(vha); if (vha->vp_idx > ha->max_npiv_vports) { ql_dbg(ql_dbg_vport, vha, 0xa006, "Couldn't allocate vp_id.\n"); goto create_vhost_failed; } vha->mgmt_svr_loop_id = 10 + vha->vp_idx; vha->dpc_flags = 0L; /* * To fix the issue of processing a parent's RSCN for the vport before * its SCR is complete. */ set_bit(VP_SCR_NEEDED, &vha->vp_flags); atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); vha->req = base_vha->req; host->can_queue = base_vha->req->length + 128; host->cmd_per_lun = 3; if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) host->max_cmd_len = 32; else host->max_cmd_len = MAX_CMDSZ; host->max_channel = MAX_BUSES - 1; host->max_lun = ql2xmaxlun; host->unique_id = host->host_no; host->max_id = ha->max_fibre_devices; host->transportt = qla2xxx_transport_vport_template; ql_dbg(ql_dbg_vport, vha, 0xa007, "Detect vport hba %ld at address = %p.\n", vha->host_no, vha); vha->flags.init_done = 1; mutex_lock(&ha->vport_lock); set_bit(vha->vp_idx, ha->vp_idx_map); ha->cur_vport_count++; mutex_unlock(&ha->vport_lock); return vha; create_vhost_failed: return NULL; } static void qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req) { struct qla_hw_data *ha = vha->hw; uint16_t que_id = req->id; dma_free_coherent(&ha->pdev->dev, (req->length + 1) * sizeof(request_t), req->ring, req->dma); req->ring = NULL; req->dma = 0; if (que_id) { ha->req_q_map[que_id] = NULL; mutex_lock(&ha->vport_lock); clear_bit(que_id, ha->req_qid_map); mutex_unlock(&ha->vport_lock); } kfree(req->outstanding_cmds); kfree(req); req = NULL; } static void qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) { struct qla_hw_data *ha = vha->hw; uint16_t que_id = rsp->id; if (rsp->msix && rsp->msix->have_irq) { free_irq(rsp->msix->vector, rsp); 
rsp->msix->have_irq = 0; rsp->msix->rsp = NULL; } dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) * sizeof(response_t), rsp->ring, rsp->dma); rsp->ring = NULL; rsp->dma = 0; if (que_id) { ha->rsp_q_map[que_id] = NULL; mutex_lock(&ha->vport_lock); clear_bit(que_id, ha->rsp_qid_map); mutex_unlock(&ha->vport_lock); } kfree(rsp); rsp = NULL; } int qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) { int ret = -1; if (req) { req->options |= BIT_0; ret = qla25xx_init_req_que(vha, req); } if (ret == QLA_SUCCESS) qla25xx_free_req_que(vha, req); return ret; } static int qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) { int ret = -1; if (rsp) { rsp->options |= BIT_0; ret = qla25xx_init_rsp_que(vha, rsp); } if (ret == QLA_SUCCESS) qla25xx_free_rsp_que(vha, rsp); return ret; } /* Delete all queues for a given vhost */ int qla25xx_delete_queues(struct scsi_qla_host *vha) { int cnt, ret = 0; struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct qla_hw_data *ha = vha->hw; /* Delete request queues */ for (cnt = 1; cnt < ha->max_req_queues; cnt++) { req = ha->req_q_map[cnt]; if (req) { ret = qla25xx_delete_req_que(vha, req); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00ea, "Couldn't delete req que %d.\n", req->id); return ret; } } } /* Delete response queues */ for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { rsp = ha->rsp_q_map[cnt]; if (rsp) { ret = qla25xx_delete_rsp_que(vha, rsp); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00eb, "Couldn't delete rsp que %d.\n", rsp->id); return ret; } } } return ret; } int qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos) { int ret = 0; struct req_que *req = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); uint16_t que_id = 0; device_reg_t __iomem *reg; uint32_t cnt; req = kzalloc(sizeof(struct req_que), GFP_KERNEL); if (req == NULL) { ql_log(ql_log_fatal, base_vha, 0x00d9, "Failed 
to allocate memory for request queue.\n"); goto failed; } req->length = REQUEST_ENTRY_CNT_24XX; req->ring = dma_alloc_coherent(&ha->pdev->dev, (req->length + 1) * sizeof(request_t), &req->dma, GFP_KERNEL); if (req->ring == NULL) { ql_log(ql_log_fatal, base_vha, 0x00da, "Failed to allocate memory for request_ring.\n"); goto que_failed; } ret = qla2x00_alloc_outstanding_cmds(ha, req); if (ret != QLA_SUCCESS) goto que_failed; mutex_lock(&ha->vport_lock); que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); if (que_id >= ha->max_req_queues) { mutex_unlock(&ha->vport_lock); ql_log(ql_log_warn, base_vha, 0x00db, "No resources to create additional request queue.\n"); goto que_failed; } set_bit(que_id, ha->req_qid_map); ha->req_q_map[que_id] = req; req->rid = rid; req->vp_idx = vp_idx; req->qos = qos; ql_dbg(ql_dbg_multiq, base_vha, 0xc002, "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", que_id, req->rid, req->vp_idx, req->qos); ql_dbg(ql_dbg_init, base_vha, 0x00dc, "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", que_id, req->rid, req->vp_idx, req->qos); if (rsp_que < 0) req->rsp = NULL; else req->rsp = ha->rsp_q_map[rsp_que]; /* Use alternate PCI bus number */ if (MSB(req->rid)) options |= BIT_4; /* Use alternate PCI devfn */ if (LSB(req->rid)) options |= BIT_5; req->options = options; ql_dbg(ql_dbg_multiq, base_vha, 0xc003, "options=0x%x.\n", req->options); ql_dbg(ql_dbg_init, base_vha, 0x00dd, "options=0x%x.\n", req->options); for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) req->outstanding_cmds[cnt] = NULL; req->current_outstanding_cmd = 1; req->ring_ptr = req->ring; req->ring_index = 0; req->cnt = req->length; req->id = que_id; reg = ISP_QUE_REG(ha, que_id); req->max_q_depth = ha->req_q_map[0]->max_q_depth; mutex_unlock(&ha->vport_lock); ql_dbg(ql_dbg_multiq, base_vha, 0xc004, "ring_ptr=%p ring_index=%d, " "cnt=%d id=%d max_q_depth=%d.\n", req->ring_ptr, req->ring_index, req->cnt, req->id, req->max_q_depth); ql_dbg(ql_dbg_init, base_vha, 0x00de, "ring_ptr=%p 
ring_index=%d, " "cnt=%d id=%d max_q_depth=%d.\n", req->ring_ptr, req->ring_index, req->cnt, req->id, req->max_q_depth); ret = qla25xx_init_req_que(base_vha, req); if (ret != QLA_SUCCESS) { ql_log(ql_log_fatal, base_vha, 0x00df, "%s failed.\n", __func__); mutex_lock(&ha->vport_lock); clear_bit(que_id, ha->req_qid_map); mutex_unlock(&ha->vport_lock); goto que_failed; } return req->id; que_failed: qla25xx_free_req_que(base_vha, req); failed: return 0; } static void qla_do_work(struct work_struct *work) { unsigned long flags; struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); struct scsi_qla_host *vha; struct qla_hw_data *ha = rsp->hw; spin_lock_irqsave(&rsp->hw->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags); } /* create response queue */ int qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, uint8_t vp_idx, uint16_t rid, int req) { int ret = 0; struct rsp_que *rsp = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); uint16_t que_id = 0; device_reg_t __iomem *reg; rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); if (rsp == NULL) { ql_log(ql_log_warn, base_vha, 0x0066, "Failed to allocate memory for response queue.\n"); goto failed; } rsp->length = RESPONSE_ENTRY_CNT_MQ; rsp->ring = dma_alloc_coherent(&ha->pdev->dev, (rsp->length + 1) * sizeof(response_t), &rsp->dma, GFP_KERNEL); if (rsp->ring == NULL) { ql_log(ql_log_warn, base_vha, 0x00e1, "Failed to allocate memory for response ring.\n"); goto que_failed; } mutex_lock(&ha->vport_lock); que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); if (que_id >= ha->max_rsp_queues) { mutex_unlock(&ha->vport_lock); ql_log(ql_log_warn, base_vha, 0x00e2, "No resources to create additional request queue.\n"); goto que_failed; } set_bit(que_id, ha->rsp_qid_map); if (ha->flags.msix_enabled) rsp->msix = &ha->msix_entries[que_id + 1]; else ql_log(ql_log_warn, base_vha, 
0x00e3, "MSIX not enalbled.\n"); ha->rsp_q_map[que_id] = rsp; rsp->rid = rid; rsp->vp_idx = vp_idx; rsp->hw = ha; ql_dbg(ql_dbg_init, base_vha, 0x00e4, "queue_id=%d rid=%d vp_idx=%d hw=%p.\n", que_id, rsp->rid, rsp->vp_idx, rsp->hw); /* Use alternate PCI bus number */ if (MSB(rsp->rid)) options |= BIT_4; /* Use alternate PCI devfn */ if (LSB(rsp->rid)) options |= BIT_5; /* Enable MSIX handshake mode on for uncapable adapters */ if (!IS_MSIX_NACK_CAPABLE(ha)) options |= BIT_6; rsp->options = options; rsp->id = que_id; reg = ISP_QUE_REG(ha, que_id); rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; rsp->rsp_q_out = &reg->isp25mq.rsp_q_out; mutex_unlock(&ha->vport_lock); ql_dbg(ql_dbg_multiq, base_vha, 0xc00b, "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", rsp->options, rsp->id, rsp->rsp_q_in, rsp->rsp_q_out); ql_dbg(ql_dbg_init, base_vha, 0x00e5, "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", rsp->options, rsp->id, rsp->rsp_q_in, rsp->rsp_q_out); ret = qla25xx_request_irq(rsp); if (ret) goto que_failed; ret = qla25xx_init_rsp_que(base_vha, rsp); if (ret != QLA_SUCCESS) { ql_log(ql_log_fatal, base_vha, 0x00e7, "%s failed.\n", __func__); mutex_lock(&ha->vport_lock); clear_bit(que_id, ha->rsp_qid_map); mutex_unlock(&ha->vport_lock); goto que_failed; } if (req >= 0) rsp->req = ha->req_q_map[req]; else rsp->req = NULL; qla2x00_init_response_q_entries(rsp); if (rsp->hw->wq) INIT_WORK(&rsp->q_work, qla_do_work); return rsp->id; que_failed: qla25xx_free_rsp_que(base_vha, rsp); failed: return 0; }
gpl-2.0
karltsou/fsl-imx6-linux
drivers/acpi/acpica/utstate.c
3227
10325
/******************************************************************************* * * Module Name: utstate - state object support procedures * ******************************************************************************/ /* * Copyright (C) 2000 - 2011, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utstate") /******************************************************************************* * * FUNCTION: acpi_ut_create_pkg_state_and_push * * PARAMETERS: Object - Object to be added to the new state * Action - Increment/Decrement * state_list - List the state will be added to * * RETURN: Status * * DESCRIPTION: Create a new state and push it * ******************************************************************************/ acpi_status acpi_ut_create_pkg_state_and_push(void *internal_object, void *external_object, u16 index, union acpi_generic_state **state_list) { union acpi_generic_state *state; ACPI_FUNCTION_ENTRY(); state = acpi_ut_create_pkg_state(internal_object, external_object, index); if (!state) { return (AE_NO_MEMORY); } acpi_ut_push_generic_state(state_list, state); return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ut_push_generic_state * * PARAMETERS: list_head - Head of the state stack * State - State object to push * * RETURN: None * * DESCRIPTION: Push a state object onto a state stack * ******************************************************************************/ void acpi_ut_push_generic_state(union acpi_generic_state **list_head, union acpi_generic_state *state) { ACPI_FUNCTION_TRACE(ut_push_generic_state); /* Push the state object onto the front of the list (stack) */ state->common.next = 
*list_head; *list_head = state; return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ut_pop_generic_state * * PARAMETERS: list_head - Head of the state stack * * RETURN: The popped state object * * DESCRIPTION: Pop a state object from a state stack * ******************************************************************************/ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state **list_head) { union acpi_generic_state *state; ACPI_FUNCTION_TRACE(ut_pop_generic_state); /* Remove the state object at the head of the list (stack) */ state = *list_head; if (state) { /* Update the list head */ *list_head = state->common.next; } return_PTR(state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_generic_state * * PARAMETERS: None * * RETURN: The new state object. NULL on failure. * * DESCRIPTION: Create a generic state object. Attempt to obtain one from * the global state cache; If none available, create a new one. * ******************************************************************************/ union acpi_generic_state *acpi_ut_create_generic_state(void) { union acpi_generic_state *state; ACPI_FUNCTION_ENTRY(); state = acpi_os_acquire_object(acpi_gbl_state_cache); if (state) { /* Initialize */ memset(state, 0, sizeof(union acpi_generic_state)); state->common.descriptor_type = ACPI_DESC_TYPE_STATE; } return (state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_thread_state * * PARAMETERS: None * * RETURN: New Thread State. 
NULL on failure * * DESCRIPTION: Create a "Thread State" - a flavor of the generic state used * to track per-thread info during method execution * ******************************************************************************/ struct acpi_thread_state *acpi_ut_create_thread_state(void) { union acpi_generic_state *state; ACPI_FUNCTION_TRACE(ut_create_thread_state); /* Create the generic state object */ state = acpi_ut_create_generic_state(); if (!state) { return_PTR(NULL); } /* Init fields specific to the update struct */ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_THREAD; state->thread.thread_id = acpi_os_get_thread_id(); /* Check for invalid thread ID - zero is very bad, it will break things */ if (!state->thread.thread_id) { ACPI_ERROR((AE_INFO, "Invalid zero ID from AcpiOsGetThreadId")); state->thread.thread_id = (acpi_thread_id) 1; } return_PTR((struct acpi_thread_state *)state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_update_state * * PARAMETERS: Object - Initial Object to be installed in the state * Action - Update action to be performed * * RETURN: New state object, null on failure * * DESCRIPTION: Create an "Update State" - a flavor of the generic state used * to update reference counts and delete complex objects such * as packages. 
* ******************************************************************************/ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object *object, u16 action) { union acpi_generic_state *state; ACPI_FUNCTION_TRACE_PTR(ut_create_update_state, object); /* Create the generic state object */ state = acpi_ut_create_generic_state(); if (!state) { return_PTR(NULL); } /* Init fields specific to the update struct */ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_UPDATE; state->update.object = object; state->update.value = action; return_PTR(state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_pkg_state * * PARAMETERS: Object - Initial Object to be installed in the state * Action - Update action to be performed * * RETURN: New state object, null on failure * * DESCRIPTION: Create a "Package State" * ******************************************************************************/ union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object, void *external_object, u16 index) { union acpi_generic_state *state; ACPI_FUNCTION_TRACE_PTR(ut_create_pkg_state, internal_object); /* Create the generic state object */ state = acpi_ut_create_generic_state(); if (!state) { return_PTR(NULL); } /* Init fields specific to the update struct */ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_PACKAGE; state->pkg.source_object = (union acpi_operand_object *)internal_object; state->pkg.dest_object = external_object; state->pkg.index = index; state->pkg.num_packages = 1; return_PTR(state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_control_state * * PARAMETERS: None * * RETURN: New state object, null on failure * * DESCRIPTION: Create a "Control State" - a flavor of the generic state used * to support nested IF/WHILE constructs in the AML. 
* ******************************************************************************/ union acpi_generic_state *acpi_ut_create_control_state(void) { union acpi_generic_state *state; ACPI_FUNCTION_TRACE(ut_create_control_state); /* Create the generic state object */ state = acpi_ut_create_generic_state(); if (!state) { return_PTR(NULL); } /* Init fields specific to the control struct */ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_CONTROL; state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING; return_PTR(state); } /******************************************************************************* * * FUNCTION: acpi_ut_delete_generic_state * * PARAMETERS: State - The state object to be deleted * * RETURN: None * * DESCRIPTION: Release a state object to the state cache. NULL state objects * are ignored. * ******************************************************************************/ void acpi_ut_delete_generic_state(union acpi_generic_state *state) { ACPI_FUNCTION_TRACE(ut_delete_generic_state); /* Ignore null state */ if (state) { (void)acpi_os_release_object(acpi_gbl_state_cache, state); } return_VOID; }
gpl-2.0
randomblame/a500_2.6
drivers/acpi/acpica/exsystem.c
3227
9338
/****************************************************************************** * * Module Name: exsystem - Interface to OS services * *****************************************************************************/ /* * Copyright (C) 2000 - 2011, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exsystem") /******************************************************************************* * * FUNCTION: acpi_ex_system_wait_semaphore * * PARAMETERS: Semaphore - Semaphore to wait on * Timeout - Max time to wait * * RETURN: Status * * DESCRIPTION: Implements a semaphore wait with a check to see if the * semaphore is available immediately. If it is not, the * interpreter is released before waiting. 
* ******************************************************************************/ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) { acpi_status status; ACPI_FUNCTION_TRACE(ex_system_wait_semaphore); status = acpi_os_wait_semaphore(semaphore, 1, ACPI_DO_NOT_WAIT); if (ACPI_SUCCESS(status)) { return_ACPI_STATUS(status); } if (status == AE_TIME) { /* We must wait, so unlock the interpreter */ acpi_ex_relinquish_interpreter(); status = acpi_os_wait_semaphore(semaphore, 1, timeout); ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "*** Thread awake after blocking, %s\n", acpi_format_exception(status))); /* Reacquire the interpreter */ acpi_ex_reacquire_interpreter(); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_system_wait_mutex * * PARAMETERS: Mutex - Mutex to wait on * Timeout - Max time to wait * * RETURN: Status * * DESCRIPTION: Implements a mutex wait with a check to see if the * mutex is available immediately. If it is not, the * interpreter is released before waiting. 
* ******************************************************************************/ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) { acpi_status status; ACPI_FUNCTION_TRACE(ex_system_wait_mutex); status = acpi_os_acquire_mutex(mutex, ACPI_DO_NOT_WAIT); if (ACPI_SUCCESS(status)) { return_ACPI_STATUS(status); } if (status == AE_TIME) { /* We must wait, so unlock the interpreter */ acpi_ex_relinquish_interpreter(); status = acpi_os_acquire_mutex(mutex, timeout); ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "*** Thread awake after blocking, %s\n", acpi_format_exception(status))); /* Reacquire the interpreter */ acpi_ex_reacquire_interpreter(); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_system_do_stall * * PARAMETERS: how_long - The amount of time to stall, * in microseconds * * RETURN: Status * * DESCRIPTION: Suspend running thread for specified amount of time. * Note: ACPI specification requires that Stall() does not * relinquish the processor, and delays longer than 100 usec * should use Sleep() instead. We allow stalls up to 255 usec * for compatibility with other interpreters and existing BIOSs. 
* ******************************************************************************/ acpi_status acpi_ex_system_do_stall(u32 how_long) { acpi_status status = AE_OK; ACPI_FUNCTION_ENTRY(); if (how_long > 255) { /* 255 microseconds */ /* * Longer than 255 usec, this is an error * * (ACPI specifies 100 usec as max, but this gives some slack in * order to support existing BIOSs) */ ACPI_ERROR((AE_INFO, "Time parameter is too large (%u)", how_long)); status = AE_AML_OPERAND_VALUE; } else { acpi_os_stall(how_long); } return (status); } /******************************************************************************* * * FUNCTION: acpi_ex_system_do_sleep * * PARAMETERS: how_long - The amount of time to sleep, * in milliseconds * * RETURN: None * * DESCRIPTION: Sleep the running thread for specified amount of time. * ******************************************************************************/ acpi_status acpi_ex_system_do_sleep(u64 how_long) { ACPI_FUNCTION_ENTRY(); /* Since this thread will sleep, we must release the interpreter */ acpi_ex_relinquish_interpreter(); /* * For compatibility with other ACPI implementations and to prevent * accidental deep sleeps, limit the sleep time to something reasonable. */ if (how_long > ACPI_MAX_SLEEP) { how_long = ACPI_MAX_SLEEP; } acpi_os_sleep(how_long); /* And now we must get the interpreter again */ acpi_ex_reacquire_interpreter(); return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ex_system_signal_event * * PARAMETERS: obj_desc - The object descriptor for this op * * RETURN: Status * * DESCRIPTION: Provides an access point to perform synchronization operations * within the AML. 
* ******************************************************************************/ acpi_status acpi_ex_system_signal_event(union acpi_operand_object * obj_desc) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ex_system_signal_event); if (obj_desc) { status = acpi_os_signal_semaphore(obj_desc->event.os_semaphore, 1); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_system_wait_event * * PARAMETERS: time_desc - The 'time to delay' object descriptor * obj_desc - The object descriptor for this op * * RETURN: Status * * DESCRIPTION: Provides an access point to perform synchronization operations * within the AML. This operation is a request to wait for an * event. * ******************************************************************************/ acpi_status acpi_ex_system_wait_event(union acpi_operand_object *time_desc, union acpi_operand_object *obj_desc) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ex_system_wait_event); if (obj_desc) { status = acpi_ex_system_wait_semaphore(obj_desc->event.os_semaphore, (u16) time_desc->integer. value); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_system_reset_event * * PARAMETERS: obj_desc - The object descriptor for this op * * RETURN: Status * * DESCRIPTION: Reset an event to a known state. * ******************************************************************************/ acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc) { acpi_status status = AE_OK; acpi_semaphore temp_semaphore; ACPI_FUNCTION_ENTRY(); /* * We are going to simply delete the existing semaphore and * create a new one! */ status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, &temp_semaphore); if (ACPI_SUCCESS(status)) { (void)acpi_os_delete_semaphore(obj_desc->event.os_semaphore); obj_desc->event.os_semaphore = temp_semaphore; } return (status); }
gpl-2.0
Caio99BR/FalconSSKernel
drivers/net/wireless/ath/ath9k/ar9003_phy.c
3227
44609
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/export.h> #include "hw.h" #include "ar9003_phy.h" static const int firstep_table[] = /* level: 0 1 2 3 4 5 6 7 8 */ { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */ static const int cycpwrThr1_table[] = /* level: 0 1 2 3 4 5 6 7 8 */ { -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */ /* * register values to turn OFDM weak signal detection OFF */ static const int m1ThreshLow_off = 127; static const int m2ThreshLow_off = 127; static const int m1Thresh_off = 127; static const int m2Thresh_off = 127; static const int m2CountThr_off = 31; static const int m2CountThrLow_off = 63; static const int m1ThreshLowExt_off = 127; static const int m2ThreshLowExt_off = 127; static const int m1ThreshExt_off = 127; static const int m2ThreshExt_off = 127; /** * ar9003_hw_set_channel - set channel on single-chip device * @ah: atheros hardware structure * @chan: * * This is the function to change channel on single-chip devices, that is * for AR9300 family of chipsets. * * This function takes the channel value in MHz and sets * hardware channel value. Assumes writes have been enabled to analog bus. 
* * Actual Expression, * * For 2GHz channel, * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17) * (freq_ref = 40MHz) * * For 5GHz channel, * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10) * (freq_ref = 40MHz/(24>>amodeRefSel)) * * For 5GHz channels which are 5MHz spaced, * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17) * (freq_ref = 40MHz) */ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) { u16 bMode, fracMode = 0, aModeRefSel = 0; u32 freq, channelSel = 0, reg32 = 0; struct chan_centers centers; int loadSynthChannel; ath9k_hw_get_channel_centers(ah, chan, &centers); freq = centers.synth_center; if (freq < 4800) { /* 2 GHz, fractional mode */ if (AR_SREV_9330(ah)) { u32 chan_frac; u32 div; if (ah->is_clk_25mhz) div = 75; else div = 120; channelSel = (freq * 4) / div; chan_frac = (((freq * 4) % div) * 0x20000) / div; channelSel = (channelSel << 17) | chan_frac; } else if (AR_SREV_9485(ah)) { u32 chan_frac; /* * freq_ref = 40 / (refdiva >> amoderefsel); where refdiva=1 and amoderefsel=0 * ndiv = ((chan_mhz * 4) / 3) / freq_ref; * chansel = int(ndiv), chanfrac = (ndiv - chansel) * 0x20000 */ channelSel = (freq * 4) / 120; chan_frac = (((freq * 4) % 120) * 0x20000) / 120; channelSel = (channelSel << 17) | chan_frac; } else if (AR_SREV_9340(ah)) { if (ah->is_clk_25mhz) { u32 chan_frac; channelSel = (freq * 2) / 75; chan_frac = (((freq * 2) % 75) * 0x20000) / 75; channelSel = (channelSel << 17) | chan_frac; } else channelSel = CHANSEL_2G(freq) >> 1; } else channelSel = CHANSEL_2G(freq); /* Set to 2G mode */ bMode = 1; } else { if (AR_SREV_9340(ah) && ah->is_clk_25mhz) { u32 chan_frac; channelSel = (freq * 2) / 75; chan_frac = (((freq * 2) % 75) * 0x20000) / 75; channelSel = (channelSel << 17) | chan_frac; } else { channelSel = CHANSEL_5G(freq); /* Doubler is ON, so, divide channelSel by 2. 
*/ channelSel >>= 1; } /* Set to 5G mode */ bMode = 0; } /* Enable fractional mode for all channels */ fracMode = 1; aModeRefSel = 0; loadSynthChannel = 0; reg32 = (bMode << 29); REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32); /* Enable Long shift Select for Synthesizer */ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_SYNTH4, AR_PHY_SYNTH4_LONG_SHIFT_SELECT, 1); /* Program Synth. setting */ reg32 = (channelSel << 2) | (fracMode << 30) | (aModeRefSel << 28) | (loadSynthChannel << 31); REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32); /* Toggle Load Synth channel bit */ loadSynthChannel = 1; reg32 = (channelSel << 2) | (fracMode << 30) | (aModeRefSel << 28) | (loadSynthChannel << 31); REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32); ah->curchan = chan; ah->curchan_rad_index = -1; return 0; } /** * ar9003_hw_spur_mitigate_mrc_cck - convert baseband spur frequency * @ah: atheros hardware structure * @chan: * * For single-chip solutions. Converts to baseband spur frequency given the * input channel frequency and compute register settings below. * * Spur mitigation for MRC CCK */ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah, struct ath9k_channel *chan) { static const u32 spur_freq[4] = { 2420, 2440, 2464, 2480 }; int cur_bb_spur, negative = 0, cck_spur_freq; int i; int range, max_spur_cnts, synth_freq; u8 *spur_fbin_ptr = NULL; /* * Need to verify range +/- 10 MHz in control channel, otherwise spur * is out-of-band and can be ignored. */ if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) { spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah, IS_CHAN_2GHZ(chan)); if (spur_fbin_ptr[0] == 0) /* No spur */ return; max_spur_cnts = 5; if (IS_CHAN_HT40(chan)) { range = 19; if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL, AR_PHY_GC_DYN2040_PRI_CH) == 0) synth_freq = chan->channel + 10; else synth_freq = chan->channel - 10; } else { range = 10; synth_freq = chan->channel; } } else { range = AR_SREV_9462(ah) ? 
5 : 10; max_spur_cnts = 4; synth_freq = chan->channel; } for (i = 0; i < max_spur_cnts; i++) { if (AR_SREV_9462(ah) && (i == 0 || i == 3)) continue; negative = 0; if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i], IS_CHAN_2GHZ(chan)) - synth_freq; else cur_bb_spur = spur_freq[i] - synth_freq; if (cur_bb_spur < 0) { negative = 1; cur_bb_spur = -cur_bb_spur; } if (cur_bb_spur < range) { cck_spur_freq = (int)((cur_bb_spur << 19) / 11); if (negative == 1) cck_spur_freq = -cck_spur_freq; cck_spur_freq = cck_spur_freq & 0xfffff; REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_YCOK_MAX, 0x7); REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT, AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR, 0x7f); REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT, AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE, 0x2); REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT, AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT, 0x1); REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT, AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ, cck_spur_freq); return; } } REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_YCOK_MAX, 0x5); REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT, AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT, 0x0); REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT, AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ, 0x0); } /* Clean all spur register fields */ static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah) { REG_RMW_FIELD(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0); REG_RMW_FIELD(ah, AR_PHY_TIMING11, AR_PHY_TIMING11_SPUR_FREQ_SD, 0); REG_RMW_FIELD(ah, AR_PHY_TIMING11, AR_PHY_TIMING11_SPUR_DELTA_PHASE, 0); REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, 0); REG_RMW_FIELD(ah, AR_PHY_TIMING11, AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0); REG_RMW_FIELD(ah, AR_PHY_TIMING11, AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0); REG_RMW_FIELD(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0); REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 0); REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, 
AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 0); REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0); REG_RMW_FIELD(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0); REG_RMW_FIELD(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0); REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK, AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, 0); REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A, AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, 0); REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK, AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, 0); REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK, AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0); REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK, AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0); REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A, AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0); REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0); } static void ar9003_hw_spur_ofdm(struct ath_hw *ah, int freq_offset, int spur_freq_sd, int spur_delta_phase, int spur_subchannel_sd) { int mask_index = 0; /* OFDM Spur mitigation */ REG_RMW_FIELD(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0x1); REG_RMW_FIELD(ah, AR_PHY_TIMING11, AR_PHY_TIMING11_SPUR_FREQ_SD, spur_freq_sd); REG_RMW_FIELD(ah, AR_PHY_TIMING11, AR_PHY_TIMING11_SPUR_DELTA_PHASE, spur_delta_phase); REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd); REG_RMW_FIELD(ah, AR_PHY_TIMING11, AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1); REG_RMW_FIELD(ah, AR_PHY_TIMING11, AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1); REG_RMW_FIELD(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1); REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH, 34); REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 1); if (REG_READ_FIELD(ah, AR_PHY_MODE, AR_PHY_MODE_DYNAMIC) == 0x1) REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 1); mask_index = (freq_offset << 4) / 5; if (mask_index < 0) mask_index = mask_index - 1; mask_index = mask_index & 
0x7f; REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0x1); REG_RMW_FIELD(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0x1); REG_RMW_FIELD(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0x1); REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK, AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, mask_index); REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A, AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, mask_index); REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK, AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, mask_index); REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK, AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0xc); REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK, AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0xc); REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A, AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0); REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff); } static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah, struct ath9k_channel *chan, int freq_offset) { int spur_freq_sd = 0; int spur_subchannel_sd = 0; int spur_delta_phase = 0; if (IS_CHAN_HT40(chan)) { if (freq_offset < 0) { if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL, AR_PHY_GC_DYN2040_PRI_CH) == 0x0) spur_subchannel_sd = 1; else spur_subchannel_sd = 0; spur_freq_sd = ((freq_offset + 10) << 9) / 11; } else { if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL, AR_PHY_GC_DYN2040_PRI_CH) == 0x0) spur_subchannel_sd = 0; else spur_subchannel_sd = 1; spur_freq_sd = ((freq_offset - 10) << 9) / 11; } spur_delta_phase = (freq_offset << 17) / 5; } else { spur_subchannel_sd = 0; spur_freq_sd = (freq_offset << 9) /11; spur_delta_phase = (freq_offset << 18) / 5; } spur_freq_sd = spur_freq_sd & 0x3ff; spur_delta_phase = spur_delta_phase & 0xfffff; ar9003_hw_spur_ofdm(ah, freq_offset, spur_freq_sd, spur_delta_phase, spur_subchannel_sd); } /* Spur mitigation for OFDM */ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah, struct ath9k_channel *chan) { int synth_freq; int range = 10; int freq_offset = 0; int mode; u8* spurChansPtr; unsigned int i; struct 
ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; if (IS_CHAN_5GHZ(chan)) { spurChansPtr = &(eep->modalHeader5G.spurChans[0]); mode = 0; } else { spurChansPtr = &(eep->modalHeader2G.spurChans[0]); mode = 1; } if (spurChansPtr[0] == 0) return; /* No spur in the mode */ if (IS_CHAN_HT40(chan)) { range = 19; if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL, AR_PHY_GC_DYN2040_PRI_CH) == 0x0) synth_freq = chan->channel - 10; else synth_freq = chan->channel + 10; } else { range = 10; synth_freq = chan->channel; } ar9003_hw_spur_ofdm_clear(ah); for (i = 0; i < AR_EEPROM_MODAL_SPURS && spurChansPtr[i]; i++) { freq_offset = FBIN2FREQ(spurChansPtr[i], mode) - synth_freq; if (abs(freq_offset) < range) { ar9003_hw_spur_ofdm_work(ah, chan, freq_offset); break; } } } static void ar9003_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan) { ar9003_hw_spur_mitigate_mrc_cck(ah, chan); ar9003_hw_spur_mitigate_ofdm(ah, chan); } static u32 ar9003_hw_compute_pll_control(struct ath_hw *ah, struct ath9k_channel *chan) { u32 pll; pll = SM(0x5, AR_RTC_9300_PLL_REFDIV); if (chan && IS_CHAN_HALF_RATE(chan)) pll |= SM(0x1, AR_RTC_9300_PLL_CLKSEL); else if (chan && IS_CHAN_QUARTER_RATE(chan)) pll |= SM(0x2, AR_RTC_9300_PLL_CLKSEL); pll |= SM(0x2c, AR_RTC_9300_PLL_DIV); return pll; } static void ar9003_hw_set_channel_regs(struct ath_hw *ah, struct ath9k_channel *chan) { u32 phymode; u32 enableDacFifo = 0; enableDacFifo = (REG_READ(ah, AR_PHY_GEN_CTRL) & AR_PHY_GC_ENABLE_DAC_FIFO); /* Enable 11n HT, 20 MHz */ phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SINGLE_HT_LTF1 | AR_PHY_GC_SHORT_GI_40 | enableDacFifo; /* Configure baseband for dynamic 20/40 operation */ if (IS_CHAN_HT40(chan)) { phymode |= AR_PHY_GC_DYN2040_EN; /* Configure control (primary) channel at +-10MHz */ if ((chan->chanmode == CHANNEL_A_HT40PLUS) || (chan->chanmode == CHANNEL_G_HT40PLUS)) phymode |= AR_PHY_GC_DYN2040_PRI_CH; } /* make sure we preserve INI settings */ phymode |= REG_READ(ah, AR_PHY_GEN_CTRL); /* turn off Green Field 
detection for STA for now */ phymode &= ~AR_PHY_GC_GF_DETECT_EN; REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode); /* Configure MAC for 20/40 operation */ ath9k_hw_set11nmac2040(ah); /* global transmit timeout (25 TUs default)*/ REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S); /* carrier sense timeout */ REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S); } static void ar9003_hw_init_bb(struct ath_hw *ah, struct ath9k_channel *chan) { u32 synthDelay; /* * Wait for the frequency synth to settle (synth goes on * via AR_PHY_ACTIVE_EN). Read the phy active delay register. * Value is in 100ns increments. */ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; if (IS_CHAN_B(chan)) synthDelay = (4 * synthDelay) / 22; else synthDelay /= 10; /* Activate the PHY (includes baseband activate + synthesizer on) */ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); /* * There is an issue if the AP starts the calibration before * the base band timeout completes. This could result in the * rx_clear false triggering. As a workaround we add delay an * extra BASE_ACTIVATE_DELAY usecs to ensure this condition * does not happen. */ udelay(synthDelay + BASE_ACTIVATE_DELAY); } static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx) { switch (rx) { case 0x5: REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, AR_PHY_SWAP_ALT_CHAIN); case 0x3: case 0x1: case 0x2: case 0x7: REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx); REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx); break; default: break; } if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7)) REG_WRITE(ah, AR_SELFGEN_MASK, 0x3); else if (AR_SREV_9462(ah)) /* xxx only when MCI support is enabled */ REG_WRITE(ah, AR_SELFGEN_MASK, 0x3); else REG_WRITE(ah, AR_SELFGEN_MASK, tx); if (tx == 0x5) { REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, AR_PHY_SWAP_ALT_CHAIN); } } /* * Override INI values with chip specific configuration. 
*/ static void ar9003_hw_override_ini(struct ath_hw *ah) { u32 val; /* * Set the RX_ABORT and RX_DIS and clear it only after * RXE is set for MAC. This prevents frames with * corrupted descriptor status. */ REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); /* * For AR9280 and above, there is a new feature that allows * Multicast search based on both MAC Address and Key ID. By default, * this feature is enabled. But since the driver is not using this * feature, we switch it off; otherwise multicast search based on * MAC addr only will fail. */ val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE); REG_WRITE(ah, AR_PCU_MISC_MODE2, val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE); REG_SET_BIT(ah, AR_PHY_CCK_DETECT, AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV); } static void ar9003_hw_prog_ini(struct ath_hw *ah, struct ar5416IniArray *iniArr, int column) { unsigned int i, regWrites = 0; /* New INI format: Array may be undefined (pre, core, post arrays) */ if (!iniArr->ia_array) return; /* * New INI format: Pre, core, and post arrays for a given subsystem * may be modal (> 2 columns) or non-modal (2 columns). Determine if * the array is non-modal and force the column to 1. 
*/ if (column >= iniArr->ia_columns) column = 1; for (i = 0; i < iniArr->ia_rows; i++) { u32 reg = INI_RA(iniArr, i, 0); u32 val = INI_RA(iniArr, i, column); REG_WRITE(ah, reg, val); DO_DELAY(regWrites); } } static int ar9003_hw_process_ini(struct ath_hw *ah, struct ath9k_channel *chan) { unsigned int regWrites = 0, i; u32 modesIndex; switch (chan->chanmode) { case CHANNEL_A: case CHANNEL_A_HT20: modesIndex = 1; break; case CHANNEL_A_HT40PLUS: case CHANNEL_A_HT40MINUS: modesIndex = 2; break; case CHANNEL_G: case CHANNEL_G_HT20: case CHANNEL_B: modesIndex = 4; break; case CHANNEL_G_HT40PLUS: case CHANNEL_G_HT40MINUS: modesIndex = 3; break; default: return -EINVAL; } for (i = 0; i < ATH_INI_NUM_SPLIT; i++) { ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex); ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex); ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex); ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex); if (i == ATH_INI_POST && AR_SREV_9462_20(ah)) ar9003_hw_prog_ini(ah, &ah->ini_radio_post_sys2ant, modesIndex); } REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites); REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites); /* * For 5GHz channels requiring Fast Clock, apply * different modal values. 
*/ if (IS_CHAN_A_FAST_CLOCK(ah, chan)) REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex, regWrites); REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites); if (AR_SREV_9462(ah)) ar9003_hw_prog_ini(ah, &ah->ini_BTCOEX_MAX_TXPWR, 1); if (chan->channel == 2484) ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1); ah->modes_index = modesIndex; ar9003_hw_override_ini(ah); ar9003_hw_set_channel_regs(ah, chan); ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); ath9k_hw_apply_txpower(ah, chan, false); if (AR_SREV_9462(ah)) { if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0, AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) ah->enabled_cals |= TX_IQ_CAL; else ah->enabled_cals &= ~TX_IQ_CAL; if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) ah->enabled_cals |= TX_CL_CAL; else ah->enabled_cals &= ~TX_CL_CAL; } return 0; } static void ar9003_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan) { u32 rfMode = 0; if (chan == NULL) return; rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan)) ? 
AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM; if (IS_CHAN_A_FAST_CLOCK(ah, chan)) rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE); REG_WRITE(ah, AR_PHY_MODE, rfMode); } static void ar9003_hw_mark_phy_inactive(struct ath_hw *ah) { REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); } static void ar9003_hw_set_delta_slope(struct ath_hw *ah, struct ath9k_channel *chan) { u32 coef_scaled, ds_coef_exp, ds_coef_man; u32 clockMhzScaled = 0x64000000; struct chan_centers centers; /* * half and quarter rate can divide the scaled clock by 2 or 4 * scale for selected channel bandwidth */ if (IS_CHAN_HALF_RATE(chan)) clockMhzScaled = clockMhzScaled >> 1; else if (IS_CHAN_QUARTER_RATE(chan)) clockMhzScaled = clockMhzScaled >> 2; /* * ALGO -> coef = 1e8/fcarrier*fclock/40; * scaled coef to provide precision for this floating calculation */ ath9k_hw_get_channel_centers(ah, chan, &centers); coef_scaled = clockMhzScaled / centers.synth_center; ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man, &ds_coef_exp); REG_RMW_FIELD(ah, AR_PHY_TIMING3, AR_PHY_TIMING3_DSC_MAN, ds_coef_man); REG_RMW_FIELD(ah, AR_PHY_TIMING3, AR_PHY_TIMING3_DSC_EXP, ds_coef_exp); /* * For Short GI, * scaled coeff is 9/10 that of normal coeff */ coef_scaled = (9 * coef_scaled) / 10; ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man, &ds_coef_exp); /* for short gi */ REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA, AR_PHY_SGI_DSC_MAN, ds_coef_man); REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA, AR_PHY_SGI_DSC_EXP, ds_coef_exp); } static bool ar9003_hw_rfbus_req(struct ath_hw *ah) { REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN); return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN, AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT); } /* * Wait for the frequency synth to settle (synth goes on via PHY_ACTIVE_EN). * Read the phy active delay register. Value is in 100ns increments. 
*/ static void ar9003_hw_rfbus_done(struct ath_hw *ah) { u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; if (IS_CHAN_B(ah->curchan)) synthDelay = (4 * synthDelay) / 22; else synthDelay /= 10; udelay(synthDelay + BASE_ACTIVATE_DELAY); REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0); } static bool ar9003_hw_ani_control(struct ath_hw *ah, enum ath9k_ani_cmd cmd, int param) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_channel *chan = ah->curchan; struct ar5416AniState *aniState = &chan->ani; s32 value, value2; switch (cmd & ah->ani_function) { case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{ /* * on == 1 means ofdm weak signal detection is ON * on == 1 is the default, for less noise immunity * * on == 0 means ofdm weak signal detection is OFF * on == 0 means more noise imm */ u32 on = param ? 1 : 0; /* * make register setting for default * (weak sig detect ON) come from INI file */ int m1ThreshLow = on ? aniState->iniDef.m1ThreshLow : m1ThreshLow_off; int m2ThreshLow = on ? aniState->iniDef.m2ThreshLow : m2ThreshLow_off; int m1Thresh = on ? aniState->iniDef.m1Thresh : m1Thresh_off; int m2Thresh = on ? aniState->iniDef.m2Thresh : m2Thresh_off; int m2CountThr = on ? aniState->iniDef.m2CountThr : m2CountThr_off; int m2CountThrLow = on ? aniState->iniDef.m2CountThrLow : m2CountThrLow_off; int m1ThreshLowExt = on ? aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off; int m2ThreshLowExt = on ? aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off; int m1ThreshExt = on ? aniState->iniDef.m1ThreshExt : m1ThreshExt_off; int m2ThreshExt = on ? 
aniState->iniDef.m2ThreshExt : m2ThreshExt_off; REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_M1_THRESH_LOW, m1ThreshLow); REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_M2_THRESH_LOW, m2ThreshLow); REG_RMW_FIELD(ah, AR_PHY_SFCORR, AR_PHY_SFCORR_M1_THRESH, m1Thresh); REG_RMW_FIELD(ah, AR_PHY_SFCORR, AR_PHY_SFCORR_M2_THRESH, m2Thresh); REG_RMW_FIELD(ah, AR_PHY_SFCORR, AR_PHY_SFCORR_M2COUNT_THR, m2CountThr); REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW, m2CountThrLow); REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt); REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt); REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt); REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt); if (on) REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); else REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); if (!on != aniState->ofdmWeakSigDetectOff) { ath_dbg(common, ANI, "** ch %d: ofdm weak signal: %s=>%s\n", chan->channel, !aniState->ofdmWeakSigDetectOff ? "on" : "off", on ? 
"on" : "off"); if (on) ah->stats.ast_ani_ofdmon++; else ah->stats.ast_ani_ofdmoff++; aniState->ofdmWeakSigDetectOff = !on; } break; } case ATH9K_ANI_FIRSTEP_LEVEL:{ u32 level = param; if (level >= ARRAY_SIZE(firstep_table)) { ath_dbg(common, ANI, "ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n", level, ARRAY_SIZE(firstep_table)); return false; } /* * make register setting relative to default * from INI file & cap value */ value = firstep_table[level] - firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + aniState->iniDef.firstep; if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN) value = ATH9K_SIG_FIRSTEP_SETTING_MIN; if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX) value = ATH9K_SIG_FIRSTEP_SETTING_MAX; REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, AR_PHY_FIND_SIG_FIRSTEP, value); /* * we need to set first step low register too * make register setting relative to default * from INI file & cap value */ value2 = firstep_table[level] - firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + aniState->iniDef.firstepLow; if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN) value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN; if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX) value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX; REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW, AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2); if (level != aniState->firstepLevel) { ath_dbg(common, ANI, "** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n", chan->channel, aniState->firstepLevel, level, ATH9K_ANI_FIRSTEP_LVL_NEW, value, aniState->iniDef.firstep); ath_dbg(common, ANI, "** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n", chan->channel, aniState->firstepLevel, level, ATH9K_ANI_FIRSTEP_LVL_NEW, value2, aniState->iniDef.firstepLow); if (level > aniState->firstepLevel) ah->stats.ast_ani_stepup++; else if (level < aniState->firstepLevel) ah->stats.ast_ani_stepdown++; aniState->firstepLevel = level; } break; } case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{ u32 level = param; if (level >= ARRAY_SIZE(cycpwrThr1_table)) { ath_dbg(common, ANI, 
"ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n", level, ARRAY_SIZE(cycpwrThr1_table)); return false; } /* * make register setting relative to default * from INI file & cap value */ value = cycpwrThr1_table[level] - cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] + aniState->iniDef.cycpwrThr1; if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN) value = ATH9K_SIG_SPUR_IMM_SETTING_MIN; if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX) value = ATH9K_SIG_SPUR_IMM_SETTING_MAX; REG_RMW_FIELD(ah, AR_PHY_TIMING5, AR_PHY_TIMING5_CYCPWR_THR1, value); /* * set AR_PHY_EXT_CCA for extension channel * make register setting relative to default * from INI file & cap value */ value2 = cycpwrThr1_table[level] - cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] + aniState->iniDef.cycpwrThr1Ext; if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN) value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN; if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX) value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX; REG_RMW_FIELD(ah, AR_PHY_EXT_CCA, AR_PHY_EXT_CYCPWR_THR1, value2); if (level != aniState->spurImmunityLevel) { ath_dbg(common, ANI, "** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n", chan->channel, aniState->spurImmunityLevel, level, ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, value, aniState->iniDef.cycpwrThr1); ath_dbg(common, ANI, "** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n", chan->channel, aniState->spurImmunityLevel, level, ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, value2, aniState->iniDef.cycpwrThr1Ext); if (level > aniState->spurImmunityLevel) ah->stats.ast_ani_spurup++; else if (level < aniState->spurImmunityLevel) ah->stats.ast_ani_spurdown++; aniState->spurImmunityLevel = level; } break; } case ATH9K_ANI_MRC_CCK:{ /* * is_on == 1 means MRC CCK ON (default, less noise imm) * is_on == 0 means MRC CCK is OFF (more noise imm) */ bool is_on = param ? 
	/* NOTE(review): this span begins mid-way through
	 * ar9003_hw_ani_control(); the `1 : 0;` below completes a ternary
	 * whose condition lies before the visible window, inside the
	 * ATH9K_ANI_MRC_CCK command branch of its switch statement. */
			1 : 0;

		/* Mirror the requested on/off state into both MRC-CCK bits. */
		REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, AR_PHY_MRC_CCK_ENABLE, is_on);
		REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, AR_PHY_MRC_CCK_MUX_REG, is_on);

		/* Log and count only on an actual state transition. */
		if (!is_on != aniState->mrcCCKOff) {
			ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n",
				chan->channel,
				!aniState->mrcCCKOff ? "on" : "off",
				is_on ? "on" : "off");
			if (is_on)
				ah->stats.ast_ani_ccklow++;
			else
				ah->stats.ast_ani_cckhigh++;
			aniState->mrcCCKOff = !is_on;
		}
		break;
	}
	case ATH9K_ANI_PRESENT:
		break;
	default:
		ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
		return false;
	}

	/* Dump the resulting ANI state after a successful command. */
	ath_dbg(common, ANI,
		"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
		aniState->spurImmunityLevel,
		!aniState->ofdmWeakSigDetectOff ? "on" : "off",
		aniState->firstepLevel,
		!aniState->mrcCCKOff ? "on" : "off",
		aniState->listenTime,
		aniState->ofdmPhyErrCount,
		aniState->cckPhyErrCount);
	return true;
}

/*
 * Read the per-chain minimum CCA power (noise-floor) readings from the
 * PHY CCA registers into nfarray[].  Each field is a 9-bit value that is
 * sign-extended to int16_t; on HT40 channels the extension-channel
 * readings fill the slots at AR9300_MAX_CHAINS + chain.
 */
static void ar9003_hw_do_getnf(struct ath_hw *ah,
			       int16_t nfarray[NUM_NF_READINGS])
{
#define AR_PHY_CH_MINCCA_PWR	0x1FF00000
#define AR_PHY_CH_MINCCA_PWR_S	20
#define AR_PHY_CH_EXT_MINCCA_PWR 0x01FF0000
#define AR_PHY_CH_EXT_MINCCA_PWR_S 16

	int16_t nf;
	int i;

	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
		/* Only chains enabled in the RX chainmask are read. */
		if (ah->rxchainmask & BIT(i)) {
			/* 9-bit two's-complement field -> signed value */
			nf = MS(REG_READ(ah, ah->nf_regs[i]),
				AR_PHY_CH_MINCCA_PWR);
			nfarray[i] = sign_extend32(nf, 8);

			if (IS_CHAN_HT40(ah->curchan)) {
				u8 ext_idx = AR9300_MAX_CHAINS + i;

				nf = MS(REG_READ(ah, ah->nf_regs[ext_idx]),
					AR_PHY_CH_EXT_MINCCA_PWR);
				nfarray[ext_idx] = sign_extend32(nf, 8);
			}
		}
	}
}

/*
 * Install the noise-floor calibration limits (max/min/nominal, 2 GHz and
 * 5 GHz).  AR9300 defaults first, then chip-specific overrides.
 */
static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
{
	ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ;
	ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ;
	ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9300_2GHZ;
	ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ;
	ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ;
	ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9300_5GHZ;

	/* Chip-revision specific overrides of the AR9300 defaults. */
	if (AR_SREV_9330(ah))
		ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9330_2GHZ;

	if (AR_SREV_9462(ah)) {
		ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ;
		ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9462_2GHZ;
		ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ;
		ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9462_5GHZ;
	}
}

/*
 * Initialize the ANI register values with default (ini) values.
 * This routine is called during a (full) hardware reset after
 * all the registers are initialised from the INI.
 *
 * The hardware values captured here in iniDef serve as the baseline
 * the ANI algorithm restores to when it relaxes a mitigation level.
 */
static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
{
	struct ar5416AniState *aniState;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_ani_default *iniDef;
	u32 val;

	aniState = &ah->curchan->ani;
	iniDef = &aniState->iniDef;

	ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
		ah->hw_version.macVersion, ah->hw_version.macRev, ah->opmode,
		chan->channel, chan->channelFlags);

	/* Self-correlator thresholds (primary channel). */
	val = REG_READ(ah, AR_PHY_SFCORR);
	iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
	iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH);
	iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR);

	/* Low-power variants of the same thresholds. */
	val = REG_READ(ah, AR_PHY_SFCORR_LOW);
	iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW);
	iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW);
	iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW);

	/* Extension-channel thresholds. */
	val = REG_READ(ah, AR_PHY_SFCORR_EXT);
	iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH);
	iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH);
	iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW);
	iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW);
	iniDef->firstep = REG_READ_FIELD(ah, AR_PHY_FIND_SIG,
					 AR_PHY_FIND_SIG_FIRSTEP);
	iniDef->firstepLow = REG_READ_FIELD(ah, AR_PHY_FIND_SIG_LOW,
					    AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW);
	iniDef->cycpwrThr1 = REG_READ_FIELD(ah, AR_PHY_TIMING5,
					    AR_PHY_TIMING5_CYCPWR_THR1);
	iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah, AR_PHY_EXT_CCA,
					       AR_PHY_EXT_CYCPWR_THR1);

	/* these levels just got reset to defaults by the INI */
	aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
	aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
	aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
	aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK;
}

/*
 * Program the radar pulse detector from @conf, or disable detection
 * entirely when @conf is NULL.
 */
static void ar9003_hw_set_radar_params(struct ath_hw *ah,
				       struct ath_hw_radar_conf *conf)
{
	u32 radar_0 = 0, radar_1 = 0;

	if (!conf) {
		/* No configuration: turn the detector off and bail out. */
		REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
		return;
	}

	/* Build RADAR_0: enable bits plus pulse-matching thresholds. */
	radar_0 |= AR_PHY_RADAR_0_ENA | AR_PHY_RADAR_0_FFT_ENA;
	radar_0 |= SM(conf->fir_power, AR_PHY_RADAR_0_FIRPWR);
	radar_0 |= SM(conf->radar_rssi, AR_PHY_RADAR_0_RRSSI);
	radar_0 |= SM(conf->pulse_height, AR_PHY_RADAR_0_HEIGHT);
	radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
	radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);

	/* Build RADAR_1: length and relative power/step thresholds. */
	radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
	radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
	radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
	radar_1 |= SM(conf->pulse_inband_step, AR_PHY_RADAR_1_RELSTEP_THRESH);
	radar_1 |= SM(conf->radar_inband, AR_PHY_RADAR_1_RELPWR_THRESH);

	REG_WRITE(ah, AR_PHY_RADAR_0, radar_0);
	REG_WRITE(ah, AR_PHY_RADAR_1, radar_1);
	if (conf->ext_channel)
		REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
	else
		REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
}

/* Fill ah->radar_conf with the driver's default radar thresholds. */
static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
{
	struct ath_hw_radar_conf *conf = &ah->radar_conf;

	conf->fir_power = -28;
	conf->radar_rssi = 0;
	conf->pulse_height = 10;
	conf->pulse_rssi = 24;
	conf->pulse_inband = 8;
	conf->pulse_maxlen = 255;
	conf->pulse_inband_step = 12;
	conf->radar_inband = 8;
}

/*
 * Read the current antenna-diversity combining configuration out of
 * AR_PHY_MC_GAIN_CTRL into @antconf, and set the chip-dependent
 * LNA1/LNA2 delta and diversity group.
 */
static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
					   struct ath_hw_antcomb_conf *antconf)
{
	u32 regval;

	regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
	antconf->main_lna_conf = (regval & AR_PHY_9485_ANT_DIV_MAIN_LNACONF) >>
				 AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S;
	antconf->alt_lna_conf = (regval & AR_PHY_9485_ANT_DIV_ALT_LNACONF) >>
				AR_PHY_9485_ANT_DIV_ALT_LNACONF_S;
	antconf->fast_div_bias = (regval & AR_PHY_9485_ANT_FAST_DIV_BIAS) >>
				 AR_PHY_9485_ANT_FAST_DIV_BIAS_S;

	/* Per-chip diversity parameters (not read from hardware). */
	if (AR_SREV_9330_11(ah)) {
		antconf->lna1_lna2_delta = -9;
		antconf->div_group = 1;
	} else if (AR_SREV_9485(ah)) {
		antconf->lna1_lna2_delta = -9;
		antconf->div_group = 2;
	} else {
		antconf->lna1_lna2_delta = -3;
		antconf->div_group = 0;
	}
}

/*
 * Write the antenna-diversity combining configuration in @antconf back
 * into AR_PHY_MC_GAIN_CTRL via a read-modify-write of the five fields.
 */
static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
					   struct ath_hw_antcomb_conf *antconf)
{
	u32 regval;

	regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
	regval &= ~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF |
		    AR_PHY_9485_ANT_DIV_ALT_LNACONF |
		    AR_PHY_9485_ANT_FAST_DIV_BIAS |
		    AR_PHY_9485_ANT_DIV_MAIN_GAINTB |
		    AR_PHY_9485_ANT_DIV_ALT_GAINTB);
	regval |= ((antconf->main_lna_conf <<
		    AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S) &
		   AR_PHY_9485_ANT_DIV_MAIN_LNACONF);
	regval |= ((antconf->alt_lna_conf <<
		    AR_PHY_9485_ANT_DIV_ALT_LNACONF_S) &
		   AR_PHY_9485_ANT_DIV_ALT_LNACONF);
	regval |= ((antconf->fast_div_bias <<
		    AR_PHY_9485_ANT_FAST_DIV_BIAS_S) &
		   AR_PHY_9485_ANT_FAST_DIV_BIAS);
	regval |= ((antconf->main_gaintb <<
		    AR_PHY_9485_ANT_DIV_MAIN_GAINTB_S) &
		   AR_PHY_9485_ANT_DIV_MAIN_GAINTB);
	regval |= ((antconf->alt_gaintb <<
		    AR_PHY_9485_ANT_DIV_ALT_GAINTB_S) &
		   AR_PHY_9485_ANT_DIV_ALT_GAINTB);

	REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
}

/*
 * Fast channel change: reload only the mode-dependent INI tables when
 * the mode index differs from the currently programmed one, then set
 * the RF mode for @chan.  *ini_reloaded tells the caller whether the
 * tables were actually reprogrammed.  Returns 0 or -EINVAL for an
 * unknown channel mode.
 */
static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
				      struct ath9k_channel *chan,
				      u8 *ini_reloaded)
{
	unsigned int regWrites = 0;
	u32 modesIndex;

	/* Map the channel mode to its INI table column. */
	switch (chan->chanmode) {
	case CHANNEL_A:
	case CHANNEL_A_HT20:
		modesIndex = 1;
		break;
	case CHANNEL_A_HT40PLUS:
	case CHANNEL_A_HT40MINUS:
		modesIndex = 2;
		break;
	case CHANNEL_G:
	case CHANNEL_G_HT20:
	case CHANNEL_B:
		modesIndex = 4;
		break;
	case CHANNEL_G_HT40PLUS:
	case CHANNEL_G_HT40MINUS:
		modesIndex = 3;
		break;
	default:
		return -EINVAL;
	}

	/* Same mode as last time: the tables are already loaded. */
	if (modesIndex == ah->modes_index) {
		*ini_reloaded = false;
		goto set_rfmode;
	}

	ar9003_hw_prog_ini(ah, &ah->iniSOC[ATH_INI_POST], modesIndex);
	ar9003_hw_prog_ini(ah, &ah->iniMac[ATH_INI_POST], modesIndex);
	ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex);
	ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex);

	if (AR_SREV_9462_20(ah))
		ar9003_hw_prog_ini(ah, &ah->ini_radio_post_sys2ant,
				   modesIndex);

	REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);

	/*
	 * For 5GHz channels requiring Fast Clock, apply
	 * different modal values.
	 */
	if (IS_CHAN_A_FAST_CLOCK(ah, chan))
		REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex, regWrites);

	REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);

	ah->modes_index = modesIndex;
	*ini_reloaded = true;

set_rfmode:
	ar9003_hw_set_rfmode(ah, chan);
	return 0;
}

/*
 * Hook up all AR9003-family PHY callbacks into the hardware ops tables
 * and install the family's NF limits, radar defaults and CCA register
 * map.  Called once during hardware attach.
 */
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
	struct ath_hw_ops *ops = ath9k_hw_ops(ah);
	static const u32 ar9300_cca_regs[6] = {
		AR_PHY_CCA_0,
		AR_PHY_CCA_1,
		AR_PHY_CCA_2,
		AR_PHY_EXT_CCA,
		AR_PHY_EXT_CCA_1,
		AR_PHY_EXT_CCA_2,
	};

	priv_ops->rf_set_freq = ar9003_hw_set_channel;
	priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate;
	priv_ops->compute_pll_control = ar9003_hw_compute_pll_control;
	priv_ops->set_channel_regs = ar9003_hw_set_channel_regs;
	priv_ops->init_bb = ar9003_hw_init_bb;
	priv_ops->process_ini = ar9003_hw_process_ini;
	priv_ops->set_rfmode = ar9003_hw_set_rfmode;
	priv_ops->mark_phy_inactive = ar9003_hw_mark_phy_inactive;
	priv_ops->set_delta_slope = ar9003_hw_set_delta_slope;
	priv_ops->rfbus_req = ar9003_hw_rfbus_req;
	priv_ops->rfbus_done = ar9003_hw_rfbus_done;
	priv_ops->ani_control = ar9003_hw_ani_control;
	priv_ops->do_getnf = ar9003_hw_do_getnf;
	priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs;
	priv_ops->set_radar_params = ar9003_hw_set_radar_params;
	priv_ops->fast_chan_change = ar9003_hw_fast_chan_change;

	ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
	ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;

	ar9003_hw_set_nf_limits(ah);
	ar9003_hw_set_radar_conf(ah);
	memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
}

/*
 * Configure the baseband hang watchdog.  A timeout of 0 disables the
 * watchdog entirely; otherwise the timeout (bounded to 10 s) is
 * converted to hardware ticks and armed with IRQ-on-expiry but without
 * chip reset.
 */
void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 idle_tmo_ms = ah->bb_watchdog_timeout_ms;
	u32 val, idle_count;

	if (!idle_tmo_ms) {
		/* disable IRQ, disable chip-reset for BB panic */
		REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_2,
			  REG_READ(ah, AR_PHY_WATCHDOG_CTL_2) &
			  ~(AR_PHY_WATCHDOG_RST_ENABLE |
			    AR_PHY_WATCHDOG_IRQ_ENABLE));

		/* disable watchdog in non-IDLE mode, disable in IDLE mode */
		REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_1,
			  REG_READ(ah, AR_PHY_WATCHDOG_CTL_1) &
			  ~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
			    AR_PHY_WATCHDOG_IDLE_ENABLE));

		ath_dbg(common, RESET, "Disabled BB Watchdog\n");
		return;
	}

	/* enable IRQ, disable chip-reset for BB watchdog */
	val = REG_READ(ah, AR_PHY_WATCHDOG_CTL_2) & AR_PHY_WATCHDOG_CNTL2_MASK;
	REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_2,
		  (val | AR_PHY_WATCHDOG_IRQ_ENABLE) &
		  ~AR_PHY_WATCHDOG_RST_ENABLE);

	/* bound limit to 10 secs */
	if (idle_tmo_ms > 10000)
		idle_tmo_ms = 10000;

	/*
	 * The time unit for watchdog event is 2^15 44/88MHz cycles.
	 *
	 * For HT20 we have a time unit of 2^15/44 MHz = .74 ms per tick
	 * For HT40 we have a time unit of 2^15/88 MHz = .37 ms per tick
	 *
	 * Given we use fast clock now in 5 GHz, these time units should
	 * be common for both 2 GHz and 5 GHz.
	 */
	idle_count = (100 * idle_tmo_ms) / 74;
	if (ah->curchan && IS_CHAN_HT40(ah->curchan))
		idle_count = (100 * idle_tmo_ms) / 37;

	/*
	 * enable watchdog in non-IDLE mode, disable in IDLE mode,
	 * set idle time-out.
	 */
	REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_1,
		  AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
		  AR_PHY_WATCHDOG_IDLE_MASK |
		  (AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2)));

	ath_dbg(common, RESET, "Enabled BB Watchdog timeout (%u ms)\n",
		idle_tmo_ms);
}

/* Latch the BB watchdog status for later printing and clear it. */
void ar9003_hw_bb_watchdog_read(struct ath_hw *ah)
{
	/*
	 * we want to avoid printing in ISR context so we save the
	 * watchdog status to be printed later in bottom half context.
	 */
	ah->bb_watchdog_last_status = REG_READ(ah, AR_PHY_WATCHDOG_STATUS);

	/*
	 * the watchdog timer should reset on status read but to be
	 * sure we write 0 to the watchdog status bit.
	 */
	REG_WRITE(ah, AR_PHY_WATCHDOG_STATUS,
		  ah->bb_watchdog_last_status & ~AR_PHY_WATCHDOG_STATUS_CLR);
}

/*
 * Pretty-print the last latched BB watchdog status and related control
 * registers.  No-op unless ATH_DBG_RESET debugging is enabled.
 */
void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 status;

	if (likely(!(common->debug_mask & ATH_DBG_RESET)))
		return;

	status = ah->bb_watchdog_last_status;
	ath_dbg(common, RESET,
		"\n==== BB update: BB status=0x%08x ====\n", status);
	ath_dbg(common, RESET,
		"** BB state: wd=%u det=%u rdar=%u rOFDM=%d rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n",
		MS(status, AR_PHY_WATCHDOG_INFO),
		MS(status, AR_PHY_WATCHDOG_DET_HANG),
		MS(status, AR_PHY_WATCHDOG_RADAR_SM),
		MS(status, AR_PHY_WATCHDOG_RX_OFDM_SM),
		MS(status, AR_PHY_WATCHDOG_RX_CCK_SM),
		MS(status, AR_PHY_WATCHDOG_TX_OFDM_SM),
		MS(status, AR_PHY_WATCHDOG_TX_CCK_SM),
		MS(status, AR_PHY_WATCHDOG_AGC_SM),
		MS(status, AR_PHY_WATCHDOG_SRCH_SM));

	ath_dbg(common, RESET, "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
		REG_READ(ah, AR_PHY_WATCHDOG_CTL_1),
		REG_READ(ah, AR_PHY_WATCHDOG_CTL_2));
	ath_dbg(common, RESET, "** BB mode: BB_gen_controls=0x%08x **\n",
		REG_READ(ah, AR_PHY_GEN_CTRL));

#define PCT(_field) (common->cc_survey._field * 100 / common->cc_survey.cycles)
	if (common->cc_survey.cycles)
		ath_dbg(common, RESET,
			"** BB busy times: rx_clear=%d%%, rx_frame=%d%%, tx_frame=%d%% **\n",
			PCT(rx_busy), PCT(rx_frame), PCT(tx_frame));

	ath_dbg(common, RESET, "==== BB update: done ====\n\n");
}
EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);

/*
 * Permanently disable the PHY restart feature once an RX-OFDM state
 * machine hang (state 0xb) has been observed, to avoid a BB hang on
 * subsequent restarts.  bb_hang_rx_ofdm latches the condition.
 */
void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
{
	u32 val;

	/* While receiving unsupported rate frame rx state machine
	 * gets into a state 0xb and if phy_restart happens in that
	 * state, BB would go hang. If RXSM is in 0xb state after
	 * first bb panic, ensure to disable the phy_restart.
	 */
	if (!((MS(ah->bb_watchdog_last_status,
		  AR_PHY_WATCHDOG_RX_OFDM_SM) == 0xb) ||
	      ah->bb_hang_rx_ofdm))
		return;

	ah->bb_hang_rx_ofdm = true;
	val = REG_READ(ah, AR_PHY_RESTART);
	val &= ~AR_PHY_RESTART_ENA;
	REG_WRITE(ah, AR_PHY_RESTART, val);
}
EXPORT_SYMBOL(ar9003_hw_disable_phy_restart);
gpl-2.0
npeacock/android_kernel_mediatek_mt6575
arch/mips/cobalt/irq.c
3995
1363
/* * IRQ vector handles * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995, 1996, 1997, 2003 by Ralf Baechle */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <asm/i8259.h> #include <asm/irq_cpu.h> #include <asm/irq_gt641xx.h> #include <asm/gt64120.h> #include <irq.h> asmlinkage void plat_irq_dispatch(void) { unsigned pending = read_c0_status() & read_c0_cause() & ST0_IM; int irq; if (pending & CAUSEF_IP2) gt641xx_irq_dispatch(); else if (pending & CAUSEF_IP6) { irq = i8259_irq(); if (irq < 0) spurious_interrupt(); else do_IRQ(irq); } else if (pending & CAUSEF_IP3) do_IRQ(MIPS_CPU_IRQ_BASE + 3); else if (pending & CAUSEF_IP4) do_IRQ(MIPS_CPU_IRQ_BASE + 4); else if (pending & CAUSEF_IP5) do_IRQ(MIPS_CPU_IRQ_BASE + 5); else if (pending & CAUSEF_IP7) do_IRQ(MIPS_CPU_IRQ_BASE + 7); else spurious_interrupt(); } static struct irqaction cascade = { .handler = no_action, .name = "cascade", }; void __init arch_init_irq(void) { mips_cpu_irq_init(); gt641xx_irq_init(); init_i8259_irqs(); setup_irq(GT641XX_CASCADE_IRQ, &cascade); setup_irq(I8259_CASCADE_IRQ, &cascade); }
gpl-2.0
ibkim/uml
sound/soc/pxa/tavorevb3.c
7323
5283
/*
 * tavorevb3.c -- SoC audio for Tavor EVB3
 *
 * Copyright (C) 2010 Marvell International Ltd.
 *	Haojian Zhuang <haojian.zhuang@marvell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include <asm/mach-types.h>

#include "../codecs/88pm860x-codec.h"
#include "pxa-ssp.h"

/* Forward declaration: used in the dai_link table below. */
static int evb3_pm860x_init(struct snd_soc_pcm_runtime *rtd);

/* The "soc-audio" platform device registered by tavorevb3_init(). */
static struct platform_device *evb3_snd_device;

/* Jack objects for headset (headphone + buttons) and microphone. */
static struct snd_soc_jack hs_jack, mic_jack;

static struct snd_soc_jack_pin hs_jack_pins[] = {
	{ .pin = "Headset Stereophone", .mask = SND_JACK_HEADPHONE, },
};

static struct snd_soc_jack_pin mic_jack_pins[] = {
	{ .pin = "Headset Mic 2", .mask = SND_JACK_MICROPHONE, },
};

/* tavorevb3 machine dapm widgets */
static const struct snd_soc_dapm_widget evb3_dapm_widgets[] = {
	SND_SOC_DAPM_HP("Headset Stereophone", NULL),
	SND_SOC_DAPM_LINE("Lineout Out 1", NULL),
	SND_SOC_DAPM_LINE("Lineout Out 2", NULL),
	SND_SOC_DAPM_SPK("Ext Speaker", NULL),
	SND_SOC_DAPM_MIC("Ext Mic 1", NULL),
	SND_SOC_DAPM_MIC("Headset Mic 2", NULL),
	SND_SOC_DAPM_MIC("Ext Mic 3", NULL),
};

/* tavorevb3 machine audio map: board widget -> codec pin routing */
static const struct snd_soc_dapm_route evb3_audio_map[] = {
	{"Headset Stereophone", NULL, "HS1"},
	{"Headset Stereophone", NULL, "HS2"},

	{"Ext Speaker", NULL, "LSP"},
	{"Ext Speaker", NULL, "LSN"},

	{"Lineout Out 1", NULL, "LINEOUT1"},
	{"Lineout Out 2", NULL, "LINEOUT2"},

	{"MIC1P", NULL, "Mic1 Bias"},
	{"MIC1N", NULL, "Mic1 Bias"},
	{"Mic1 Bias", NULL, "Ext Mic 1"},

	{"MIC2P", NULL, "Mic1 Bias"},
	{"MIC2N", NULL, "Mic1 Bias"},
	{"Mic1 Bias", NULL, "Headset Mic 2"},

	{"MIC3P", NULL, "Mic3 Bias"},
	{"MIC3N", NULL, "Mic3 Bias"},
	{"Mic3 Bias", NULL, "Ext Mic 3"},
};

/*
 * hw_params hook for the I2S link: route the SSP clock from the
 * network PLL, let the codec output its clock, and configure a
 * two-slot TDM (slots 0 and 1 active for both TX and RX) at the
 * stream's physical sample width.
 */
static int evb3_i2s_hw_params(struct snd_pcm_substream *substream,
			      struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	int width = snd_pcm_format_physical_width(params_format(params));
	int ret;

	ret = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_NET_PLL, 0,
				     PM860X_CLK_DIR_OUT);
	if (ret < 0)
		return ret;

	ret = snd_soc_dai_set_sysclk(codec_dai, 0, 0, PM860X_CLK_DIR_OUT);
	if (ret < 0)
		return ret;

	/* TDM: tx_mask = rx_mask = 0b11, 2 slots, slot width = width */
	ret = snd_soc_dai_set_tdm_slot(cpu_dai, 3, 3, 2, width);
	return ret;
}

static struct snd_soc_ops evb3_i2s_ops = {
	.hw_params	= evb3_i2s_hw_params,
};

/* Single DAI link: PXA SSP1 <-> 88PM860x I2S, codec as clock/frame master. */
static struct snd_soc_dai_link evb3_dai[] = {
	{
		.name		= "88PM860x I2S",
		.stream_name	= "I2S Audio",
		.cpu_dai_name	= "pxa-ssp-dai.1",
		.codec_dai_name	= "88pm860x-i2s",
		.platform_name	= "pxa-pcm-audio",
		.codec_name	= "88pm860x-codec",
		.init		= evb3_pm860x_init,
		.dai_fmt	= SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
				  SND_SOC_DAIFMT_CBM_CFM,
		.ops		= &evb3_i2s_ops,
	},
};

static struct snd_soc_card snd_soc_card_evb3 = {
	.name = "Tavor EVB3",
	.owner = THIS_MODULE,
	.dai_link = evb3_dai,
	.num_links = ARRAY_SIZE(evb3_dai),

	.dapm_widgets = evb3_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(evb3_dapm_widgets),
	.dapm_routes = evb3_audio_map,
	.num_dapm_routes = ARRAY_SIZE(evb3_audio_map),
};

/*
 * Per-link init: set the initial DAPM pin states (on-board speaker and
 * mics enabled, headset pins off until jack detection reports them),
 * create both jacks, and arm the PM860x jack-detection hardware.
 */
static int evb3_pm860x_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	/* connected pins */
	snd_soc_dapm_enable_pin(dapm, "Ext Speaker");
	snd_soc_dapm_enable_pin(dapm, "Ext Mic 1");
	snd_soc_dapm_enable_pin(dapm, "Ext Mic 3");
	snd_soc_dapm_disable_pin(dapm, "Headset Mic 2");
	snd_soc_dapm_disable_pin(dapm, "Headset Stereophone");

	/* Headset jack detection */
	snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE |
			 SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2,
			 &hs_jack);
	snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins),
			      hs_jack_pins);
	snd_soc_jack_new(codec, "Microphone Jack", SND_JACK_MICROPHONE,
			 &mic_jack);
	snd_soc_jack_add_pins(&mic_jack, ARRAY_SIZE(mic_jack_pins),
			      mic_jack_pins);

	/* headphone, microphone detection & headset short detection */
	pm860x_hs_jack_detect(codec, &hs_jack, SND_JACK_HEADPHONE,
			      SND_JACK_BTN_0, SND_JACK_BTN_1, SND_JACK_BTN_2);
	pm860x_mic_jack_detect(codec, &hs_jack, SND_JACK_MICROPHONE);
	return 0;
}

/*
 * Module init: only load on the Tavor EVB3 board; register a
 * "soc-audio" platform device carrying the card definition.
 */
static int __init tavorevb3_init(void)
{
	int ret;

	if (!machine_is_tavorevb3())
		return -ENODEV;
	evb3_snd_device = platform_device_alloc("soc-audio", -1);
	if (!evb3_snd_device)
		return -ENOMEM;

	platform_set_drvdata(evb3_snd_device, &snd_soc_card_evb3);
	ret = platform_device_add(evb3_snd_device);

	if (ret)
		/* Drop the reference taken by platform_device_alloc(). */
		platform_device_put(evb3_snd_device);

	return ret;
}

static void __exit tavorevb3_exit(void)
{
	platform_device_unregister(evb3_snd_device);
}

module_init(tavorevb3_init);
module_exit(tavorevb3_exit);

MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
MODULE_DESCRIPTION("ALSA SoC 88PM860x Tavor EVB3");
MODULE_LICENSE("GPL");
gpl-2.0
limitedev66/android_kernel_shooteru
drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
7835
93021
/** @verbatim Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module. ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier Tel: +19(0)7223/9493-0 Fax: +49(0)7223/9493-92 http://www.addi-data.com info@addi-data.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA You should also find the complete GPL in the COPYING file accompanying this source code. @endverbatim */ /* +-----------------------------------------------------------------------+ | (C) ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier | +-----------------------------------------------------------------------+ | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com | | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com | +-----------------------------------------------------------------------+ | Project : APCI-3120 | Compiler : GCC | | Module name : hwdrv_apci3120.c| Version : 2.96 | +-------------------------------+---------------------------------------+ | Project manager: Eric Stolz | Date : 02/12/2002 | +-----------------------------------------------------------------------+ | Description :APCI3120 Module. 
Hardware abstraction Layer for APCI3120| +-----------------------------------------------------------------------+ | UPDATE'S | +-----------------------------------------------------------------------+ | Date | Author | Description of updates | +----------+-----------+------------------------------------------------+ | | | | | | | | +----------+-----------+------------------------------------------------+ */ #include "hwdrv_apci3120.h" static unsigned int ui_Temp; /* FUNCTION DEFINITIONS */ /* +----------------------------------------------------------------------------+ | ANALOG INPUT SUBDEVICE | +----------------------------------------------------------------------------+ */ /* +----------------------------------------------------------------------------+ | Function name :int i_APCI3120_InsnConfigAnalogInput(struct comedi_device *dev,| | struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) | | | +----------------------------------------------------------------------------+ | Task : Calls card specific function | | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | struct comedi_subdevice *s | | struct comedi_insn *insn | | unsigned int *data | +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ int i_APCI3120_InsnConfigAnalogInput(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int i; if ((data[0] != APCI3120_EOC_MODE) && (data[0] != APCI3120_EOS_MODE)) return -1; /* Check for Conversion time to be added ?? 
*/ devpriv->ui_EocEosConversionTime = data[2]; if (data[0] == APCI3120_EOS_MODE) { /* Test the number of the channel */ for (i = 0; i < data[3]; i++) { if (CR_CHAN(data[4 + i]) >= devpriv->s_EeParameters.i_NbrAiChannel) { printk("bad channel list\n"); return -2; } } devpriv->b_InterruptMode = APCI3120_EOS_MODE; if (data[1]) devpriv->b_EocEosInterrupt = APCI3120_ENABLE; else devpriv->b_EocEosInterrupt = APCI3120_DISABLE; /* Copy channel list and Range List to devpriv */ devpriv->ui_AiNbrofChannels = data[3]; for (i = 0; i < devpriv->ui_AiNbrofChannels; i++) devpriv->ui_AiChannelList[i] = data[4 + i]; } else { /* EOC */ devpriv->b_InterruptMode = APCI3120_EOC_MODE; if (data[1]) devpriv->b_EocEosInterrupt = APCI3120_ENABLE; else devpriv->b_EocEosInterrupt = APCI3120_DISABLE; } return insn->n; } /* +----------------------------------------------------------------------------+ | Function name :int i_APCI3120_InsnReadAnalogInput(struct comedi_device *dev, | | struct comedi_subdevice *s,struct comedi_insn *insn, unsigned int *data) | | | +----------------------------------------------------------------------------+ | Task : card specific function | | Reads analog input in synchronous mode | | EOC and EOS is selected as per configured | | if no conversion time is set uses default conversion | | time 10 microsec. 
| | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | struct comedi_subdevice *s | | struct comedi_insn *insn | | unsigned int *data | +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ int i_APCI3120_InsnReadAnalogInput(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned short us_ConvertTiming, us_TmpValue, i; unsigned char b_Tmp; /* fix conversion time to 10 us */ if (!devpriv->ui_EocEosConversionTime) { printk("No timer0 Value using 10 us\n"); us_ConvertTiming = 10; } else us_ConvertTiming = (unsigned short) (devpriv->ui_EocEosConversionTime / 1000); /* nano to useconds */ /* this_board->i_hwdrv_InsnReadAnalogInput(dev,us_ConvertTiming,insn->n,&insn->chanspec,data,insn->unused[0]); */ /* Clear software registers */ devpriv->b_TimerSelectMode = 0; devpriv->b_ModeSelectRegister = 0; devpriv->us_OutputRegister = 0; /* devpriv->b_DigitalOutputRegister=0; */ if (insn->unused[0] == 222) { /* second insn read */ for (i = 0; i < insn->n; i++) data[i] = devpriv->ui_AiReadData[i]; } else { devpriv->tsk_Current = current; /* Save the current process task structure */ /* * Testing if board have the new Quartz and calculate the time value * to set in the timer */ us_TmpValue = (unsigned short) inw(devpriv->iobase + APCI3120_RD_STATUS); /* EL250804: Testing if board APCI3120 have the new Quartz or if it is an APCI3001 */ if ((us_TmpValue & 0x00B0) == 0x00B0 || !strcmp(this_board->pc_DriverName, "apci3001")) { us_ConvertTiming = (us_ConvertTiming * 2) - 2; } else { us_ConvertTiming = ((us_ConvertTiming * 12926) / 10000) - 1; } us_TmpValue = (unsigned short) devpriv->b_InterruptMode; switch (us_TmpValue) { case APCI3120_EOC_MODE: /* * Testing the interrupt flag and set the EOC bit Clears the FIFO */ 
inw(devpriv->iobase + APCI3120_RESET_FIFO); /* Initialize the sequence array */ /* if (!i_APCI3120_SetupChannelList(dev,s,1,chanlist,0)) return -EINVAL; */ if (!i_APCI3120_SetupChannelList(dev, s, 1, &insn->chanspec, 0)) return -EINVAL; /* Initialize Timer 0 mode 4 */ devpriv->b_TimerSelectMode = (devpriv-> b_TimerSelectMode & 0xFC) | APCI3120_TIMER_0_MODE_4; outb(devpriv->b_TimerSelectMode, devpriv->iobase + APCI3120_TIMER_CRT1); /* Reset the scan bit and Disables the EOS, DMA, EOC interrupt */ devpriv->b_ModeSelectRegister = devpriv-> b_ModeSelectRegister & APCI3120_DISABLE_SCAN; if (devpriv->b_EocEosInterrupt == APCI3120_ENABLE) { /* Disables the EOS,DMA and enables the EOC interrupt */ devpriv->b_ModeSelectRegister = (devpriv-> b_ModeSelectRegister & APCI3120_DISABLE_EOS_INT) | APCI3120_ENABLE_EOC_INT; inw(devpriv->iobase); } else { devpriv->b_ModeSelectRegister = devpriv-> b_ModeSelectRegister & APCI3120_DISABLE_ALL_INTERRUPT_WITHOUT_TIMER; } outb(devpriv->b_ModeSelectRegister, devpriv->iobase + APCI3120_WRITE_MODE_SELECT); /* Sets gate 0 */ devpriv->us_OutputRegister = (devpriv-> us_OutputRegister & APCI3120_CLEAR_PA_PR) | APCI3120_ENABLE_TIMER0; outw(devpriv->us_OutputRegister, devpriv->iobase + APCI3120_WR_ADDRESS); /* Select Timer 0 */ b_Tmp = ((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_0_WORD; outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0); /* Set the conversion time */ outw(us_ConvertTiming, devpriv->iobase + APCI3120_TIMER_VALUE); us_TmpValue = (unsigned short) inw(dev->iobase + APCI3120_RD_STATUS); if (devpriv->b_EocEosInterrupt == APCI3120_DISABLE) { do { /* Waiting for the end of conversion */ us_TmpValue = inw(devpriv->iobase + APCI3120_RD_STATUS); } while ((us_TmpValue & APCI3120_EOC) == APCI3120_EOC); /* Read the result in FIFO and put it in insn data pointer */ us_TmpValue = inw(devpriv->iobase + 0); *data = us_TmpValue; inw(devpriv->iobase + APCI3120_RESET_FIFO); } break; case APCI3120_EOS_MODE: inw(devpriv->iobase); 
/* Clears the FIFO */ inw(devpriv->iobase + APCI3120_RESET_FIFO); /* clear PA PR and disable timer 0 */ devpriv->us_OutputRegister = (devpriv-> us_OutputRegister & APCI3120_CLEAR_PA_PR) | APCI3120_DISABLE_TIMER0; outw(devpriv->us_OutputRegister, devpriv->iobase + APCI3120_WR_ADDRESS); if (!i_APCI3120_SetupChannelList(dev, s, devpriv->ui_AiNbrofChannels, devpriv->ui_AiChannelList, 0)) return -EINVAL; /* Initialize Timer 0 mode 2 */ devpriv->b_TimerSelectMode = (devpriv-> b_TimerSelectMode & 0xFC) | APCI3120_TIMER_0_MODE_2; outb(devpriv->b_TimerSelectMode, devpriv->iobase + APCI3120_TIMER_CRT1); /* Select Timer 0 */ b_Tmp = ((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_0_WORD; outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0); /* Set the conversion time */ outw(us_ConvertTiming, devpriv->iobase + APCI3120_TIMER_VALUE); /* Set the scan bit */ devpriv->b_ModeSelectRegister = devpriv-> b_ModeSelectRegister | APCI3120_ENABLE_SCAN; outb(devpriv->b_ModeSelectRegister, devpriv->iobase + APCI3120_WRITE_MODE_SELECT); /* If Interrupt function is loaded */ if (devpriv->b_EocEosInterrupt == APCI3120_ENABLE) { /* Disables the EOC,DMA and enables the EOS interrupt */ devpriv->b_ModeSelectRegister = (devpriv-> b_ModeSelectRegister & APCI3120_DISABLE_EOC_INT) | APCI3120_ENABLE_EOS_INT; inw(devpriv->iobase); } else devpriv->b_ModeSelectRegister = devpriv-> b_ModeSelectRegister & APCI3120_DISABLE_ALL_INTERRUPT_WITHOUT_TIMER; outb(devpriv->b_ModeSelectRegister, devpriv->iobase + APCI3120_WRITE_MODE_SELECT); inw(devpriv->iobase + APCI3120_RD_STATUS); /* Sets gate 0 */ devpriv->us_OutputRegister = devpriv-> us_OutputRegister | APCI3120_ENABLE_TIMER0; outw(devpriv->us_OutputRegister, devpriv->iobase + APCI3120_WR_ADDRESS); /* Start conversion */ outw(0, devpriv->iobase + APCI3120_START_CONVERSION); /* Waiting of end of conversion if interrupt is not installed */ if (devpriv->b_EocEosInterrupt == APCI3120_DISABLE) { /* Waiting the end of conversion */ do { 
us_TmpValue = inw(devpriv->iobase + APCI3120_RD_STATUS); } while ((us_TmpValue & APCI3120_EOS) != APCI3120_EOS); for (i = 0; i < devpriv->ui_AiNbrofChannels; i++) { /* Read the result in FIFO and write them in shared memory */ us_TmpValue = inw(devpriv->iobase); data[i] = (unsigned int) us_TmpValue; } devpriv->b_InterruptMode = APCI3120_EOC_MODE; /* Restore defaults. */ } break; default: printk("inputs wrong\n"); } devpriv->ui_EocEosConversionTime = 0; /* re initializing the variable; */ } return insn->n; } /* +----------------------------------------------------------------------------+ | Function name :int i_APCI3120_StopCyclicAcquisition(struct comedi_device *dev,| | struct comedi_subdevice *s)| | | +----------------------------------------------------------------------------+ | Task : Stops Cyclic acquisition | | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | struct comedi_subdevice *s | | | +----------------------------------------------------------------------------+ | Return Value :0 | | | +----------------------------------------------------------------------------+ */ int i_APCI3120_StopCyclicAcquisition(struct comedi_device *dev, struct comedi_subdevice *s) { /* Disable A2P Fifo write and AMWEN signal */ outw(0, devpriv->i_IobaseAddon + 4); /* Disable Bus Master ADD ON */ outw(APCI3120_ADD_ON_AGCSTS_LOW, devpriv->i_IobaseAddon + 0); outw(0, devpriv->i_IobaseAddon + 2); outw(APCI3120_ADD_ON_AGCSTS_HIGH, devpriv->i_IobaseAddon + 0); outw(0, devpriv->i_IobaseAddon + 2); /* Disable BUS Master PCI */ outl(0, devpriv->i_IobaseAmcc + AMCC_OP_REG_MCSR); /* outl(inl(devpriv->i_IobaseAmcc+AMCC_OP_REG_INTCSR)&(~AINT_WRITE_COMPL), * devpriv->i_IobaseAmcc+AMCC_OP_REG_INTCSR); stop amcc irqs */ /* outl(inl(devpriv->i_IobaseAmcc+AMCC_OP_REG_MCSR)&(~EN_A2P_TRANSFERS), * devpriv->i_IobaseAmcc+AMCC_OP_REG_MCSR); stop DMA */ /* Disable ext trigger */ i_APCI3120_ExttrigDisable(dev); 
/*
 * Tail of the preceding stop/cancel routine: stop both conversion timers,
 * mask every interrupt source, drain the FIFO, and reset the driver's
 * software acquisition state before the final hardware reset.
 */
	devpriv->us_OutputRegister = 0;	/* stop counters */
	outw(devpriv->us_OutputRegister & APCI3120_DISABLE_TIMER0 &
	     APCI3120_DISABLE_TIMER1, dev->iobase + APCI3120_WR_ADDRESS);
	outw(APCI3120_DISABLE_ALL_TIMER, dev->iobase + APCI3120_WR_ADDRESS);

	/* Disable every interrupt source on the board */
	outb(APCI3120_DISABLE_ALL_INTERRUPT,
	     dev->iobase + APCI3120_WRITE_MODE_SELECT);

	/* Flush the FIFO and clear the A/D status register */
	inb(dev->iobase + APCI3120_RESET_FIFO);
	inw(dev->iobase + APCI3120_RD_STATUS);

	/* Reset software bookkeeping for the (now stopped) acquisition */
	devpriv->ui_AiActualScan = 0;
	devpriv->ui_AiActualScanPosition = 0;
	s->async->cur_chan = 0;
	devpriv->ui_AiBufferPtr = 0;
	devpriv->b_AiContinuous = 0;
	devpriv->ui_DmaActualBuffer = 0;

	devpriv->b_AiCyclicAcquisition = APCI3120_DISABLE;
	devpriv->b_InterruptMode = APCI3120_EOC_MODE;
	devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
	i_APCI3120_Reset(dev);
	return 0;
}

/*
 * i_APCI3120_CommandTestAnalogInput() - comedi "cmdtest" for cyclic AI
 * @dev: comedi device
 * @s:   AI subdevice
 * @cmd: command to validate (fields may be clamped/corrected in place)
 *
 * Standard comedi multi-step command validation.  Returns 0 when the
 * command is acceptable, otherwise the number (1..4) of the first step
 * that had to correct something.
 */
int i_APCI3120_CommandTestAnalogInput(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_cmd *cmd)
{
	int err = 0;
	int tmp;		/* divisor1,divisor2; */

	/* Step 1: make sure trigger sources are trivially valid */
	tmp = cmd->start_src;
	cmd->start_src &= TRIG_NOW | TRIG_EXT;
	if (!cmd->start_src || tmp != cmd->start_src)
		err++;

	tmp = cmd->scan_begin_src;
	cmd->scan_begin_src &= TRIG_TIMER | TRIG_FOLLOW;
	if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
		err++;

	tmp = cmd->convert_src;
	cmd->convert_src &= TRIG_TIMER;
	if (!cmd->convert_src || tmp != cmd->convert_src)
		err++;

	tmp = cmd->scan_end_src;
	cmd->scan_end_src &= TRIG_COUNT;
	if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
		err++;

	tmp = cmd->stop_src;
	cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
	if (!cmd->stop_src || tmp != cmd->stop_src)
		err++;

	if (err)
		return 1;

	/* Step 2: make sure trigger sources are unique and compatible */
	if (cmd->start_src != TRIG_NOW && cmd->start_src != TRIG_EXT)
		err++;
	if (cmd->scan_begin_src != TRIG_TIMER &&
	    cmd->scan_begin_src != TRIG_FOLLOW)
		err++;
	if (cmd->convert_src != TRIG_TIMER)
		err++;
	if (cmd->scan_end_src != TRIG_COUNT) {
		cmd->scan_end_src = TRIG_COUNT;
		err++;
	}
	if (cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_COUNT)
		err++;

	if (err)
		return 2;

	/* Step 3: make sure arguments are trivially compatible */
	if (cmd->start_arg != 0) {
		cmd->start_arg = 0;
		err++;
	}

	if (cmd->scan_begin_src == TRIG_TIMER) {
		/* Clamp the scan (delay) period to the board minimum */
		if (cmd->scan_begin_arg <
		    devpriv->s_EeParameters.ui_MinDelaytimeNs) {
			cmd->scan_begin_arg =
				devpriv->s_EeParameters.ui_MinDelaytimeNs;
			err++;
		}
	}

	if (cmd->convert_src == TRIG_TIMER) {
		/* Clamp the per-conversion period to the board minimum */
		if (cmd->scan_begin_src == TRIG_TIMER) {
			/*
			 * NOTE(review): a convert_arg of 0 is accepted here
			 * but clamped in the else-branch below -- presumably
			 * intentional for scan-timer-driven mode; confirm.
			 */
			if ((cmd->convert_arg) &&
			    (cmd->convert_arg <
			     devpriv->s_EeParameters.ui_MinAcquisitiontimeNs)) {
				cmd->convert_arg =
					devpriv->s_EeParameters.
					ui_MinAcquisitiontimeNs;
				err++;
			}
		} else {
			if (cmd->convert_arg <
			    devpriv->s_EeParameters.ui_MinAcquisitiontimeNs) {
				cmd->convert_arg =
					devpriv->s_EeParameters.
					ui_MinAcquisitiontimeNs;
				err++;
			}
		}
	}

	if (!cmd->chanlist_len) {
		cmd->chanlist_len = 1;
		err++;
	}
	if (cmd->chanlist_len > this_board->i_AiChannelList) {
		cmd->chanlist_len = this_board->i_AiChannelList;
		err++;
	}

	if (cmd->stop_src == TRIG_COUNT) {
		if (!cmd->stop_arg) {
			cmd->stop_arg = 1;
			err++;
		}
	} else {		/* TRIG_NONE */
		if (cmd->stop_arg != 0) {
			cmd->stop_arg = 0;
			err++;
		}
	}

	if (err)
		return 3;

	/* Step 4: the scan period must fit all conversions of one scan */
	if (cmd->convert_src == TRIG_TIMER) {
		if (cmd->scan_begin_src == TRIG_TIMER &&
		    cmd->scan_begin_arg <
		    cmd->convert_arg * cmd->scan_end_arg) {
			cmd->scan_begin_arg =
				cmd->convert_arg * cmd->scan_end_arg;
			err++;
		}
	}

	if (err)
		return 4;

	return 0;
}

/*
 * i_APCI3120_CommandAnalogInput() - start an asynchronous analog-input
 * acquisition (mode 1 or mode 2).
 * (end of function banner)
 */
/*
 * i_APCI3120_CommandAnalogInput() - comedi "do_cmd" for cyclic AI
 * @dev: comedi device
 * @s:   AI subdevice whose async command was validated by cmdtest
 *
 * Copies the validated command into the driver's private state, decides
 * between mode 1 (convert timer only, scan_begin_src == TRIG_FOLLOW) and
 * mode 2 (scan timer + convert timer), and hands off to
 * i_APCI3120_CyclicAnalogInput().  Returns its result, or -1 when the
 * source combination matches neither mode.
 */
int i_APCI3120_CommandAnalogInput(struct comedi_device *dev,
				  struct comedi_subdevice *s)
{
	struct comedi_cmd *cmd = &s->async->cmd;

	/* Load private structure from the cmd structure inputs */
	devpriv->ui_AiFlags = cmd->flags;
	devpriv->ui_AiNbrofChannels = cmd->chanlist_len;
	devpriv->ui_AiScanLength = cmd->scan_end_arg;
	devpriv->pui_AiChannelList = cmd->chanlist;
	devpriv->AiData = s->async->prealloc_buf;
	devpriv->ui_AiDataLength = s->async->prealloc_bufsz;

	if (cmd->stop_src == TRIG_COUNT)
		devpriv->ui_AiNbrofScans = cmd->stop_arg;
	else
		devpriv->ui_AiNbrofScans = 0;

	devpriv->ui_AiTimer0 = 0;
	devpriv->ui_AiTimer1 = 0;

	/* 0 (TRIG_NONE) means a never-ending acquisition, ended by cancel */
	if ((devpriv->ui_AiNbrofScans == 0) || (devpriv->ui_AiNbrofScans == -1))
		devpriv->b_AiContinuous = 1;

	if (cmd->start_src == TRIG_EXT)
		devpriv->b_ExttrigEnable = APCI3120_ENABLE;
	else
		devpriv->b_ExttrigEnable = APCI3120_DISABLE;

	if (cmd->scan_begin_src == TRIG_FOLLOW) {
		if (cmd->convert_src == TRIG_TIMER) {
			/* mode 1: single conversion timer, in nanoseconds */
			devpriv->ui_AiTimer0 = cmd->convert_arg;
			return i_APCI3120_CyclicAnalogInput(1, dev, s);
		}
	}
	if ((cmd->scan_begin_src == TRIG_TIMER) &&
	    (cmd->convert_src == TRIG_TIMER)) {
		/* mode 2: scan timer plus conversion timer */
		devpriv->ui_AiTimer1 = cmd->scan_begin_arg;
		devpriv->ui_AiTimer0 = cmd->convert_arg;
		return i_APCI3120_CyclicAnalogInput(2, dev, s);
	}
	return -1;
}

/*
 * i_APCI3120_CyclicAnalogInput() - program and start a cyclic acquisition
 * @mode: 1 = convert timer only, 2 = scan timer + convert timer
 * @dev:  comedi device
 * @s:    AI subdevice
 *
 * Programs the sequence RAM and the board timers, then starts either an
 * EOS-interrupt-driven acquisition or a DMA-driven one, depending on
 * devpriv->us_UseDma.
 */
int i_APCI3120_CyclicAnalogInput(int mode, struct comedi_device *dev,
				 struct comedi_subdevice *s)
{
	unsigned char b_Tmp;
	unsigned int ui_Tmp, ui_DelayTiming = 0, ui_TimerValue1 = 0,
		dmalen0 = 0, dmalen1 = 0, ui_TimerValue2 = 0, ui_TimerValue0,
		ui_ConvertTiming;
	unsigned short us_TmpValue;

	/* Reset the FIFO */
	inb(dev->iobase + APCI3120_RESET_FIFO);

	/* Acquisition initialized */
	devpriv->b_AiCyclicAcquisition = APCI3120_ENABLE;

	/* clear software registers */
	devpriv->b_TimerSelectMode = 0;
devpriv->us_OutputRegister = 0; devpriv->b_ModeSelectRegister = 0; /* devpriv->b_DigitalOutputRegister=0; */ /* COMMENT JK 07.05.04: Followings calls are in i_APCI3120_StartAnalogInputAcquisition */ /****************************/ /* Clear Timer Write TC int */ /****************************/ outl(APCI3120_CLEAR_WRITE_TC_INT, devpriv->i_IobaseAmcc + APCI3120_AMCC_OP_REG_INTCSR); /************************************/ /* Clears the timer status register */ /************************************/ /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux driver */ /* inw(dev->iobase+APCI3120_TIMER_STATUS_REGISTER); */ /* inb(dev->iobase + APCI3120_TIMER_STATUS_REGISTER); */ /* END JK 07.05.04: Comparison between WIN32 and Linux driver */ /**************************/ /* Disables All Timer */ /* Sets PR and PA to 0 */ /**************************/ devpriv->us_OutputRegister = devpriv->us_OutputRegister & APCI3120_DISABLE_TIMER0 & APCI3120_DISABLE_TIMER1 & APCI3120_CLEAR_PA_PR; outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS); /*******************/ /* Resets the FIFO */ /*******************/ /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux driver */ inb(devpriv->iobase + APCI3120_RESET_FIFO); /* END JK 07.05.04: Comparison between WIN32 and Linux driver */ devpriv->ui_AiActualScan = 0; devpriv->ui_AiActualScanPosition = 0; s->async->cur_chan = 0; devpriv->ui_AiBufferPtr = 0; devpriv->ui_DmaActualBuffer = 0; /* value for timer2 minus -2 has to be done .....dunno y?? 
*/ ui_TimerValue2 = devpriv->ui_AiNbrofScans - 2; ui_ConvertTiming = devpriv->ui_AiTimer0; if (mode == 2) ui_DelayTiming = devpriv->ui_AiTimer1; /**********************************/ /* Initializes the sequence array */ /**********************************/ if (!i_APCI3120_SetupChannelList(dev, s, devpriv->ui_AiNbrofChannels, devpriv->pui_AiChannelList, 0)) return -EINVAL; us_TmpValue = (unsigned short) inw(dev->iobase + APCI3120_RD_STATUS); /*** EL241003 : add this section in comment because floats must not be used if((us_TmpValue & 0x00B0)==0x00B0) { f_ConvertValue=(((float)ui_ConvertTiming * 0.002) - 2); ui_TimerValue0=(unsigned int)f_ConvertValue; if (mode==2) { f_DelayValue = (((float)ui_DelayTiming * 0.00002) - 2); ui_TimerValue1 = (unsigned int) f_DelayValue; } } else { f_ConvertValue=(((float)ui_ConvertTiming * 0.0012926) - 1); ui_TimerValue0=(unsigned int)f_ConvertValue; if (mode == 2) { f_DelayValue = (((float)ui_DelayTiming * 0.000012926) - 1); ui_TimerValue1 = (unsigned int) f_DelayValue; } } ***********************************************************************************************/ /*** EL241003 Begin : add this section to replace floats calculation by integer calculations **/ /* EL250804: Testing if board APCI3120 have the new Quartz or if it is an APCI3001 */ if ((us_TmpValue & 0x00B0) == 0x00B0 || !strcmp(this_board->pc_DriverName, "apci3001")) { ui_TimerValue0 = ui_ConvertTiming * 2 - 2000; ui_TimerValue0 = ui_TimerValue0 / 1000; if (mode == 2) { ui_DelayTiming = ui_DelayTiming / 1000; ui_TimerValue1 = ui_DelayTiming * 2 - 200; ui_TimerValue1 = ui_TimerValue1 / 100; } } else { ui_ConvertTiming = ui_ConvertTiming / 1000; ui_TimerValue0 = ui_ConvertTiming * 12926 - 10000; ui_TimerValue0 = ui_TimerValue0 / 10000; if (mode == 2) { ui_DelayTiming = ui_DelayTiming / 1000; ui_TimerValue1 = ui_DelayTiming * 12926 - 1; ui_TimerValue1 = ui_TimerValue1 / 1000000; } } /*** EL241003 End 
******************************************************************************/ if (devpriv->b_ExttrigEnable == APCI3120_ENABLE) i_APCI3120_ExttrigEnable(dev); /* activate EXT trigger */ switch (mode) { case 1: /* init timer0 in mode 2 */ devpriv->b_TimerSelectMode = (devpriv-> b_TimerSelectMode & 0xFC) | APCI3120_TIMER_0_MODE_2; outb(devpriv->b_TimerSelectMode, dev->iobase + APCI3120_TIMER_CRT1); /* Select Timer 0 */ b_Tmp = ((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_0_WORD; outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0); /* Set the conversion time */ outw(((unsigned short) ui_TimerValue0), dev->iobase + APCI3120_TIMER_VALUE); break; case 2: /* init timer1 in mode 2 */ devpriv->b_TimerSelectMode = (devpriv-> b_TimerSelectMode & 0xF3) | APCI3120_TIMER_1_MODE_2; outb(devpriv->b_TimerSelectMode, dev->iobase + APCI3120_TIMER_CRT1); /* Select Timer 1 */ b_Tmp = ((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_1_WORD; outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0); /* Set the conversion time */ outw(((unsigned short) ui_TimerValue1), dev->iobase + APCI3120_TIMER_VALUE); /* init timer0 in mode 2 */ devpriv->b_TimerSelectMode = (devpriv-> b_TimerSelectMode & 0xFC) | APCI3120_TIMER_0_MODE_2; outb(devpriv->b_TimerSelectMode, dev->iobase + APCI3120_TIMER_CRT1); /* Select Timer 0 */ b_Tmp = ((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_0_WORD; outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0); /* Set the conversion time */ outw(((unsigned short) ui_TimerValue0), dev->iobase + APCI3120_TIMER_VALUE); break; } /* ##########common for all modes################# */ /***********************/ /* Clears the SCAN bit */ /***********************/ /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux driver */ /* devpriv->b_ModeSelectRegister=devpriv->b_ModeSelectRegister | APCI3120_DISABLE_SCAN; */ devpriv->b_ModeSelectRegister = devpriv->b_ModeSelectRegister & APCI3120_DISABLE_SCAN; /* END JK 07.05.04: Comparison 
between WIN32 and Linux driver */ outb(devpriv->b_ModeSelectRegister, dev->iobase + APCI3120_WRITE_MODE_SELECT); /* If DMA is disabled */ if (devpriv->us_UseDma == APCI3120_DISABLE) { /* disable EOC and enable EOS */ devpriv->b_InterruptMode = APCI3120_EOS_MODE; devpriv->b_EocEosInterrupt = APCI3120_ENABLE; devpriv->b_ModeSelectRegister = (devpriv-> b_ModeSelectRegister & APCI3120_DISABLE_EOC_INT) | APCI3120_ENABLE_EOS_INT; outb(devpriv->b_ModeSelectRegister, dev->iobase + APCI3120_WRITE_MODE_SELECT); if (!devpriv->b_AiContinuous) { /* * configure Timer2 For counting EOS Reset gate 2 of Timer 2 to * disable it (Set Bit D14 to 0) */ devpriv->us_OutputRegister = devpriv-> us_OutputRegister & APCI3120_DISABLE_TIMER2; outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS); /* DISABLE TIMER intERRUPT */ devpriv->b_ModeSelectRegister = devpriv-> b_ModeSelectRegister & APCI3120_DISABLE_TIMER_INT & 0xEF; outb(devpriv->b_ModeSelectRegister, dev->iobase + APCI3120_WRITE_MODE_SELECT); /* (1) Init timer 2 in mode 0 and write timer value */ devpriv->b_TimerSelectMode = (devpriv-> b_TimerSelectMode & 0x0F) | APCI3120_TIMER_2_MODE_0; outb(devpriv->b_TimerSelectMode, dev->iobase + APCI3120_TIMER_CRT1); /* Writing LOW unsigned short */ b_Tmp = ((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_2_LOW_WORD; outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0); outw(LOWORD(ui_TimerValue2), dev->iobase + APCI3120_TIMER_VALUE); /* Writing HIGH unsigned short */ b_Tmp = ((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_2_HIGH_WORD; outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0); outw(HIWORD(ui_TimerValue2), dev->iobase + APCI3120_TIMER_VALUE); /* (2) Reset FC_TIMER BIT Clearing timer status register */ inb(dev->iobase + APCI3120_TIMER_STATUS_REGISTER); /* enable timer counter and disable watch dog */ devpriv->b_ModeSelectRegister = (devpriv-> b_ModeSelectRegister | APCI3120_ENABLE_TIMER_COUNTER) & APCI3120_DISABLE_WATCHDOG; /* select EOS clock 
input for timer 2 */ devpriv->b_ModeSelectRegister = devpriv-> b_ModeSelectRegister | APCI3120_TIMER2_SELECT_EOS; /* Enable timer2 interrupt */ devpriv->b_ModeSelectRegister = devpriv-> b_ModeSelectRegister | APCI3120_ENABLE_TIMER_INT; outb(devpriv->b_ModeSelectRegister, dev->iobase + APCI3120_WRITE_MODE_SELECT); devpriv->b_Timer2Mode = APCI3120_COUNTER; devpriv->b_Timer2Interrupt = APCI3120_ENABLE; } } else { /* If DMA Enabled */ /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux driver */ /* inw(dev->iobase+0); reset EOC bit */ /* END JK 07.05.04: Comparison between WIN32 and Linux driver */ devpriv->b_InterruptMode = APCI3120_DMA_MODE; /************************************/ /* Disables the EOC, EOS interrupt */ /************************************/ devpriv->b_ModeSelectRegister = devpriv->b_ModeSelectRegister & APCI3120_DISABLE_EOC_INT & APCI3120_DISABLE_EOS_INT; outb(devpriv->b_ModeSelectRegister, dev->iobase + APCI3120_WRITE_MODE_SELECT); dmalen0 = devpriv->ui_DmaBufferSize[0]; dmalen1 = devpriv->ui_DmaBufferSize[1]; if (!devpriv->b_AiContinuous) { if (dmalen0 > (devpriv->ui_AiNbrofScans * devpriv->ui_AiScanLength * 2)) { /* must we fill full first buffer? */ dmalen0 = devpriv->ui_AiNbrofScans * devpriv->ui_AiScanLength * 2; } else if (dmalen1 > (devpriv->ui_AiNbrofScans * devpriv->ui_AiScanLength * 2 - dmalen0)) /* and must we fill full second buffer when first is once filled? */ dmalen1 = devpriv->ui_AiNbrofScans * devpriv->ui_AiScanLength * 2 - dmalen0; } if (devpriv->ui_AiFlags & TRIG_WAKE_EOS) { /* don't we want wake up every scan? */ if (dmalen0 > (devpriv->ui_AiScanLength * 2)) { dmalen0 = devpriv->ui_AiScanLength * 2; if (devpriv->ui_AiScanLength & 1) dmalen0 += 2; } if (dmalen1 > (devpriv->ui_AiScanLength * 2)) { dmalen1 = devpriv->ui_AiScanLength * 2; if (devpriv->ui_AiScanLength & 1) dmalen1 -= 2; if (dmalen1 < 4) dmalen1 = 4; } } else { /* isn't output buff smaller that our DMA buff? 
*/ if (dmalen0 > (devpriv->ui_AiDataLength)) dmalen0 = devpriv->ui_AiDataLength; if (dmalen1 > (devpriv->ui_AiDataLength)) dmalen1 = devpriv->ui_AiDataLength; } devpriv->ui_DmaBufferUsesize[0] = dmalen0; devpriv->ui_DmaBufferUsesize[1] = dmalen1; /* Initialize DMA */ /* * Set Transfer count enable bit and A2P_fifo reset bit in AGCSTS * register 1 */ ui_Tmp = AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO; outl(ui_Tmp, devpriv->i_IobaseAmcc + AMCC_OP_REG_AGCSTS); /* changed since 16 bit interface for add on */ /*********************/ /* ENABLE BUS MASTER */ /*********************/ outw(APCI3120_ADD_ON_AGCSTS_LOW, devpriv->i_IobaseAddon + 0); outw(APCI3120_ENABLE_TRANSFER_ADD_ON_LOW, devpriv->i_IobaseAddon + 2); outw(APCI3120_ADD_ON_AGCSTS_HIGH, devpriv->i_IobaseAddon + 0); outw(APCI3120_ENABLE_TRANSFER_ADD_ON_HIGH, devpriv->i_IobaseAddon + 2); /* * TO VERIFIED BEGIN JK 07.05.04: Comparison between WIN32 and Linux * driver */ outw(0x1000, devpriv->i_IobaseAddon + 2); /* END JK 07.05.04: Comparison between WIN32 and Linux driver */ /* 2 No change */ /* A2P FIFO MANAGEMENT */ /* A2P fifo reset & transfer control enable */ /***********************/ /* A2P FIFO MANAGEMENT */ /***********************/ outl(APCI3120_A2P_FIFO_MANAGEMENT, devpriv->i_IobaseAmcc + APCI3120_AMCC_OP_MCSR); /* * 3 * beginning address of dma buf The 32 bit address of dma buffer * is converted into two 16 bit addresses Can done by using _attach * and put into into an array array used may be for differnet pages */ /* DMA Start Address Low */ outw(APCI3120_ADD_ON_MWAR_LOW, devpriv->i_IobaseAddon + 0); outw((devpriv->ul_DmaBufferHw[0] & 0xFFFF), devpriv->i_IobaseAddon + 2); /*************************/ /* DMA Start Address High */ /*************************/ outw(APCI3120_ADD_ON_MWAR_HIGH, devpriv->i_IobaseAddon + 0); outw((devpriv->ul_DmaBufferHw[0] / 65536), devpriv->i_IobaseAddon + 2); /* * 4 * amount of bytes to be transferred set transfer count used ADDON * MWTC register commented testing * 
outl(devpriv->ui_DmaBufferUsesize[0], * devpriv->i_IobaseAddon+AMCC_OP_REG_AMWTC); */ /**************************/ /* Nbr of acquisition LOW */ /**************************/ outw(APCI3120_ADD_ON_MWTC_LOW, devpriv->i_IobaseAddon + 0); outw((devpriv->ui_DmaBufferUsesize[0] & 0xFFFF), devpriv->i_IobaseAddon + 2); /***************************/ /* Nbr of acquisition HIGH */ /***************************/ outw(APCI3120_ADD_ON_MWTC_HIGH, devpriv->i_IobaseAddon + 0); outw((devpriv->ui_DmaBufferUsesize[0] / 65536), devpriv->i_IobaseAddon + 2); /* * 5 * To configure A2P FIFO testing outl( * FIFO_ADVANCE_ON_BYTE_2,devpriv->i_IobaseAmcc+AMCC_OP_REG_INTCSR); */ /******************/ /* A2P FIFO RESET */ /******************/ /* * TO VERIFY BEGIN JK 07.05.04: Comparison between WIN32 and Linux * driver */ outl(0x04000000UL, devpriv->i_IobaseAmcc + AMCC_OP_REG_MCSR); /* END JK 07.05.04: Comparison between WIN32 and Linux driver */ /* * 6 * ENABLE A2P FIFO WRITE AND ENABLE AMWEN AMWEN_ENABLE | * A2P_FIFO_WRITE_ENABLE (0x01|0x02)=0x03 */ /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux driver */ /* outw(3,devpriv->i_IobaseAddon + 4); */ /* END JK 07.05.04: Comparison between WIN32 and Linux driver */ /* * 7 * initialise end of dma interrupt AINT_WRITE_COMPL = * ENABLE_WRITE_TC_INT(ADDI) */ /***************************************************/ /* A2P FIFO CONFIGURATE, END OF DMA intERRUPT INIT */ /***************************************************/ outl((APCI3120_FIFO_ADVANCE_ON_BYTE_2 | APCI3120_ENABLE_WRITE_TC_INT), devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR); /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux driver */ /******************************************/ /* ENABLE A2P FIFO WRITE AND ENABLE AMWEN */ /******************************************/ outw(3, devpriv->i_IobaseAddon + 4); /* END JK 07.05.04: Comparison between WIN32 and Linux driver */ /******************/ /* A2P FIFO RESET */ /******************/ /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux 
 * (end of comment)
 */
		outl(0x04000000UL,
		     devpriv->i_IobaseAmcc + APCI3120_AMCC_OP_MCSR);
	}

	/*
	 * Without DMA and with a finite scan count, timer 2 counts EOS
	 * events: open its gate so the stop condition is armed.
	 */
	if ((devpriv->us_UseDma == APCI3120_DISABLE) &&
	    !devpriv->b_AiContinuous) {
		/* set gate 2 to start conversion */
		devpriv->us_OutputRegister =
			devpriv->us_OutputRegister | APCI3120_ENABLE_TIMER2;
		outw(devpriv->us_OutputRegister,
		     dev->iobase + APCI3120_WR_ADDRESS);
	}

	/* Open the timer gate(s) that actually start the conversions */
	switch (mode) {
	case 1:
		/* set gate 0 to start conversion */
		devpriv->us_OutputRegister =
			devpriv->us_OutputRegister | APCI3120_ENABLE_TIMER0;
		outw(devpriv->us_OutputRegister,
		     dev->iobase + APCI3120_WR_ADDRESS);
		break;
	case 2:
		/* set gate 0 and gate 1 */
		devpriv->us_OutputRegister =
			devpriv->us_OutputRegister | APCI3120_ENABLE_TIMER1;
		devpriv->us_OutputRegister =
			devpriv->us_OutputRegister | APCI3120_ENABLE_TIMER0;
		outw(devpriv->us_OutputRegister,
		     dev->iobase + APCI3120_WR_ADDRESS);
		break;
	}

	return 0;
}

/*
 * i_APCI3120_Reset() - hardware reset of the board
 * @dev: comedi device
 *
 * Clears the driver's software state, disables all interrupts, counters
 * and the external trigger, drives every analog output channel to 0 V
 * (raw value 8191 in the default bipolar range), flushes the FIFO/status
 * registers, and walks the 16 sequence-RAM locations to reset them.
 * Always returns 0.
 */
int i_APCI3120_Reset(struct comedi_device *dev)
{
	unsigned int i;
	unsigned short us_TmpValue;

	devpriv->b_AiCyclicAcquisition = APCI3120_DISABLE;
	devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
	devpriv->b_InterruptMode = APCI3120_EOC_MODE;
	devpriv->ui_EocEosConversionTime = 0;	/* eoc/eos conv time to 0 */
	devpriv->b_OutputMemoryStatus = 0;

	/* variables used in the timer subdevice */
	devpriv->b_Timer2Mode = 0;
	devpriv->b_Timer2Interrupt = 0;
	devpriv->b_ExttrigEnable = 0;	/* Disable ext trigger */

	/* Disable all interrupts and the analog-output watchdog */
	devpriv->b_ModeSelectRegister = 0;
	outb(devpriv->b_ModeSelectRegister,
	     dev->iobase + APCI3120_WRITE_MODE_SELECT);

	/* Disable all counters, ext trigger; clear PA and PR */
	devpriv->us_OutputRegister = 0;
	outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS);

	/*
	 * Drive all analog output channels to 0 V: 8191 is the raw value
	 * for zero volts in the (default) bipolar mode.
	 */
	outw(8191 | APCI3120_ANALOG_OP_CHANNEL_1,
	     dev->iobase + APCI3120_ANALOG_OUTPUT_1);	/* channel 1 */
	outw(8191 | APCI3120_ANALOG_OP_CHANNEL_2,
	     dev->iobase + APCI3120_ANALOG_OUTPUT_1);	/* channel 2 */
	outw(8191 | APCI3120_ANALOG_OP_CHANNEL_3,
	     dev->iobase + APCI3120_ANALOG_OUTPUT_1);	/* channel 3 */
	outw(8191 | APCI3120_ANALOG_OP_CHANNEL_4,
	     dev->iobase + APCI3120_ANALOG_OUTPUT_1);	/* channel 4 */
	outw(8191 | APCI3120_ANALOG_OP_CHANNEL_5,
	     dev->iobase + APCI3120_ANALOG_OUTPUT_2);	/* channel 5 */
	outw(8191 | APCI3120_ANALOG_OP_CHANNEL_6,
	     dev->iobase + APCI3120_ANALOG_OUTPUT_2);	/* channel 6 */
	outw(8191 | APCI3120_ANALOG_OP_CHANNEL_7,
	     dev->iobase + APCI3120_ANALOG_OUTPUT_2);	/* channel 7 */
	outw(8191 | APCI3120_ANALOG_OP_CHANNEL_8,
	     dev->iobase + APCI3120_ANALOG_OUTPUT_2);	/* channel 8 */

	udelay(10);

	inw(dev->iobase + 0);			/* make a dummy read */
	inb(dev->iobase + APCI3120_RESET_FIFO);	/* flush FIFO */
	inw(dev->iobase + APCI3120_RD_STATUS);	/* flush A/D status reg */

	/* Reset the sequence RAM (16 locations, selected by bits 8..11) */
	for (i = 0; i < 16; i++) {
		us_TmpValue = i << 8;	/* select the location */
		outw(us_TmpValue, dev->iobase + APCI3120_SEQ_RAM_ADDRESS);
	}

	return 0;
}

/*
 * i_APCI3120_SetupChannelList() - validate/program the AI sequence RAM
 * (end of function banner)
 */
/*
 * i_APCI3120_SetupChannelList() - check a channel list and, unless
 * @check is set, program the board's sequence RAM with channel number,
 * gain and polarity for each entry.
 * @dev:      comedi device
 * @s:        AI subdevice
 * @n_chan:   number of entries in @chanlist
 * @chanlist: packed comedi chanspecs (CR_CHAN/CR_RANGE)
 * @check:    non-zero = only validate, do not touch the hardware
 *
 * Returns 1 on success, 0 when the list is empty.
 */
int i_APCI3120_SetupChannelList(struct comedi_device *dev,
				struct comedi_subdevice *s, int n_chan,
				unsigned int *chanlist, char check)
{
	unsigned int i;		/* , differencial=0, bipolar=0; */
	unsigned int gain;
	unsigned short us_TmpValue;

	/* correct channel and range number check itself comedi/range.c */
	if (n_chan < 1) {
		if (!check)
			comedi_error(dev, "range/channel list is empty!");
		return 0;
	}
	/* All is ok, so we can setup channel/range list */
	if (check)
		return 1;

	/*
	 * Set PA and PR.  NOTE(review): the first assignment (clearing
	 * PA/PR with APCI3120_CLEAR_PA_PR) is immediately overwritten by
	 * the plain '=' on the next line -- looks redundant; confirm.
	 */
	devpriv->us_OutputRegister =
		devpriv->us_OutputRegister & APCI3120_CLEAR_PA_PR;
	devpriv->us_OutputRegister = ((n_chan - 1) & 0xf) << 8;
	outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS);

	/* Store the range/channel list into the sequence RAM */
	for (i = 0; i < n_chan; i++) {
		us_TmpValue = CR_CHAN(chanlist[i]);	/* channel number */

		if (CR_RANGE(chanlist[i]) < APCI3120_BIPOLAR_RANGES)
			us_TmpValue &= ((~APCI3120_UNIPOLAR) & 0xff); /* bipolar */
		else
			us_TmpValue |= APCI3120_UNIPOLAR;	/* unipolar */

		gain = CR_RANGE(chanlist[i]);	/* gain number */
		us_TmpValue |= ((gain & 0x03) << 4); /* G0/G1 bits in RAM */
		us_TmpValue |= i << 8;		/* select the RAM location */
		outw(us_TmpValue, dev->iobase + APCI3120_SEQ_RAM_ADDRESS);

		/*
		 * NOTE(review): raw printk() without a KERN_ level --
		 * prints debug spam on every command setup.
		 */
		printk("\n Gain = %i",
		       (((unsigned char)CR_RANGE(chanlist[i]) & 0x03) << 2));
		printk("\n Channel = %i", CR_CHAN(chanlist[i]));
		printk("\n Polarity = %i", us_TmpValue & APCI3120_UNIPOLAR);
	}
	return 1;		/* we can serve this with scan logic */
}

/*
 * i_APCI3120_ExttrigEnable() - enable the external trigger input
 * @dev: comedi device
 *
 * Sets the external-trigger bit in the cached output register and writes
 * it out.  Always returns 0.
 */
int i_APCI3120_ExttrigEnable(struct comedi_device *dev)
{
	devpriv->us_OutputRegister |= APCI3120_ENABLE_EXT_TRIGGER;
	outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS);
	return 0;
}

/*
 * i_APCI3120_ExttrigDisable() - disable the external trigger input
 * @dev: comedi device
 *
 * Clears the external-trigger bit and writes the cached output register
 * out.  Always returns 0.
 */
int i_APCI3120_ExttrigDisable(struct comedi_device *dev)
{
	devpriv->us_OutputRegister &=
~APCI3120_ENABLE_EXT_TRIGGER;
	outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS);
	return 0;
}

/*
 * INTERRUPT FUNCTIONS
 */

/*
 * v_APCI3120_Interrupt() - interrupt handler for the APCI-3120
 * @irq: interrupt number
 * @d:   the struct comedi_device passed at request_irq() time
 *
 * Reads the board status and the AMCC bridge INTCSR, identifies which
 * source(s) raised the interrupt (EOC, EOS, timer 2, DMA) and dispatches
 * to the matching handling code.  Spurious interrupts are logged and
 * ignored.
 */
void v_APCI3120_Interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	unsigned short int_daq;
	unsigned int int_amcc, ui_Check, i;
	unsigned short us_TmpValue;
	unsigned char b_DummyRead;
	struct comedi_subdevice *s = dev->subdevices + 0;

	/* NOTE(review): ui_Check is written below but never read */
	ui_Check = 1;

	/* get IRQ reasons from the board and the AMCC bridge */
	int_daq = inw(dev->iobase + APCI3120_RD_STATUS) & 0xf000;
	int_amcc = inl(devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR);

	if ((!int_daq) && (!(int_amcc & ANY_S593X_INT))) {
		comedi_error(dev, "IRQ from unknown source");
		return;
	}

	/* shutdown IRQ reasons in AMCC */
	outl(int_amcc | 0x00ff0000, devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR);

	int_daq = (int_daq >> 12) & 0xF;

	if (devpriv->b_ExttrigEnable == APCI3120_ENABLE) {
		/* Disable ext trigger */
		i_APCI3120_ExttrigDisable(dev);
		devpriv->b_ExttrigEnable = APCI3120_DISABLE;
	}
	/* clear the timer 2 interrupt */
	inb(devpriv->i_IobaseAmcc + APCI3120_TIMER_STATUS_REGISTER);

	if (int_amcc & MASTER_ABORT_INT)
		comedi_error(dev, "AMCC IRQ - MASTER DMA ABORT!");
	if (int_amcc & TARGET_ABORT_INT)
		comedi_error(dev, "AMCC IRQ - TARGET DMA ABORT!");

	/* Check if EOC interrupt */
	if (((int_daq & 0x8) == 0) &&
	    (devpriv->b_InterruptMode == APCI3120_EOC_MODE)) {
		if (devpriv->b_EocEosInterrupt == APCI3120_ENABLE) {
			/* Read the AI value and wake the waiting task */
			devpriv->ui_AiReadData[0] =
				(unsigned int) inw(devpriv->iobase + 0);
			devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
			send_sig(SIGIO, devpriv->tsk_Current, 0);
		} else {
			/* Disable EOC Interrupt */
			devpriv->b_ModeSelectRegister =
				devpriv->b_ModeSelectRegister &
				APCI3120_DISABLE_EOC_INT;
			outb(devpriv->b_ModeSelectRegister,
			     devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
		}
	}

	/* Check if EOS interrupt */
	if ((int_daq & 0x2) &&
	    (devpriv->b_InterruptMode == APCI3120_EOS_MODE)) {
		if (devpriv->b_EocEosInterrupt == APCI3120_ENABLE) {
			if (devpriv->b_AiCyclicAcquisition == APCI3120_ENABLE) {
				/* cyclic mode: push scan to comedi buffer */
				ui_Check = 0;
				i_APCI3120_InterruptHandleEos(dev);
				devpriv->ui_AiActualScan++;
				devpriv->b_ModeSelectRegister =
					devpriv->b_ModeSelectRegister |
					APCI3120_ENABLE_EOS_INT;
				outb(devpriv->b_ModeSelectRegister,
				     dev->iobase + APCI3120_WRITE_MODE_SELECT);
			} else {
				/* one-shot scan: copy samples, wake task */
				ui_Check = 0;
				for (i = 0; i < devpriv->ui_AiNbrofChannels;
				     i++) {
					us_TmpValue = inw(devpriv->iobase + 0);
					devpriv->ui_AiReadData[i] =
						(unsigned int) us_TmpValue;
				}
				devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
				devpriv->b_InterruptMode = APCI3120_EOC_MODE;
				send_sig(SIGIO, devpriv->tsk_Current, 0);
			}
		} else {
			/* unexpected EOS: mask it, restore defaults */
			devpriv->b_ModeSelectRegister =
				devpriv->b_ModeSelectRegister &
				APCI3120_DISABLE_EOS_INT;
			outb(devpriv->b_ModeSelectRegister,
			     dev->iobase + APCI3120_WRITE_MODE_SELECT);
			devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
			devpriv->b_InterruptMode = APCI3120_EOC_MODE;
		}
	}

	/* Timer 2 interrupt */
	if (int_daq & 0x1) {
		switch (devpriv->b_Timer2Mode) {
		case APCI3120_COUNTER:
			/* scan counter expired: stop the acquisition */
			devpriv->b_AiCyclicAcquisition = APCI3120_DISABLE;
			devpriv->b_ModeSelectRegister =
				devpriv->b_ModeSelectRegister &
				APCI3120_DISABLE_EOS_INT;
			outb(devpriv->b_ModeSelectRegister,
			     dev->iobase + APCI3120_WRITE_MODE_SELECT);
			/* stop timer 2 */
			devpriv->us_OutputRegister =
				devpriv->us_OutputRegister &
				APCI3120_DISABLE_ALL_TIMER;
			outw(devpriv->us_OutputRegister,
			     dev->iobase + APCI3120_WR_ADDRESS);
			/* stop timer 0 and timer 1 */
			i_APCI3120_StopCyclicAcquisition(dev, s);
			devpriv->b_AiCyclicAcquisition = APCI3120_DISABLE;
			s->async->events |= COMEDI_CB_EOA;
			comedi_event(dev, s);
			break;
		case APCI3120_TIMER:
			/* signal user space */
			send_sig(SIGIO, devpriv->tsk_Current, 0);
			break;
		case APCI3120_WATCHDOG:
			/* signal user space */
			send_sig(SIGIO, devpriv->tsk_Current, 0);
			break;
		default:
			/* disable the timer interrupt */
			devpriv->b_ModeSelectRegister =
				devpriv->b_ModeSelectRegister &
				APCI3120_DISABLE_TIMER_INT;
			outb(devpriv->b_ModeSelectRegister,
			     dev->iobase + APCI3120_WRITE_MODE_SELECT);
		}
		b_DummyRead = inb(dev->iobase + APCI3120_TIMER_STATUS_REGISTER);
	}

	/* DMA transfer-complete interrupt */
	if ((int_daq & 0x4) &&
	    (devpriv->b_InterruptMode == APCI3120_DMA_MODE)) {
		if (devpriv->b_AiCyclicAcquisition == APCI3120_ENABLE) {
			/* Clear write-TC int and the timer status, then
			 * move the DMA data into the comedi buffer */
			outl(APCI3120_CLEAR_WRITE_TC_INT,
			     devpriv->i_IobaseAmcc +
			     APCI3120_AMCC_OP_REG_INTCSR);
			inw(dev->iobase + APCI3120_TIMER_STATUS_REGISTER);
			v_APCI3120_InterruptDma(irq, d);
		} else {
			/* Stop the timers */
			outw(devpriv->us_OutputRegister &
			     APCI3120_DISABLE_TIMER0 &
			     APCI3120_DISABLE_TIMER1,
			     dev->iobase + APCI3120_WR_ADDRESS);
		}
	}
	return;
}

/*
 * i_APCI3120_InterruptHandleEos() - copy one scan from the FIFO to the
 * comedi buffer on an end-of-scan interrupt.
+----------------------------------------------------------------------------+
| Task : This function handles the EOS interrupt.                             |
|        It copies the acquired data (from the board FIFO) into the           |
|        comedi buffer.                                                       |
+----------------------------------------------------------------------------+
| Input Parameters : struct comedi_device *dev                                |
+----------------------------------------------------------------------------+
| Return Value : 0                                                            |
+----------------------------------------------------------------------------+
*/
/* Drain one scan (one sample per configured AI channel) from the board
 * FIFO into the comedi async buffer and signal end-of-scan. */
int i_APCI3120_InterruptHandleEos(struct comedi_device *dev)
{
	struct comedi_subdevice *s = dev->subdevices + 0;
	int nchan = devpriv->ui_AiNbrofChannels;
	int ok = 1;		/* stays 1 while every comedi_buf_put() succeeds */
	int chan;

	s->async->events = 0;
	/* one 16-bit sample per channel, all read from the FIFO data port */
	for (chan = 0; chan < nchan; chan++)
		ok &= comedi_buf_put(s->async, inw(dev->iobase + 0));

	s->async->events |= COMEDI_CB_EOS;
	if (!ok)
		s->async->events |= COMEDI_CB_OVERFLOW;	/* buffer full: samples lost */
	comedi_event(dev, s);
	return 0;
}

/*
+----------------------------------------------------------------------------+
| Function name : void v_APCI3120_InterruptDma(int irq, void *d)              |
+----------------------------------------------------------------------------+
| Task : This is a handler for the DMA interrupt.                             |
|        This function copies the data to the comedi buffer.                  |
|        For continuous DMA it reinitializes the DMA operation.               |
|        For single mode DMA it stops the acquisition.
|                                                                             |
+----------------------------------------------------------------------------+
| Input Parameters : int irq, void *d                                         |
+----------------------------------------------------------------------------+
| Return Value : void                                                         |
+----------------------------------------------------------------------------+
*/
/* DMA completion handler: move the transferred samples into the comedi
 * buffer, then either re-arm the DMA (cyclic mode) or finish the run. */
void v_APCI3120_InterruptDma(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->subdevices + 0;
	unsigned int next_buf, n_samples;
	unsigned long lo, hi, val;
	unsigned int agcsts;

	/* bytes actually transferred = programmed size - remaining count (MWTC) */
	n_samples = devpriv->ui_DmaBufferUsesize[devpriv->ui_DmaActualBuffer] -
		inl(devpriv->i_IobaseAmcc + AMCC_OP_REG_MWTC);

	if (n_samples < devpriv->ui_DmaBufferUsesize[devpriv->ui_DmaActualBuffer])
		comedi_error(dev, "Interrupted DMA transfer!");

	if (n_samples & 1) {
		/* 16-bit samples: an odd byte count means the ring is corrupt */
		comedi_error(dev, "Odd count of bytes in DMA ring!");
		i_APCI3120_StopCyclicAcquisition(dev, s);
		devpriv->b_AiCyclicAcquisition = APCI3120_DISABLE;
		return;
	}
	n_samples = n_samples >> 1;	/* number of received samples */

	if (devpriv->b_DmaDoubleBuffer) {
		/* double buffering: program the other buffer before draining this one */
		next_buf = 1 - devpriv->ui_DmaActualBuffer;

		agcsts = AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO;
		outl(agcsts, devpriv->i_IobaseAddon + AMCC_OP_REG_AGCSTS);

		/* changed since 16 bit interface for add on */
		outw(APCI3120_ADD_ON_AGCSTS_LOW, devpriv->i_IobaseAddon + 0);
		outw(APCI3120_ENABLE_TRANSFER_ADD_ON_LOW, devpriv->i_IobaseAddon + 2);
		outw(APCI3120_ADD_ON_AGCSTS_HIGH, devpriv->i_IobaseAddon + 0);
		outw(APCI3120_ENABLE_TRANSFER_ADD_ON_HIGH, devpriv->i_IobaseAddon + 2);

		/* 0x1000 is out putted in windows driver */
		val = devpriv->ul_DmaBufferHw[next_buf];
		lo = val & 0xffff;
		val = devpriv->ul_DmaBufferHw[next_buf];
		hi = val / 65536;

		/* DMA Start Address Low */
		outw(APCI3120_ADD_ON_MWAR_LOW, devpriv->i_IobaseAddon + 0);
		outw(lo, devpriv->i_IobaseAddon + 2);

		/* DMA Start Address High */
		outw(APCI3120_ADD_ON_MWAR_HIGH, devpriv->i_IobaseAddon + 0);
		outw(hi, devpriv->i_IobaseAddon + 2);

		val = devpriv->ui_DmaBufferUsesize[next_buf];
		lo = val & 0xffff;
		val = devpriv->ui_DmaBufferUsesize[next_buf];
		hi = val / 65536;

		/* Nbr of acquisition LOW */
		outw(APCI3120_ADD_ON_MWTC_LOW, devpriv->i_IobaseAddon + 0);
		outw(lo, devpriv->i_IobaseAddon + 2);

		/* Nbr of acquisition HIGH */
		outw(APCI3120_ADD_ON_MWTC_HIGH, devpriv->i_IobaseAddon + 0);
		outw(hi, devpriv->i_IobaseAddon + 2);

		/*
		 * To configure A2P FIFO:
		 * AMWEN_ENABLE | A2P_FIFO_WRITE_ENABLE (0x01|0x02) = 0x03
		 */
		outw(3, devpriv->i_IobaseAddon + 4);

		/* initialise end of dma interrupt AINT_WRITE_COMPL = ENABLE_WRITE_TC_INT */
		outl((APCI3120_FIFO_ADVANCE_ON_BYTE_2 | APCI3120_ENABLE_WRITE_TC_INT),
			devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR);
	}

	if (n_samples) {
		v_APCI3120_InterruptDmaMoveBlock16bit(dev, s,
			devpriv->ul_DmaBufferVirtual[devpriv->ui_DmaActualBuffer],
			n_samples);
		if (!(devpriv->ui_AiFlags & TRIG_WAKE_EOS)) {
			s->async->events |= COMEDI_CB_EOS;
			comedi_event(dev, s);
		}
	}

	if (!devpriv->b_AiContinuous)
		if (devpriv->ui_AiActualScan >= devpriv->ui_AiNbrofScans) {
			/* all data sampled */
			i_APCI3120_StopCyclicAcquisition(dev, s);
			devpriv->b_AiCyclicAcquisition = APCI3120_DISABLE;
			s->async->events |= COMEDI_CB_EOA;
			comedi_event(dev, s);
			return;
		}

	if (devpriv->b_DmaDoubleBuffer) {
		/* switch dma buffers */
		devpriv->ui_DmaActualBuffer = 1 - devpriv->ui_DmaActualBuffer;
	} else {
		/* single buffering: reinitialise and restart the DMA */
		agcsts = AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO;
		outl(agcsts, devpriv->i_IobaseAddon + AMCC_OP_REG_AGCSTS);

		/* changed since 16 bit interface for add on */
		outw(APCI3120_ADD_ON_AGCSTS_LOW, devpriv->i_IobaseAddon + 0);
		outw(APCI3120_ENABLE_TRANSFER_ADD_ON_LOW, devpriv->i_IobaseAddon + 2);
		outw(APCI3120_ADD_ON_AGCSTS_HIGH, devpriv->i_IobaseAddon + 0);
		outw(APCI3120_ENABLE_TRANSFER_ADD_ON_HIGH, devpriv->i_IobaseAddon + 2);

		/*
		 * A2P FIFO MANAGEMENT:
		 * A2P fifo reset & transfer control enable
		 */
		outl(APCI3120_A2P_FIFO_MANAGEMENT,
			devpriv->i_IobaseAmcc + AMCC_OP_REG_MCSR);

		val = devpriv->ul_DmaBufferHw[0];
		lo = val & 0xffff;
		val = devpriv->ul_DmaBufferHw[0];
		hi = val / 65536;
		outw(APCI3120_ADD_ON_MWAR_LOW, devpriv->i_IobaseAddon + 0);
		outw(lo, devpriv->i_IobaseAddon + 2);
		outw(APCI3120_ADD_ON_MWAR_HIGH, devpriv->i_IobaseAddon + 0);
		outw(hi, devpriv->i_IobaseAddon + 2);

		val = devpriv->ui_DmaBufferUsesize[0];
		lo = val & 0xffff;	/* changed */
		val = devpriv->ui_DmaBufferUsesize[0];
		hi = val / 65536;
		outw(APCI3120_ADD_ON_MWTC_LOW, devpriv->i_IobaseAddon + 0);
		outw(lo, devpriv->i_IobaseAddon + 2);
		outw(APCI3120_ADD_ON_MWTC_HIGH, devpriv->i_IobaseAddon + 0);
		outw(hi, devpriv->i_IobaseAddon + 2);

		/*
		 * To configure A2P FIFO:
		 * AMWEN_ENABLE | A2P_FIFO_WRITE_ENABLE (0x01|0x02) = 0x03
		 */
		outw(3, devpriv->i_IobaseAddon + 4);

		/* initialise end of dma interrupt AINT_WRITE_COMPL = ENABLE_WRITE_TC_INT */
		outl((APCI3120_FIFO_ADVANCE_ON_BYTE_2 | APCI3120_ENABLE_WRITE_TC_INT),
			devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR);
	}
}

/*
+----------------------------------------------------------------------------+
| Function name : void v_APCI3120_InterruptDmaMoveBlock16bit(                 |
|                     struct comedi_device *dev, struct comedi_subdevice *s,  |
|                     short *dma_buffer, unsigned int num_samples)            |
+----------------------------------------------------------------------------+
| Task : Copies the data from the DMA buffer into the comedi buffer.          |
+----------------------------------------------------------------------------+
| Input Parameters : struct comedi_device *dev                                |
|                    struct comedi_subdevice *s                               |
|                    short *dma_buffer                                        |
|                    unsigned int num_samples                                 |
+----------------------------------------------------------------------------+
| Return Value : void
+----------------------------------------------------------------------------+ */ void v_APCI3120_InterruptDmaMoveBlock16bit(struct comedi_device *dev, struct comedi_subdevice *s, short *dma_buffer, unsigned int num_samples) { devpriv->ui_AiActualScan += (s->async->cur_chan + num_samples) / devpriv->ui_AiScanLength; s->async->cur_chan += num_samples; s->async->cur_chan %= devpriv->ui_AiScanLength; cfc_write_array_to_buffer(s, dma_buffer, num_samples * sizeof(short)); } /* +----------------------------------------------------------------------------+ | TIMER SUBDEVICE | +----------------------------------------------------------------------------+ */ /* +----------------------------------------------------------------------------+ | Function name :int i_APCI3120_InsnConfigTimer(struct comedi_device *dev, | | struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) | | | +----------------------------------------------------------------------------+ | Task :Configure Timer 2 | | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | struct comedi_subdevice *s | | struct comedi_insn *insn | | unsigned int *data | | | | data[0]= TIMER configure as timer | | = WATCHDOG configure as watchdog | | data[1] = Timer constant | | data[2] = Timer2 interrupt (1)enable or(0) disable | | | +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ int i_APCI3120_InsnConfigTimer(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int ui_Timervalue2; unsigned short us_TmpValue; unsigned char b_Tmp; if (!data[1]) comedi_error(dev, "config:No timer constant !"); devpriv->b_Timer2Interrupt = (unsigned char) data[2]; /* save info whether to enable or disable interrupt */ ui_Timervalue2 = data[1] / 1000; /* convert 
nano seconds to u seconds */ /* this_board->i_hwdrv_InsnConfigTimer(dev, ui_Timervalue2,(unsigned char)data[0]); */ us_TmpValue = (unsigned short) inw(devpriv->iobase + APCI3120_RD_STATUS); /* * EL250804: Testing if board APCI3120 have the new Quartz or if it * is an APCI3001 and calculate the time value to set in the timer */ if ((us_TmpValue & 0x00B0) == 0x00B0 || !strcmp(this_board->pc_DriverName, "apci3001")) { /* Calculate the time value to set in the timer */ ui_Timervalue2 = ui_Timervalue2 / 50; } else { /* Calculate the time value to set in the timer */ ui_Timervalue2 = ui_Timervalue2 / 70; } /* Reset gate 2 of Timer 2 to disable it (Set Bit D14 to 0) */ devpriv->us_OutputRegister = devpriv->us_OutputRegister & APCI3120_DISABLE_TIMER2; outw(devpriv->us_OutputRegister, devpriv->iobase + APCI3120_WR_ADDRESS); /* Disable TIMER Interrupt */ devpriv->b_ModeSelectRegister = devpriv-> b_ModeSelectRegister & APCI3120_DISABLE_TIMER_INT & 0xEF; /* Disable Eoc and Eos Interrupts */ devpriv->b_ModeSelectRegister = devpriv-> b_ModeSelectRegister & APCI3120_DISABLE_EOC_INT & APCI3120_DISABLE_EOS_INT; outb(devpriv->b_ModeSelectRegister, devpriv->iobase + APCI3120_WRITE_MODE_SELECT); if (data[0] == APCI3120_TIMER) { /* initialize timer */ /* devpriv->b_ModeSelectRegister=devpriv->b_ModeSelectRegister | * APCI3120_ENABLE_TIMER_INT; */ /* outb(devpriv->b_ModeSelectRegister,devpriv->iobase+APCI3120_WRITE_MODE_SELECT); */ /* Set the Timer 2 in mode 2(Timer) */ devpriv->b_TimerSelectMode = (devpriv-> b_TimerSelectMode & 0x0F) | APCI3120_TIMER_2_MODE_2; outb(devpriv->b_TimerSelectMode, devpriv->iobase + APCI3120_TIMER_CRT1); /* * Configure the timer 2 for writing the LOW unsigned short of timer * is Delay value You must make a b_tmp variable with * DigitalOutPutRegister because at Address_1+APCI3120_TIMER_CRT0 * you can set the digital output and configure the timer 2,and if * you don't make this, digital output are erase (Set to 0) */ /* Writing LOW unsigned short */ b_Tmp = 
((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_2_LOW_WORD; outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0); outw(LOWORD(ui_Timervalue2), devpriv->iobase + APCI3120_TIMER_VALUE); /* Writing HIGH unsigned short */ b_Tmp = ((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_2_HIGH_WORD; outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0); outw(HIWORD(ui_Timervalue2), devpriv->iobase + APCI3120_TIMER_VALUE); /* timer2 in Timer mode enabled */ devpriv->b_Timer2Mode = APCI3120_TIMER; } else { /* Initialize Watch dog */ /* Set the Timer 2 in mode 5(Watchdog) */ devpriv->b_TimerSelectMode = (devpriv-> b_TimerSelectMode & 0x0F) | APCI3120_TIMER_2_MODE_5; outb(devpriv->b_TimerSelectMode, devpriv->iobase + APCI3120_TIMER_CRT1); /* * Configure the timer 2 for writing the LOW unsigned short of timer * is Delay value You must make a b_tmp variable with * DigitalOutPutRegister because at Address_1+APCI3120_TIMER_CRT0 * you can set the digital output and configure the timer 2,and if * you don't make this, digital output are erase (Set to 0) */ /* Writing LOW unsigned short */ b_Tmp = ((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_2_LOW_WORD; outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0); outw(LOWORD(ui_Timervalue2), devpriv->iobase + APCI3120_TIMER_VALUE); /* Writing HIGH unsigned short */ b_Tmp = ((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_2_HIGH_WORD; outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0); outw(HIWORD(ui_Timervalue2), devpriv->iobase + APCI3120_TIMER_VALUE); /* watchdog enabled */ devpriv->b_Timer2Mode = APCI3120_WATCHDOG; } return insn->n; } /* +----------------------------------------------------------------------------+ | Function name :int i_APCI3120_InsnWriteTimer(struct comedi_device *dev, | | struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | | | +----------------------------------------------------------------------------+ | Task : To start 
and stop the timer | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | struct comedi_subdevice *s | | struct comedi_insn *insn | | unsigned int *data | | | | data[0] = 1 (start) | | data[0] = 0 (stop ) | | data[0] = 2 (write new value) | | data[1]= new value | | | | devpriv->b_Timer2Mode = 0 DISABLE | | 1 Timer | | 2 Watch dog | | | +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ int i_APCI3120_InsnWriteTimer(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int ui_Timervalue2 = 0; unsigned short us_TmpValue; unsigned char b_Tmp; if ((devpriv->b_Timer2Mode != APCI3120_WATCHDOG) && (devpriv->b_Timer2Mode != APCI3120_TIMER)) { comedi_error(dev, "\nwrite:timer2 not configured "); return -EINVAL; } if (data[0] == 2) { /* write new value */ if (devpriv->b_Timer2Mode != APCI3120_TIMER) { comedi_error(dev, "write :timer2 not configured in TIMER MODE"); return -EINVAL; } if (data[1]) ui_Timervalue2 = data[1]; else ui_Timervalue2 = 0; } /* this_board->i_hwdrv_InsnWriteTimer(dev,data[0],ui_Timervalue2); */ switch (data[0]) { case APCI3120_START: /* Reset FC_TIMER BIT */ inb(devpriv->iobase + APCI3120_TIMER_STATUS_REGISTER); if (devpriv->b_Timer2Mode == APCI3120_TIMER) { /* start timer */ /* Enable Timer */ devpriv->b_ModeSelectRegister = devpriv->b_ModeSelectRegister & 0x0B; } else { /* start watch dog */ /* Enable WatchDog */ devpriv->b_ModeSelectRegister = (devpriv-> b_ModeSelectRegister & 0x0B) | APCI3120_ENABLE_WATCHDOG; } /* enable disable interrupt */ if ((devpriv->b_Timer2Interrupt) == APCI3120_ENABLE) { devpriv->b_ModeSelectRegister = devpriv-> b_ModeSelectRegister | APCI3120_ENABLE_TIMER_INT; /* save the task structure to pass info to user */ devpriv->tsk_Current = current; } else { 
devpriv->b_ModeSelectRegister = devpriv-> b_ModeSelectRegister & APCI3120_DISABLE_TIMER_INT; } outb(devpriv->b_ModeSelectRegister, devpriv->iobase + APCI3120_WRITE_MODE_SELECT); if (devpriv->b_Timer2Mode == APCI3120_TIMER) { /* start timer */ /* For Timer mode is Gate2 must be activated **timer started */ devpriv->us_OutputRegister = devpriv-> us_OutputRegister | APCI3120_ENABLE_TIMER2; outw(devpriv->us_OutputRegister, devpriv->iobase + APCI3120_WR_ADDRESS); } break; case APCI3120_STOP: if (devpriv->b_Timer2Mode == APCI3120_TIMER) { /* Disable timer */ devpriv->b_ModeSelectRegister = devpriv-> b_ModeSelectRegister & APCI3120_DISABLE_TIMER_COUNTER; } else { /* Disable WatchDog */ devpriv->b_ModeSelectRegister = devpriv-> b_ModeSelectRegister & APCI3120_DISABLE_WATCHDOG; } /* Disable timer interrupt */ devpriv->b_ModeSelectRegister = devpriv-> b_ModeSelectRegister & APCI3120_DISABLE_TIMER_INT; /* Write above states to register */ outb(devpriv->b_ModeSelectRegister, devpriv->iobase + APCI3120_WRITE_MODE_SELECT); /* Reset Gate 2 */ devpriv->us_OutputRegister = devpriv->us_OutputRegister & APCI3120_DISABLE_TIMER_INT; outw(devpriv->us_OutputRegister, devpriv->iobase + APCI3120_WR_ADDRESS); /* Reset FC_TIMER BIT */ inb(devpriv->iobase + APCI3120_TIMER_STATUS_REGISTER); /* Disable timer */ /* devpriv->b_Timer2Mode=APCI3120_DISABLE; */ break; case 2: /* write new value to Timer */ if (devpriv->b_Timer2Mode != APCI3120_TIMER) { comedi_error(dev, "write :timer2 not configured in TIMER MODE"); return -EINVAL; } /* ui_Timervalue2=data[1]; // passed as argument */ us_TmpValue = (unsigned short) inw(devpriv->iobase + APCI3120_RD_STATUS); /* * EL250804: Testing if board APCI3120 have the new Quartz or if it * is an APCI3001 and calculate the time value to set in the timer */ if ((us_TmpValue & 0x00B0) == 0x00B0 || !strcmp(this_board->pc_DriverName, "apci3001")) { /* Calculate the time value to set in the timer */ ui_Timervalue2 = ui_Timervalue2 / 50; } else { /* Calculate the time 
value to set in the timer */ ui_Timervalue2 = ui_Timervalue2 / 70; } /* Writing LOW unsigned short */ b_Tmp = ((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_2_LOW_WORD; outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0); outw(LOWORD(ui_Timervalue2), devpriv->iobase + APCI3120_TIMER_VALUE); /* Writing HIGH unsigned short */ b_Tmp = ((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_2_HIGH_WORD; outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0); outw(HIWORD(ui_Timervalue2), devpriv->iobase + APCI3120_TIMER_VALUE); break; default: return -EINVAL; /* Not a valid input */ } return insn->n; } /* +----------------------------------------------------------------------------+ | Function name : int i_APCI3120_InsnReadTimer(struct comedi_device *dev, | | struct comedi_subdevice *s,struct comedi_insn *insn, unsigned int *data) | | | | | +----------------------------------------------------------------------------+ | Task : read the Timer value | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | struct comedi_subdevice *s | | struct comedi_insn *insn | | unsigned int *data | | | +----------------------------------------------------------------------------+ | Return Value : | | for Timer: data[0]= Timer constant | | | | for watchdog: data[0]=0 (still running) | | data[0]=1 (run down) | | | +----------------------------------------------------------------------------+ */ int i_APCI3120_InsnReadTimer(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned char b_Tmp; unsigned short us_TmpValue, us_TmpValue_2, us_StatusValue; if ((devpriv->b_Timer2Mode != APCI3120_WATCHDOG) && (devpriv->b_Timer2Mode != APCI3120_TIMER)) { comedi_error(dev, "\nread:timer2 not configured "); } /* this_board->i_hwdrv_InsnReadTimer(dev,data); */ if (devpriv->b_Timer2Mode == APCI3120_TIMER) { /* Read the LOW unsigned short of 
Timer 2 register */ b_Tmp = ((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_2_LOW_WORD; outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0); us_TmpValue = inw(devpriv->iobase + APCI3120_TIMER_VALUE); /* Read the HIGH unsigned short of Timer 2 register */ b_Tmp = ((devpriv-> b_DigitalOutputRegister) & 0xF0) | APCI3120_SELECT_TIMER_2_HIGH_WORD; outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0); us_TmpValue_2 = inw(devpriv->iobase + APCI3120_TIMER_VALUE); /* combining both words */ data[0] = (unsigned int) ((us_TmpValue) | ((us_TmpValue_2) << 16)); } else { /* Read watch dog status */ us_StatusValue = inw(devpriv->iobase + APCI3120_RD_STATUS); us_StatusValue = ((us_StatusValue & APCI3120_FC_TIMER) >> 12) & 1; if (us_StatusValue == 1) { /* RESET FC_TIMER BIT */ inb(devpriv->iobase + APCI3120_TIMER_STATUS_REGISTER); } data[0] = us_StatusValue; /* when data[0] = 1 then the watch dog has rundown */ } return insn->n; } /* +----------------------------------------------------------------------------+ | DIGITAL INPUT SUBDEVICE | +----------------------------------------------------------------------------+ */ /* +----------------------------------------------------------------------------+ | Function name :int i_APCI3120_InsnReadDigitalInput(struct comedi_device *dev, | | struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | | | | | +----------------------------------------------------------------------------+ | Task : Reads the value of the specified Digital input channel| | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | struct comedi_subdevice *s | | struct comedi_insn *insn | | unsigned int *data | +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ int i_APCI3120_InsnReadDigitalInput(struct comedi_device *dev, struct 
comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int ui_Chan, ui_TmpValue; ui_Chan = CR_CHAN(insn->chanspec); /* channel specified */ /* this_board->i_hwdrv_InsnReadDigitalInput(dev,ui_Chan,data); */ if (ui_Chan <= 3) { ui_TmpValue = (unsigned int) inw(devpriv->iobase + APCI3120_RD_STATUS); /* * since only 1 channel reqd to bring it to last bit it is rotated 8 * +(chan - 1) times then ANDed with 1 for last bit. */ *data = (ui_TmpValue >> (ui_Chan + 8)) & 1; /* return 0; */ } else { /* comedi_error(dev," chan spec wrong"); */ return -EINVAL; /* "sorry channel spec wrong " */ } return insn->n; } /* +----------------------------------------------------------------------------+ | Function name :int i_APCI3120_InsnBitsDigitalInput(struct comedi_device *dev, | |struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | | | +----------------------------------------------------------------------------+ | Task : Reads the value of the Digital input Port i.e.4channels| | value is returned in data[0] | | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | struct comedi_subdevice *s | | struct comedi_insn *insn | | unsigned int *data | +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ int i_APCI3120_InsnBitsDigitalInput(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int ui_TmpValue; ui_TmpValue = (unsigned int) inw(devpriv->iobase + APCI3120_RD_STATUS); /***** state of 4 channels in the 11, 10, 9, 8 bits of status reg rotated right 8 times to bring them to last four bits ANDed with oxf for value. 
*****/ *data = (ui_TmpValue >> 8) & 0xf; /* this_board->i_hwdrv_InsnBitsDigitalInput(dev,data); */ return insn->n; } /* +----------------------------------------------------------------------------+ | DIGITAL OUTPUT SUBDEVICE | +----------------------------------------------------------------------------+ */ /* +----------------------------------------------------------------------------+ | Function name :int i_APCI3120_InsnConfigDigitalOutput(struct comedi_device | | *dev,struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) | | | +----------------------------------------------------------------------------+ | Task :Configure the output memory ON or OFF | | | +----------------------------------------------------------------------------+ | Input Parameters :struct comedi_device *dev | | struct comedi_subdevice *s | | struct comedi_insn *insn | | unsigned int *data | +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ int i_APCI3120_InsnConfigDigitalOutput(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if ((data[0] != 0) && (data[0] != 1)) { comedi_error(dev, "Not a valid Data !!! 
,Data should be 1 or 0\n"); return -EINVAL; } if (data[0]) { devpriv->b_OutputMemoryStatus = APCI3120_ENABLE; } else { devpriv->b_OutputMemoryStatus = APCI3120_DISABLE; devpriv->b_DigitalOutputRegister = 0; } if (!devpriv->b_OutputMemoryStatus) ui_Temp = 0; /* if(!devpriv->b_OutputMemoryStatus ) */ return insn->n; } /* +----------------------------------------------------------------------------+ | Function name :int i_APCI3120_InsnBitsDigitalOutput(struct comedi_device *dev, | | struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | | | +----------------------------------------------------------------------------+ | Task : write diatal output port | | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | struct comedi_subdevice *s | | struct comedi_insn *insn | | unsigned int *data | | data[0] Value to be written | data[1] :1 Set digital o/p ON | data[1] 2 Set digital o/p OFF with memory ON +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ int i_APCI3120_InsnBitsDigitalOutput(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if ((data[0] > devpriv->s_EeParameters.i_DoMaxdata) || (data[0] < 0)) { comedi_error(dev, "Data is not valid !!! 
\n"); return -EINVAL; } switch (data[1]) { case 1: data[0] = (data[0] << 4) | devpriv->b_DigitalOutputRegister; break; case 2: data[0] = data[0]; break; default: printk("\nThe parameter passed is in error \n"); return -EINVAL; } /* switch(data[1]) */ outb(data[0], devpriv->iobase + APCI3120_DIGITAL_OUTPUT); devpriv->b_DigitalOutputRegister = data[0] & 0xF0; return insn->n; } /* +----------------------------------------------------------------------------+ | Function name :int i_APCI3120_InsnWriteDigitalOutput(struct comedi_device *dev,| |struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) | | | +----------------------------------------------------------------------------+ | Task : Write digiatl output | | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | struct comedi_subdevice *s | | struct comedi_insn *insn | | unsigned int *data | data[0] Value to be written data[1] :1 Set digital o/p ON data[1] 2 Set digital o/p OFF with memory ON +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ int i_APCI3120_InsnWriteDigitalOutput(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int ui_Temp1; unsigned int ui_NoOfChannel = CR_CHAN(insn->chanspec); /* get the channel */ if ((data[0] != 0) && (data[0] != 1)) { comedi_error(dev, "Not a valid Data !!! ,Data should be 1 or 0\n"); return -EINVAL; } if (ui_NoOfChannel > devpriv->s_EeParameters.i_NbrDoChannel - 1) { comedi_error(dev, "This board doesn't have specified channel !!! 
\n"); return -EINVAL; } switch (data[1]) { case 1: data[0] = (data[0] << ui_NoOfChannel); /* ES05 data[0]=(data[0]<<4)|ui_Temp; */ data[0] = (data[0] << 4) | devpriv->b_DigitalOutputRegister; break; case 2: data[0] = ~data[0] & 0x1; ui_Temp1 = 1; ui_Temp1 = ui_Temp1 << ui_NoOfChannel; ui_Temp1 = ui_Temp1 << 4; /* ES05 ui_Temp=ui_Temp|ui_Temp1; */ devpriv->b_DigitalOutputRegister = devpriv->b_DigitalOutputRegister | ui_Temp1; data[0] = (data[0] << ui_NoOfChannel) ^ 0xf; data[0] = data[0] << 4; /* ES05 data[0]=data[0]& ui_Temp; */ data[0] = data[0] & devpriv->b_DigitalOutputRegister; break; default: printk("\nThe parameter passed is in error \n"); return -EINVAL; } /* switch(data[1]) */ outb(data[0], devpriv->iobase + APCI3120_DIGITAL_OUTPUT); /* ES05 ui_Temp=data[0] & 0xf0; */ devpriv->b_DigitalOutputRegister = data[0] & 0xf0; return insn->n; } /* +----------------------------------------------------------------------------+ | ANALOG OUTPUT SUBDEVICE | +----------------------------------------------------------------------------+ */ /* +----------------------------------------------------------------------------+ | Function name :int i_APCI3120_InsnWriteAnalogOutput(struct comedi_device *dev,| |struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | | | +----------------------------------------------------------------------------+ | Task : Write analog output | | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | struct comedi_subdevice *s | | struct comedi_insn *insn | | unsigned int *data | +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ int i_APCI3120_InsnWriteAnalogOutput(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int ui_Range, ui_Channel; unsigned short 
us_TmpValue; ui_Range = CR_RANGE(insn->chanspec); ui_Channel = CR_CHAN(insn->chanspec); /* this_board->i_hwdrv_InsnWriteAnalogOutput(dev, ui_Range, ui_Channel,data[0]); */ if (ui_Range) { /* if 1 then unipolar */ if (data[0] != 0) data[0] = ((((ui_Channel & 0x03) << 14) & 0xC000) | (1 << 13) | (data[0] + 8191)); else data[0] = ((((ui_Channel & 0x03) << 14) & 0xC000) | (1 << 13) | 8192); } else { /* if 0 then bipolar */ data[0] = ((((ui_Channel & 0x03) << 14) & 0xC000) | (0 << 13) | data[0]); } /* * out put n values at the given channel. printk("\nwaiting for * DA_READY BIT"); */ do { /* Waiting of DA_READY BIT */ us_TmpValue = ((unsigned short) inw(devpriv->iobase + APCI3120_RD_STATUS)) & 0x0001; } while (us_TmpValue != 0x0001); if (ui_Channel <= 3) /* * for channel 0-3 out at the register 1 (wrDac1-8) data[i] * typecasted to ushort since word write is to be done */ outw((unsigned short) data[0], devpriv->iobase + APCI3120_ANALOG_OUTPUT_1); else /* * for channel 4-7 out at the register 2 (wrDac5-8) data[i] * typecasted to ushort since word write is to be done */ outw((unsigned short) data[0], devpriv->iobase + APCI3120_ANALOG_OUTPUT_2); return insn->n; }
gpl-2.0
gpkulkarni/linux-arm64
drivers/mfd/tmio_core.c
9883
1480
/*
 * Copyright(c) 2009 Ian Molton <spyro@f2s.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/mfd/tmio.h>

/* Bring up the MMC/SD control registers and park the SD bus powered down. */
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base)
{
	/* Enable the MMC/SD Control registers */
	sd_config_write16(cnf, shift, CNF_CMD, SDCREN);
	sd_config_write32(cnf, shift, CNF_CTL_BASE, base & 0xfffe);

	/* Disable SD power during suspend */
	sd_config_write8(cnf, shift, CNF_PWR_CTL_3, 0x01);

	/* The below is required but why? FIXME */
	sd_config_write8(cnf, shift, CNF_STOP_CLK_CTL, 0x1f);

	/* Power down SD bus */
	sd_config_write8(cnf, shift, CNF_PWR_CTL_2, 0x00);

	return 0;
}
EXPORT_SYMBOL(tmio_core_mmc_enable);

/* Re-enable the MMC/SD control registers after resume. */
int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base)
{
	/* Enable the MMC/SD Control registers */
	sd_config_write16(cnf, shift, CNF_CMD, SDCREN);
	sd_config_write32(cnf, shift, CNF_CTL_BASE, base & 0xfffe);

	return 0;
}
EXPORT_SYMBOL(tmio_core_mmc_resume);

/* Switch SD bus power: non-zero state powers up, zero powers down. */
void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state)
{
	unsigned char val;

	if (state)
		val = 0x02;
	else
		val = 0x00;
	sd_config_write8(cnf, shift, CNF_PWR_CTL_2, val);
}
EXPORT_SYMBOL(tmio_core_mmc_pwr);

/* Select the SD clock mode: non-zero state sets divided-clock mode. */
void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state)
{
	unsigned char mode;

	if (state)
		mode = 1;
	else
		mode = 0;
	sd_config_write8(cnf, shift, CNF_SD_CLK_MODE, mode);
}
EXPORT_SYMBOL(tmio_core_mmc_clk_div);
gpl-2.0
acklinr/omap-android-3.4
drivers/sh/intc/balancing.c
10395
2327
/*
 * Support for hardware-managed IRQ auto-distribution.
 *
 * Copyright (C) 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include "internals.h"

/* Per-IRQ auto-distribution handle; 0 means no distribution register. */
static unsigned long dist_handle[INTC_NR_IRQS];

/*
 * Common path for enable/disable: look up the distribution register
 * encoded in the IRQ's handle and write the on/off value through the
 * register-width-appropriate accessor.
 */
static void intc_balancing_set(unsigned int irq, unsigned int enable)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = dist_handle[irq];
	unsigned long addr;

	/* Nothing to do without a dist register or with balancing off. */
	if (irq_balancing_disabled(irq) || !handle)
		return;

	addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
	intc_reg_fns[_INTC_FN(handle)](addr, handle, enable);
}

void intc_balancing_enable(unsigned int irq)
{
	intc_balancing_set(irq, 1);
}

void intc_balancing_disable(unsigned int irq)
{
	intc_balancing_set(irq, 0);
}

/*
 * Build the distribution handle for @enum_id by scanning the mask
 * registers for one that both lists the enum and carries a dist_reg.
 * Returns 0 when no auto-distribution option exists for this IRQ.
 */
static unsigned int intc_dist_data(struct intc_desc *desc,
				   struct intc_desc_int *d,
				   intc_enum enum_id)
{
	struct intc_mask_reg *mr;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	if (!desc->hw.mask_regs || !enum_id)
		return 0;

	for (i = 0; i < desc->hw.nr_mask_regs; i++) {
		mr = desc->hw.mask_regs + i;

		/*
		 * Skip entries with no auto-distribution register
		 * associated with them.
		 */
		if (!mr->dist_reg)
			continue;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			/* Accessor index depends on the register width. */
			fn = REG_FN_MODIFY_BASE + (mr->reg_width >> 3) - 1;
			mode = MODE_ENABLE_REG;
			reg_e = mr->dist_reg;
			reg_d = mr->dist_reg;

			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1, (mr->reg_width - 1) - j);
		}
	}

	/* No distribution options available for this IRQ. */
	return 0;
}

/*
 * Record the distribution handle for @irq (computed from the
 * controller description) under the big intc lock.
 */
void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc,
			  struct intc_desc_int *d, intc_enum id)
{
	unsigned long flags;

	/* Nothing to do for controllers without mask registers. */
	if (!desc->hw.mask_regs)
		return;

	raw_spin_lock_irqsave(&intc_big_lock, flags);
	dist_handle[irq] = intc_dist_data(desc, d, id);
	raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}
gpl-2.0
alexpotter1/DeltaKernel_msm8974_hammerhead
arch/x86/um/elfcore.c
11419
1947
#include <linux/elf.h> #include <linux/coredump.h> #include <linux/fs.h> #include <linux/mm.h> #include <asm/elf.h> Elf32_Half elf_core_extra_phdrs(void) { return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0; } int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, unsigned long limit) { if ( vsyscall_ehdr ) { const struct elfhdr *const ehdrp = (struct elfhdr *) vsyscall_ehdr; const struct elf_phdr *const phdrp = (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); int i; Elf32_Off ofs = 0; for (i = 0; i < ehdrp->e_phnum; ++i) { struct elf_phdr phdr = phdrp[i]; if (phdr.p_type == PT_LOAD) { ofs = phdr.p_offset = offset; offset += phdr.p_filesz; } else { phdr.p_offset += ofs; } phdr.p_paddr = 0; /* match other core phdrs */ *size += sizeof(phdr); if (*size > limit || !dump_write(file, &phdr, sizeof(phdr))) return 0; } } return 1; } int elf_core_write_extra_data(struct file *file, size_t *size, unsigned long limit) { if ( vsyscall_ehdr ) { const struct elfhdr *const ehdrp = (struct elfhdr *) vsyscall_ehdr; const struct elf_phdr *const phdrp = (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); int i; for (i = 0; i < ehdrp->e_phnum; ++i) { if (phdrp[i].p_type == PT_LOAD) { void *addr = (void *) phdrp[i].p_vaddr; size_t filesz = phdrp[i].p_filesz; *size += filesz; if (*size > limit || !dump_write(file, addr, filesz)) return 0; } } } return 1; } size_t elf_core_extra_data_size(void) { if ( vsyscall_ehdr ) { const struct elfhdr *const ehdrp = (struct elfhdr *)vsyscall_ehdr; const struct elf_phdr *const phdrp = (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); int i; for (i = 0; i < ehdrp->e_phnum; ++i) if (phdrp[i].p_type == PT_LOAD) return (size_t) phdrp[i].p_filesz; } return 0; }
gpl-2.0
rohanpurohit/android_kernel_sony_msm8930
arch/x86/um/elfcore.c
11419
1947
#include <linux/elf.h> #include <linux/coredump.h> #include <linux/fs.h> #include <linux/mm.h> #include <asm/elf.h> Elf32_Half elf_core_extra_phdrs(void) { return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0; } int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, unsigned long limit) { if ( vsyscall_ehdr ) { const struct elfhdr *const ehdrp = (struct elfhdr *) vsyscall_ehdr; const struct elf_phdr *const phdrp = (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); int i; Elf32_Off ofs = 0; for (i = 0; i < ehdrp->e_phnum; ++i) { struct elf_phdr phdr = phdrp[i]; if (phdr.p_type == PT_LOAD) { ofs = phdr.p_offset = offset; offset += phdr.p_filesz; } else { phdr.p_offset += ofs; } phdr.p_paddr = 0; /* match other core phdrs */ *size += sizeof(phdr); if (*size > limit || !dump_write(file, &phdr, sizeof(phdr))) return 0; } } return 1; } int elf_core_write_extra_data(struct file *file, size_t *size, unsigned long limit) { if ( vsyscall_ehdr ) { const struct elfhdr *const ehdrp = (struct elfhdr *) vsyscall_ehdr; const struct elf_phdr *const phdrp = (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); int i; for (i = 0; i < ehdrp->e_phnum; ++i) { if (phdrp[i].p_type == PT_LOAD) { void *addr = (void *) phdrp[i].p_vaddr; size_t filesz = phdrp[i].p_filesz; *size += filesz; if (*size > limit || !dump_write(file, addr, filesz)) return 0; } } } return 1; } size_t elf_core_extra_data_size(void) { if ( vsyscall_ehdr ) { const struct elfhdr *const ehdrp = (struct elfhdr *)vsyscall_ehdr; const struct elf_phdr *const phdrp = (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); int i; for (i = 0; i < ehdrp->e_phnum; ++i) if (phdrp[i].p_type == PT_LOAD) return (size_t) phdrp[i].p_filesz; } return 0; }
gpl-2.0
bticino/u-boot
drivers/net/tigon3.c
156
176892
/******************************************************************************/
/*                                                                            */
/* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 Broadcom         */
/* Corporation.                                                               */
/* All rights reserved.                                                       */
/*                                                                            */
/* This program is free software; you can redistribute it and/or modify      */
/* it under the terms of the GNU General Public License as published by      */
/* the Free Software Foundation, located in the file LICENSE.                 */
/*                                                                            */
/* History:                                                                   */
/******************************************************************************/

#include <common.h>
#include <asm/types.h>
#ifdef CONFIG_BMW
#include <mpc824x.h>
#endif
#include <malloc.h>
#include <linux/byteorder/big_endian.h>
#include "bcm570x_mm.h"

#define EMBEDDED 1

/******************************************************************************/
/* Local functions.                                                           */
/******************************************************************************/

LM_STATUS LM_Abort (PLM_DEVICE_BLOCK pDevice);
LM_STATUS LM_QueueRxPackets (PLM_DEVICE_BLOCK pDevice);

static LM_STATUS LM_TranslateRequestedMediaType (LM_REQUESTED_MEDIA_TYPE
						 RequestedMediaType,
						 PLM_MEDIA_TYPE pMediaType,
						 PLM_LINE_SPEED pLineSpeed,
						 PLM_DUPLEX_MODE pDuplexMode);

static LM_STATUS LM_InitBcm540xPhy (PLM_DEVICE_BLOCK pDevice);

__inline static LM_VOID LM_ServiceRxInterrupt (PLM_DEVICE_BLOCK pDevice);
__inline static LM_VOID LM_ServiceTxInterrupt (PLM_DEVICE_BLOCK pDevice);

static LM_STATUS LM_ForceAutoNegBcm540xPhy (PLM_DEVICE_BLOCK pDevice,
					    LM_REQUESTED_MEDIA_TYPE
					    RequestedMediaType);
static LM_STATUS LM_ForceAutoNeg (PLM_DEVICE_BLOCK pDevice,
				  LM_REQUESTED_MEDIA_TYPE RequestedMediaType);
static LM_UINT32 GetPhyAdFlowCntrlSettings (PLM_DEVICE_BLOCK pDevice);
STATIC LM_STATUS LM_SetFlowControl (PLM_DEVICE_BLOCK pDevice,
				    LM_UINT32 LocalPhyAd,
				    LM_UINT32 RemotePhyAd);
#if INCLUDE_TBI_SUPPORT
STATIC LM_STATUS LM_SetupFiberPhy (PLM_DEVICE_BLOCK pDevice);
STATIC LM_STATUS LM_InitBcm800xPhy (PLM_DEVICE_BLOCK pDevice);
#endif
STATIC LM_STATUS LM_SetupCopperPhy (PLM_DEVICE_BLOCK pDevice);
STATIC PLM_ADAPTER_INFO LM_GetAdapterInfoBySsid (LM_UINT16 Svid,
						 LM_UINT16 Ssid);
STATIC LM_STATUS LM_DmaTest (PLM_DEVICE_BLOCK pDevice, PLM_UINT8 pBufferVirt,
			     LM_PHYSICAL_ADDRESS BufferPhy,
			     LM_UINT32 BufferSize);
STATIC LM_STATUS LM_HaltCpu (PLM_DEVICE_BLOCK pDevice, LM_UINT32 cpu_number);
STATIC LM_STATUS LM_ResetChip (PLM_DEVICE_BLOCK pDevice);
STATIC LM_STATUS LM_Test4GBoundary (PLM_DEVICE_BLOCK pDevice,
				    PLM_PACKET pPacket, PT3_SND_BD pSendBd);

/******************************************************************************/
/* External functions.                                                        */
/******************************************************************************/

LM_STATUS LM_LoadRlsFirmware (PLM_DEVICE_BLOCK pDevice);

/******************************************************************************/
/* Description:                                                               */
/*   Read a chip register via the PCI config-space indirect access window.    */
/*   With the PCI-X target workaround the UNDI lock serializes the            */
/*   address/data register pair.                                              */
/*                                                                            */
/* Return:                                                                    */
/*   The 32-bit register value.                                               */
/******************************************************************************/
LM_UINT32 LM_RegRdInd (PLM_DEVICE_BLOCK pDevice, LM_UINT32 Register)
{
	LM_UINT32 Value32;

#if PCIX_TARGET_WORKAROUND
	MM_ACQUIRE_UNDI_LOCK (pDevice);
#endif
	MM_WriteConfig32 (pDevice, T3_PCI_REG_ADDR_REG, Register);
	MM_ReadConfig32 (pDevice, T3_PCI_REG_DATA_REG, &Value32);
#if PCIX_TARGET_WORKAROUND
	MM_RELEASE_UNDI_LOCK (pDevice);
#endif

	return Value32;
}				/* LM_RegRdInd */

/******************************************************************************/
/* Description:                                                               */
/*   Write a chip register via the PCI config-space indirect access window.   */
/*                                                                            */
/* Return:                                                                    */
/******************************************************************************/
LM_VOID LM_RegWrInd (PLM_DEVICE_BLOCK pDevice, LM_UINT32 Register,
		     LM_UINT32 Value32)
{
#if PCIX_TARGET_WORKAROUND
	MM_ACQUIRE_UNDI_LOCK (pDevice);
#endif
	MM_WriteConfig32 (pDevice, T3_PCI_REG_ADDR_REG, Register);
	MM_WriteConfig32 (pDevice, T3_PCI_REG_DATA_REG, Value32);
#if PCIX_TARGET_WORKAROUND
	MM_RELEASE_UNDI_LOCK (pDevice);
#endif
}				/* LM_RegWrInd */

/******************************************************************************/
/* Description:                                                               */
/*                                                                            */
/* Return:                                                                    */
/******************************************************************************/ LM_UINT32 LM_MemRdInd (PLM_DEVICE_BLOCK pDevice, LM_UINT32 MemAddr) { LM_UINT32 Value32; MM_ACQUIRE_UNDI_LOCK (pDevice); #ifdef BIG_ENDIAN_HOST MM_WriteConfig32 (pDevice, T3_PCI_MEM_WIN_ADDR_REG, MemAddr); Value32 = REG_RD (pDevice, PciCfg.MemWindowData); /* Value32 = REG_RD(pDevice,uIntMem.Mbuf[(MemAddr & 0x7fff)/4]); */ #else MM_WriteConfig32 (pDevice, T3_PCI_MEM_WIN_ADDR_REG, MemAddr); MM_ReadConfig32 (pDevice, T3_PCI_MEM_WIN_DATA_REG, &Value32); #endif MM_RELEASE_UNDI_LOCK (pDevice); return Value32; } /* LM_MemRdInd */ /******************************************************************************/ /* Description: */ /* */ /* Return: */ /******************************************************************************/ LM_VOID LM_MemWrInd (PLM_DEVICE_BLOCK pDevice, LM_UINT32 MemAddr, LM_UINT32 Value32) { MM_ACQUIRE_UNDI_LOCK (pDevice); #ifdef BIG_ENDIAN_HOST REG_WR (pDevice, PciCfg.MemWindowBaseAddr, MemAddr); REG_WR (pDevice, uIntMem.Mbuf[(MemAddr & 0x7fff) / 4], Value32); #else MM_WriteConfig32 (pDevice, T3_PCI_MEM_WIN_ADDR_REG, MemAddr); MM_WriteConfig32 (pDevice, T3_PCI_MEM_WIN_DATA_REG, Value32); #endif MM_RELEASE_UNDI_LOCK (pDevice); } /* LM_MemWrInd */ /******************************************************************************/ /* Description: */ /* */ /* Return: */ /******************************************************************************/ LM_STATUS LM_QueueRxPackets (PLM_DEVICE_BLOCK pDevice) { LM_STATUS Lmstatus; PLM_PACKET pPacket; PT3_RCV_BD pRcvBd; LM_UINT32 StdBdAdded = 0; #if T3_JUMBO_RCV_RCB_ENTRY_COUNT LM_UINT32 JumboBdAdded = 0; #endif /* T3_JUMBO_RCV_RCB_ENTRY_COUNT */ Lmstatus = LM_STATUS_SUCCESS; pPacket = (PLM_PACKET) QQ_PopHead (&pDevice->RxPacketFreeQ.Container); while (pPacket) { switch (pPacket->u.Rx.RcvProdRing) { #if T3_JUMBO_RCV_RCB_ENTRY_COUNT case T3_JUMBO_RCV_PROD_RING: /* Jumbo Receive Ring. */ /* Initialize the buffer descriptor. 
*/ pRcvBd = &pDevice->pRxJumboBdVirt[pDevice->RxJumboProdIdx]; pRcvBd->Flags = RCV_BD_FLAG_END | RCV_BD_FLAG_JUMBO_RING; pRcvBd->Len = (LM_UINT16) pDevice->RxJumboBufferSize; /* Initialize the receive buffer pointer */ #if 0 /* Jimmy, deleted in new */ pRcvBd->HostAddr.Low = pPacket->u.Rx.RxBufferPhy.Low; pRcvBd->HostAddr.High = pPacket->u.Rx.RxBufferPhy.High; #endif MM_MapRxDma (pDevice, pPacket, &pRcvBd->HostAddr); /* The opaque field may point to an offset from a fix addr. */ pRcvBd->Opaque = (LM_UINT32) (MM_UINT_PTR (pPacket) - MM_UINT_PTR (pDevice-> pPacketDescBase)); /* Update the producer index. */ pDevice->RxJumboProdIdx = (pDevice->RxJumboProdIdx + 1) & T3_JUMBO_RCV_RCB_ENTRY_COUNT_MASK; JumboBdAdded++; break; #endif /* T3_JUMBO_RCV_RCB_ENTRY_COUNT */ case T3_STD_RCV_PROD_RING: /* Standard Receive Ring. */ /* Initialize the buffer descriptor. */ pRcvBd = &pDevice->pRxStdBdVirt[pDevice->RxStdProdIdx]; pRcvBd->Flags = RCV_BD_FLAG_END; pRcvBd->Len = MAX_STD_RCV_BUFFER_SIZE; /* Initialize the receive buffer pointer */ #if 0 /* Jimmy, deleted in new replaced with MM_MapRxDma */ pRcvBd->HostAddr.Low = pPacket->u.Rx.RxBufferPhy.Low; pRcvBd->HostAddr.High = pPacket->u.Rx.RxBufferPhy.High; #endif MM_MapRxDma (pDevice, pPacket, &pRcvBd->HostAddr); /* The opaque field may point to an offset from a fix addr. */ pRcvBd->Opaque = (LM_UINT32) (MM_UINT_PTR (pPacket) - MM_UINT_PTR (pDevice-> pPacketDescBase)); /* Update the producer index. */ pDevice->RxStdProdIdx = (pDevice->RxStdProdIdx + 1) & T3_STD_RCV_RCB_ENTRY_COUNT_MASK; StdBdAdded++; break; case T3_UNKNOWN_RCV_PROD_RING: default: Lmstatus = LM_STATUS_FAILURE; break; } /* switch */ /* Bail out if there is any error. */ if (Lmstatus != LM_STATUS_SUCCESS) { break; } pPacket = (PLM_PACKET) QQ_PopHead (&pDevice->RxPacketFreeQ.Container); } /* while */ wmb (); /* Update the procedure index. 
*/ if (StdBdAdded) { MB_REG_WR (pDevice, Mailbox.RcvStdProdIdx.Low, pDevice->RxStdProdIdx); } #if T3_JUMBO_RCV_RCB_ENTRY_COUNT if (JumboBdAdded) { MB_REG_WR (pDevice, Mailbox.RcvJumboProdIdx.Low, pDevice->RxJumboProdIdx); } #endif /* T3_JUMBO_RCV_RCB_ENTRY_COUNT */ return Lmstatus; } /* LM_QueueRxPackets */ /******************************************************************************/ /* Description: */ /* */ /* Return: */ /******************************************************************************/ STATIC LM_VOID LM_NvramInit (PLM_DEVICE_BLOCK pDevice) { LM_UINT32 Value32; LM_UINT32 j; /* Intialize clock period and state machine. */ Value32 = SEEPROM_ADDR_CLK_PERD (SEEPROM_CLOCK_PERIOD) | SEEPROM_ADDR_FSM_RESET; REG_WR (pDevice, Grc.EepromAddr, Value32); for (j = 0; j < 100; j++) { MM_Wait (10); } /* Serial eeprom access using the Grc.EepromAddr/EepromData registers. */ Value32 = REG_RD (pDevice, Grc.LocalCtrl); REG_WR (pDevice, Grc.LocalCtrl, Value32 | GRC_MISC_LOCAL_CTRL_AUTO_SEEPROM); /* Set the 5701 compatibility mode if we are using EEPROM. */ if (T3_ASIC_REV (pDevice->ChipRevId) != T3_ASIC_REV_5700 && T3_ASIC_REV (pDevice->ChipRevId) != T3_ASIC_REV_5701) { Value32 = REG_RD (pDevice, Nvram.Config1); if ((Value32 & FLASH_INTERFACE_ENABLE) == 0) { /* Use the new interface to read EEPROM. 
*/ Value32 &= ~FLASH_COMPAT_BYPASS; REG_WR (pDevice, Nvram.Config1, Value32); } } } /* LM_NvRamInit */ /******************************************************************************/ /* Description: */ /* */ /* Return: */ /******************************************************************************/ STATIC LM_STATUS LM_EepromRead (PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset, LM_UINT32 * pData) { LM_UINT32 Value32; LM_UINT32 Addr; LM_UINT32 Dev; LM_UINT32 j; if (Offset > SEEPROM_CHIP_SIZE) { return LM_STATUS_FAILURE; } Dev = Offset / SEEPROM_CHIP_SIZE; Addr = Offset % SEEPROM_CHIP_SIZE; Value32 = REG_RD (pDevice, Grc.EepromAddr); Value32 &= ~(SEEPROM_ADDR_ADDRESS_MASK | SEEPROM_ADDR_DEV_ID_MASK | SEEPROM_ADDR_RW_MASK); REG_WR (pDevice, Grc.EepromAddr, Value32 | SEEPROM_ADDR_DEV_ID (Dev) | SEEPROM_ADDR_ADDRESS (Addr) | SEEPROM_ADDR_START | SEEPROM_ADDR_READ); for (j = 0; j < 1000; j++) { Value32 = REG_RD (pDevice, Grc.EepromAddr); if (Value32 & SEEPROM_ADDR_COMPLETE) { break; } MM_Wait (10); } if (Value32 & SEEPROM_ADDR_COMPLETE) { Value32 = REG_RD (pDevice, Grc.EepromData); *pData = Value32; return LM_STATUS_SUCCESS; } return LM_STATUS_FAILURE; } /* LM_EepromRead */ /******************************************************************************/ /* Description: */ /* */ /* Return: */ /******************************************************************************/ STATIC LM_STATUS LM_NvramRead (PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset, LM_UINT32 * pData) { LM_UINT32 Value32; LM_STATUS Status; LM_UINT32 j; if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700 || T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5701) { Status = LM_EepromRead (pDevice, Offset, pData); } else { /* Determine if we have flash or EEPROM. 
*/ Value32 = REG_RD (pDevice, Nvram.Config1); if (Value32 & FLASH_INTERFACE_ENABLE) { if (Value32 & FLASH_SSRAM_BUFFERRED_MODE) { Offset = ((Offset / BUFFERED_FLASH_PAGE_SIZE) << BUFFERED_FLASH_PAGE_POS) + (Offset % BUFFERED_FLASH_PAGE_SIZE); } } REG_WR (pDevice, Nvram.SwArb, SW_ARB_REQ_SET1); for (j = 0; j < 1000; j++) { if (REG_RD (pDevice, Nvram.SwArb) & SW_ARB_GNT1) { break; } MM_Wait (20); } if (j == 1000) { return LM_STATUS_FAILURE; } /* Read from flash or EEPROM with the new 5703/02 interface. */ REG_WR (pDevice, Nvram.Addr, Offset & NVRAM_ADDRESS_MASK); REG_WR (pDevice, Nvram.Cmd, NVRAM_CMD_RD | NVRAM_CMD_DO_IT | NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); /* Wait for the done bit to clear. */ for (j = 0; j < 500; j++) { MM_Wait (10); Value32 = REG_RD (pDevice, Nvram.Cmd); if (!(Value32 & NVRAM_CMD_DONE)) { break; } } /* Wait for the done bit. */ if (!(Value32 & NVRAM_CMD_DONE)) { for (j = 0; j < 500; j++) { MM_Wait (10); Value32 = REG_RD (pDevice, Nvram.Cmd); if (Value32 & NVRAM_CMD_DONE) { MM_Wait (10); *pData = REG_RD (pDevice, Nvram.ReadData); /* Change the endianess. 
*/ *pData = ((*pData & 0xff) << 24) | ((*pData & 0xff00) << 8) | ((*pData & 0xff0000) >> 8) | ((*pData >> 24) & 0xff); break; } } } REG_WR (pDevice, Nvram.SwArb, SW_ARB_REQ_CLR1); if (Value32 & NVRAM_CMD_DONE) { Status = LM_STATUS_SUCCESS; } else { Status = LM_STATUS_FAILURE; } } return Status; } /* LM_NvramRead */ STATIC void LM_ReadVPD (PLM_DEVICE_BLOCK pDevice) { LM_UINT32 Vpd_arr[256 / 4]; LM_UINT8 *Vpd = (LM_UINT8 *) & Vpd_arr[0]; LM_UINT32 *Vpd_dptr = &Vpd_arr[0]; LM_UINT32 Value32; unsigned int j; /* Read PN from VPD */ for (j = 0; j < 256; j += 4, Vpd_dptr++) { if (LM_NvramRead (pDevice, 0x100 + j, &Value32) != LM_STATUS_SUCCESS) { printf ("BCM570x: LM_ReadVPD: VPD read failed" " (no EEPROM onboard)\n"); return; } *Vpd_dptr = cpu_to_le32 (Value32); } for (j = 0; j < 256;) { unsigned int Vpd_r_len; unsigned int Vpd_r_end; if ((Vpd[j] == 0x82) || (Vpd[j] == 0x91)) { j = j + 3 + Vpd[j + 1] + (Vpd[j + 2] << 8); } else if (Vpd[j] == 0x90) { Vpd_r_len = Vpd[j + 1] + (Vpd[j + 2] << 8); j += 3; Vpd_r_end = Vpd_r_len + j; while (j < Vpd_r_end) { if ((Vpd[j] == 'P') && (Vpd[j + 1] == 'N')) { unsigned int len = Vpd[j + 2]; if (len <= 24) { memcpy (pDevice->PartNo, &Vpd[j + 3], len); } break; } else { if (Vpd[j + 2] == 0) { break; } j = j + Vpd[j + 2]; } } break; } else { break; } } } STATIC void LM_ReadBootCodeVersion (PLM_DEVICE_BLOCK pDevice) { LM_UINT32 Value32, offset, ver_offset; int i; if (LM_NvramRead (pDevice, 0x0, &Value32) != LM_STATUS_SUCCESS) return; if (Value32 != 0xaa559966) return; if (LM_NvramRead (pDevice, 0xc, &offset) != LM_STATUS_SUCCESS) return; offset = ((offset & 0xff) << 24) | ((offset & 0xff00) << 8) | ((offset & 0xff0000) >> 8) | ((offset >> 24) & 0xff); if (LM_NvramRead (pDevice, offset, &Value32) != LM_STATUS_SUCCESS) return; if ((Value32 == 0x0300000e) && (LM_NvramRead (pDevice, offset + 4, &Value32) == LM_STATUS_SUCCESS) && (Value32 == 0)) { if (LM_NvramRead (pDevice, offset + 8, &ver_offset) != LM_STATUS_SUCCESS) return; ver_offset = 
((ver_offset & 0xff0000) >> 8) | ((ver_offset >> 24) & 0xff); for (i = 0; i < 16; i += 4) { if (LM_NvramRead (pDevice, offset + ver_offset + i, &Value32) != LM_STATUS_SUCCESS) { return; } *((LM_UINT32 *) & pDevice->BootCodeVer[i]) = cpu_to_le32 (Value32); } } else { char c; if (LM_NvramRead (pDevice, 0x94, &Value32) != LM_STATUS_SUCCESS) return; i = 0; c = ((Value32 & 0xff0000) >> 16); if (c < 10) { pDevice->BootCodeVer[i++] = c + '0'; } else { pDevice->BootCodeVer[i++] = (c / 10) + '0'; pDevice->BootCodeVer[i++] = (c % 10) + '0'; } pDevice->BootCodeVer[i++] = '.'; c = (Value32 & 0xff000000) >> 24; if (c < 10) { pDevice->BootCodeVer[i++] = c + '0'; } else { pDevice->BootCodeVer[i++] = (c / 10) + '0'; pDevice->BootCodeVer[i++] = (c % 10) + '0'; } pDevice->BootCodeVer[i] = 0; } } STATIC void LM_GetBusSpeed (PLM_DEVICE_BLOCK pDevice) { LM_UINT32 PciState = pDevice->PciState; LM_UINT32 ClockCtrl; char *SpeedStr = ""; if (PciState & T3_PCI_STATE_32BIT_PCI_BUS) { strcpy (pDevice->BusSpeedStr, "32-bit "); } else { strcpy (pDevice->BusSpeedStr, "64-bit "); } if (PciState & T3_PCI_STATE_CONVENTIONAL_PCI_MODE) { strcat (pDevice->BusSpeedStr, "PCI "); if (PciState & T3_PCI_STATE_HIGH_BUS_SPEED) { SpeedStr = "66MHz"; } else { SpeedStr = "33MHz"; } } else { strcat (pDevice->BusSpeedStr, "PCIX "); if (pDevice->BondId == GRC_MISC_BD_ID_5704CIOBE) { SpeedStr = "133MHz"; } else { ClockCtrl = REG_RD (pDevice, PciCfg.ClockCtrl) & 0x1f; switch (ClockCtrl) { case 0: SpeedStr = "33MHz"; break; case 2: SpeedStr = "50MHz"; break; case 4: SpeedStr = "66MHz"; break; case 6: SpeedStr = "100MHz"; break; case 7: SpeedStr = "133MHz"; break; } } } strcat (pDevice->BusSpeedStr, SpeedStr); } /******************************************************************************/ /* Description: */ /* This routine initializes default parameters and reads the PCI */ /* configurations. 
*/ /* */ /* Return: */ /* LM_STATUS_SUCCESS */ /******************************************************************************/ LM_STATUS LM_GetAdapterInfo (PLM_DEVICE_BLOCK pDevice) { PLM_ADAPTER_INFO pAdapterInfo; LM_UINT32 Value32; LM_STATUS Status; LM_UINT32 j; LM_UINT32 EeSigFound; LM_UINT32 EePhyTypeSerdes = 0; LM_UINT32 EePhyLedMode = 0; LM_UINT32 EePhyId = 0; /* Get Device Id and Vendor Id */ Status = MM_ReadConfig32 (pDevice, PCI_VENDOR_ID_REG, &Value32); if (Status != LM_STATUS_SUCCESS) { return Status; } pDevice->PciVendorId = (LM_UINT16) Value32; pDevice->PciDeviceId = (LM_UINT16) (Value32 >> 16); /* If we are not getting the write adapter, exit. */ if ((Value32 != T3_PCI_ID_BCM5700) && (Value32 != T3_PCI_ID_BCM5701) && (Value32 != T3_PCI_ID_BCM5702) && (Value32 != T3_PCI_ID_BCM5702x) && (Value32 != T3_PCI_ID_BCM5702FE) && (Value32 != T3_PCI_ID_BCM5703) && (Value32 != T3_PCI_ID_BCM5703x) && (Value32 != T3_PCI_ID_BCM5704)) { return LM_STATUS_FAILURE; } Status = MM_ReadConfig32 (pDevice, PCI_REV_ID_REG, &Value32); if (Status != LM_STATUS_SUCCESS) { return Status; } pDevice->PciRevId = (LM_UINT8) Value32; /* Get IRQ. */ Status = MM_ReadConfig32 (pDevice, PCI_INT_LINE_REG, &Value32); if (Status != LM_STATUS_SUCCESS) { return Status; } pDevice->Irq = (LM_UINT8) Value32; /* Get interrupt pin. */ pDevice->IntPin = (LM_UINT8) (Value32 >> 8); /* Get chip revision id. */ Status = MM_ReadConfig32 (pDevice, T3_PCI_MISC_HOST_CTRL_REG, &Value32); pDevice->ChipRevId = Value32 >> 16; /* Get subsystem vendor. */ Status = MM_ReadConfig32 (pDevice, PCI_SUBSYSTEM_VENDOR_ID_REG, &Value32); if (Status != LM_STATUS_SUCCESS) { return Status; } pDevice->SubsystemVendorId = (LM_UINT16) Value32; /* Get PCI subsystem id. */ pDevice->SubsystemId = (LM_UINT16) (Value32 >> 16); /* Get the cache line size. 
*/ MM_ReadConfig32 (pDevice, PCI_CACHE_LINE_SIZE_REG, &Value32); pDevice->CacheLineSize = (LM_UINT8) Value32; pDevice->SavedCacheLineReg = Value32; if (pDevice->ChipRevId != T3_CHIP_ID_5703_A1 && pDevice->ChipRevId != T3_CHIP_ID_5703_A2 && pDevice->ChipRevId != T3_CHIP_ID_5704_A0) { pDevice->UndiFix = FALSE; } #if !PCIX_TARGET_WORKAROUND pDevice->UndiFix = FALSE; #endif /* Map the memory base to system address space. */ if (!pDevice->UndiFix) { Status = MM_MapMemBase (pDevice); if (Status != LM_STATUS_SUCCESS) { return Status; } /* Initialize the memory view pointer. */ pDevice->pMemView = (PT3_STD_MEM_MAP) pDevice->pMappedMemBase; } #if PCIX_TARGET_WORKAROUND /* store whether we are in PCI are PCI-X mode */ pDevice->EnablePciXFix = FALSE; MM_ReadConfig32 (pDevice, T3_PCI_STATE_REG, &Value32); if ((Value32 & T3_PCI_STATE_CONVENTIONAL_PCI_MODE) == 0) { /* Enable PCI-X workaround only if we are running on 5700 BX. */ if (T3_CHIP_REV (pDevice->ChipRevId) == T3_CHIP_REV_5700_BX) { pDevice->EnablePciXFix = TRUE; } } if (pDevice->UndiFix) { pDevice->EnablePciXFix = TRUE; } #endif /* Bx bug: due to the "byte_enable bug" in PCI-X mode, the power */ /* management register may be clobbered which may cause the */ /* BCM5700 to go into D3 state. While in this state, we will */ /* not have memory mapped register access. As a workaround, we */ /* need to restore the device to D0 state. */ MM_ReadConfig32 (pDevice, T3_PCI_PM_STATUS_CTRL_REG, &Value32); Value32 |= T3_PM_PME_ASSERTED; Value32 &= ~T3_PM_POWER_STATE_MASK; Value32 |= T3_PM_POWER_STATE_D0; MM_WriteConfig32 (pDevice, T3_PCI_PM_STATUS_CTRL_REG, Value32); /* read the current PCI command word */ MM_ReadConfig32 (pDevice, PCI_COMMAND_REG, &Value32); /* Make sure bus-mastering is enabled. 
*/ Value32 |= PCI_BUSMASTER_ENABLE; #if PCIX_TARGET_WORKAROUND /* if we are in PCI-X mode, also make sure mem-mapping and SERR#/PERR# are enabled */ if (pDevice->EnablePciXFix == TRUE) { Value32 |= (PCI_MEM_SPACE_ENABLE | PCI_SYSTEM_ERROR_ENABLE | PCI_PARITY_ERROR_ENABLE); } if (pDevice->UndiFix) { Value32 &= ~PCI_MEM_SPACE_ENABLE; } #endif if (pDevice->EnableMWI) { Value32 |= PCI_MEMORY_WRITE_INVALIDATE; } else { Value32 &= (~PCI_MEMORY_WRITE_INVALIDATE); } /* Error out if mem-mapping is NOT enabled for PCI systems */ if (!(Value32 | PCI_MEM_SPACE_ENABLE)) { return LM_STATUS_FAILURE; } /* save the value we are going to write into the PCI command word */ pDevice->PciCommandStatusWords = Value32; Status = MM_WriteConfig32 (pDevice, PCI_COMMAND_REG, Value32); if (Status != LM_STATUS_SUCCESS) { return Status; } /* Set power state to D0. */ LM_SetPowerState (pDevice, LM_POWER_STATE_D0); #ifdef BIG_ENDIAN_PCI pDevice->MiscHostCtrl = MISC_HOST_CTRL_MASK_PCI_INT | MISC_HOST_CTRL_ENABLE_INDIRECT_ACCESS | MISC_HOST_CTRL_ENABLE_ENDIAN_WORD_SWAP | MISC_HOST_CTRL_ENABLE_PCI_STATE_REG_RW; #else /* No CPU Swap modes for PCI IO */ /* Setup the mode registers. 
*/ pDevice->MiscHostCtrl = MISC_HOST_CTRL_MASK_PCI_INT | MISC_HOST_CTRL_ENABLE_ENDIAN_WORD_SWAP | #ifdef BIG_ENDIAN_HOST MISC_HOST_CTRL_ENABLE_ENDIAN_BYTE_SWAP | #endif /* BIG_ENDIAN_HOST */ MISC_HOST_CTRL_ENABLE_INDIRECT_ACCESS | MISC_HOST_CTRL_ENABLE_PCI_STATE_REG_RW; #endif /* !BIG_ENDIAN_PCI */ /* write to PCI misc host ctr first in order to enable indirect accesses */ MM_WriteConfig32 (pDevice, T3_PCI_MISC_HOST_CTRL_REG, pDevice->MiscHostCtrl); REG_WR (pDevice, PciCfg.MiscHostCtrl, pDevice->MiscHostCtrl); #ifdef BIG_ENDIAN_PCI Value32 = GRC_MODE_WORD_SWAP_DATA | GRC_MODE_WORD_SWAP_NON_FRAME_DATA; #else /* No CPU Swap modes for PCI IO */ #ifdef BIG_ENDIAN_HOST Value32 = GRC_MODE_BYTE_SWAP_NON_FRAME_DATA | GRC_MODE_WORD_SWAP_NON_FRAME_DATA; #else Value32 = GRC_MODE_BYTE_SWAP_NON_FRAME_DATA | GRC_MODE_BYTE_SWAP_DATA; #endif #endif /* !BIG_ENDIAN_PCI */ REG_WR (pDevice, Grc.Mode, Value32); if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700) { REG_WR (pDevice, Grc.LocalCtrl, GRC_MISC_LOCAL_CTRL_GPIO_OUTPUT1 | GRC_MISC_LOCAL_CTRL_GPIO_OE1); } MM_Wait (40); /* Enable indirect memory access */ REG_WR (pDevice, MemArbiter.Mode, T3_MEM_ARBITER_MODE_ENABLE); if (REG_RD (pDevice, PciCfg.ClockCtrl) & T3_PCI_44MHZ_CORE_CLOCK) { REG_WR (pDevice, PciCfg.ClockCtrl, T3_PCI_44MHZ_CORE_CLOCK | T3_PCI_SELECT_ALTERNATE_CLOCK); REG_WR (pDevice, PciCfg.ClockCtrl, T3_PCI_SELECT_ALTERNATE_CLOCK); MM_Wait (40); /* required delay is 27usec */ } REG_WR (pDevice, PciCfg.ClockCtrl, 0); REG_WR (pDevice, PciCfg.MemWindowBaseAddr, 0); #if PCIX_TARGET_WORKAROUND MM_ReadConfig32 (pDevice, T3_PCI_STATE_REG, &Value32); if ((pDevice->EnablePciXFix == FALSE) && ((Value32 & T3_PCI_STATE_CONVENTIONAL_PCI_MODE) == 0)) { if (pDevice->ChipRevId == T3_CHIP_ID_5701_A0 || pDevice->ChipRevId == T3_CHIP_ID_5701_B0 || pDevice->ChipRevId == T3_CHIP_ID_5701_B2 || pDevice->ChipRevId == T3_CHIP_ID_5701_B5) { __raw_writel (0, &(pDevice->pMemView->uIntMem. 
MemBlock32K[0x300])); __raw_writel (0, &(pDevice->pMemView->uIntMem. MemBlock32K[0x301])); __raw_writel (0xffffffff, &(pDevice->pMemView->uIntMem. MemBlock32K[0x301])); if (__raw_readl (&(pDevice->pMemView->uIntMem.MemBlock32K[0x300]))) { pDevice->EnablePciXFix = TRUE; } } } #endif #if 1 /* * This code was at the beginning of else block below, but that's * a bug if node address in shared memory. */ MM_Wait (50); LM_NvramInit (pDevice); #endif /* Get the node address. First try to get in from the shared memory. */ /* If the signature is not present, then get it from the NVRAM. */ Value32 = MEM_RD_OFFSET (pDevice, T3_MAC_ADDR_HIGH_MAILBOX); if ((Value32 >> 16) == 0x484b) { pDevice->NodeAddress[0] = (LM_UINT8) (Value32 >> 8); pDevice->NodeAddress[1] = (LM_UINT8) Value32; Value32 = MEM_RD_OFFSET (pDevice, T3_MAC_ADDR_LOW_MAILBOX); pDevice->NodeAddress[2] = (LM_UINT8) (Value32 >> 24); pDevice->NodeAddress[3] = (LM_UINT8) (Value32 >> 16); pDevice->NodeAddress[4] = (LM_UINT8) (Value32 >> 8); pDevice->NodeAddress[5] = (LM_UINT8) Value32; Status = LM_STATUS_SUCCESS; } else { Status = LM_NvramRead (pDevice, 0x7c, &Value32); if (Status == LM_STATUS_SUCCESS) { pDevice->NodeAddress[0] = (LM_UINT8) (Value32 >> 16); pDevice->NodeAddress[1] = (LM_UINT8) (Value32 >> 24); Status = LM_NvramRead (pDevice, 0x80, &Value32); pDevice->NodeAddress[2] = (LM_UINT8) Value32; pDevice->NodeAddress[3] = (LM_UINT8) (Value32 >> 8); pDevice->NodeAddress[4] = (LM_UINT8) (Value32 >> 16); pDevice->NodeAddress[5] = (LM_UINT8) (Value32 >> 24); } } /* Assign a default address. */ if (Status != LM_STATUS_SUCCESS) { #ifndef EMBEDDED printk (KERN_ERR "Cannot get MAC addr from NVRAM. 
Using default.\n"); #endif pDevice->NodeAddress[0] = 0x00; pDevice->NodeAddress[1] = 0x10; pDevice->NodeAddress[2] = 0x18; pDevice->NodeAddress[3] = 0x68; pDevice->NodeAddress[4] = 0x61; pDevice->NodeAddress[5] = 0x76; } pDevice->PermanentNodeAddress[0] = pDevice->NodeAddress[0]; pDevice->PermanentNodeAddress[1] = pDevice->NodeAddress[1]; pDevice->PermanentNodeAddress[2] = pDevice->NodeAddress[2]; pDevice->PermanentNodeAddress[3] = pDevice->NodeAddress[3]; pDevice->PermanentNodeAddress[4] = pDevice->NodeAddress[4]; pDevice->PermanentNodeAddress[5] = pDevice->NodeAddress[5]; /* Initialize the default values. */ pDevice->NoTxPseudoHdrChksum = FALSE; pDevice->NoRxPseudoHdrChksum = FALSE; pDevice->NicSendBd = FALSE; pDevice->TxPacketDescCnt = DEFAULT_TX_PACKET_DESC_COUNT; pDevice->RxStdDescCnt = DEFAULT_STD_RCV_DESC_COUNT; pDevice->RxCoalescingTicks = DEFAULT_RX_COALESCING_TICKS; pDevice->TxCoalescingTicks = DEFAULT_TX_COALESCING_TICKS; pDevice->RxMaxCoalescedFrames = DEFAULT_RX_MAX_COALESCED_FRAMES; pDevice->TxMaxCoalescedFrames = DEFAULT_TX_MAX_COALESCED_FRAMES; pDevice->RxCoalescingTicksDuringInt = BAD_DEFAULT_VALUE; pDevice->TxCoalescingTicksDuringInt = BAD_DEFAULT_VALUE; pDevice->RxMaxCoalescedFramesDuringInt = BAD_DEFAULT_VALUE; pDevice->TxMaxCoalescedFramesDuringInt = BAD_DEFAULT_VALUE; pDevice->StatsCoalescingTicks = DEFAULT_STATS_COALESCING_TICKS; pDevice->EnableMWI = FALSE; pDevice->TxMtu = MAX_ETHERNET_PACKET_SIZE_NO_CRC; pDevice->RxMtu = MAX_ETHERNET_PACKET_SIZE_NO_CRC; pDevice->DisableAutoNeg = FALSE; pDevice->PhyIntMode = T3_PHY_INT_MODE_AUTO; pDevice->LinkChngMode = T3_LINK_CHNG_MODE_AUTO; pDevice->LedMode = LED_MODE_AUTO; pDevice->ResetPhyOnInit = TRUE; pDevice->DelayPciGrant = TRUE; pDevice->UseTaggedStatus = FALSE; pDevice->OneDmaAtOnce = BAD_DEFAULT_VALUE; pDevice->DmaMbufLowMark = T3_DEF_DMA_MBUF_LOW_WMARK_JUMBO; pDevice->RxMacMbufLowMark = T3_DEF_RX_MAC_MBUF_LOW_WMARK_JUMBO; pDevice->MbufHighMark = T3_DEF_MBUF_HIGH_WMARK_JUMBO; 
pDevice->RequestedMediaType = LM_REQUESTED_MEDIA_TYPE_AUTO; pDevice->TaskOffloadCap = LM_TASK_OFFLOAD_NONE; pDevice->FlowControlCap = LM_FLOW_CONTROL_AUTO_PAUSE; pDevice->EnableTbi = FALSE; #if INCLUDE_TBI_SUPPORT pDevice->PollTbiLink = BAD_DEFAULT_VALUE; #endif switch (T3_ASIC_REV (pDevice->ChipRevId)) { case T3_ASIC_REV_5704: pDevice->MbufBase = T3_NIC_MBUF_POOL_ADDR; pDevice->MbufSize = T3_NIC_MBUF_POOL_SIZE64; break; default: pDevice->MbufBase = T3_NIC_MBUF_POOL_ADDR; pDevice->MbufSize = T3_NIC_MBUF_POOL_SIZE96; break; } pDevice->LinkStatus = LM_STATUS_LINK_DOWN; pDevice->QueueRxPackets = TRUE; pDevice->EnableWireSpeed = TRUE; #if T3_JUMBO_RCV_RCB_ENTRY_COUNT pDevice->RxJumboDescCnt = DEFAULT_JUMBO_RCV_DESC_COUNT; #endif /* T3_JUMBO_RCV_RCB_ENTRY_COUNT */ /* Make this is a known adapter. */ pAdapterInfo = LM_GetAdapterInfoBySsid (pDevice->SubsystemVendorId, pDevice->SubsystemId); pDevice->BondId = REG_RD (pDevice, Grc.MiscCfg) & GRC_MISC_BD_ID_MASK; if (pDevice->BondId != GRC_MISC_BD_ID_5700 && pDevice->BondId != GRC_MISC_BD_ID_5701 && pDevice->BondId != GRC_MISC_BD_ID_5702FE && pDevice->BondId != GRC_MISC_BD_ID_5703 && pDevice->BondId != GRC_MISC_BD_ID_5703S && pDevice->BondId != GRC_MISC_BD_ID_5704 && pDevice->BondId != GRC_MISC_BD_ID_5704CIOBE) { return LM_STATUS_UNKNOWN_ADAPTER; } pDevice->SplitModeEnable = SPLIT_MODE_DISABLE; if ((pDevice->ChipRevId == T3_CHIP_ID_5704_A0) && (pDevice->BondId == GRC_MISC_BD_ID_5704CIOBE)) { pDevice->SplitModeEnable = SPLIT_MODE_ENABLE; pDevice->SplitModeMaxReq = SPLIT_MODE_5704_MAX_REQ; } /* Get Eeprom info. */ Value32 = MEM_RD_OFFSET (pDevice, T3_NIC_DATA_SIG_ADDR); if (Value32 == T3_NIC_DATA_SIG) { EeSigFound = TRUE; Value32 = MEM_RD_OFFSET (pDevice, T3_NIC_DATA_NIC_CFG_ADDR); /* Determine PHY type. 
*/ switch (Value32 & T3_NIC_CFG_PHY_TYPE_MASK) { case T3_NIC_CFG_PHY_TYPE_COPPER: EePhyTypeSerdes = FALSE; break; case T3_NIC_CFG_PHY_TYPE_FIBER: EePhyTypeSerdes = TRUE; break; default: EePhyTypeSerdes = FALSE; break; } /* Determine PHY led mode. */ if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700 || T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5701) { switch (Value32 & T3_NIC_CFG_LED_MODE_MASK) { case T3_NIC_CFG_LED_MODE_TRIPLE_SPEED: EePhyLedMode = LED_MODE_THREE_LINK; break; case T3_NIC_CFG_LED_MODE_LINK_SPEED: EePhyLedMode = LED_MODE_LINK10; break; default: EePhyLedMode = LED_MODE_AUTO; break; } } else { switch (Value32 & T3_NIC_CFG_LED_MODE_MASK) { case T3_NIC_CFG_LED_MODE_OPEN_DRAIN: EePhyLedMode = LED_MODE_OPEN_DRAIN; break; case T3_NIC_CFG_LED_MODE_OUTPUT: EePhyLedMode = LED_MODE_OUTPUT; break; default: EePhyLedMode = LED_MODE_AUTO; break; } } if (pDevice->ChipRevId == T3_CHIP_ID_5703_A1 || pDevice->ChipRevId == T3_CHIP_ID_5703_A2) { /* Enable EEPROM write protection. */ if (Value32 & T3_NIC_EEPROM_WP) { pDevice->EepromWp = TRUE; } } /* Get the PHY Id. */ Value32 = MEM_RD_OFFSET (pDevice, T3_NIC_DATA_PHY_ID_ADDR); if (Value32) { EePhyId = (((Value32 & T3_NIC_PHY_ID1_MASK) >> 16) & PHY_ID1_OUI_MASK) << 10; Value32 = Value32 & T3_NIC_PHY_ID2_MASK; EePhyId |= ((Value32 & PHY_ID2_OUI_MASK) << 16) | (Value32 & PHY_ID2_MODEL_MASK) | (Value32 & PHY_ID2_REV_MASK); } else { EePhyId = 0; } } else { EeSigFound = FALSE; } /* Set the PHY address. */ pDevice->PhyAddr = PHY_DEVICE_ID; /* Disable auto polling. */ pDevice->MiMode = 0xc0000; REG_WR (pDevice, MacCtrl.MiMode, pDevice->MiMode); MM_Wait (40); /* Get the PHY id. */ LM_ReadPhy (pDevice, PHY_ID1_REG, &Value32); pDevice->PhyId = (Value32 & PHY_ID1_OUI_MASK) << 10; LM_ReadPhy (pDevice, PHY_ID2_REG, &Value32); pDevice->PhyId |= ((Value32 & PHY_ID2_OUI_MASK) << 16) | (Value32 & PHY_ID2_MODEL_MASK) | (Value32 & PHY_ID2_REV_MASK); /* Set the EnableTbi flag to false if we have a copper PHY. 
*/ switch (pDevice->PhyId & PHY_ID_MASK) { case PHY_BCM5400_PHY_ID: pDevice->EnableTbi = FALSE; break; case PHY_BCM5401_PHY_ID: pDevice->EnableTbi = FALSE; break; case PHY_BCM5411_PHY_ID: pDevice->EnableTbi = FALSE; break; case PHY_BCM5701_PHY_ID: pDevice->EnableTbi = FALSE; break; case PHY_BCM5703_PHY_ID: pDevice->EnableTbi = FALSE; break; case PHY_BCM5704_PHY_ID: pDevice->EnableTbi = FALSE; break; case PHY_BCM8002_PHY_ID: pDevice->EnableTbi = TRUE; break; default: if (pAdapterInfo) { pDevice->PhyId = pAdapterInfo->PhyId; pDevice->EnableTbi = pAdapterInfo->Serdes; } else if (EeSigFound) { pDevice->PhyId = EePhyId; pDevice->EnableTbi = EePhyTypeSerdes; } break; } /* Bail out if we don't know the copper PHY id. */ if (UNKNOWN_PHY_ID (pDevice->PhyId) && !pDevice->EnableTbi) { return LM_STATUS_FAILURE; } if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5703) { if ((pDevice->SavedCacheLineReg & 0xff00) < 0x4000) { pDevice->SavedCacheLineReg &= 0xffff00ff; pDevice->SavedCacheLineReg |= 0x4000; } } /* Change driver parameters. */ Status = MM_GetConfig (pDevice); if (Status != LM_STATUS_SUCCESS) { return Status; } #if INCLUDE_5701_AX_FIX if (pDevice->ChipRevId == T3_CHIP_ID_5701_A0 || pDevice->ChipRevId == T3_CHIP_ID_5701_B0) { pDevice->ResetPhyOnInit = TRUE; } #endif /* Save the current phy link status. */ if (!pDevice->EnableTbi) { LM_ReadPhy (pDevice, PHY_STATUS_REG, &Value32); LM_ReadPhy (pDevice, PHY_STATUS_REG, &Value32); /* If we don't have link reset the PHY. */ if (!(Value32 & PHY_STATUS_LINK_PASS) || pDevice->ResetPhyOnInit) { LM_WritePhy (pDevice, PHY_CTRL_REG, PHY_CTRL_PHY_RESET); for (j = 0; j < 100; j++) { MM_Wait (10); LM_ReadPhy (pDevice, PHY_CTRL_REG, &Value32); if (Value32 && !(Value32 & PHY_CTRL_PHY_RESET)) { MM_Wait (40); break; } } #if INCLUDE_5701_AX_FIX /* 5701_AX_BX bug: only advertises 10mb speed. 
*/ if (pDevice->ChipRevId == T3_CHIP_ID_5701_A0 || pDevice->ChipRevId == T3_CHIP_ID_5701_B0) { Value32 = PHY_AN_AD_PROTOCOL_802_3_CSMA_CD | PHY_AN_AD_10BASET_HALF | PHY_AN_AD_10BASET_FULL | PHY_AN_AD_100BASETX_FULL | PHY_AN_AD_100BASETX_HALF; Value32 |= GetPhyAdFlowCntrlSettings (pDevice); LM_WritePhy (pDevice, PHY_AN_AD_REG, Value32); pDevice->advertising = Value32; Value32 = BCM540X_AN_AD_1000BASET_HALF | BCM540X_AN_AD_1000BASET_FULL | BCM540X_CONFIG_AS_MASTER | BCM540X_ENABLE_CONFIG_AS_MASTER; LM_WritePhy (pDevice, BCM540X_1000BASET_CTRL_REG, Value32); pDevice->advertising1000 = Value32; LM_WritePhy (pDevice, PHY_CTRL_REG, PHY_CTRL_AUTO_NEG_ENABLE | PHY_CTRL_RESTART_AUTO_NEG); } #endif if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5703) { LM_WritePhy (pDevice, 0x18, 0x0c00); LM_WritePhy (pDevice, 0x17, 0x201f); LM_WritePhy (pDevice, 0x15, 0x2aaa); } if (pDevice->ChipRevId == T3_CHIP_ID_5704_A0) { LM_WritePhy (pDevice, 0x1c, 0x8d68); LM_WritePhy (pDevice, 0x1c, 0x8d68); } /* Enable Ethernet@WireSpeed. */ if (pDevice->EnableWireSpeed) { LM_WritePhy (pDevice, 0x18, 0x7007); LM_ReadPhy (pDevice, 0x18, &Value32); LM_WritePhy (pDevice, 0x18, Value32 | BIT_15 | BIT_4); } } } /* Turn off tap power management. 
*/ if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5401_PHY_ID) { LM_WritePhy (pDevice, BCM5401_AUX_CTRL, 0x0c20); LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x0012); LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x1804); LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x0013); LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x1204); LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x8006); LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x0132); LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x8006); LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x0232); LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x201f); LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x0a20); MM_Wait (40); } #if INCLUDE_TBI_SUPPORT pDevice->IgnoreTbiLinkChange = FALSE; if (pDevice->EnableTbi) { pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE; pDevice->PhyIntMode = T3_PHY_INT_MODE_LINK_READY; if ((pDevice->PollTbiLink == BAD_DEFAULT_VALUE) || pDevice->DisableAutoNeg) { pDevice->PollTbiLink = FALSE; } } else { pDevice->PollTbiLink = FALSE; } #endif /* INCLUDE_TBI_SUPPORT */ /* UseTaggedStatus is only valid for 5701 and later. */ if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700) { pDevice->UseTaggedStatus = FALSE; pDevice->CoalesceMode = 0; } else { pDevice->CoalesceMode = HOST_COALESCE_CLEAR_TICKS_ON_RX_BD_EVENT | HOST_COALESCE_CLEAR_TICKS_ON_TX_BD_EVENT; } /* Set the status block size. */ if (T3_CHIP_REV (pDevice->ChipRevId) != T3_CHIP_REV_5700_AX && T3_CHIP_REV (pDevice->ChipRevId) != T3_CHIP_REV_5700_BX) { pDevice->CoalesceMode |= HOST_COALESCE_32_BYTE_STATUS_MODE; } /* Check the DURING_INT coalescing ticks parameters. 
*/ if (pDevice->UseTaggedStatus) { if (pDevice->RxCoalescingTicksDuringInt == BAD_DEFAULT_VALUE) { pDevice->RxCoalescingTicksDuringInt = DEFAULT_RX_COALESCING_TICKS_DURING_INT; } if (pDevice->TxCoalescingTicksDuringInt == BAD_DEFAULT_VALUE) { pDevice->TxCoalescingTicksDuringInt = DEFAULT_TX_COALESCING_TICKS_DURING_INT; } if (pDevice->RxMaxCoalescedFramesDuringInt == BAD_DEFAULT_VALUE) { pDevice->RxMaxCoalescedFramesDuringInt = DEFAULT_RX_MAX_COALESCED_FRAMES_DURING_INT; } if (pDevice->TxMaxCoalescedFramesDuringInt == BAD_DEFAULT_VALUE) { pDevice->TxMaxCoalescedFramesDuringInt = DEFAULT_TX_MAX_COALESCED_FRAMES_DURING_INT; } } else { if (pDevice->RxCoalescingTicksDuringInt == BAD_DEFAULT_VALUE) { pDevice->RxCoalescingTicksDuringInt = 0; } if (pDevice->TxCoalescingTicksDuringInt == BAD_DEFAULT_VALUE) { pDevice->TxCoalescingTicksDuringInt = 0; } if (pDevice->RxMaxCoalescedFramesDuringInt == BAD_DEFAULT_VALUE) { pDevice->RxMaxCoalescedFramesDuringInt = 0; } if (pDevice->TxMaxCoalescedFramesDuringInt == BAD_DEFAULT_VALUE) { pDevice->TxMaxCoalescedFramesDuringInt = 0; } } #if T3_JUMBO_RCV_RCB_ENTRY_COUNT if (pDevice->RxMtu <= (MAX_STD_RCV_BUFFER_SIZE - 8 /* CRC */ )) { pDevice->RxJumboDescCnt = 0; if (pDevice->RxMtu <= MAX_ETHERNET_PACKET_SIZE_NO_CRC) { pDevice->RxMtu = MAX_ETHERNET_PACKET_SIZE_NO_CRC; } } else { pDevice->RxJumboBufferSize = (pDevice->RxMtu + 8 /* CRC + VLAN */ + COMMON_CACHE_LINE_SIZE - 1) & ~COMMON_CACHE_LINE_MASK; if (pDevice->RxJumboBufferSize > MAX_JUMBO_RCV_BUFFER_SIZE) { pDevice->RxJumboBufferSize = DEFAULT_JUMBO_RCV_BUFFER_SIZE; pDevice->RxMtu = pDevice->RxJumboBufferSize - 8 /* CRC + VLAN */ ; } pDevice->TxMtu = pDevice->RxMtu; } #else pDevice->RxMtu = MAX_ETHERNET_PACKET_SIZE_NO_CRC; #endif /* T3_JUMBO_RCV_RCB_ENTRY_COUNT */ pDevice->RxPacketDescCnt = #if T3_JUMBO_RCV_RCB_ENTRY_COUNT pDevice->RxJumboDescCnt + #endif /* T3_JUMBO_RCV_RCB_ENTRY_COUNT */ pDevice->RxStdDescCnt; if (pDevice->TxMtu < MAX_ETHERNET_PACKET_SIZE_NO_CRC) { pDevice->TxMtu = 
MAX_ETHERNET_PACKET_SIZE_NO_CRC; } if (pDevice->TxMtu > MAX_JUMBO_TX_BUFFER_SIZE) { pDevice->TxMtu = MAX_JUMBO_TX_BUFFER_SIZE; } /* Configure the proper ways to get link change interrupt. */ if (pDevice->PhyIntMode == T3_PHY_INT_MODE_AUTO) { if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700) { pDevice->PhyIntMode = T3_PHY_INT_MODE_MI_INTERRUPT; } else { pDevice->PhyIntMode = T3_PHY_INT_MODE_LINK_READY; } } else if (pDevice->PhyIntMode == T3_PHY_INT_MODE_AUTO_POLLING) { /* Auto-polling does not work on 5700_AX and 5700_BX. */ if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700) { pDevice->PhyIntMode = T3_PHY_INT_MODE_MI_INTERRUPT; } } /* Determine the method to get link change status. */ if (pDevice->LinkChngMode == T3_LINK_CHNG_MODE_AUTO) { /* The link status bit in the status block does not work on 5700_AX */ /* and 5700_BX chips. */ if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700) { pDevice->LinkChngMode = T3_LINK_CHNG_MODE_USE_STATUS_REG; } else { pDevice->LinkChngMode = T3_LINK_CHNG_MODE_USE_STATUS_BLOCK; } } if (pDevice->PhyIntMode == T3_PHY_INT_MODE_MI_INTERRUPT || T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700) { pDevice->LinkChngMode = T3_LINK_CHNG_MODE_USE_STATUS_REG; } /* Configure PHY led mode. */ if (pDevice->LedMode == LED_MODE_AUTO) { if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700 || T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5701) { if (pDevice->SubsystemVendorId == T3_SVID_DELL) { pDevice->LedMode = LED_MODE_LINK10; } else { pDevice->LedMode = LED_MODE_THREE_LINK; if (EeSigFound && EePhyLedMode != LED_MODE_AUTO) { pDevice->LedMode = EePhyLedMode; } } /* bug? 5701 in LINK10 mode does not seem to work when */ /* PhyIntMode is LINK_READY. 
*/ if (T3_ASIC_REV (pDevice->ChipRevId) != T3_ASIC_REV_5700 && #if INCLUDE_TBI_SUPPORT pDevice->EnableTbi == FALSE && #endif pDevice->LedMode == LED_MODE_LINK10) { pDevice->PhyIntMode = T3_PHY_INT_MODE_MI_INTERRUPT; pDevice->LinkChngMode = T3_LINK_CHNG_MODE_USE_STATUS_REG; } if (pDevice->EnableTbi) { pDevice->LedMode = LED_MODE_THREE_LINK; } } else { if (EeSigFound && EePhyLedMode != LED_MODE_AUTO) { pDevice->LedMode = EePhyLedMode; } else { pDevice->LedMode = LED_MODE_OPEN_DRAIN; } } } /* Enable OneDmaAtOnce. */ if (pDevice->OneDmaAtOnce == BAD_DEFAULT_VALUE) { pDevice->OneDmaAtOnce = FALSE; } if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700 || pDevice->ChipRevId == T3_CHIP_ID_5701_A0 || pDevice->ChipRevId == T3_CHIP_ID_5701_B0 || pDevice->ChipRevId == T3_CHIP_ID_5701_B2) { pDevice->WolSpeed = WOL_SPEED_10MB; } else { pDevice->WolSpeed = WOL_SPEED_100MB; } /* Offloadings. */ pDevice->TaskToOffload = LM_TASK_OFFLOAD_NONE; /* Turn off task offloading on Ax. */ if (pDevice->ChipRevId == T3_CHIP_ID_5700_B0) { pDevice->TaskOffloadCap &= ~(LM_TASK_OFFLOAD_TX_TCP_CHECKSUM | LM_TASK_OFFLOAD_TX_UDP_CHECKSUM); } pDevice->PciState = REG_RD (pDevice, PciCfg.PciState); LM_ReadVPD (pDevice); LM_ReadBootCodeVersion (pDevice); LM_GetBusSpeed (pDevice); return LM_STATUS_SUCCESS; } /* LM_GetAdapterInfo */ STATIC PLM_ADAPTER_INFO LM_GetAdapterInfoBySsid (LM_UINT16 Svid, LM_UINT16 Ssid) { static LM_ADAPTER_INFO AdapterArr[] = { {T3_SVID_BROADCOM, T3_SSID_BROADCOM_BCM95700A6, PHY_BCM5401_PHY_ID, 0}, {T3_SVID_BROADCOM, T3_SSID_BROADCOM_BCM95701A5, PHY_BCM5701_PHY_ID, 0}, {T3_SVID_BROADCOM, T3_SSID_BROADCOM_BCM95700T6, PHY_BCM8002_PHY_ID, 1}, {T3_SVID_BROADCOM, T3_SSID_BROADCOM_BCM95700A9, 0, 1}, {T3_SVID_BROADCOM, T3_SSID_BROADCOM_BCM95701T1, PHY_BCM5701_PHY_ID, 0}, {T3_SVID_BROADCOM, T3_SSID_BROADCOM_BCM95701T8, PHY_BCM5701_PHY_ID, 0}, {T3_SVID_BROADCOM, T3_SSID_BROADCOM_BCM95701A7, 0, 1}, {T3_SVID_BROADCOM, T3_SSID_BROADCOM_BCM95701A10, PHY_BCM5701_PHY_ID, 0}, 
{T3_SVID_BROADCOM, T3_SSID_BROADCOM_BCM95701A12, PHY_BCM5701_PHY_ID, 0}, {T3_SVID_BROADCOM, T3_SSID_BROADCOM_BCM95703Ax1, PHY_BCM5701_PHY_ID, 0}, {T3_SVID_BROADCOM, T3_SSID_BROADCOM_BCM95703Ax2, PHY_BCM5701_PHY_ID, 0}, {T3_SVID_3COM, T3_SSID_3COM_3C996T, PHY_BCM5401_PHY_ID, 0}, {T3_SVID_3COM, T3_SSID_3COM_3C996BT, PHY_BCM5701_PHY_ID, 0}, {T3_SVID_3COM, T3_SSID_3COM_3C996SX, 0, 1}, {T3_SVID_3COM, T3_SSID_3COM_3C1000T, PHY_BCM5701_PHY_ID, 0}, {T3_SVID_3COM, T3_SSID_3COM_3C940BR01, PHY_BCM5701_PHY_ID, 0}, {T3_SVID_DELL, T3_SSID_DELL_VIPER, PHY_BCM5401_PHY_ID, 0}, {T3_SVID_DELL, T3_SSID_DELL_JAGUAR, PHY_BCM5401_PHY_ID, 0}, {T3_SVID_DELL, T3_SSID_DELL_MERLOT, PHY_BCM5411_PHY_ID, 0}, {T3_SVID_DELL, T3_SSID_DELL_SLIM_MERLOT, PHY_BCM5411_PHY_ID, 0}, {T3_SVID_COMPAQ, T3_SSID_COMPAQ_BANSHEE, PHY_BCM5701_PHY_ID, 0}, {T3_SVID_COMPAQ, T3_SSID_COMPAQ_BANSHEE_2, PHY_BCM5701_PHY_ID, 0}, {T3_SVID_COMPAQ, T3_SSID_COMPAQ_CHANGELING, 0, 1}, {T3_SVID_COMPAQ, T3_SSID_COMPAQ_NC7780, PHY_BCM5701_PHY_ID, 0}, {T3_SVID_COMPAQ, T3_SSID_COMPAQ_NC7780_2, PHY_BCM5701_PHY_ID, 0}, }; LM_UINT32 j; for (j = 0; j < sizeof (AdapterArr) / sizeof (LM_ADAPTER_INFO); j++) { if (AdapterArr[j].Svid == Svid && AdapterArr[j].Ssid == Ssid) { return &AdapterArr[j]; } } return NULL; } /******************************************************************************/ /* Description: */ /* This routine sets up receive/transmit buffer descriptions queues. */ /* */ /* Return: */ /* LM_STATUS_SUCCESS */ /******************************************************************************/ LM_STATUS LM_InitializeAdapter (PLM_DEVICE_BLOCK pDevice) { LM_PHYSICAL_ADDRESS MemPhy; PLM_UINT8 pMemVirt; PLM_PACKET pPacket; LM_STATUS Status; LM_UINT32 Size; LM_UINT32 j; /* Set power state to D0. */ LM_SetPowerState (pDevice, LM_POWER_STATE_D0); /* Intialize the queues. 
*/
	QQ_InitQueue (&pDevice->RxPacketReceivedQ.Container,
		      MAX_RX_PACKET_DESC_COUNT);
	QQ_InitQueue (&pDevice->RxPacketFreeQ.Container,
		      MAX_RX_PACKET_DESC_COUNT);
	QQ_InitQueue (&pDevice->TxPacketFreeQ.Container,
		      MAX_TX_PACKET_DESC_COUNT);
	QQ_InitQueue (&pDevice->TxPacketActiveQ.Container,
		      MAX_TX_PACKET_DESC_COUNT);
	QQ_InitQueue (&pDevice->TxPacketXmittedQ.Container,
		      MAX_TX_PACKET_DESC_COUNT);

	/* Allocate ONE shared (DMA-visible) memory region sized to hold, in
	 * order: status block, statistics block, the standard receive BD
	 * ring, the (optional) jumbo receive BD ring, and the receive
	 * return ring.  It is carved into those pieces below. */
	Size = T3_STATUS_BLOCK_SIZE + sizeof (T3_STATS_BLOCK) +
	    T3_STD_RCV_RCB_ENTRY_COUNT * sizeof (T3_RCV_BD) +
#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
	    T3_JUMBO_RCV_RCB_ENTRY_COUNT * sizeof (T3_RCV_BD) +
#endif /* T3_JUMBO_RCV_RCB_ENTRY_COUNT */
	    T3_RCV_RETURN_RCB_ENTRY_COUNT * sizeof (T3_RCV_BD);

	/* Memory for host based Send BD (only when the send ring is not
	 * kept in NIC-internal memory). */
	if (pDevice->NicSendBd == FALSE) {
		Size += sizeof (T3_SND_BD) * T3_SEND_RCB_ENTRY_COUNT;
	}

	/* Allocate the memory block. */
	Status = MM_AllocateSharedMemory (pDevice, Size, (PLM_VOID) & pMemVirt,
					  &MemPhy, FALSE);
	if (Status != LM_STATUS_SUCCESS) {
		return Status;
	}

	/* Program DMA Read/Write control.  The magic constants are
	 * chip-revision-specific watermark/config values; presumably taken
	 * from Broadcom's programming guide -- do not alter without it. */
	if (pDevice->PciState & T3_PCI_STATE_NOT_PCI_X_BUS) {
		pDevice->DmaReadWriteCtrl = 0x763f000f;
	} else {
		if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5704) {
			pDevice->DmaReadWriteCtrl = 0x761f0000;
		} else {
			pDevice->DmaReadWriteCtrl = 0x761b000f;
		}
		/* 5703 A1/A2 on PCI-X must serialize DMAs. */
		if (pDevice->ChipRevId == T3_CHIP_ID_5703_A1 ||
		    pDevice->ChipRevId == T3_CHIP_ID_5703_A2) {
			pDevice->OneDmaAtOnce = TRUE;
		}
	}
	if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5703) {
		pDevice->DmaReadWriteCtrl &= 0xfffffff0;
	}
	if (pDevice->OneDmaAtOnce) {
		pDevice->DmaReadWriteCtrl |= DMA_CTRL_WRITE_ONE_DMA_AT_ONCE;
	}
	REG_WR (pDevice, PciCfg.DmaReadWriteCtrl, pDevice->DmaReadWriteCtrl);

	/* Sanity-check host<->NIC DMA with a 0x400-byte test transfer
	 * through the block just allocated. */
	if (LM_DmaTest (pDevice, pMemVirt, MemPhy, 0x400) != LM_STATUS_SUCCESS) {
		return LM_STATUS_FAILURE;
	}

	/* Carve the shared region into its sub-blocks.  pMemVirt and
	 * MemPhy are advanced in lockstep so each sub-block records both
	 * its virtual and its physical (DMA) address. */

	/* Status block. */
	pDevice->pStatusBlkVirt = (PT3_STATUS_BLOCK) pMemVirt;
	pDevice->StatusBlkPhy = MemPhy;
	pMemVirt += T3_STATUS_BLOCK_SIZE;
	LM_INC_PHYSICAL_ADDRESS (&MemPhy, T3_STATUS_BLOCK_SIZE);

	/* Statistics block. */
	pDevice->pStatsBlkVirt = (PT3_STATS_BLOCK) pMemVirt;
	pDevice->StatsBlkPhy = MemPhy;
	pMemVirt += sizeof (T3_STATS_BLOCK);
	LM_INC_PHYSICAL_ADDRESS (&MemPhy, sizeof (T3_STATS_BLOCK));

	/* Receive standard BD buffer. */
	pDevice->pRxStdBdVirt = (PT3_RCV_BD) pMemVirt;
	pDevice->RxStdBdPhy = MemPhy;
	pMemVirt += T3_STD_RCV_RCB_ENTRY_COUNT * sizeof (T3_RCV_BD);
	LM_INC_PHYSICAL_ADDRESS (&MemPhy,
				 T3_STD_RCV_RCB_ENTRY_COUNT *
				 sizeof (T3_RCV_BD));

#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
	/* Receive jumbo BD buffer. */
	pDevice->pRxJumboBdVirt = (PT3_RCV_BD) pMemVirt;
	pDevice->RxJumboBdPhy = MemPhy;
	pMemVirt += T3_JUMBO_RCV_RCB_ENTRY_COUNT * sizeof (T3_RCV_BD);
	LM_INC_PHYSICAL_ADDRESS (&MemPhy,
				 T3_JUMBO_RCV_RCB_ENTRY_COUNT *
				 sizeof (T3_RCV_BD));
#endif /* T3_JUMBO_RCV_RCB_ENTRY_COUNT */

	/* Receive return BD buffer. */
	pDevice->pRcvRetBdVirt = (PT3_RCV_BD) pMemVirt;
	pDevice->RcvRetBdPhy = MemPhy;
	pMemVirt += T3_RCV_RETURN_RCB_ENTRY_COUNT * sizeof (T3_RCV_BD);
	LM_INC_PHYSICAL_ADDRESS (&MemPhy,
				 T3_RCV_RETURN_RCB_ENTRY_COUNT *
				 sizeof (T3_RCV_BD));

	/* Set up Send BD. */
	if (pDevice->NicSendBd == FALSE) {
		/* Host-resident send ring: last slice of the shared block. */
		pDevice->pSendBdVirt = (PT3_SND_BD) pMemVirt;
		pDevice->SendBdPhy = MemPhy;
		pMemVirt += sizeof (T3_SND_BD) * T3_SEND_RCB_ENTRY_COUNT;
		LM_INC_PHYSICAL_ADDRESS (&MemPhy,
					 sizeof (T3_SND_BD) *
					 T3_SEND_RCB_ENTRY_COUNT);
	} else {
		/* Send ring lives in NIC internal memory. */
		pDevice->pSendBdVirt = (PT3_SND_BD)
		    pDevice->pMemView->uIntMem.First32k.BufferDesc;
		pDevice->SendBdPhy.High = 0;
		pDevice->SendBdPhy.Low = T3_NIC_SND_BUFFER_DESC_ADDR;
	}

	/* Allocate one contiguous block of (host-only) packet descriptors
	 * for both the transmit and receive queues. */
	Size = (pDevice->RxPacketDescCnt +
		pDevice->TxPacketDescCnt) * MM_PACKET_DESC_SIZE;
	Status = MM_AllocateMemory (pDevice, Size, (PLM_VOID *) & pPacket);
	if (Status != LM_STATUS_SUCCESS) {
		return Status;
	}
	pDevice->pPacketDescBase = (PLM_VOID) pPacket;

	/* Create transmit packet descriptors from the memory block and add them */
	/* to the TxPacketFreeQ for each send ring. */
	for (j = 0; j < pDevice->TxPacketDescCnt; j++) {
		/* Ring index. */
		pPacket->Flags = 0;

		/* Queue the descriptor in the TxPacketFreeQ of the 'k' ring. */
		QQ_PushTail (&pDevice->TxPacketFreeQ.Container, pPacket);

		/* Get the pointer to the next descriptor.  MM_PACKET_DESC_SIZE */
		/* is the total size of the packet descriptor including the */
		/* os-specific extensions in the UM_PACKET structure. */
		pPacket = (PLM_PACKET) ((PLM_UINT8) pPacket +
					MM_PACKET_DESC_SIZE);
	}			/* for(j.. */

	/* Create receive packet descriptors from the memory block and add them */
	/* to the RxPacketFreeQ.  Create the Standard packet descriptors. */
	for (j = 0; j < pDevice->RxStdDescCnt; j++) {
		/* Receive producer ring. */
		pPacket->u.Rx.RcvProdRing = T3_STD_RCV_PROD_RING;

		/* Receive buffer size. */
		pPacket->u.Rx.RxBufferSize = MAX_STD_RCV_BUFFER_SIZE;

		/* Add the descriptor to RxPacketFreeQ. */
		QQ_PushTail (&pDevice->RxPacketFreeQ.Container, pPacket);

		/* Get the pointer to the next descriptor.  MM_PACKET_DESC_SIZE */
		/* is the total size of the packet descriptor including the */
		/* os-specific extensions in the UM_PACKET structure. */
		pPacket = (PLM_PACKET) ((PLM_UINT8) pPacket +
					MM_PACKET_DESC_SIZE);
	}			/* for */

#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
	/* Create the Jumbo packet descriptors. */
	for (j = 0; j < pDevice->RxJumboDescCnt; j++) {
		/* Receive producer ring. */
		pPacket->u.Rx.RcvProdRing = T3_JUMBO_RCV_PROD_RING;

		/* Receive buffer size. */
		pPacket->u.Rx.RxBufferSize = pDevice->RxJumboBufferSize;

		/* Add the descriptor to RxPacketFreeQ.
*/ QQ_PushTail (&pDevice->RxPacketFreeQ.Container, pPacket); /* Get the pointer to the next descriptor. MM_PACKET_DESC_SIZE */ /* is the total size of the packet descriptor including the */ /* os-specific extensions in the UM_PACKET structure. */ pPacket = (PLM_PACKET) ((PLM_UINT8) pPacket + MM_PACKET_DESC_SIZE); } /* for */ #endif /* T3_JUMBO_RCV_RCB_ENTRY_COUNT */ /* Initialize the rest of the packet descriptors. */ Status = MM_InitializeUmPackets (pDevice); if (Status != LM_STATUS_SUCCESS) { return Status; } /* if */ /* Default receive mask. */ pDevice->ReceiveMask = LM_ACCEPT_MULTICAST | LM_ACCEPT_BROADCAST | LM_ACCEPT_UNICAST; /* Make sure we are in the first 32k memory window or NicSendBd. */ REG_WR (pDevice, PciCfg.MemWindowBaseAddr, 0); /* Initialize the hardware. */ Status = LM_ResetAdapter (pDevice); if (Status != LM_STATUS_SUCCESS) { return Status; } /* We are done with initialization. */ pDevice->InitDone = TRUE; return LM_STATUS_SUCCESS; } /* LM_InitializeAdapter */ /******************************************************************************/ /* Description: */ /* This function Enables/Disables a given block. */ /* */ /* Return: */ /* LM_STATUS_SUCCESS */ /******************************************************************************/ LM_STATUS LM_CntrlBlock (PLM_DEVICE_BLOCK pDevice, LM_UINT32 mask, LM_UINT32 cntrl) { LM_UINT32 j, i, data; LM_UINT32 MaxWaitCnt; MaxWaitCnt = 2; j = 0; for (i = 0; i < 32; i++) { if (!(mask & (1 << i))) continue; switch (1 << i) { case T3_BLOCK_DMA_RD: data = REG_RD (pDevice, DmaRead.Mode); if (cntrl == LM_DISABLE) { data &= ~DMA_READ_MODE_ENABLE; REG_WR (pDevice, DmaRead.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! 
(REG_RD (pDevice, DmaRead.Mode) & DMA_READ_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, DmaRead.Mode, data | DMA_READ_MODE_ENABLE); break; case T3_BLOCK_DMA_COMP: data = REG_RD (pDevice, DmaComp.Mode); if (cntrl == LM_DISABLE) { data &= ~DMA_COMP_MODE_ENABLE; REG_WR (pDevice, DmaComp.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, DmaComp.Mode) & DMA_COMP_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, DmaComp.Mode, data | DMA_COMP_MODE_ENABLE); break; case T3_BLOCK_RX_BD_INITIATOR: data = REG_RD (pDevice, RcvBdIn.Mode); if (cntrl == LM_DISABLE) { data &= ~RCV_BD_IN_MODE_ENABLE; REG_WR (pDevice, RcvBdIn.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, RcvBdIn.Mode) & RCV_BD_IN_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, RcvBdIn.Mode, data | RCV_BD_IN_MODE_ENABLE); break; case T3_BLOCK_RX_BD_COMP: data = REG_RD (pDevice, RcvBdComp.Mode); if (cntrl == LM_DISABLE) { data &= ~RCV_BD_COMP_MODE_ENABLE; REG_WR (pDevice, RcvBdComp.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, RcvBdComp.Mode) & RCV_BD_COMP_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, RcvBdComp.Mode, data | RCV_BD_COMP_MODE_ENABLE); break; case T3_BLOCK_DMA_WR: data = REG_RD (pDevice, DmaWrite.Mode); if (cntrl == LM_DISABLE) { data &= ~DMA_WRITE_MODE_ENABLE; REG_WR (pDevice, DmaWrite.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, DmaWrite.Mode) & DMA_WRITE_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, DmaWrite.Mode, data | DMA_WRITE_MODE_ENABLE); break; case T3_BLOCK_MSI_HANDLER: data = REG_RD (pDevice, Msi.Mode); if (cntrl == LM_DISABLE) { data &= ~MSI_MODE_ENABLE; REG_WR (pDevice, Msi.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! 
(REG_RD (pDevice, Msi.Mode) & MSI_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, Msi.Mode, data | MSI_MODE_ENABLE); break; case T3_BLOCK_RX_LIST_PLMT: data = REG_RD (pDevice, RcvListPlmt.Mode); if (cntrl == LM_DISABLE) { data &= ~RCV_LIST_PLMT_MODE_ENABLE; REG_WR (pDevice, RcvListPlmt.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, RcvListPlmt.Mode) & RCV_LIST_PLMT_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, RcvListPlmt.Mode, data | RCV_LIST_PLMT_MODE_ENABLE); break; case T3_BLOCK_RX_LIST_SELECTOR: data = REG_RD (pDevice, RcvListSel.Mode); if (cntrl == LM_DISABLE) { data &= ~RCV_LIST_SEL_MODE_ENABLE; REG_WR (pDevice, RcvListSel.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, RcvListSel.Mode) & RCV_LIST_SEL_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, RcvListSel.Mode, data | RCV_LIST_SEL_MODE_ENABLE); break; case T3_BLOCK_RX_DATA_INITIATOR: data = REG_RD (pDevice, RcvDataBdIn.Mode); if (cntrl == LM_DISABLE) { data &= ~RCV_DATA_BD_IN_MODE_ENABLE; REG_WR (pDevice, RcvDataBdIn.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, RcvDataBdIn.Mode) & RCV_DATA_BD_IN_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, RcvDataBdIn.Mode, data | RCV_DATA_BD_IN_MODE_ENABLE); break; case T3_BLOCK_RX_DATA_COMP: data = REG_RD (pDevice, RcvDataComp.Mode); if (cntrl == LM_DISABLE) { data &= ~RCV_DATA_COMP_MODE_ENABLE; REG_WR (pDevice, RcvDataComp.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, RcvDataBdIn.Mode) & RCV_DATA_COMP_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, RcvDataComp.Mode, data | RCV_DATA_COMP_MODE_ENABLE); break; case T3_BLOCK_HOST_COALESING: data = REG_RD (pDevice, HostCoalesce.Mode); if (cntrl == LM_DISABLE) { data &= ~HOST_COALESCE_ENABLE; REG_WR (pDevice, HostCoalesce.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! 
(REG_RD (pDevice, SndBdIn.Mode) & HOST_COALESCE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, HostCoalesce.Mode, data | HOST_COALESCE_ENABLE); break; case T3_BLOCK_MAC_RX_ENGINE: if (cntrl == LM_DISABLE) { pDevice->RxMode &= ~RX_MODE_ENABLE; REG_WR (pDevice, MacCtrl.RxMode, pDevice->RxMode); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, MacCtrl.RxMode) & RX_MODE_ENABLE)) { break; } MM_Wait (10); } } else { pDevice->RxMode |= RX_MODE_ENABLE; REG_WR (pDevice, MacCtrl.RxMode, pDevice->RxMode); } break; case T3_BLOCK_MBUF_CLUSTER_FREE: data = REG_RD (pDevice, MbufClusterFree.Mode); if (cntrl == LM_DISABLE) { data &= ~MBUF_CLUSTER_FREE_MODE_ENABLE; REG_WR (pDevice, MbufClusterFree.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, MbufClusterFree. Mode) & MBUF_CLUSTER_FREE_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, MbufClusterFree.Mode, data | MBUF_CLUSTER_FREE_MODE_ENABLE); break; case T3_BLOCK_SEND_BD_INITIATOR: data = REG_RD (pDevice, SndBdIn.Mode); if (cntrl == LM_DISABLE) { data &= ~SND_BD_IN_MODE_ENABLE; REG_WR (pDevice, SndBdIn.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, SndBdIn.Mode) & SND_BD_IN_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, SndBdIn.Mode, data | SND_BD_IN_MODE_ENABLE); break; case T3_BLOCK_SEND_BD_COMP: data = REG_RD (pDevice, SndBdComp.Mode); if (cntrl == LM_DISABLE) { data &= ~SND_BD_COMP_MODE_ENABLE; REG_WR (pDevice, SndBdComp.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, SndBdComp.Mode) & SND_BD_COMP_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, SndBdComp.Mode, data | SND_BD_COMP_MODE_ENABLE); break; case T3_BLOCK_SEND_BD_SELECTOR: data = REG_RD (pDevice, SndBdSel.Mode); if (cntrl == LM_DISABLE) { data &= ~SND_BD_SEL_MODE_ENABLE; REG_WR (pDevice, SndBdSel.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! 
(REG_RD (pDevice, SndBdSel.Mode) & SND_BD_SEL_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, SndBdSel.Mode, data | SND_BD_SEL_MODE_ENABLE); break; case T3_BLOCK_SEND_DATA_INITIATOR: data = REG_RD (pDevice, SndDataIn.Mode); if (cntrl == LM_DISABLE) { data &= ~T3_SND_DATA_IN_MODE_ENABLE; REG_WR (pDevice, SndDataIn.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, SndDataIn.Mode) & T3_SND_DATA_IN_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, SndDataIn.Mode, data | T3_SND_DATA_IN_MODE_ENABLE); break; case T3_BLOCK_SEND_DATA_COMP: data = REG_RD (pDevice, SndDataComp.Mode); if (cntrl == LM_DISABLE) { data &= ~SND_DATA_COMP_MODE_ENABLE; REG_WR (pDevice, SndDataComp.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, SndDataComp.Mode) & SND_DATA_COMP_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, SndDataComp.Mode, data | SND_DATA_COMP_MODE_ENABLE); break; case T3_BLOCK_MAC_TX_ENGINE: if (cntrl == LM_DISABLE) { pDevice->TxMode &= ~TX_MODE_ENABLE; REG_WR (pDevice, MacCtrl.TxMode, pDevice->TxMode); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, MacCtrl.TxMode) & TX_MODE_ENABLE)) break; MM_Wait (10); } } else { pDevice->TxMode |= TX_MODE_ENABLE; REG_WR (pDevice, MacCtrl.TxMode, pDevice->TxMode); } break; case T3_BLOCK_MEM_ARBITOR: data = REG_RD (pDevice, MemArbiter.Mode); if (cntrl == LM_DISABLE) { data &= ~T3_MEM_ARBITER_MODE_ENABLE; REG_WR (pDevice, MemArbiter.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! (REG_RD (pDevice, MemArbiter.Mode) & T3_MEM_ARBITER_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, MemArbiter.Mode, data | T3_MEM_ARBITER_MODE_ENABLE); break; case T3_BLOCK_MBUF_MANAGER: data = REG_RD (pDevice, BufMgr.Mode); if (cntrl == LM_DISABLE) { data &= ~BUFMGR_MODE_ENABLE; REG_WR (pDevice, BufMgr.Mode, data); for (j = 0; j < MaxWaitCnt; j++) { if (! 
(REG_RD (pDevice, BufMgr.Mode) & BUFMGR_MODE_ENABLE)) break; MM_Wait (10); } } else REG_WR (pDevice, BufMgr.Mode, data | BUFMGR_MODE_ENABLE); break; case T3_BLOCK_MAC_GLOBAL: if (cntrl == LM_DISABLE) { pDevice->MacMode &= ~(MAC_MODE_ENABLE_TDE | MAC_MODE_ENABLE_RDE | MAC_MODE_ENABLE_FHDE); } else { pDevice->MacMode |= (MAC_MODE_ENABLE_TDE | MAC_MODE_ENABLE_RDE | MAC_MODE_ENABLE_FHDE); } REG_WR (pDevice, MacCtrl.Mode, pDevice->MacMode); break; default: return LM_STATUS_FAILURE; } /* switch */ if (j >= MaxWaitCnt) { return LM_STATUS_FAILURE; } } return LM_STATUS_SUCCESS; } /******************************************************************************/ /* Description: */ /* This function reinitializes the adapter. */ /* */ /* Return: */ /* LM_STATUS_SUCCESS */ /******************************************************************************/ LM_STATUS LM_ResetAdapter (PLM_DEVICE_BLOCK pDevice) { LM_UINT32 Value32; LM_UINT16 Value16; LM_UINT32 j, k; /* Disable interrupt. */ LM_DisableInterrupt (pDevice); /* May get a spurious interrupt */ pDevice->pStatusBlkVirt->Status = STATUS_BLOCK_UPDATED; /* Disable transmit and receive DMA engines. Abort all pending requests. */ if (pDevice->InitDone) { LM_Abort (pDevice); } pDevice->ShuttingDown = FALSE; LM_ResetChip (pDevice); /* Bug: Athlon fix for B3 silicon only. This bit does not do anything */ /* in other chip revisions. */ if (pDevice->DelayPciGrant) { Value32 = REG_RD (pDevice, PciCfg.ClockCtrl); REG_WR (pDevice, PciCfg.ClockCtrl, Value32 | BIT_31); } if (pDevice->ChipRevId == T3_CHIP_ID_5704_A0) { if (!(pDevice->PciState & T3_PCI_STATE_CONVENTIONAL_PCI_MODE)) { Value32 = REG_RD (pDevice, PciCfg.PciState); Value32 |= T3_PCI_STATE_RETRY_SAME_DMA; REG_WR (pDevice, PciCfg.PciState, Value32); } } /* Enable TaggedStatus mode. */ if (pDevice->UseTaggedStatus) { pDevice->MiscHostCtrl |= MISC_HOST_CTRL_ENABLE_TAGGED_STATUS_MODE; } /* Restore PCI configuration registers. 
*/
    MM_WriteConfig32 (pDevice, PCI_CACHE_LINE_SIZE_REG,
                      pDevice->SavedCacheLineReg);
    MM_WriteConfig32 (pDevice, PCI_SUBSYSTEM_VENDOR_ID_REG,
                      (pDevice->SubsystemId << 16) |
                      pDevice->SubsystemVendorId);

    /* Clear the statistics block (NIC-local memory 0x300..0xaff). */
    for (j = 0x0300; j < 0x0b00; j++) {
        MEM_WR_OFFSET (pDevice, j, 0);
    }

    /* Initialize the status block. */
    pDevice->pStatusBlkVirt->Status = 0;
    pDevice->pStatusBlkVirt->RcvStdConIdx = 0;
    pDevice->pStatusBlkVirt->RcvJumboConIdx = 0;
    pDevice->pStatusBlkVirt->RcvMiniConIdx = 0;

    for (j = 0; j < 16; j++) {
        pDevice->pStatusBlkVirt->Idx[j].RcvProdIdx = 0;
        pDevice->pStatusBlkVirt->Idx[j].SendConIdx = 0;
    }

    /* Clear host addresses in the standard receive BD ring. */
    for (k = 0; k < T3_STD_RCV_RCB_ENTRY_COUNT; k++) {
        pDevice->pRxStdBdVirt[k].HostAddr.High = 0;
        pDevice->pRxStdBdVirt[k].HostAddr.Low = 0;
    }

#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
    /* Receive jumbo BD buffer. */
    for (k = 0; k < T3_JUMBO_RCV_RCB_ENTRY_COUNT; k++) {
        pDevice->pRxJumboBdVirt[k].HostAddr.High = 0;
        pDevice->pRxJumboBdVirt[k].HostAddr.Low = 0;
    }
#endif

    REG_WR (pDevice, PciCfg.DmaReadWriteCtrl, pDevice->DmaReadWriteCtrl);

    /* GRC mode control register: byte/word swap policy depends on the
     * host and bus endianness. */
#ifdef BIG_ENDIAN_PCI            /* Jimmy, this ifdef block deleted in new code! */
    Value32 = GRC_MODE_WORD_SWAP_DATA |
        GRC_MODE_WORD_SWAP_NON_FRAME_DATA |
        GRC_MODE_INT_ON_MAC_ATTN | GRC_MODE_HOST_STACK_UP;
#else
    /* No CPU Swap modes for PCI IO */
    Value32 =
#ifdef BIG_ENDIAN_HOST
        GRC_MODE_BYTE_SWAP_NON_FRAME_DATA |
        GRC_MODE_WORD_SWAP_NON_FRAME_DATA |
        GRC_MODE_BYTE_SWAP_DATA | GRC_MODE_WORD_SWAP_DATA |
#else
        GRC_MODE_WORD_SWAP_NON_FRAME_DATA |
        GRC_MODE_BYTE_SWAP_DATA | GRC_MODE_WORD_SWAP_DATA |
#endif
        GRC_MODE_INT_ON_MAC_ATTN | GRC_MODE_HOST_STACK_UP;
#endif /* !BIG_ENDIAN_PCI */

    /* Configure send BD mode: host-resident vs NIC-resident send rings. */
    if (pDevice->NicSendBd == FALSE) {
        Value32 |= GRC_MODE_HOST_SEND_BDS;
    } else {
        Value32 |= GRC_MODE_4X_NIC_BASED_SEND_RINGS;
    }

    /* Configure pseudo checksum mode.
*/
    if (pDevice->NoTxPseudoHdrChksum) {
        Value32 |= GRC_MODE_TX_NO_PSEUDO_HEADER_CHKSUM;
    }

    if (pDevice->NoRxPseudoHdrChksum) {
        Value32 |= GRC_MODE_RX_NO_PSEUDO_HEADER_CHKSUM;
    }

    REG_WR (pDevice, Grc.Mode, Value32);

    /* Setup the timer prescalar register. */
    REG_WR (pDevice, Grc.MiscCfg, 65 << 1);    /* Clock is always 66MHz. */

    /* Set up the MBUF pool base address and size. */
    REG_WR (pDevice, BufMgr.MbufPoolAddr, pDevice->MbufBase);
    REG_WR (pDevice, BufMgr.MbufPoolSize, pDevice->MbufSize);

    /* Set up the DMA descriptor pool base address and size. */
    REG_WR (pDevice, BufMgr.DmaDescPoolAddr, T3_NIC_DMA_DESC_POOL_ADDR);
    REG_WR (pDevice, BufMgr.DmaDescPoolSize, T3_NIC_DMA_DESC_POOL_SIZE);

    /* Configure MBUF and Threshold watermarks.  A zero in the device block
     * field means "use the compiled-in default", which differs for jumbo
     * MTU. */

    /* Configure the DMA read MBUF low water mark. */
    if (pDevice->DmaMbufLowMark) {
        REG_WR (pDevice, BufMgr.MbufReadDmaLowWaterMark,
                pDevice->DmaMbufLowMark);
    } else {
        if (pDevice->TxMtu < MAX_ETHERNET_PACKET_BUFFER_SIZE) {
            REG_WR (pDevice, BufMgr.MbufReadDmaLowWaterMark,
                    T3_DEF_DMA_MBUF_LOW_WMARK);
        } else {
            REG_WR (pDevice, BufMgr.MbufReadDmaLowWaterMark,
                    T3_DEF_DMA_MBUF_LOW_WMARK_JUMBO);
        }
    }

    /* Configure the MAC Rx MBUF low water mark. */
    if (pDevice->RxMacMbufLowMark) {
        REG_WR (pDevice, BufMgr.MbufMacRxLowWaterMark,
                pDevice->RxMacMbufLowMark);
    } else {
        if (pDevice->TxMtu < MAX_ETHERNET_PACKET_BUFFER_SIZE) {
            REG_WR (pDevice, BufMgr.MbufMacRxLowWaterMark,
                    T3_DEF_RX_MAC_MBUF_LOW_WMARK);
        } else {
            REG_WR (pDevice, BufMgr.MbufMacRxLowWaterMark,
                    T3_DEF_RX_MAC_MBUF_LOW_WMARK_JUMBO);
        }
    }

    /* Configure the MBUF high water mark.
*/
    if (pDevice->MbufHighMark) {
        REG_WR (pDevice, BufMgr.MbufHighWaterMark, pDevice->MbufHighMark);
    } else {
        if (pDevice->TxMtu < MAX_ETHERNET_PACKET_BUFFER_SIZE) {
            REG_WR (pDevice, BufMgr.MbufHighWaterMark,
                    T3_DEF_MBUF_HIGH_WMARK);
        } else {
            REG_WR (pDevice, BufMgr.MbufHighWaterMark,
                    T3_DEF_MBUF_HIGH_WMARK_JUMBO);
        }
    }

    REG_WR (pDevice, BufMgr.DmaLowWaterMark, T3_DEF_DMA_DESC_LOW_WMARK);
    REG_WR (pDevice, BufMgr.DmaHighWaterMark, T3_DEF_DMA_DESC_HIGH_WMARK);

    /* Enable buffer manager and poll until the ENABLE bit reads back. */
    REG_WR (pDevice, BufMgr.Mode,
            BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);

    for (j = 0; j < 2000; j++) {
        if (REG_RD (pDevice, BufMgr.Mode) & BUFMGR_MODE_ENABLE)
            break;
        MM_Wait (10);
    }

    if (j >= 2000) {
        return LM_STATUS_FAILURE;    /* buffer manager never came up */
    }

    /* Enable the FTQs (pulse reset, then wait for it to clear). */
    REG_WR (pDevice, Ftq.Reset, 0xffffffff);
    REG_WR (pDevice, Ftq.Reset, 0);

    /* Wait until FTQ is ready */
    for (j = 0; j < 2000; j++) {
        if (REG_RD (pDevice, Ftq.Reset) == 0)
            break;
        MM_Wait (10);
    }

    if (j >= 2000) {
        return LM_STATUS_FAILURE;
    }

    /* Initialize the Standard Receive RCB. */
    REG_WR (pDevice, RcvDataBdIn.StdRcvRcb.HostRingAddr.High,
            pDevice->RxStdBdPhy.High);
    REG_WR (pDevice, RcvDataBdIn.StdRcvRcb.HostRingAddr.Low,
            pDevice->RxStdBdPhy.Low);
    REG_WR (pDevice, RcvDataBdIn.StdRcvRcb.u.MaxLen_Flags,
            MAX_STD_RCV_BUFFER_SIZE << 16);

    /* Initialize the Jumbo Receive RCB. */
    REG_WR (pDevice, RcvDataBdIn.JumboRcvRcb.u.MaxLen_Flags,
            T3_RCB_FLAG_RING_DISABLED);
#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
    REG_WR (pDevice, RcvDataBdIn.JumboRcvRcb.HostRingAddr.High,
            pDevice->RxJumboBdPhy.High);
    REG_WR (pDevice, RcvDataBdIn.JumboRcvRcb.HostRingAddr.Low,
            pDevice->RxJumboBdPhy.Low);
    REG_WR (pDevice, RcvDataBdIn.JumboRcvRcb.u.MaxLen_Flags, 0);
#endif /* T3_JUMBO_RCV_RCB_ENTRY_COUNT */

    /* Initialize the Mini Receive RCB.
*/
    REG_WR (pDevice, RcvDataBdIn.MiniRcvRcb.u.MaxLen_Flags,
            T3_RCB_FLAG_RING_DISABLED);

    {
        REG_WR (pDevice, RcvDataBdIn.StdRcvRcb.NicRingAddr,
                (LM_UINT32) T3_NIC_STD_RCV_BUFFER_DESC_ADDR);
        REG_WR (pDevice, RcvDataBdIn.JumboRcvRcb.NicRingAddr,
                (LM_UINT32) T3_NIC_JUMBO_RCV_BUFFER_DESC_ADDR);
    }

    /* Receive BD Ring replenish threshold. */
    REG_WR (pDevice, RcvBdIn.StdRcvThreshold, pDevice->RxStdDescCnt / 8);
#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
    REG_WR (pDevice, RcvBdIn.JumboRcvThreshold,
            pDevice->RxJumboDescCnt / 8);
#endif /* T3_JUMBO_RCV_RCB_ENTRY_COUNT */

    /* Disable all the unused rings. */
    for (j = 0; j < T3_MAX_SEND_RCB_COUNT; j++) {
        MEM_WR (pDevice, SendRcb[j].u.MaxLen_Flags,
                T3_RCB_FLAG_RING_DISABLED);
    }    /* for */

    /* Initialize the indices. */
    pDevice->SendProdIdx = 0;
    pDevice->SendConIdx = 0;

    MB_REG_WR (pDevice, Mailbox.SendHostProdIdx[0].Low, 0);
    MB_REG_WR (pDevice, Mailbox.SendNicProdIdx[0].Low, 0);

    /* Set up host or NIC based send RCB. */
    if (pDevice->NicSendBd == FALSE) {
        MEM_WR (pDevice, SendRcb[0].HostRingAddr.High,
                pDevice->SendBdPhy.High);
        MEM_WR (pDevice, SendRcb[0].HostRingAddr.Low,
                pDevice->SendBdPhy.Low);

        /* Set up the NIC ring address in the RCB. */
        MEM_WR (pDevice, SendRcb[0].NicRingAddr,
                T3_NIC_SND_BUFFER_DESC_ADDR);

        /* Setup the RCB.
*/
        MEM_WR (pDevice, SendRcb[0].u.MaxLen_Flags,
                T3_SEND_RCB_ENTRY_COUNT << 16);

        for (k = 0; k < T3_SEND_RCB_ENTRY_COUNT; k++) {
            pDevice->pSendBdVirt[k].HostAddr.High = 0;
            pDevice->pSendBdVirt[k].HostAddr.Low = 0;
        }
    } else {
        MEM_WR (pDevice, SendRcb[0].HostRingAddr.High, 0);
        MEM_WR (pDevice, SendRcb[0].HostRingAddr.Low, 0);
        MEM_WR (pDevice, SendRcb[0].NicRingAddr, pDevice->SendBdPhy.Low);

        /* NIC-resident BDs are cleared through uncached writes; the shadow
         * copy lets later sends skip redundant MMIO writes. */
        for (k = 0; k < T3_SEND_RCB_ENTRY_COUNT; k++) {
            __raw_writel (0, &(pDevice->pSendBdVirt[k].HostAddr.High));
            __raw_writel (0, &(pDevice->pSendBdVirt[k].HostAddr.Low));
            __raw_writel (0, &(pDevice->pSendBdVirt[k].u1.Len_Flags));
            pDevice->ShadowSendBd[k].HostAddr.High = 0;
            pDevice->ShadowSendBd[k].u1.Len_Flags = 0;
        }
    }
    atomic_set (&pDevice->SendBdLeft, T3_SEND_RCB_ENTRY_COUNT - 1);

    /* Configure the receive return rings. */
    for (j = 0; j < T3_MAX_RCV_RETURN_RCB_COUNT; j++) {
        MEM_WR (pDevice, RcvRetRcb[j].u.MaxLen_Flags,
                T3_RCB_FLAG_RING_DISABLED);
    }

    pDevice->RcvRetConIdx = 0;

    MEM_WR (pDevice, RcvRetRcb[0].HostRingAddr.High,
            pDevice->RcvRetBdPhy.High);
    MEM_WR (pDevice, RcvRetRcb[0].HostRingAddr.Low,
            pDevice->RcvRetBdPhy.Low);

    /* Set up the NIC ring address in the RCB. */
    /* Not very clear from the spec.  I am guessing that for Receive */
    /* Return Ring, NicRingAddr is not used. */
    MEM_WR (pDevice, RcvRetRcb[0].NicRingAddr, 0);

    /* Setup the RCB. */
    MEM_WR (pDevice, RcvRetRcb[0].u.MaxLen_Flags,
            T3_RCV_RETURN_RCB_ENTRY_COUNT << 16);

    /* Reinitialize RX ring producer index */
    MB_REG_WR (pDevice, Mailbox.RcvStdProdIdx.Low, 0);
    MB_REG_WR (pDevice, Mailbox.RcvJumboProdIdx.Low, 0);
    MB_REG_WR (pDevice, Mailbox.RcvMiniProdIdx.Low, 0);

#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
    pDevice->RxJumboProdIdx = 0;
    pDevice->RxJumboQueuedCnt = 0;
#endif

    /* Reinitialize our copy of the indices. */
    pDevice->RxStdProdIdx = 0;
    pDevice->RxStdQueuedCnt = 0;

#if T3_JUMBO_RCV_ENTRY_COUNT
    pDevice->RxJumboProdIdx = 0;
#endif /* T3_JUMBO_RCV_ENTRY_COUNT */

    /* Configure the MAC address.
*/
    LM_SetMacAddress (pDevice);

    /* Initialize the transmit random backoff seed from the MAC address. */
    Value32 = (pDevice->NodeAddress[0] + pDevice->NodeAddress[1] +
               pDevice->NodeAddress[2] + pDevice->NodeAddress[3] +
               pDevice->NodeAddress[4] + pDevice->NodeAddress[5]) &
        MAC_TX_BACKOFF_SEED_MASK;
    REG_WR (pDevice, MacCtrl.TxBackoffSeed, Value32);

    /* Receive MTU.  Frames larger than the MTU are marked as oversized. */
    REG_WR (pDevice, MacCtrl.MtuSize, pDevice->RxMtu + 8);    /* CRC + VLAN. */

    /* Configure Time slot/IPG per 802.3 */
    REG_WR (pDevice, MacCtrl.TxLengths, 0x2620);

    /*
     * Configure Receive Rules so that packets don't match
     * Programmable rule will be queued to Return Ring 1
     */
    REG_WR (pDevice, MacCtrl.RcvRuleCfg, RX_RULE_DEFAULT_CLASS);

    /*
     * Configure to have 16 Classes of Services (COS) and one
     * queue per class.  Bad frames are queued to RRR#1.
     * And frames that don't match rules are also queued to COS#1.
     */
    REG_WR (pDevice, RcvListPlmt.Config, 0x181);

    /* Enable Receive Placement Statistics */
    REG_WR (pDevice, RcvListPlmt.StatsEnableMask, 0xffffff);
    REG_WR (pDevice, RcvListPlmt.StatsCtrl, RCV_LIST_STATS_ENABLE);

    /* Enable Send Data Initiator Statistics */
    REG_WR (pDevice, SndDataIn.StatsEnableMask, 0xffffff);
    REG_WR (pDevice, SndDataIn.StatsCtrl,
            T3_SND_DATA_IN_STATS_CTRL_ENABLE |
            T3_SND_DATA_IN_STATS_CTRL_FASTER_UPDATE);

    /* Disable the host coalescing state machine before configuring its */
    /* parameters. */
    REG_WR (pDevice, HostCoalesce.Mode, 0);
    for (j = 0; j < 2000; j++) {
        Value32 = REG_RD (pDevice, HostCoalesce.Mode);
        if (!(Value32 & HOST_COALESCE_ENABLE)) {
            break;
        }
        MM_Wait (10);
    }

    /* Host coalescing configurations.
*/
    REG_WR (pDevice, HostCoalesce.RxCoalescingTicks,
            pDevice->RxCoalescingTicks);
    REG_WR (pDevice, HostCoalesce.TxCoalescingTicks,
            pDevice->TxCoalescingTicks);
    REG_WR (pDevice, HostCoalesce.RxMaxCoalescedFrames,
            pDevice->RxMaxCoalescedFrames);
    REG_WR (pDevice, HostCoalesce.TxMaxCoalescedFrames,
            pDevice->TxMaxCoalescedFrames);
    REG_WR (pDevice, HostCoalesce.RxCoalescedTickDuringInt,
            pDevice->RxCoalescingTicksDuringInt);
    REG_WR (pDevice, HostCoalesce.TxCoalescedTickDuringInt,
            pDevice->TxCoalescingTicksDuringInt);
    REG_WR (pDevice, HostCoalesce.RxMaxCoalescedFramesDuringInt,
            pDevice->RxMaxCoalescedFramesDuringInt);
    REG_WR (pDevice, HostCoalesce.TxMaxCoalescedFramesDuringInt,
            pDevice->TxMaxCoalescedFramesDuringInt);

    /* Initialize the address of the status block.  The NIC will DMA */
    /* the status block to this memory which resides on the host. */
    REG_WR (pDevice, HostCoalesce.StatusBlkHostAddr.High,
            pDevice->StatusBlkPhy.High);
    REG_WR (pDevice, HostCoalesce.StatusBlkHostAddr.Low,
            pDevice->StatusBlkPhy.Low);

    /* Initialize the address of the statistics block.  The NIC will DMA */
    /* the statistics to this block of memory. */
    REG_WR (pDevice, HostCoalesce.StatsBlkHostAddr.High,
            pDevice->StatsBlkPhy.High);
    REG_WR (pDevice, HostCoalesce.StatsBlkHostAddr.Low,
            pDevice->StatsBlkPhy.Low);

    REG_WR (pDevice, HostCoalesce.StatsCoalescingTicks,
            pDevice->StatsCoalescingTicks);

    REG_WR (pDevice, HostCoalesce.StatsBlkNicAddr, 0x300);
    REG_WR (pDevice, HostCoalesce.StatusBlkNicAddr, 0xb00);

    /* Enable Host Coalescing state machine */
    REG_WR (pDevice, HostCoalesce.Mode,
            HOST_COALESCE_ENABLE | pDevice->CoalesceMode);

    /* Enable the Receive BD Completion state machine. */
    REG_WR (pDevice, RcvBdComp.Mode,
            RCV_BD_COMP_MODE_ENABLE | RCV_BD_COMP_MODE_ATTN_ENABLE);

    /* Enable the Receive List Placement state machine. */
    REG_WR (pDevice, RcvListPlmt.Mode, RCV_LIST_PLMT_MODE_ENABLE);

    /* Enable the Receive List Selector state machine.
*/
    REG_WR (pDevice, RcvListSel.Mode,
            RCV_LIST_SEL_MODE_ENABLE | RCV_LIST_SEL_MODE_ATTN_ENABLE);

    /* Enable transmit DMA, clear statistics. */
    pDevice->MacMode = MAC_MODE_ENABLE_TX_STATISTICS |
        MAC_MODE_ENABLE_RX_STATISTICS | MAC_MODE_ENABLE_TDE |
        MAC_MODE_ENABLE_RDE | MAC_MODE_ENABLE_FHDE;
    REG_WR (pDevice, MacCtrl.Mode,
            pDevice->MacMode |
            MAC_MODE_CLEAR_RX_STATISTICS | MAC_MODE_CLEAR_TX_STATISTICS);

    /* GRC miscellaneous local control register. */
    pDevice->GrcLocalCtrl =
        GRC_MISC_LOCAL_CTRL_INT_ON_ATTN | GRC_MISC_LOCAL_CTRL_AUTO_SEEPROM;

    if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700) {
        pDevice->GrcLocalCtrl |=
            GRC_MISC_LOCAL_CTRL_GPIO_OE1 | GRC_MISC_LOCAL_CTRL_GPIO_OUTPUT1;
    }

    REG_WR (pDevice, Grc.LocalCtrl, pDevice->GrcLocalCtrl);
    MM_Wait (40);

    /* Reset RX counters (byte-wise clear of the counter struct). */
    for (j = 0; j < sizeof (LM_RX_COUNTERS); j++) {
        ((PLM_UINT8) & pDevice->RxCounters)[j] = 0;
    }

    /* Reset TX counters. */
    for (j = 0; j < sizeof (LM_TX_COUNTERS); j++) {
        ((PLM_UINT8) & pDevice->TxCounters)[j] = 0;
    }

    MB_REG_WR (pDevice, Mailbox.Interrupt[0].Low, 0);

    /* Enable the DMA Completion state machine. */
    REG_WR (pDevice, DmaComp.Mode, DMA_COMP_MODE_ENABLE);

    /* Enable the DMA Write state machine.
*/
    Value32 = DMA_WRITE_MODE_ENABLE |
        DMA_WRITE_MODE_TARGET_ABORT_ATTN_ENABLE |
        DMA_WRITE_MODE_MASTER_ABORT_ATTN_ENABLE |
        DMA_WRITE_MODE_PARITY_ERROR_ATTN_ENABLE |
        DMA_WRITE_MODE_ADDR_OVERFLOW_ATTN_ENABLE |
        DMA_WRITE_MODE_FIFO_OVERRUN_ATTN_ENABLE |
        DMA_WRITE_MODE_FIFO_UNDERRUN_ATTN_ENABLE |
        DMA_WRITE_MODE_FIFO_OVERREAD_ATTN_ENABLE |
        DMA_WRITE_MODE_LONG_READ_ATTN_ENABLE;
    REG_WR (pDevice, DmaWrite.Mode, Value32);

    /* 5704 A0 PCI-X: tune max-burst / max-split in the PCI-X command reg. */
    if (!(pDevice->PciState & T3_PCI_STATE_CONVENTIONAL_PCI_MODE)) {
        if (pDevice->ChipRevId == T3_CHIP_ID_5704_A0) {
            Value16 = REG_RD (pDevice, PciCfg.PciXCommand);
            Value16 &= ~(PCIX_CMD_MAX_SPLIT_MASK | PCIX_CMD_MAX_BURST_MASK);
            Value16 |=
                ((PCIX_CMD_MAX_BURST_CPIOB << PCIX_CMD_MAX_BURST_SHL) &
                 PCIX_CMD_MAX_BURST_MASK);
            if (pDevice->SplitModeEnable == SPLIT_MODE_ENABLE) {
                Value16 |=
                    (pDevice->SplitModeMaxReq << PCIX_CMD_MAX_SPLIT_SHL) &
                    PCIX_CMD_MAX_SPLIT_MASK;
            }
            REG_WR (pDevice, PciCfg.PciXCommand, Value16);
        }
    }

    /* Enable the Read DMA state machine. */
    Value32 = DMA_READ_MODE_ENABLE |
        DMA_READ_MODE_TARGET_ABORT_ATTN_ENABLE |
        DMA_READ_MODE_MASTER_ABORT_ATTN_ENABLE |
        DMA_READ_MODE_PARITY_ERROR_ATTN_ENABLE |
        DMA_READ_MODE_ADDR_OVERFLOW_ATTN_ENABLE |
        DMA_READ_MODE_FIFO_OVERRUN_ATTN_ENABLE |
        DMA_READ_MODE_FIFO_UNDERRUN_ATTN_ENABLE |
        DMA_READ_MODE_FIFO_OVERREAD_ATTN_ENABLE |
        DMA_READ_MODE_LONG_READ_ATTN_ENABLE;

    if (pDevice->SplitModeEnable == SPLIT_MODE_ENABLE) {
        Value32 |= DMA_READ_MODE_SPLIT_ENABLE;
    }
    REG_WR (pDevice, DmaRead.Mode, Value32);

    /* Enable the Receive Data Completion state machine. */
    REG_WR (pDevice, RcvDataComp.Mode,
            RCV_DATA_COMP_MODE_ENABLE | RCV_DATA_COMP_MODE_ATTN_ENABLE);

    /* Enable the Mbuf Cluster Free state machine. */
    REG_WR (pDevice, MbufClusterFree.Mode, MBUF_CLUSTER_FREE_MODE_ENABLE);

    /* Enable the Send Data Completion state machine. */
    REG_WR (pDevice, SndDataComp.Mode, SND_DATA_COMP_MODE_ENABLE);

    /* Enable the Send BD Completion state machine.
*/
    REG_WR (pDevice, SndBdComp.Mode,
            SND_BD_COMP_MODE_ENABLE | SND_BD_COMP_MODE_ATTN_ENABLE);

    /* Enable the Receive BD Initiator state machine. */
    REG_WR (pDevice, RcvBdIn.Mode,
            RCV_BD_IN_MODE_ENABLE |
            RCV_BD_IN_MODE_BD_IN_DIABLED_RCB_ATTN_ENABLE);

    /* Enable the Receive Data and Receive BD Initiator state machine. */
    REG_WR (pDevice, RcvDataBdIn.Mode,
            RCV_DATA_BD_IN_MODE_ENABLE | RCV_DATA_BD_IN_MODE_INVALID_RING_SIZE);

    /* Enable the Send Data Initiator state machine. */
    REG_WR (pDevice, SndDataIn.Mode, T3_SND_DATA_IN_MODE_ENABLE);

    /* Enable the Send BD Initiator state machine. */
    REG_WR (pDevice, SndBdIn.Mode,
            SND_BD_IN_MODE_ENABLE | SND_BD_IN_MODE_ATTN_ENABLE);

    /* Enable the Send BD Selector state machine. */
    REG_WR (pDevice, SndBdSel.Mode,
            SND_BD_SEL_MODE_ENABLE | SND_BD_SEL_MODE_ATTN_ENABLE);

#if INCLUDE_5701_AX_FIX
    /* Load the firmware for the 5701_A0 workaround. */
    if (pDevice->ChipRevId == T3_CHIP_ID_5701_A0) {
        LM_LoadRlsFirmware (pDevice);
    }
#endif

    /* Enable the transmitter. */
    pDevice->TxMode = TX_MODE_ENABLE;
    REG_WR (pDevice, MacCtrl.TxMode, pDevice->TxMode);

    /* Enable the receiver. */
    pDevice->RxMode = RX_MODE_ENABLE;
    REG_WR (pDevice, MacCtrl.RxMode, pDevice->RxMode);

    if (pDevice->RestoreOnWakeUp) {
        pDevice->RestoreOnWakeUp = FALSE;
        pDevice->DisableAutoNeg = pDevice->WakeUpDisableAutoNeg;
        pDevice->RequestedMediaType = pDevice->WakeUpRequestedMediaType;
    }

    /* Disable auto polling.
*/
    pDevice->MiMode = 0xc0000;
    REG_WR (pDevice, MacCtrl.MiMode, pDevice->MiMode);

    /* LED mode: 5700/5701 only support PHY mode 1. */
    if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700 ||
        T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5701) {
        Value32 = LED_CTRL_PHY_MODE_1;
    } else {
        if (pDevice->LedMode == LED_MODE_OUTPUT) {
            Value32 = LED_CTRL_PHY_MODE_2;
        } else {
            Value32 = LED_CTRL_PHY_MODE_1;
        }
    }
    REG_WR (pDevice, MacCtrl.LedCtrl, Value32);

    /* Activate Link to enable MAC state machine */
    REG_WR (pDevice, MacCtrl.MiStatus, MI_STATUS_ENABLE_LINK_STATUS_ATTN);

    if (pDevice->EnableTbi) {
        REG_WR (pDevice, MacCtrl.RxMode, RX_MODE_RESET);
        MM_Wait (10);
        REG_WR (pDevice, MacCtrl.RxMode, pDevice->RxMode);
        if (pDevice->ChipRevId == T3_CHIP_ID_5703_A1) {
            REG_WR (pDevice, MacCtrl.SerdesCfg, 0x616000);
        }
    }

    /* Setup the phy chip. */
    LM_SetupPhy (pDevice);

    if (!pDevice->EnableTbi) {
        /* Clear CRC stats (PHY regs 0x1e/0x14; copper PHY only). */
        LM_ReadPhy (pDevice, 0x1e, &Value32);
        LM_WritePhy (pDevice, 0x1e, Value32 | 0x8000);
        LM_ReadPhy (pDevice, 0x14, &Value32);
    }

    /* Set up the receive mask. */
    LM_SetReceiveMask (pDevice, pDevice->ReceiveMask);

    /* Queue Rx packet buffers. */
    if (pDevice->QueueRxPackets) {
        LM_QueueRxPackets (pDevice);
    }

    /* Enable interrupt to the host. */
    if (pDevice->InitDone) {
        LM_EnableInterrupt (pDevice);
    }

    return LM_STATUS_SUCCESS;
}    /* LM_ResetAdapter */

/******************************************************************************/
/* Description:                                                               */
/*    This routine disables the adapter from generating interrupts by masking */
/*    the PCI interrupt and writing a non-zero value to the interrupt         */
/*    mailbox.                                                                */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
LM_STATUS LM_DisableInterrupt (PLM_DEVICE_BLOCK pDevice)
{
    REG_WR (pDevice, PciCfg.MiscHostCtrl,
            pDevice->MiscHostCtrl | MISC_HOST_CTRL_MASK_PCI_INT);
    MB_REG_WR (pDevice, Mailbox.Interrupt[0].Low, 1);
    return LM_STATUS_SUCCESS;
}

/******************************************************************************/
/* Description:                                                               */
/* This routine enables the adapter to generate interrupts.
*/
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
LM_STATUS LM_EnableInterrupt (PLM_DEVICE_BLOCK pDevice)
{
    REG_WR (pDevice, PciCfg.MiscHostCtrl,
            pDevice->MiscHostCtrl & ~MISC_HOST_CTRL_MASK_PCI_INT);
    MB_REG_WR (pDevice, Mailbox.Interrupt[0].Low, 0);

    /* If the status block was updated while interrupts were masked, force
     * an interrupt now so the event is not lost. */
    if (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) {
        REG_WR (pDevice, Grc.LocalCtrl,
                pDevice->GrcLocalCtrl | GRC_MISC_LOCAL_CTRL_SET_INT);
    }

    return LM_STATUS_SUCCESS;
}

/******************************************************************************/
/* Description:                                                               */
/*    This routine puts a packet on the wire if there is a transmit DMA       */
/*    descriptor available; otherwise the packet is queued for later          */
/*    transmission.  If the second argument is NULL, this routine will put    */
/*    the queued packet on the wire if possible.                              */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
/* NOTE(review): legacy implementation kept under #if 0 — superseded by the
 * version below; left in place to preserve history. */
#if 0
LM_STATUS LM_SendPacket (PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
{
    LM_UINT32 FragCount;
    PT3_SND_BD pSendBd;
    PT3_SND_BD pShadowSendBd;
    LM_UINT32 Value32, Len;
    LM_UINT32 Idx;

    if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700) {
        return LM_5700SendPacket (pDevice, pPacket);
    }

    /* Update the SendBdLeft count. */
    atomic_sub (pPacket->u.Tx.FragCount, &pDevice->SendBdLeft);

    /* Initialize the send buffer descriptors. */
    Idx = pDevice->SendProdIdx;

    pSendBd = &pDevice->pSendBdVirt[Idx];

    /* Next producer index. */
    if (pDevice->NicSendBd == TRUE) {
        T3_64BIT_HOST_ADDR paddr;

        pShadowSendBd = &pDevice->ShadowSendBd[Idx];
        for (FragCount = 0;;) {
            MM_MapTxDma (pDevice, pPacket, &paddr, &Len, FragCount);

            /* Initialize the pointer to the send buffer fragment.
*/
            if (paddr.High != pShadowSendBd->HostAddr.High) {
                __raw_writel (paddr.High, &(pSendBd->HostAddr.High));
                pShadowSendBd->HostAddr.High = paddr.High;
            }

            __raw_writel (paddr.Low, &(pSendBd->HostAddr.Low));

            /* Setup the control flags and send buffer size. */
            Value32 = (Len << 16) | pPacket->Flags;

            Idx = (Idx + 1) & T3_SEND_RCB_ENTRY_COUNT_MASK;
            FragCount++;
            if (FragCount >= pPacket->u.Tx.FragCount) {
                Value32 |= SND_BD_FLAG_END;
                if (Value32 != pShadowSendBd->u1.Len_Flags) {
                    __raw_writel (Value32, &(pSendBd->u1.Len_Flags));
                    pShadowSendBd->u1.Len_Flags = Value32;
                }
                if (pPacket->Flags & SND_BD_FLAG_VLAN_TAG) {
                    __raw_writel (pPacket->VlanTag,
                                  &(pSendBd->u2.VlanTag));
                }
                break;
            } else {
                if (Value32 != pShadowSendBd->u1.Len_Flags) {
                    __raw_writel (Value32, &(pSendBd->u1.Len_Flags));
                    pShadowSendBd->u1.Len_Flags = Value32;
                }
                if (pPacket->Flags & SND_BD_FLAG_VLAN_TAG) {
                    __raw_writel (pPacket->VlanTag,
                                  &(pSendBd->u2.VlanTag));
                }
            }

            pSendBd++;
            pShadowSendBd++;
            if (Idx == 0) {
                pSendBd = &pDevice->pSendBdVirt[0];
                pShadowSendBd = &pDevice->ShadowSendBd[0];
            }
        }    /* for */

        /* Put the packet descriptor in the ActiveQ. */
        QQ_PushTail (&pDevice->TxPacketActiveQ.Container, pPacket);

        wmb ();
        MB_REG_WR (pDevice, Mailbox.SendNicProdIdx[0].Low, Idx);
    } else {
        for (FragCount = 0;;) {
            /* Initialize the pointer to the send buffer fragment. */
            MM_MapTxDma (pDevice, pPacket, &pSendBd->HostAddr, &Len,
                         FragCount);

            pSendBd->u2.VlanTag = pPacket->VlanTag;

            /* Setup the control flags and send buffer size. */
            Value32 = (Len << 16) | pPacket->Flags;

            Idx = (Idx + 1) & T3_SEND_RCB_ENTRY_COUNT_MASK;
            FragCount++;
            if (FragCount >= pPacket->u.Tx.FragCount) {
                pSendBd->u1.Len_Flags = Value32 | SND_BD_FLAG_END;
                break;
            } else {
                pSendBd->u1.Len_Flags = Value32;
            }
            pSendBd++;
            if (Idx == 0) {
                pSendBd = &pDevice->pSendBdVirt[0];
            }
        }    /* for */

        /* Put the packet descriptor in the ActiveQ.
*/
        QQ_PushTail (&pDevice->TxPacketActiveQ.Container, pPacket);

        wmb ();
        MB_REG_WR (pDevice, Mailbox.SendHostProdIdx[0].Low, Idx);
    }

    /* Update the producer index. */
    pDevice->SendProdIdx = Idx;

    return LM_STATUS_SUCCESS;
}
#endif

/* Current LM_SendPacket: builds the send BD chain (in a stack array for
 * NIC-resident rings, or directly in the host ring), retries through
 * MM_CoalesceTxBuffer when a fragment straddles the 5700's 4GB DMA
 * boundary, then publishes the new producer index via mailbox. */
LM_STATUS LM_SendPacket (PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
{
    LM_UINT32 FragCount;
    PT3_SND_BD pSendBd, pTmpSendBd, pShadowSendBd;
    T3_SND_BD NicSendBdArr[MAX_FRAGMENT_COUNT];
    LM_UINT32 StartIdx, Idx;

    while (1) {
        /* Initialize the send buffer descriptors. */
        StartIdx = Idx = pDevice->SendProdIdx;

        if (pDevice->NicSendBd) {
            /* Stage BDs in a local array; copied to NIC memory below. */
            pTmpSendBd = pSendBd = &NicSendBdArr[0];
        } else {
            pTmpSendBd = pSendBd = &pDevice->pSendBdVirt[Idx];
        }

        /* Next producer index. */
        for (FragCount = 0;;) {
            LM_UINT32 Value32, Len;

            /* Initialize the pointer to the send buffer fragment. */
            MM_MapTxDma (pDevice, pPacket, &pSendBd->HostAddr, &Len,
                         FragCount);

            pSendBd->u2.VlanTag = pPacket->VlanTag;

            /* Setup the control flags and send buffer size. */
            Value32 = (Len << 16) | pPacket->Flags;

            Idx = (Idx + 1) & T3_SEND_RCB_ENTRY_COUNT_MASK;
            FragCount++;
            if (FragCount >= pPacket->u.Tx.FragCount) {
                pSendBd->u1.Len_Flags = Value32 | SND_BD_FLAG_END;
                break;
            } else {
                pSendBd->u1.Len_Flags = Value32;
            }
            pSendBd++;
            if ((Idx == 0) && !pDevice->NicSendBd) {
                pSendBd = &pDevice->pSendBdVirt[0];
            }
        }    /* for */

        /* 5700 workaround: rebuild the chain with coalesced buffers when a
         * fragment would wrap the 4GB DMA address boundary. */
        if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700) {
            if (LM_Test4GBoundary (pDevice, pPacket, pTmpSendBd) ==
                LM_STATUS_SUCCESS) {
                if (MM_CoalesceTxBuffer (pDevice, pPacket) !=
                    LM_STATUS_SUCCESS) {
                    QQ_PushHead (&pDevice->TxPacketFreeQ.Container,
                                 pPacket);
                    return LM_STATUS_FAILURE;
                }
                continue;
            }
        }
        break;
    }

    /* Put the packet descriptor in the ActiveQ.
*/
    QQ_PushTail (&pDevice->TxPacketActiveQ.Container, pPacket);

    if (pDevice->NicSendBd) {
        pSendBd = &pDevice->pSendBdVirt[StartIdx];
        pShadowSendBd = &pDevice->ShadowSendBd[StartIdx];

        /* Copy staged BDs into NIC memory, skipping MMIO writes whose
         * value already matches the shadow copy. */
        while (StartIdx != Idx) {
            LM_UINT32 Value32;

            if ((Value32 = pTmpSendBd->HostAddr.High) !=
                pShadowSendBd->HostAddr.High) {
                __raw_writel (Value32, &(pSendBd->HostAddr.High));
                pShadowSendBd->HostAddr.High = Value32;
            }

            __raw_writel (pTmpSendBd->HostAddr.Low,
                          &(pSendBd->HostAddr.Low));

            if ((Value32 = pTmpSendBd->u1.Len_Flags) !=
                pShadowSendBd->u1.Len_Flags) {
                __raw_writel (Value32, &(pSendBd->u1.Len_Flags));
                pShadowSendBd->u1.Len_Flags = Value32;
            }

            if (pPacket->Flags & SND_BD_FLAG_VLAN_TAG) {
                __raw_writel (pTmpSendBd->u2.VlanTag,
                              &(pSendBd->u2.VlanTag));
            }

            StartIdx = (StartIdx + 1) & T3_SEND_RCB_ENTRY_COUNT_MASK;
            if (StartIdx == 0)
                pSendBd = &pDevice->pSendBdVirt[0];
            else
                pSendBd++;
            pTmpSendBd++;
        }

        /* wmb() orders the BD writes before the mailbox doorbell. */
        wmb ();
        MB_REG_WR (pDevice, Mailbox.SendNicProdIdx[0].Low, Idx);

        /* 5700 BX erratum: mailbox write must be issued twice. */
        if (T3_CHIP_REV (pDevice->ChipRevId) == T3_CHIP_REV_5700_BX) {
            MB_REG_WR (pDevice, Mailbox.SendNicProdIdx[0].Low, Idx);
        }
    } else {
        wmb ();
        MB_REG_WR (pDevice, Mailbox.SendHostProdIdx[0].Low, Idx);

        if (T3_CHIP_REV (pDevice->ChipRevId) == T3_CHIP_REV_5700_BX) {
            MB_REG_WR (pDevice, Mailbox.SendHostProdIdx[0].Low, Idx);
        }
    }

    /* Update the SendBdLeft count. */
    atomic_sub (pPacket->u.Tx.FragCount, &pDevice->SendBdLeft);

    /* Update the producer index.
*/
    pDevice->SendProdIdx = Idx;

    return LM_STATUS_SUCCESS;
}

/* Returns LM_STATUS_SUCCESS when any fragment in the BD chain lies close
 * enough to a 4GB boundary that the 5700's read DMA would wrap
 * (Base + 8 + Len overflows 32 bits with HostAddr.High == 0). */
STATIC LM_STATUS
LM_Test4GBoundary (PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket,
                   PT3_SND_BD pSendBd)
{
    int FragCount;
    LM_UINT32 Idx, Base, Len;

    Idx = pDevice->SendProdIdx;
    for (FragCount = 0;;) {
        Len = pSendBd->u1.Len_Flags >> 16;
        if (((Base = pSendBd->HostAddr.Low) > 0xffffdcc0) &&
            (pSendBd->HostAddr.High == 0) && ((Base + 8 + Len) < Base)) {
            return LM_STATUS_SUCCESS;
        }
        FragCount++;
        if (FragCount >= pPacket->u.Tx.FragCount) {
            break;
        }
        pSendBd++;
        if (!pDevice->NicSendBd) {
            Idx = (Idx + 1) & T3_SEND_RCB_ENTRY_COUNT_MASK;
            if (Idx == 0) {
                pSendBd = &pDevice->pSendBdVirt[0];
            }
        }
    }
    return LM_STATUS_FAILURE;
}

/******************************************************************************/
/* Description:                                                               */
/*    Bitwise CRC-32 over pBuffer (reflected polynomial 0xedb88320, init      */
/*    0xffffffff, final inversion) — the form used for the Ethernet FCS and   */
/*    the MAC multicast hash.                                                 */
/*                                                                            */
/* Return:                                                                    */
/*    The CRC-32 of the buffer.                                               */
/******************************************************************************/
__inline static unsigned long
ComputeCrc32 (unsigned char *pBuffer, unsigned long BufferSize)
{
    unsigned long Reg;
    unsigned long Tmp;
    unsigned long j, k;

    Reg = 0xffffffff;

    for (j = 0; j < BufferSize; j++) {
        Reg ^= pBuffer[j];

        for (k = 0; k < 8; k++) {
            Tmp = Reg & 0x01;

            Reg >>= 1;

            if (Tmp) {
                Reg ^= 0xedb88320;
            }
        }
    }

    return ~Reg;
}    /* ComputeCrc32 */

/******************************************************************************/
/* Description:                                                               */
/*    This routine sets the receive control register according to             */
/*    ReceiveMask.                                                            */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS, or LM_STATUS_FAILURE if Mask contains unknown bits.  */
/******************************************************************************/
LM_STATUS LM_SetReceiveMask (PLM_DEVICE_BLOCK pDevice, LM_UINT32 Mask)
{
    LM_UINT32 ReceiveMask;
    LM_UINT32 RxMode;
    LM_UINT32 j, k;

    ReceiveMask = Mask;

    RxMode = pDevice->RxMode;

    /* Consume the recognized filter bits; anything left over at the end
     * is an invalid request. */
    if (Mask & LM_ACCEPT_UNICAST) {
        Mask &= ~LM_ACCEPT_UNICAST;
    }

    if (Mask & LM_ACCEPT_MULTICAST) {
        Mask &= ~LM_ACCEPT_MULTICAST;
    }

    if (Mask & LM_ACCEPT_ALL_MULTICAST) {
        Mask &= ~LM_ACCEPT_ALL_MULTICAST;
    }

    if (Mask & LM_ACCEPT_BROADCAST) {
        Mask &= ~LM_ACCEPT_BROADCAST;
    }

    RxMode &=
~RX_MODE_PROMISCUOUS_MODE; if (Mask & LM_PROMISCUOUS_MODE) { RxMode |= RX_MODE_PROMISCUOUS_MODE; Mask &= ~LM_PROMISCUOUS_MODE; } RxMode &= ~(RX_MODE_ACCEPT_RUNTS | RX_MODE_ACCEPT_OVERSIZED); if (Mask & LM_ACCEPT_ERROR_PACKET) { RxMode |= RX_MODE_ACCEPT_RUNTS | RX_MODE_ACCEPT_OVERSIZED; Mask &= ~LM_ACCEPT_ERROR_PACKET; } /* Make sure all the bits are valid before committing changes. */ if (Mask) { return LM_STATUS_FAILURE; } /* Commit the new filter. */ pDevice->RxMode = RxMode; REG_WR (pDevice, MacCtrl.RxMode, RxMode); pDevice->ReceiveMask = ReceiveMask; /* Set up the MC hash table. */ if (ReceiveMask & LM_ACCEPT_ALL_MULTICAST) { for (k = 0; k < 4; k++) { REG_WR (pDevice, MacCtrl.HashReg[k], 0xffffffff); } } else if (ReceiveMask & LM_ACCEPT_MULTICAST) { LM_UINT32 HashReg[4]; HashReg[0] = 0; HashReg[1] = 0; HashReg[2] = 0; HashReg[3] = 0; for (j = 0; j < pDevice->McEntryCount; j++) { LM_UINT32 RegIndex; LM_UINT32 Bitpos; LM_UINT32 Crc32; Crc32 = ComputeCrc32 (pDevice->McTable[j], ETHERNET_ADDRESS_SIZE); /* The most significant 7 bits of the CRC32 (no inversion), */ /* are used to index into one of the possible 128 bit positions. */ Bitpos = ~Crc32 & 0x7f; /* Hash register index. */ RegIndex = (Bitpos & 0x60) >> 5; /* Bit to turn on within a hash register. */ Bitpos &= 0x1f; /* Enable the multicast bit. */ HashReg[RegIndex] |= (1 << Bitpos); } /* REV_AX has problem with multicast filtering where it uses both */ /* DA and SA to perform hashing. */ for (k = 0; k < 4; k++) { REG_WR (pDevice, MacCtrl.HashReg[k], HashReg[k]); } } else { /* Reject all multicast frames. */ for (j = 0; j < 4; j++) { REG_WR (pDevice, MacCtrl.HashReg[j], 0); } } /* By default, Tigon3 will accept broadcast frames. 
We need to setup */ if (ReceiveMask & LM_ACCEPT_BROADCAST) { REG_WR (pDevice, MacCtrl.RcvRules[RCV_RULE1_REJECT_BROADCAST_IDX].Rule, REJECT_BROADCAST_RULE1_RULE & RCV_DISABLE_RULE_MASK); REG_WR (pDevice, MacCtrl.RcvRules[RCV_RULE1_REJECT_BROADCAST_IDX].Value, REJECT_BROADCAST_RULE1_VALUE & RCV_DISABLE_RULE_MASK); REG_WR (pDevice, MacCtrl.RcvRules[RCV_RULE2_REJECT_BROADCAST_IDX].Rule, REJECT_BROADCAST_RULE1_RULE & RCV_DISABLE_RULE_MASK); REG_WR (pDevice, MacCtrl.RcvRules[RCV_RULE2_REJECT_BROADCAST_IDX].Value, REJECT_BROADCAST_RULE1_VALUE & RCV_DISABLE_RULE_MASK); } else { REG_WR (pDevice, MacCtrl.RcvRules[RCV_RULE1_REJECT_BROADCAST_IDX].Rule, REJECT_BROADCAST_RULE1_RULE); REG_WR (pDevice, MacCtrl.RcvRules[RCV_RULE1_REJECT_BROADCAST_IDX].Value, REJECT_BROADCAST_RULE1_VALUE); REG_WR (pDevice, MacCtrl.RcvRules[RCV_RULE2_REJECT_BROADCAST_IDX].Rule, REJECT_BROADCAST_RULE2_RULE); REG_WR (pDevice, MacCtrl.RcvRules[RCV_RULE2_REJECT_BROADCAST_IDX].Value, REJECT_BROADCAST_RULE2_VALUE); } /* disable the rest of the rules. */ for (j = RCV_LAST_RULE_IDX; j < 16; j++) { REG_WR (pDevice, MacCtrl.RcvRules[j].Rule, 0); REG_WR (pDevice, MacCtrl.RcvRules[j].Value, 0); } return LM_STATUS_SUCCESS; } /* LM_SetReceiveMask */ /******************************************************************************/ /* Description: */ /* Disable the interrupt and put the transmitter and receiver engines in */ /* an idle state. Also aborts all pending send requests and receive */ /* buffers. */ /* */ /* Return: */ /* LM_STATUS_SUCCESS */ /******************************************************************************/ LM_STATUS LM_Abort (PLM_DEVICE_BLOCK pDevice) { PLM_PACKET pPacket; LM_UINT Idx; LM_DisableInterrupt (pDevice); /* Disable all the state machines. 
*/ LM_CntrlBlock (pDevice, T3_BLOCK_MAC_RX_ENGINE, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_RX_BD_INITIATOR, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_RX_LIST_PLMT, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_RX_LIST_SELECTOR, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_RX_DATA_INITIATOR, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_RX_DATA_COMP, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_RX_BD_COMP, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_SEND_BD_SELECTOR, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_SEND_BD_INITIATOR, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_SEND_DATA_INITIATOR, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_DMA_RD, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_SEND_DATA_COMP, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_DMA_COMP, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_SEND_BD_COMP, LM_DISABLE); /* Clear TDE bit */ pDevice->MacMode &= ~MAC_MODE_ENABLE_TDE; REG_WR (pDevice, MacCtrl.Mode, pDevice->MacMode); LM_CntrlBlock (pDevice, T3_BLOCK_MAC_TX_ENGINE, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_HOST_COALESING, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_DMA_WR, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_MBUF_CLUSTER_FREE, LM_DISABLE); /* Reset all FTQs */ REG_WR (pDevice, Ftq.Reset, 0xffffffff); REG_WR (pDevice, Ftq.Reset, 0x0); LM_CntrlBlock (pDevice, T3_BLOCK_MBUF_MANAGER, LM_DISABLE); LM_CntrlBlock (pDevice, T3_BLOCK_MEM_ARBITOR, LM_DISABLE); MM_ACQUIRE_INT_LOCK (pDevice); /* Abort packets that have already queued to go out. */ pPacket = (PLM_PACKET) QQ_PopHead (&pDevice->TxPacketActiveQ.Container); while (pPacket) { pPacket->PacketStatus = LM_STATUS_TRANSMIT_ABORTED; pDevice->TxCounters.TxPacketAbortedCnt++; atomic_add (pPacket->u.Tx.FragCount, &pDevice->SendBdLeft); QQ_PushTail (&pDevice->TxPacketXmittedQ.Container, pPacket); pPacket = (PLM_PACKET) QQ_PopHead (&pDevice->TxPacketActiveQ.Container); } /* Cleanup the receive return rings. 
	 */
	LM_ServiceRxInterrupt (pDevice);

	/* Don't want to indicate rx packets in Ndis miniport shutdown context. */
	/* Doing so may cause system crash. */
	if (!pDevice->ShuttingDown) {
		/* Indicate packets to the protocol. */
		MM_IndicateTxPackets (pDevice);

		/* Indicate received packets to the protocols. */
		MM_IndicateRxPackets (pDevice);
	} else {
		/* Move the receive packet descriptors in the ReceivedQ to the */
		/* free queue. */
		for (;;) {
			pPacket = (PLM_PACKET)
				QQ_PopHead (&pDevice->RxPacketReceivedQ.Container);
			if (pPacket == NULL) {
				break;
			}
			QQ_PushTail (&pDevice->RxPacketFreeQ.Container, pPacket);
		}
	}

	/* Clean up the Std Receive Producer ring: walk from the hardware
	 * consumer index up to our producer index and reclaim descriptors. */
	Idx = pDevice->pStatusBlkVirt->RcvStdConIdx;
	while (Idx != pDevice->RxStdProdIdx) {
		/* Opaque holds the packet descriptor's offset from the base. */
		pPacket = (PLM_PACKET) (MM_UINT_PTR (pDevice->pPacketDescBase) +
					MM_UINT_PTR (pDevice->pRxStdBdVirt[Idx].Opaque));

		QQ_PushTail (&pDevice->RxPacketFreeQ.Container, pPacket);

		Idx = (Idx + 1) & T3_STD_RCV_RCB_ENTRY_COUNT_MASK;
	}			/* while */

	/* Reinitialize our copy of the indices. */
	pDevice->RxStdProdIdx = 0;

#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
	/* Clean up the Jumbo Receive Producer ring. */
	Idx = pDevice->pStatusBlkVirt->RcvJumboConIdx;
	while (Idx != pDevice->RxJumboProdIdx) {
		pPacket = (PLM_PACKET) (MM_UINT_PTR (pDevice->pPacketDescBase) +
					MM_UINT_PTR (pDevice->pRxJumboBdVirt[Idx].Opaque));

		QQ_PushTail (&pDevice->RxPacketFreeQ.Container, pPacket);

		Idx = (Idx + 1) & T3_JUMBO_RCV_RCB_ENTRY_COUNT_MASK;
	}			/* while */

	/* Reinitialize our copy of the indices. */
	pDevice->RxJumboProdIdx = 0;
#endif				/* T3_JUMBO_RCV_RCB_ENTRY_COUNT */

	MM_RELEASE_INT_LOCK (pDevice);

	/* Initialize the statistics block. */
	pDevice->pStatusBlkVirt->Status = 0;
	pDevice->pStatusBlkVirt->RcvStdConIdx = 0;
	pDevice->pStatusBlkVirt->RcvJumboConIdx = 0;
	pDevice->pStatusBlkVirt->RcvMiniConIdx = 0;

	return LM_STATUS_SUCCESS;
}				/* LM_Abort */

/******************************************************************************/
/* Description:                                                               */
/*    Disable the interrupt and put the transmitter and receiver engines in   */
/*    an idle state.  Aborts all pending send requests and receive buffers.   */
/*    Also free all the receive buffers.                                      */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
LM_STATUS LM_Halt (PLM_DEVICE_BLOCK pDevice)
{
	PLM_PACKET pPacket;
	LM_UINT32 EntryCnt;

	LM_Abort (pDevice);

	/* Get the number of entries in the queue.
	 * NOTE(review): EntryCnt is overwritten immediately below when it is
	 * reused as the loop counter, so this call's result is unused --
	 * confirm the bound was meant to be RxPacketDescCnt. */
	EntryCnt = QQ_GetEntryCnt (&pDevice->RxPacketFreeQ.Container);

	/* Make sure all the packets have been accounted for: free each rx
	 * buffer and push the bare descriptor back onto the free queue. */
	for (EntryCnt = 0; EntryCnt < pDevice->RxPacketDescCnt; EntryCnt++) {
		pPacket = (PLM_PACKET)
			QQ_PopHead (&pDevice->RxPacketFreeQ.Container);
		if (pPacket == 0)
			break;
		MM_FreeRxBuffer (pDevice, pPacket);

		QQ_PushTail (&pDevice->RxPacketFreeQ.Container, pPacket);
	}

	LM_ResetChip (pDevice);

	/* Restore PCI configuration registers clobbered by the reset. */
	MM_WriteConfig32 (pDevice, PCI_CACHE_LINE_SIZE_REG,
			  pDevice->SavedCacheLineReg);
	LM_RegWrInd (pDevice, PCI_SUBSYSTEM_VENDOR_ID_REG,
		     (pDevice->SubsystemId << 16) | pDevice->SubsystemVendorId);

	/* Reprogram the MAC address. */
	LM_SetMacAddress (pDevice);

	return LM_STATUS_SUCCESS;
}				/* LM_Halt */

/* Reset the chip core and bring it back to a minimally-initialized state. */
STATIC LM_STATUS LM_ResetChip (PLM_DEVICE_BLOCK pDevice)
{
	LM_UINT32 Value32;
	LM_UINT32 j;

	/* Wait for access to the nvram interface before resetting.  This is */
	/* a workaround to prevent EEPROM corruption.
	 */
	if (T3_ASIC_REV (pDevice->ChipRevId) != T3_ASIC_REV_5700 &&
	    T3_ASIC_REV (pDevice->ChipRevId) != T3_ASIC_REV_5701) {
		/* Request access to the flash interface and poll for grant. */
		REG_WR (pDevice, Nvram.SwArb, SW_ARB_REQ_SET1);

		for (j = 0; j < 100000; j++) {
			Value32 = REG_RD (pDevice, Nvram.SwArb);
			if (Value32 & SW_ARB_GNT1) {
				break;
			}
			MM_Wait (10);
		}
	}

	/* Global reset, then let the core clock settle. */
	REG_WR (pDevice, Grc.MiscCfg, GRC_MISC_CFG_CORE_CLOCK_RESET);
	MM_Wait (40);
	MM_Wait (40);
	MM_Wait (40);

	/* make sure we re-enable indirect accesses */
	MM_WriteConfig32 (pDevice, T3_PCI_MISC_HOST_CTRL_REG,
			  pDevice->MiscHostCtrl);

	/* Set MAX PCI retry to zero. */
	Value32 = T3_PCI_STATE_PCI_ROM_ENABLE | T3_PCI_STATE_PCI_ROM_RETRY_ENABLE;
	if (pDevice->ChipRevId == T3_CHIP_ID_5704_A0) {
		/* 5704 A0 in PCI-X mode needs the retry-same-DMA workaround. */
		if (!(pDevice->PciState & T3_PCI_STATE_CONVENTIONAL_PCI_MODE)) {
			Value32 |= T3_PCI_STATE_RETRY_SAME_DMA;
		}
	}
	MM_WriteConfig32 (pDevice, T3_PCI_STATE_REG, Value32);

	/* Restore PCI command register. */
	MM_WriteConfig32 (pDevice, PCI_COMMAND_REG,
			  pDevice->PciCommandStatusWords);

	/* Disable PCI-X relaxed ordering bit. */
	MM_ReadConfig32 (pDevice, PCIX_CAP_REG, &Value32);
	Value32 &= ~PCIX_ENABLE_RELAXED_ORDERING;
	MM_WriteConfig32 (pDevice, PCIX_CAP_REG, Value32);

	/* Enable memory arbiter. */
	REG_WR (pDevice, MemArbiter.Mode, T3_MEM_ARBITER_MODE_ENABLE);

#ifdef BIG_ENDIAN_PCI		/* This from jfd */
	Value32 = GRC_MODE_WORD_SWAP_DATA | GRC_MODE_WORD_SWAP_NON_FRAME_DATA;
#else
#ifdef BIG_ENDIAN_HOST
	/* Reconfigure the mode register for a big-endian host. */
	Value32 = GRC_MODE_BYTE_SWAP_NON_FRAME_DATA |
		GRC_MODE_WORD_SWAP_NON_FRAME_DATA |
		GRC_MODE_BYTE_SWAP_DATA | GRC_MODE_WORD_SWAP_DATA;
#else
	/* Reconfigure the mode register for a little-endian host. */
	Value32 = GRC_MODE_BYTE_SWAP_NON_FRAME_DATA | GRC_MODE_BYTE_SWAP_DATA;
#endif
#endif
	REG_WR (pDevice, Grc.Mode, Value32);

	/* Prevent PXE from restarting.
	 */
	MEM_WR_OFFSET (pDevice, 0x0b50, T3_MAGIC_NUM);

	if (pDevice->EnableTbi) {
		pDevice->MacMode = MAC_MODE_PORT_MODE_TBI;
		REG_WR (pDevice, MacCtrl.Mode, MAC_MODE_PORT_MODE_TBI);
	} else {
		REG_WR (pDevice, MacCtrl.Mode, 0);
	}

	/* Wait for the firmware to finish initialization: it flips the magic
	 * number in NIC memory (offset 0xb50) to its complement when done. */
	for (j = 0; j < 100000; j++) {
		MM_Wait (10);

		Value32 = MEM_RD_OFFSET (pDevice, 0x0b50);
		if (Value32 == ~T3_MAGIC_NUM) {
			break;
		}
	}

	return LM_STATUS_SUCCESS;
}

/******************************************************************************/
/* Description:                                                               */
/*    Reclaim transmit descriptors between our consumer index and the         */
/*    hardware's, moving completed packets to the TxPacketXmittedQ.           */
/*                                                                            */
/* Return:                                                                    */
/*    None.                                                                   */
/******************************************************************************/
__inline static void LM_ServiceTxInterrupt (PLM_DEVICE_BLOCK pDevice)
{
	PLM_PACKET pPacket;
	LM_UINT32 HwConIdx;
	LM_UINT32 SwConIdx;

	HwConIdx = pDevice->pStatusBlkVirt->Idx[0].SendConIdx;

	/* Get our copy of the consumer index.  The buffer descriptors */
	/* that are in between the consumer indices are freed. */
	SwConIdx = pDevice->SendConIdx;

	/* Move the packets from the TxPacketActiveQ that are sent out to */
	/* the TxPacketXmittedQ.  Packets that are sent use the */
	/* descriptors that are between SwConIdx and HwConIdx. */
	while (SwConIdx != HwConIdx) {
		/* Get the packet that was sent from the TxPacketActiveQ. */
		pPacket = (PLM_PACKET)
			QQ_PopHead (&pDevice->TxPacketActiveQ.Container);

		/* Set the return status. */
		pPacket->PacketStatus = LM_STATUS_SUCCESS;

		/* Put the packet in the TxPacketXmittedQ for indication later. */
		QQ_PushTail (&pDevice->TxPacketXmittedQ.Container, pPacket);

		/* Move to the next packet's BD (ring index wraps via mask). */
		SwConIdx = (SwConIdx + pPacket->u.Tx.FragCount) &
			T3_SEND_RCB_ENTRY_COUNT_MASK;

		/* Update the number of unused BDs. */
		atomic_add (pPacket->u.Tx.FragCount, &pDevice->SendBdLeft);

		/* Get the new updated HwConIdx. */
		HwConIdx = pDevice->pStatusBlkVirt->Idx[0].SendConIdx;
	}			/* while */

	/* Save the new SwConIdx.
	 */
	pDevice->SendConIdx = SwConIdx;
}				/* LM_ServiceTxInterrupt */

/******************************************************************************/
/* Description:                                                               */
/*    Drain the receive return ring: classify each completed buffer as good   */
/*    or errored (updating error counters), and queue it for indication.      */
/*                                                                            */
/* Return:                                                                    */
/*    None.                                                                   */
/******************************************************************************/
__inline static void LM_ServiceRxInterrupt (PLM_DEVICE_BLOCK pDevice)
{
	PLM_PACKET pPacket;
	PT3_RCV_BD pRcvBd;
	LM_UINT32 HwRcvRetProdIdx;
	LM_UINT32 SwRcvRetConIdx;

	/* Loop thru the receive return rings for received packets. */
	HwRcvRetProdIdx = pDevice->pStatusBlkVirt->Idx[0].RcvProdIdx;

	SwRcvRetConIdx = pDevice->RcvRetConIdx;
	while (SwRcvRetConIdx != HwRcvRetProdIdx) {
		pRcvBd = &pDevice->pRcvRetBdVirt[SwRcvRetConIdx];

		/* Get the received packet descriptor (Opaque is its offset
		 * from the descriptor base). */
		pPacket = (PLM_PACKET) (MM_UINT_PTR (pDevice->pPacketDescBase) +
					MM_UINT_PTR (pRcvBd->Opaque));

		/* Check the error flag; an odd-nibble MII error alone is
		 * tolerated and the frame still indicated as good. */
		if (pRcvBd->ErrorFlag &&
		    pRcvBd->ErrorFlag != RCV_BD_ERR_ODD_NIBBLED_RCVD_MII) {
			pPacket->PacketStatus = LM_STATUS_FAILURE;

			pDevice->RxCounters.RxPacketErrCnt++;

			/* Bump a specific counter per error cause. */
			if (pRcvBd->ErrorFlag & RCV_BD_ERR_BAD_CRC) {
				pDevice->RxCounters.RxErrCrcCnt++;
			}

			if (pRcvBd->ErrorFlag & RCV_BD_ERR_COLL_DETECT) {
				pDevice->RxCounters.RxErrCollCnt++;
			}

			if (pRcvBd->ErrorFlag & RCV_BD_ERR_LINK_LOST_DURING_PKT) {
				pDevice->RxCounters.RxErrLinkLostCnt++;
			}

			if (pRcvBd->ErrorFlag & RCV_BD_ERR_PHY_DECODE_ERR) {
				pDevice->RxCounters.RxErrPhyDecodeCnt++;
			}

			if (pRcvBd->ErrorFlag & RCV_BD_ERR_ODD_NIBBLED_RCVD_MII) {
				pDevice->RxCounters.RxErrOddNibbleCnt++;
			}

			if (pRcvBd->ErrorFlag & RCV_BD_ERR_MAC_ABORT) {
				pDevice->RxCounters.RxErrMacAbortCnt++;
			}

			if (pRcvBd->ErrorFlag & RCV_BD_ERR_LEN_LT_64) {
				pDevice->RxCounters.RxErrShortPacketCnt++;
			}

			if (pRcvBd->ErrorFlag & RCV_BD_ERR_TRUNC_NO_RESOURCES) {
				pDevice->RxCounters.RxErrNoResourceCnt++;
			}

			if (pRcvBd->ErrorFlag & RCV_BD_ERR_GIANT_FRAME_RCVD) {
				pDevice->RxCounters.RxErrLargePacketCnt++;
			}
		} else {
			pPacket->PacketStatus = LM_STATUS_SUCCESS;
			/* Len includes the 4-byte FCS; strip it. */
			pPacket->PacketSize = pRcvBd->Len - 4;

			pPacket->Flags = pRcvBd->Flags;
			if (pRcvBd->Flags & RCV_BD_FLAG_VLAN_TAG) {
				pPacket->VlanTag = pRcvBd->VlanTag;
			}

			pPacket->u.Rx.TcpUdpChecksum = pRcvBd->TcpUdpCksum;
		}

		/* Put the packet descriptor containing the received packet */
		/* buffer in the RxPacketReceivedQ for indication later. */
		QQ_PushTail (&pDevice->RxPacketReceivedQ.Container, pPacket);

		/* Go to the next buffer descriptor. */
		SwRcvRetConIdx = (SwRcvRetConIdx + 1) &
			T3_RCV_RETURN_RCB_ENTRY_COUNT_MASK;

		/* Get the updated HwRcvRetProdIdx. */
		HwRcvRetProdIdx = pDevice->pStatusBlkVirt->Idx[0].RcvProdIdx;
	}			/* while */

	pDevice->RcvRetConIdx = SwRcvRetConIdx;

	/* Update the receive return ring consumer index mailbox. */
	MB_REG_WR (pDevice, Mailbox.RcvRetConIdx[0].Low, SwRcvRetConIdx);
}				/* LM_ServiceRxInterrupt */

/******************************************************************************/
/* Description:                                                               */
/*    This is the interrupt event handler routine.  It acknowledges all       */
/*    pending interrupts and process all pending events.                      */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
LM_STATUS LM_ServiceInterrupts (PLM_DEVICE_BLOCK pDevice)
{
	LM_UINT32 Value32;
	int ServicePhyInt = FALSE;

	/* Setup the phy chip whenever the link status changes.
	 */
	if (pDevice->LinkChngMode == T3_LINK_CHNG_MODE_USE_STATUS_REG) {
		/* Link-change reporting via the MAC status register. */
		Value32 = REG_RD (pDevice, MacCtrl.Status);
		if (pDevice->PhyIntMode == T3_PHY_INT_MODE_MI_INTERRUPT) {
			if (Value32 & MAC_STATUS_MI_INTERRUPT) {
				ServicePhyInt = TRUE;
			}
		} else if (Value32 & MAC_STATUS_LINK_STATE_CHANGED) {
			ServicePhyInt = TRUE;
		}
	} else {
		/* Link-change reporting via the host status block. */
		if (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_LINK_CHANGED_STATUS) {
			/* Acknowledge by clearing the link-changed bit while
			 * keeping the rest of the status word intact. */
			pDevice->pStatusBlkVirt->Status = STATUS_BLOCK_UPDATED |
				(pDevice->pStatusBlkVirt->Status &
				 ~STATUS_BLOCK_LINK_CHANGED_STATUS);
			ServicePhyInt = TRUE;
		}
	}

#if INCLUDE_TBI_SUPPORT
	if (pDevice->IgnoreTbiLinkChange == TRUE) {
		ServicePhyInt = FALSE;
	}
#endif

	if (ServicePhyInt == TRUE) {
		LM_SetupPhy (pDevice);
	}

	/* Service receive and transmit interrupts. */
	LM_ServiceRxInterrupt (pDevice);
	LM_ServiceTxInterrupt (pDevice);

	/* No spinlock for this queue since this routine is serialized. */
	if (!QQ_Empty (&pDevice->RxPacketReceivedQ.Container)) {
		/* Indicate receive packets. */
		MM_IndicateRxPackets (pDevice);
		/* LM_QueueRxPackets(pDevice); */
	}

	/* No spinlock for this queue since this routine is serialized. */
	if (!QQ_Empty (&pDevice->TxPacketXmittedQ.Container)) {
		MM_IndicateTxPackets (pDevice);
	}

	return LM_STATUS_SUCCESS;
}				/* LM_ServiceInterrupts */

/******************************************************************************/
/* Description:                                                               */
/*    Add a multicast address (reference-counted per address) and enable      */
/*    multicast reception when the first entry is added.                      */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS or LM_STATUS_FAILURE when the table is full.          */
/******************************************************************************/
LM_STATUS LM_MulticastAdd (PLM_DEVICE_BLOCK pDevice, PLM_UINT8 pMcAddress)
{
	PLM_UINT8 pEntry;
	LM_UINT32 j;

	pEntry = pDevice->McTable[0];
	for (j = 0; j < pDevice->McEntryCount; j++) {
		if (IS_ETH_ADDRESS_EQUAL (pEntry, pMcAddress)) {
			/* Found a match, increment the instance count. */
			pEntry[LM_MC_INSTANCE_COUNT_INDEX] += 1;

			return LM_STATUS_SUCCESS;
		}
		pEntry += LM_MC_ENTRY_SIZE;
	}

	if (pDevice->McEntryCount >= LM_MAX_MC_TABLE_SIZE) {
		return LM_STATUS_FAILURE;
	}

	/* Append a new entry with an instance count of one. */
	pEntry = pDevice->McTable[pDevice->McEntryCount];

	COPY_ETH_ADDRESS (pMcAddress, pEntry);
	pEntry[LM_MC_INSTANCE_COUNT_INDEX] = 1;

	pDevice->McEntryCount++;

	LM_SetReceiveMask (pDevice, pDevice->ReceiveMask | LM_ACCEPT_MULTICAST);

	return LM_STATUS_SUCCESS;
}				/* LM_MulticastAdd */

/******************************************************************************/
/* Description:                                                               */
/*    Remove one instance of a multicast address; the table entry is          */
/*    recycled by moving the last entry into the vacated slot.                */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS or LM_STATUS_FAILURE when the address is not found.   */
/******************************************************************************/
LM_STATUS LM_MulticastDel (PLM_DEVICE_BLOCK pDevice, PLM_UINT8 pMcAddress)
{
	PLM_UINT8 pEntry;
	LM_UINT32 j;

	pEntry = pDevice->McTable[0];
	for (j = 0; j < pDevice->McEntryCount; j++) {
		if (IS_ETH_ADDRESS_EQUAL (pEntry, pMcAddress)) {
			/* Found a match, decrement the instance count. */
			pEntry[LM_MC_INSTANCE_COUNT_INDEX] -= 1;

			/* No more instance left, remove the address from the table. */
			/* Move the last entry in the table to the delete slot. */
			if (pEntry[LM_MC_INSTANCE_COUNT_INDEX] == 0 &&
			    pDevice->McEntryCount > 1) {
				COPY_ETH_ADDRESS (pDevice->McTable[pDevice->McEntryCount - 1],
						  pEntry);
				pEntry[LM_MC_INSTANCE_COUNT_INDEX] =
					pDevice->McTable[pDevice->McEntryCount - 1]
					[LM_MC_INSTANCE_COUNT_INDEX];
			}
			/* NOTE(review): the entry count is decremented even
			 * when instances of this address remain (the == 0
			 * check above only gates the slot move) -- verify
			 * this matches the intended reference-count
			 * semantics of LM_MulticastAdd. */
			pDevice->McEntryCount--;

			/* Update the receive mask if the table is empty.
			 */
			if (pDevice->McEntryCount == 0) {
				LM_SetReceiveMask (pDevice,
						   pDevice->ReceiveMask &
						   ~LM_ACCEPT_MULTICAST);
			}

			return LM_STATUS_SUCCESS;
		}
		pEntry += LM_MC_ENTRY_SIZE;
	}

	return LM_STATUS_FAILURE;
}				/* LM_MulticastDel */

/******************************************************************************/
/* Description:                                                               */
/*    Empty the multicast table and stop accepting multicast frames.          */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
LM_STATUS LM_MulticastClear (PLM_DEVICE_BLOCK pDevice)
{
	pDevice->McEntryCount = 0;

	LM_SetReceiveMask (pDevice, pDevice->ReceiveMask & ~LM_ACCEPT_MULTICAST);

	return LM_STATUS_SUCCESS;
}				/* LM_MulticastClear */

/******************************************************************************/
/* Description:                                                               */
/*    Program the station (node) address into all four MAC address slots.     */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
LM_STATUS LM_SetMacAddress (PLM_DEVICE_BLOCK pDevice)
{
	LM_UINT32 j;
	PLM_UINT8 pMacAddress = pDevice->NodeAddress;

	/* The hardware has four MAC address registers; write the same
	 * address to all of them (high word = bytes 0-1, low = bytes 2-5). */
	for (j = 0; j < 4; j++) {
		REG_WR (pDevice, MacCtrl.MacAddr[j].High,
			(pMacAddress[0] << 8) | pMacAddress[1]);
		REG_WR (pDevice, MacCtrl.MacAddr[j].Low,
			(pMacAddress[2] << 24) | (pMacAddress[3] << 16) |
			(pMacAddress[4] << 8) | pMacAddress[5]);
	}

	return LM_STATUS_SUCCESS;
}

/******************************************************************************/
/* Description:                                                               */
/*    Sets up the default line speed, and duplex modes based on the requested */
/*    media type.                                                             */
/*                                                                            */
/* Return:                                                                    */
/*    None.                                                                   */
/******************************************************************************/
static LM_STATUS
LM_TranslateRequestedMediaType (LM_REQUESTED_MEDIA_TYPE RequestedMediaType,
				PLM_MEDIA_TYPE pMediaType,
				PLM_LINE_SPEED pLineSpeed,
				PLM_DUPLEX_MODE pDuplexMode)
{
	/* Defaults: auto media, speed/duplex unknown (auto-negotiated). */
	*pMediaType = LM_MEDIA_TYPE_AUTO;
	*pLineSpeed = LM_LINE_SPEED_UNKNOWN;
	*pDuplexMode = LM_DUPLEX_MODE_UNKNOWN;

	/* determine media type */
	switch (RequestedMediaType) {
	case LM_REQUESTED_MEDIA_TYPE_BNC:
		*pMediaType = LM_MEDIA_TYPE_BNC;
		*pLineSpeed = LM_LINE_SPEED_10MBPS;
		*pDuplexMode = LM_DUPLEX_MODE_HALF;
		break;

	case LM_REQUESTED_MEDIA_TYPE_UTP_AUTO:
		*pMediaType = LM_MEDIA_TYPE_UTP;
		break;

	case LM_REQUESTED_MEDIA_TYPE_UTP_10MBPS:
		*pMediaType = LM_MEDIA_TYPE_UTP;
		*pLineSpeed = LM_LINE_SPEED_10MBPS;
		*pDuplexMode = LM_DUPLEX_MODE_HALF;
		break;

	case LM_REQUESTED_MEDIA_TYPE_UTP_10MBPS_FULL_DUPLEX:
		*pMediaType = LM_MEDIA_TYPE_UTP;
		*pLineSpeed = LM_LINE_SPEED_10MBPS;
		*pDuplexMode = LM_DUPLEX_MODE_FULL;
		break;

	case LM_REQUESTED_MEDIA_TYPE_UTP_100MBPS:
		*pMediaType = LM_MEDIA_TYPE_UTP;
		*pLineSpeed = LM_LINE_SPEED_100MBPS;
		*pDuplexMode = LM_DUPLEX_MODE_HALF;
		break;

	case LM_REQUESTED_MEDIA_TYPE_UTP_100MBPS_FULL_DUPLEX:
		*pMediaType = LM_MEDIA_TYPE_UTP;
		*pLineSpeed = LM_LINE_SPEED_100MBPS;
		*pDuplexMode = LM_DUPLEX_MODE_FULL;
		break;

	case LM_REQUESTED_MEDIA_TYPE_UTP_1000MBPS:
		*pMediaType = LM_MEDIA_TYPE_UTP;
		*pLineSpeed = LM_LINE_SPEED_1000MBPS;
		*pDuplexMode = LM_DUPLEX_MODE_HALF;
		break;

	case LM_REQUESTED_MEDIA_TYPE_UTP_1000MBPS_FULL_DUPLEX:
		*pMediaType = LM_MEDIA_TYPE_UTP;
		*pLineSpeed = LM_LINE_SPEED_1000MBPS;
		*pDuplexMode = LM_DUPLEX_MODE_FULL;
		break;

	case LM_REQUESTED_MEDIA_TYPE_FIBER_100MBPS:
		*pMediaType = LM_MEDIA_TYPE_FIBER;
		*pLineSpeed = LM_LINE_SPEED_100MBPS;
		*pDuplexMode = LM_DUPLEX_MODE_HALF;
		break;

	case LM_REQUESTED_MEDIA_TYPE_FIBER_100MBPS_FULL_DUPLEX:
		*pMediaType = LM_MEDIA_TYPE_FIBER;
		*pLineSpeed = LM_LINE_SPEED_100MBPS;
		*pDuplexMode = LM_DUPLEX_MODE_FULL;
		break;

	case LM_REQUESTED_MEDIA_TYPE_FIBER_1000MBPS:
		*pMediaType = LM_MEDIA_TYPE_FIBER;
		*pLineSpeed = LM_LINE_SPEED_1000MBPS;
		*pDuplexMode = LM_DUPLEX_MODE_HALF;
		break;

	case LM_REQUESTED_MEDIA_TYPE_FIBER_1000MBPS_FULL_DUPLEX:
		*pMediaType = LM_MEDIA_TYPE_FIBER;
		*pLineSpeed = LM_LINE_SPEED_1000MBPS;
		*pDuplexMode = LM_DUPLEX_MODE_FULL;
		break;

	default:
		break;
	}			/* switch */

	return LM_STATUS_SUCCESS;
}				/* LM_TranslateRequestedMediaType */

/******************************************************************************/
/* Description:                                                               */
/*    Initialize a BCM540x copper PHY and determine the current link state.   */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_LINK_ACTIVE                                                   */
/*    LM_STATUS_LINK_DOWN                                                     */
/******************************************************************************/
static LM_STATUS LM_InitBcm540xPhy (PLM_DEVICE_BLOCK pDevice)
{
	LM_LINE_SPEED CurrentLineSpeed;
	LM_DUPLEX_MODE CurrentDuplexMode;
	LM_STATUS CurrentLinkStatus;
	LM_UINT32 Value32;
	LM_UINT32 j;

#if 1				/* jmb: bugfix -- moved here, out of code that sets initial pwr state */
	LM_WritePhy (pDevice, BCM5401_AUX_CTRL, 0x2);
#endif

	if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5401_PHY_ID) {
		/* Read twice: the link bit in the status register latches,
		 * so the second read reflects the current state. */
		LM_ReadPhy (pDevice, PHY_STATUS_REG, &Value32);
		LM_ReadPhy (pDevice, PHY_STATUS_REG, &Value32);
		if (!pDevice->InitDone) {
			Value32 = 0;
		}

		if (!(Value32 & PHY_STATUS_LINK_PASS)) {
			/* 5401 DSP tap reprogramming sequence (vendor
			 * workaround values written through the DSP
			 * address/data port pair). */
			LM_WritePhy (pDevice, BCM5401_AUX_CTRL, 0x0c20);

			LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x0012);
			LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x1804);

			LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x0013);
			LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x1204);

			LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x8006);
			LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x0132);

			LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x8006);
			LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x0232);

			LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x201f);
			LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x0a20);

			LM_ReadPhy (pDevice, PHY_STATUS_REG, &Value32);
			for (j = 0; j < 1000; j++) {
				MM_Wait (10);

				LM_ReadPhy (pDevice, PHY_STATUS_REG, &Value32);
				if (Value32 & PHY_STATUS_LINK_PASS) {
					MM_Wait (40);
					break;
				}
			}

			if ((pDevice->PhyId & PHY_ID_REV_MASK) == PHY_BCM5401_B0_REV) {
				/* 5401 B0: if link did not come back after a
				 * gigabit session, hard-reset the PHY and
				 * repeat the DSP sequence. */
				if (!(Value32 & PHY_STATUS_LINK_PASS) &&
				    (pDevice->OldLineSpeed == LM_LINE_SPEED_1000MBPS)) {
					LM_WritePhy (pDevice, PHY_CTRL_REG,
						     PHY_CTRL_PHY_RESET);
					for (j = 0; j < 100; j++) {
						MM_Wait (10);

						LM_ReadPhy (pDevice, PHY_CTRL_REG, &Value32);
						if (!(Value32 & PHY_CTRL_PHY_RESET)) {
							MM_Wait (40);
							break;
						}
					}

					LM_WritePhy (pDevice, BCM5401_AUX_CTRL, 0x0c20);

					LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x0012);
					LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x1804);

					LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x0013);
					LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x1204);

					LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x8006);
					LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x0132);

					LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x8006);
					LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x0232);

					LM_WritePhy (pDevice, BCM540X_DSP_ADDRESS_REG, 0x201f);
					LM_WritePhy (pDevice, BCM540X_DSP_RW_PORT, 0x0a20);
				}
			}
		}
	} else if (pDevice->ChipRevId == T3_CHIP_ID_5701_A0 ||
		   pDevice->ChipRevId == T3_CHIP_ID_5701_B0) {
		/* Bug: 5701 A0, B0 TX CRC workaround. */
		LM_WritePhy (pDevice, 0x15, 0x0a75);
		LM_WritePhy (pDevice, 0x1c, 0x8c68);
		LM_WritePhy (pDevice, 0x1c, 0x8d68);
		LM_WritePhy (pDevice, 0x1c, 0x8c68);
	}

	/* Acknowledge interrupts (read-to-clear; read twice). */
	LM_ReadPhy (pDevice, BCM540X_INT_STATUS_REG, &Value32);
	LM_ReadPhy (pDevice, BCM540X_INT_STATUS_REG, &Value32);

	/* Configure the interrupt mask. */
	if (pDevice->PhyIntMode == T3_PHY_INT_MODE_MI_INTERRUPT) {
		LM_WritePhy (pDevice, BCM540X_INT_MASK_REG,
			     ~BCM540X_INT_LINK_CHANGE);
	}

	/* Configure PHY led mode. */
	if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5701 ||
	    (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700)) {
		if (pDevice->LedMode == LED_MODE_THREE_LINK) {
			LM_WritePhy (pDevice, BCM540X_EXT_CTRL_REG,
				     BCM540X_EXT_CTRL_LINK3_LED_MODE);
		} else {
			LM_WritePhy (pDevice, BCM540X_EXT_CTRL_REG, 0);
		}
	}

	CurrentLinkStatus = LM_STATUS_LINK_DOWN;

	/* Get current link and duplex mode.
	 */
	for (j = 0; j < 100; j++) {
		/* Double-read: the link-pass bit is latched-low. */
		LM_ReadPhy (pDevice, PHY_STATUS_REG, &Value32);
		LM_ReadPhy (pDevice, PHY_STATUS_REG, &Value32);

		if (Value32 & PHY_STATUS_LINK_PASS) {
			break;
		}
		MM_Wait (40);
	}

	if (Value32 & PHY_STATUS_LINK_PASS) {
		/* Determine the current line and duplex settings from the
		 * auxiliary status register; poll until it reads non-zero. */
		LM_ReadPhy (pDevice, BCM540X_AUX_STATUS_REG, &Value32);
		for (j = 0; j < 2000; j++) {
			MM_Wait (10);

			LM_ReadPhy (pDevice, BCM540X_AUX_STATUS_REG, &Value32);
			if (Value32) {
				break;
			}
		}

		/* NOTE(review): the BCM540X_AUX_100BASET_* constants below
		 * are mapped to 1000 Mbps -- presumably they denote the
		 * 1000BASE-T aux-status encodings despite their names;
		 * verify against the PHY register definitions. */
		switch (Value32 & BCM540X_AUX_SPEED_MASK) {
		case BCM540X_AUX_10BASET_HD:
			CurrentLineSpeed = LM_LINE_SPEED_10MBPS;
			CurrentDuplexMode = LM_DUPLEX_MODE_HALF;
			break;

		case BCM540X_AUX_10BASET_FD:
			CurrentLineSpeed = LM_LINE_SPEED_10MBPS;
			CurrentDuplexMode = LM_DUPLEX_MODE_FULL;
			break;

		case BCM540X_AUX_100BASETX_HD:
			CurrentLineSpeed = LM_LINE_SPEED_100MBPS;
			CurrentDuplexMode = LM_DUPLEX_MODE_HALF;
			break;

		case BCM540X_AUX_100BASETX_FD:
			CurrentLineSpeed = LM_LINE_SPEED_100MBPS;
			CurrentDuplexMode = LM_DUPLEX_MODE_FULL;
			break;

		case BCM540X_AUX_100BASET_HD:
			CurrentLineSpeed = LM_LINE_SPEED_1000MBPS;
			CurrentDuplexMode = LM_DUPLEX_MODE_HALF;
			break;

		case BCM540X_AUX_100BASET_FD:
			CurrentLineSpeed = LM_LINE_SPEED_1000MBPS;
			CurrentDuplexMode = LM_DUPLEX_MODE_FULL;
			break;

		default:
			CurrentLineSpeed = LM_LINE_SPEED_UNKNOWN;
			CurrentDuplexMode = LM_DUPLEX_MODE_UNKNOWN;
			break;
		}

		/* Make sure we are in auto-neg mode: wait for a sane control
		 * register value (0 and 0x7fff are transient/invalid reads). */
		for (j = 0; j < 200; j++) {
			LM_ReadPhy (pDevice, PHY_CTRL_REG, &Value32);
			if (Value32 && Value32 != 0x7fff) {
				break;
			}

			if (Value32 == 0 &&
			    pDevice->RequestedMediaType ==
			    LM_REQUESTED_MEDIA_TYPE_UTP_10MBPS) {
				break;
			}
			MM_Wait (10);
		}

		/* Use the current line settings for "auto" mode. */
		if (pDevice->RequestedMediaType == LM_REQUESTED_MEDIA_TYPE_AUTO ||
		    pDevice->RequestedMediaType == LM_REQUESTED_MEDIA_TYPE_UTP_AUTO) {
			if (Value32 & PHY_CTRL_AUTO_NEG_ENABLE) {
				CurrentLinkStatus = LM_STATUS_LINK_ACTIVE;

				/* We may be exiting low power mode and the link is in */
				/* 10mb.  In this case, we need to restart autoneg. */
				LM_ReadPhy (pDevice, BCM540X_1000BASET_CTRL_REG,
					    &Value32);
				pDevice->advertising1000 = Value32;
				/* 5702FE supports 10/100Mb only. */
				if (T3_ASIC_REV (pDevice->ChipRevId) !=
				    T3_ASIC_REV_5703 ||
				    pDevice->BondId != GRC_MISC_BD_ID_5702FE) {
					if (!(Value32 & (BCM540X_AN_AD_1000BASET_HALF |
							 BCM540X_AN_AD_1000BASET_FULL))) {
						CurrentLinkStatus =
							LM_STATUS_LINK_SETTING_MISMATCH;
					}
				}
			} else {
				CurrentLinkStatus = LM_STATUS_LINK_SETTING_MISMATCH;
			}
		} else {
			/* Force line settings. */
			/* Use the current setting if it matches the user's requested */
			/* setting. */
			LM_ReadPhy (pDevice, PHY_CTRL_REG, &Value32);
			if ((pDevice->LineSpeed == CurrentLineSpeed) &&
			    (pDevice->DuplexMode == CurrentDuplexMode)) {
				/* Auto-neg enable bit must also agree with
				 * the DisableAutoNeg request. */
				if ((pDevice->DisableAutoNeg &&
				     !(Value32 & PHY_CTRL_AUTO_NEG_ENABLE)) ||
				    (!pDevice->DisableAutoNeg &&
				     (Value32 & PHY_CTRL_AUTO_NEG_ENABLE))) {
					CurrentLinkStatus = LM_STATUS_LINK_ACTIVE;
				} else {
					CurrentLinkStatus =
						LM_STATUS_LINK_SETTING_MISMATCH;
				}
			} else {
				CurrentLinkStatus = LM_STATUS_LINK_SETTING_MISMATCH;
			}
		}

		/* Save line settings. */
		pDevice->LineSpeed = CurrentLineSpeed;
		pDevice->DuplexMode = CurrentDuplexMode;
		pDevice->MediaType = LM_MEDIA_TYPE_UTP;
	}

	return CurrentLinkStatus;
}				/* LM_InitBcm540xPhy */

/******************************************************************************/
/* Description:                                                               */
/*    Resolve and program PAUSE (flow control) settings from the local and    */
/*    link-partner advertisements.                                            */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
LM_STATUS LM_SetFlowControl (PLM_DEVICE_BLOCK pDevice,
			     LM_UINT32 LocalPhyAd, LM_UINT32 RemotePhyAd)
{
	LM_FLOW_CONTROL FlowCap;

	/* Resolve flow control. */
	FlowCap = LM_FLOW_CONTROL_NONE;

	/* See Table 28B-3 of 802.3ab-1999 spec.
	 */
	if (pDevice->FlowControlCap & LM_FLOW_CONTROL_AUTO_PAUSE) {
		/* Auto mode: resolve per the 802.3 pause resolution table
		 * from the local and remote PAUSE/ASYM_PAUSE bits. */
		if (LocalPhyAd & PHY_AN_AD_PAUSE_CAPABLE) {
			if (LocalPhyAd & PHY_AN_AD_ASYM_PAUSE) {
				if (RemotePhyAd & PHY_LINK_PARTNER_PAUSE_CAPABLE) {
					FlowCap = LM_FLOW_CONTROL_TRANSMIT_PAUSE |
						LM_FLOW_CONTROL_RECEIVE_PAUSE;
				} else if (RemotePhyAd & PHY_LINK_PARTNER_ASYM_PAUSE) {
					FlowCap = LM_FLOW_CONTROL_RECEIVE_PAUSE;
				}
			} else {
				if (RemotePhyAd & PHY_LINK_PARTNER_PAUSE_CAPABLE) {
					FlowCap = LM_FLOW_CONTROL_TRANSMIT_PAUSE |
						LM_FLOW_CONTROL_RECEIVE_PAUSE;
				}
			}
		} else if (LocalPhyAd & PHY_AN_AD_ASYM_PAUSE) {
			if ((RemotePhyAd & PHY_LINK_PARTNER_PAUSE_CAPABLE) &&
			    (RemotePhyAd & PHY_LINK_PARTNER_ASYM_PAUSE)) {
				FlowCap = LM_FLOW_CONTROL_TRANSMIT_PAUSE;
			}
		}
	} else {
		/* Fixed configuration: use what the caller requested. */
		FlowCap = pDevice->FlowControlCap;
	}

	/* Enable/disable rx PAUSE. */
	pDevice->RxMode &= ~RX_MODE_ENABLE_FLOW_CONTROL;

	if (FlowCap & LM_FLOW_CONTROL_RECEIVE_PAUSE &&
	    (pDevice->FlowControlCap == LM_FLOW_CONTROL_AUTO_PAUSE ||
	     pDevice->FlowControlCap & LM_FLOW_CONTROL_RECEIVE_PAUSE)) {
		pDevice->FlowControl |= LM_FLOW_CONTROL_RECEIVE_PAUSE;
		pDevice->RxMode |= RX_MODE_ENABLE_FLOW_CONTROL;
	}
	REG_WR (pDevice, MacCtrl.RxMode, pDevice->RxMode);

	/* Enable/disable tx PAUSE. */
	pDevice->TxMode &= ~TX_MODE_ENABLE_FLOW_CONTROL;

	if (FlowCap & LM_FLOW_CONTROL_TRANSMIT_PAUSE &&
	    (pDevice->FlowControlCap == LM_FLOW_CONTROL_AUTO_PAUSE ||
	     pDevice->FlowControlCap & LM_FLOW_CONTROL_TRANSMIT_PAUSE)) {
		pDevice->FlowControl |= LM_FLOW_CONTROL_TRANSMIT_PAUSE;
		pDevice->TxMode |= TX_MODE_ENABLE_FLOW_CONTROL;
	}
	REG_WR (pDevice, MacCtrl.TxMode, pDevice->TxMode);

	return LM_STATUS_SUCCESS;
}

#if INCLUDE_TBI_SUPPORT
/******************************************************************************/
/* Description:                                                               */
/*    Initialize the BCM8002 SERDES PHY (fiber/TBI).                          */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
STATIC LM_STATUS LM_InitBcm800xPhy (PLM_DEVICE_BLOCK pDevice)
{
	LM_UINT32 Value32;
	LM_UINT32 j;

	Value32 = REG_RD (pDevice, MacCtrl.Status);

	/* Reset the SERDES during init and when we have link. */
	if (!pDevice->InitDone || Value32 & MAC_STATUS_PCS_SYNCED) {
		/* Set PLL lock range. */
		LM_WritePhy (pDevice, 0x16, 0x8007);

		/* Software reset. */
		LM_WritePhy (pDevice, 0x00, 0x8000);

		/* Wait for reset to complete. */
		for (j = 0; j < 500; j++) {
			MM_Wait (10);
		}

		/* Config mode; select PMA/Ch 1 regs. */
		LM_WritePhy (pDevice, 0x10, 0x8411);

		/* Enable auto-lock and comdet, select txclk for tx. */
		LM_WritePhy (pDevice, 0x11, 0x0a10);

		LM_WritePhy (pDevice, 0x18, 0x00a0);
		LM_WritePhy (pDevice, 0x16, 0x41ff);

		/* Assert and deassert POR. */
		LM_WritePhy (pDevice, 0x13, 0x0400);
		MM_Wait (40);
		LM_WritePhy (pDevice, 0x13, 0x0000);

		LM_WritePhy (pDevice, 0x11, 0x0a50);
		MM_Wait (40);
		LM_WritePhy (pDevice, 0x11, 0x0a10);

		/* Delay for signal to stabilize. */
		for (j = 0; j < 15000; j++) {
			MM_Wait (10);
		}

		/* Deselect the channel register so we can read the PHY id later.
		 */
		LM_WritePhy (pDevice, 0x10, 0x8011);
	}

	return LM_STATUS_SUCCESS;
}

/******************************************************************************/
/* Description:                                                               */
/*    Bring up the fiber (TBI) link: configure the SERDES, run 802.3z         */
/*    auto-negotiation, resolve flow control, and set the link LEDs.          */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
STATIC LM_STATUS LM_SetupFiberPhy (PLM_DEVICE_BLOCK pDevice)
{
	LM_STATUS CurrentLinkStatus;
	AUTONEG_STATUS AnStatus = 0;
	LM_UINT32 Value32;
	LM_UINT32 Cnt;
	LM_UINT32 j, k;

	pDevice->MacMode &= ~(MAC_MODE_HALF_DUPLEX | MAC_MODE_PORT_MODE_MASK);

	/* Initialize the send_config register. */
	REG_WR (pDevice, MacCtrl.TxAutoNeg, 0);

	/* Enable TBI and full duplex mode. */
	pDevice->MacMode |= MAC_MODE_PORT_MODE_TBI;
	REG_WR (pDevice, MacCtrl.Mode, pDevice->MacMode);

	/* Initialize the BCM8002 SERDES PHY. */
	switch (pDevice->PhyId & PHY_ID_MASK) {
	case PHY_BCM8002_PHY_ID:
		LM_InitBcm800xPhy (pDevice);
		break;

	default:
		break;
	}

	/* Enable link change interrupt. */
	REG_WR (pDevice, MacCtrl.MacEvent, MAC_EVENT_ENABLE_LINK_STATE_CHANGED_ATTN);

	/* Default to link down. */
	CurrentLinkStatus = LM_STATUS_LINK_DOWN;

	/* Get the link status. */
	Value32 = REG_RD (pDevice, MacCtrl.Status);
	if (Value32 & MAC_STATUS_PCS_SYNCED) {
		if ((pDevice->RequestedMediaType == LM_REQUESTED_MEDIA_TYPE_AUTO) ||
		    (pDevice->DisableAutoNeg == FALSE)) {
			/* auto-negotiation mode. */
			/* Initialize the autoneg default capabilities. */
			AutonegInit (&pDevice->AnInfo);

			/* Set the context pointer to point to the main device structure. */
			pDevice->AnInfo.pContext = pDevice;

			/* Setup flow control advertisement register. */
			Value32 = GetPhyAdFlowCntrlSettings (pDevice);
			if (Value32 & PHY_AN_AD_PAUSE_CAPABLE) {
				pDevice->AnInfo.mr_adv_sym_pause = 1;
			} else {
				pDevice->AnInfo.mr_adv_sym_pause = 0;
			}

			if (Value32 & PHY_AN_AD_ASYM_PAUSE) {
				pDevice->AnInfo.mr_adv_asym_pause = 1;
			} else {
				pDevice->AnInfo.mr_adv_asym_pause = 0;
			}

			/* Try to autoneg up to six times. */
			if (pDevice->IgnoreTbiLinkChange) {
				Cnt = 1;
			} else {
				Cnt = 6;
			}
			for (j = 0; j < Cnt; j++) {
				/* Restart config exchange: drop the port mode,
				 * then re-enable with SEND_CONFIGS set. */
				REG_WR (pDevice, MacCtrl.TxAutoNeg, 0);

				Value32 = pDevice->MacMode & ~MAC_MODE_PORT_MODE_MASK;
				REG_WR (pDevice, MacCtrl.Mode, Value32);
				MM_Wait (20);

				REG_WR (pDevice, MacCtrl.Mode,
					pDevice->MacMode | MAC_MODE_SEND_CONFIGS);

				MM_Wait (20);

				pDevice->AnInfo.State = AN_STATE_UNKNOWN;
				pDevice->AnInfo.CurrentTime_us = 0;

				REG_WR (pDevice, Grc.Timer, 0);
				/* Drive the 802.3z autoneg state machine for
				 * up to 75 ms (GRC timer supplies the clock). */
				for (k = 0; (pDevice->AnInfo.CurrentTime_us < 75000) &&
				     (k < 75000); k++) {
					AnStatus = Autoneg8023z (&pDevice->AnInfo);

					if ((AnStatus == AUTONEG_STATUS_DONE) ||
					    (AnStatus == AUTONEG_STATUS_FAILED)) {
						break;
					}

					pDevice->AnInfo.CurrentTime_us =
						REG_RD (pDevice, Grc.Timer);
				}
				if ((AnStatus == AUTONEG_STATUS_DONE) ||
				    (AnStatus == AUTONEG_STATUS_FAILED)) {
					break;
				}
				if (j >= 1) {
					/* Give up early if PCS sync was lost. */
					if (!(REG_RD (pDevice, MacCtrl.Status) &
					      MAC_STATUS_PCS_SYNCED)) {
						break;
					}
				}
			}

			/* Stop sending configs. */
			MM_AnTxIdle (&pDevice->AnInfo);

			/* Resolve flow control settings. */
			if ((AnStatus == AUTONEG_STATUS_DONE) &&
			    pDevice->AnInfo.mr_an_complete &&
			    pDevice->AnInfo.mr_link_ok &&
			    pDevice->AnInfo.mr_lp_adv_full_duplex) {
				LM_UINT32 RemotePhyAd;
				LM_UINT32 LocalPhyAd;

				LocalPhyAd = 0;
				if (pDevice->AnInfo.mr_adv_sym_pause) {
					LocalPhyAd |= PHY_AN_AD_PAUSE_CAPABLE;
				}

				if (pDevice->AnInfo.mr_adv_asym_pause) {
					LocalPhyAd |= PHY_AN_AD_ASYM_PAUSE;
				}

				RemotePhyAd = 0;
				if (pDevice->AnInfo.mr_lp_adv_sym_pause) {
					RemotePhyAd |= PHY_LINK_PARTNER_PAUSE_CAPABLE;
				}

				if (pDevice->AnInfo.mr_lp_adv_asym_pause) {
					RemotePhyAd |= PHY_LINK_PARTNER_ASYM_PAUSE;
				}

				LM_SetFlowControl (pDevice, LocalPhyAd, RemotePhyAd);

				CurrentLinkStatus = LM_STATUS_LINK_ACTIVE;
			}
			/* Clear latched sync/config-changed attentions. */
			for (j = 0; j < 30; j++) {
				MM_Wait (20);
				REG_WR (pDevice, MacCtrl.Status,
					MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED);
				MM_Wait (20);
				if ((REG_RD (pDevice, MacCtrl.Status) &
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED)) == 0)
					break;
			}
			if (pDevice->PollTbiLink) {
				Value32 = REG_RD (pDevice, MacCtrl.Status);
				if (Value32 &
				    MAC_STATUS_RECEIVING_CFG) {
					pDevice->IgnoreTbiLinkChange = TRUE;
				} else {
					pDevice->IgnoreTbiLinkChange = FALSE;
				}
			}
			Value32 = REG_RD (pDevice, MacCtrl.Status);
			/* Synced and no incoming config stream: treat the
			 * link as up even if autoneg did not complete. */
			if (CurrentLinkStatus == LM_STATUS_LINK_DOWN &&
			    (Value32 & MAC_STATUS_PCS_SYNCED) &&
			    ((Value32 & MAC_STATUS_RECEIVING_CFG) == 0)) {
				CurrentLinkStatus = LM_STATUS_LINK_ACTIVE;
			}
		} else {
			/* We are forcing line speed. */
			pDevice->FlowControlCap &= ~LM_FLOW_CONTROL_AUTO_PAUSE;
			LM_SetFlowControl (pDevice, 0, 0);

			CurrentLinkStatus = LM_STATUS_LINK_ACTIVE;
			REG_WR (pDevice, MacCtrl.Mode,
				pDevice->MacMode | MAC_MODE_SEND_CONFIGS);
		}
	}

	/* Set the link polarity bit. */
	pDevice->MacMode &= ~MAC_MODE_LINK_POLARITY;
	REG_WR (pDevice, MacCtrl.Mode, pDevice->MacMode);

	/* Acknowledge the link-changed bit in the status block. */
	pDevice->pStatusBlkVirt->Status = STATUS_BLOCK_UPDATED |
		(pDevice->pStatusBlkVirt->Status & ~STATUS_BLOCK_LINK_CHANGED_STATUS);

	for (j = 0; j < 100; j++) {
		REG_WR (pDevice, MacCtrl.Status,
			MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED);
		MM_Wait (5);
		if ((REG_RD (pDevice, MacCtrl.Status) &
		     (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	Value32 = REG_RD (pDevice, MacCtrl.Status);
	if ((Value32 & MAC_STATUS_PCS_SYNCED) == 0) {
		CurrentLinkStatus = LM_STATUS_LINK_DOWN;
		if (pDevice->DisableAutoNeg == FALSE) {
			/* Pulse SEND_CONFIGS to kick the partner. */
			REG_WR (pDevice, MacCtrl.Mode,
				pDevice->MacMode | MAC_MODE_SEND_CONFIGS);
			MM_Wait (1);
			REG_WR (pDevice, MacCtrl.Mode, pDevice->MacMode);
		}
	}

	/* Initialize the current link status: fiber is always 1000/full
	 * when up; drive the LEDs accordingly. */
	if (CurrentLinkStatus == LM_STATUS_LINK_ACTIVE) {
		pDevice->LineSpeed = LM_LINE_SPEED_1000MBPS;
		pDevice->DuplexMode = LM_DUPLEX_MODE_FULL;
		REG_WR (pDevice, MacCtrl.LedCtrl,
			LED_CTRL_OVERRIDE_LINK_LED | LED_CTRL_1000MBPS_LED_ON);
	} else {
		pDevice->LineSpeed = LM_LINE_SPEED_UNKNOWN;
		pDevice->DuplexMode = LM_DUPLEX_MODE_UNKNOWN;
		REG_WR (pDevice, MacCtrl.LedCtrl,
			LED_CTRL_OVERRIDE_LINK_LED | LED_CTRL_OVERRIDE_TRAFFIC_LED);
	}

	/* Indicate link status to the OS layer only on a change. */
	if (pDevice->LinkStatus != CurrentLinkStatus) {
		pDevice->LinkStatus = CurrentLinkStatus;
		MM_IndicateStatus (pDevice, CurrentLinkStatus);
	}

	return LM_STATUS_SUCCESS;
}
#endif				/* INCLUDE_TBI_SUPPORT */

/******************************************************************************/
/* Description:                                                               */
/*    Bring up the copper (UTP) link: init the PHY, resolve speed/duplex and  */
/*    flow control, and program the MAC port/duplex mode to match.            */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
LM_STATUS LM_SetupCopperPhy (PLM_DEVICE_BLOCK pDevice)
{
	LM_STATUS CurrentLinkStatus;
	LM_UINT32 Value32;

	/* Assume there is not link first. */
	CurrentLinkStatus = LM_STATUS_LINK_DOWN;

	/* Disable phy link change attention. */
	REG_WR (pDevice, MacCtrl.MacEvent, 0);

	/* Clear link change attention. */
	REG_WR (pDevice, MacCtrl.Status,
		MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED);

	/* Disable auto-polling for the moment. */
	pDevice->MiMode = 0xc0000;
	REG_WR (pDevice, MacCtrl.MiMode, pDevice->MiMode);
	MM_Wait (40);

	/* Determine the requested line speed and duplex. */
	pDevice->OldLineSpeed = pDevice->LineSpeed;
	LM_TranslateRequestedMediaType (pDevice->RequestedMediaType,
					&pDevice->MediaType,
					&pDevice->LineSpeed,
					&pDevice->DuplexMode);

	/* Initialize the phy chip. */
	switch (pDevice->PhyId & PHY_ID_MASK) {
	case PHY_BCM5400_PHY_ID:
	case PHY_BCM5401_PHY_ID:
	case PHY_BCM5411_PHY_ID:
	case PHY_BCM5701_PHY_ID:
	case PHY_BCM5703_PHY_ID:
	case PHY_BCM5704_PHY_ID:
		CurrentLinkStatus = LM_InitBcm540xPhy (pDevice);
		break;

	default:
		break;
	}

	if (CurrentLinkStatus == LM_STATUS_LINK_SETTING_MISMATCH) {
		CurrentLinkStatus = LM_STATUS_LINK_DOWN;
	}

	/* Setup flow control. */
	pDevice->FlowControl = LM_FLOW_CONTROL_NONE;
	if (CurrentLinkStatus == LM_STATUS_LINK_ACTIVE) {
		LM_FLOW_CONTROL FlowCap;	/* Flow control capability.
*/ FlowCap = LM_FLOW_CONTROL_NONE; if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL) { if (pDevice->DisableAutoNeg == FALSE || pDevice->RequestedMediaType == LM_REQUESTED_MEDIA_TYPE_AUTO || pDevice->RequestedMediaType == LM_REQUESTED_MEDIA_TYPE_UTP_AUTO) { LM_UINT32 ExpectedPhyAd; LM_UINT32 LocalPhyAd; LM_UINT32 RemotePhyAd; LM_ReadPhy (pDevice, PHY_AN_AD_REG, &LocalPhyAd); pDevice->advertising = LocalPhyAd; LocalPhyAd &= (PHY_AN_AD_ASYM_PAUSE | PHY_AN_AD_PAUSE_CAPABLE); ExpectedPhyAd = GetPhyAdFlowCntrlSettings (pDevice); if (LocalPhyAd != ExpectedPhyAd) { CurrentLinkStatus = LM_STATUS_LINK_DOWN; } else { LM_ReadPhy (pDevice, PHY_LINK_PARTNER_ABILITY_REG, &RemotePhyAd); LM_SetFlowControl (pDevice, LocalPhyAd, RemotePhyAd); } } else { pDevice->FlowControlCap &= ~LM_FLOW_CONTROL_AUTO_PAUSE; LM_SetFlowControl (pDevice, 0, 0); } } } if (CurrentLinkStatus == LM_STATUS_LINK_DOWN) { LM_ForceAutoNeg (pDevice, pDevice->RequestedMediaType); /* If we force line speed, we make get link right away. */ LM_ReadPhy (pDevice, PHY_STATUS_REG, &Value32); LM_ReadPhy (pDevice, PHY_STATUS_REG, &Value32); if (Value32 & PHY_STATUS_LINK_PASS) { CurrentLinkStatus = LM_STATUS_LINK_ACTIVE; } } /* GMII interface. */ pDevice->MacMode &= ~MAC_MODE_PORT_MODE_MASK; if (CurrentLinkStatus == LM_STATUS_LINK_ACTIVE) { if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS || pDevice->LineSpeed == LM_LINE_SPEED_10MBPS) { pDevice->MacMode |= MAC_MODE_PORT_MODE_MII; } else { pDevice->MacMode |= MAC_MODE_PORT_MODE_GMII; } } else { pDevice->MacMode |= MAC_MODE_PORT_MODE_GMII; } /* Set the MAC to operate in the appropriate duplex mode. */ pDevice->MacMode &= ~MAC_MODE_HALF_DUPLEX; if (pDevice->DuplexMode == LM_DUPLEX_MODE_HALF) { pDevice->MacMode |= MAC_MODE_HALF_DUPLEX; } /* Set the link polarity bit. 
*/ pDevice->MacMode &= ~MAC_MODE_LINK_POLARITY; if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700) { if ((pDevice->LedMode == LED_MODE_LINK10) || (CurrentLinkStatus == LM_STATUS_LINK_ACTIVE && pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)) { pDevice->MacMode |= MAC_MODE_LINK_POLARITY; } } else { if (CurrentLinkStatus == LM_STATUS_LINK_ACTIVE) { pDevice->MacMode |= MAC_MODE_LINK_POLARITY; } /* Set LED mode. */ if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700 || T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5701) { Value32 = LED_CTRL_PHY_MODE_1; } else { if (pDevice->LedMode == LED_MODE_OUTPUT) { Value32 = LED_CTRL_PHY_MODE_2; } else { Value32 = LED_CTRL_PHY_MODE_1; } } REG_WR (pDevice, MacCtrl.LedCtrl, Value32); } REG_WR (pDevice, MacCtrl.Mode, pDevice->MacMode); /* Enable auto polling. */ if (pDevice->PhyIntMode == T3_PHY_INT_MODE_AUTO_POLLING) { pDevice->MiMode |= MI_MODE_AUTO_POLLING_ENABLE; REG_WR (pDevice, MacCtrl.MiMode, pDevice->MiMode); } /* Enable phy link change attention. */ if (pDevice->PhyIntMode == T3_PHY_INT_MODE_MI_INTERRUPT) { REG_WR (pDevice, MacCtrl.MacEvent, MAC_EVENT_ENABLE_MI_INTERRUPT); } else { REG_WR (pDevice, MacCtrl.MacEvent, MAC_EVENT_ENABLE_LINK_STATE_CHANGED_ATTN); } if ((T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700) && (CurrentLinkStatus == LM_STATUS_LINK_ACTIVE) && (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS) && (((pDevice->PciState & T3_PCI_STATE_CONVENTIONAL_PCI_MODE) && (pDevice->PciState & T3_PCI_STATE_BUS_SPEED_HIGH)) || !(pDevice->PciState & T3_PCI_STATE_CONVENTIONAL_PCI_MODE))) { MM_Wait (120); REG_WR (pDevice, MacCtrl.Status, MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED); MEM_WR_OFFSET (pDevice, T3_FIRMWARE_MAILBOX, T3_MAGIC_NUM_DISABLE_DMAW_ON_LINK_CHANGE); } /* Indicate link status. 
*/ if (pDevice->LinkStatus != CurrentLinkStatus) { pDevice->LinkStatus = CurrentLinkStatus; MM_IndicateStatus (pDevice, CurrentLinkStatus); } return LM_STATUS_SUCCESS; } /* LM_SetupCopperPhy */ /******************************************************************************/ /* Description: */ /* */ /* Return: */ /******************************************************************************/ LM_STATUS LM_SetupPhy (PLM_DEVICE_BLOCK pDevice) { LM_STATUS LmStatus; LM_UINT32 Value32; #if INCLUDE_TBI_SUPPORT if (pDevice->EnableTbi) { LmStatus = LM_SetupFiberPhy (pDevice); } else #endif /* INCLUDE_TBI_SUPPORT */ { LmStatus = LM_SetupCopperPhy (pDevice); } if (pDevice->ChipRevId == T3_CHIP_ID_5704_A0) { if (!(pDevice->PciState & T3_PCI_STATE_CONVENTIONAL_PCI_MODE)) { Value32 = REG_RD (pDevice, PciCfg.PciState); REG_WR (pDevice, PciCfg.PciState, Value32 | T3_PCI_STATE_RETRY_SAME_DMA); } } if ((pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS) && (pDevice->DuplexMode == LM_DUPLEX_MODE_HALF)) { REG_WR (pDevice, MacCtrl.TxLengths, 0x26ff); } else { REG_WR (pDevice, MacCtrl.TxLengths, 0x2620); } return LmStatus; } /******************************************************************************/ /* Description: */ /* */ /* Return: */ /******************************************************************************/ LM_VOID LM_ReadPhy (PLM_DEVICE_BLOCK pDevice, LM_UINT32 PhyReg, PLM_UINT32 pData32) { LM_UINT32 Value32; LM_UINT32 j; if (pDevice->PhyIntMode == T3_PHY_INT_MODE_AUTO_POLLING) { REG_WR (pDevice, MacCtrl.MiMode, pDevice->MiMode & ~MI_MODE_AUTO_POLLING_ENABLE); MM_Wait (40); } Value32 = (pDevice->PhyAddr << MI_COM_FIRST_PHY_ADDR_BIT) | ((PhyReg & MI_COM_PHY_REG_ADDR_MASK) << MI_COM_FIRST_PHY_REG_ADDR_BIT) | MI_COM_CMD_READ | MI_COM_START; REG_WR (pDevice, MacCtrl.MiCom, Value32); for (j = 0; j < 20; j++) { MM_Wait (25); Value32 = REG_RD (pDevice, MacCtrl.MiCom); if (!(Value32 & MI_COM_BUSY)) { MM_Wait (5); Value32 = REG_RD (pDevice, MacCtrl.MiCom); Value32 &= 
MI_COM_PHY_DATA_MASK; break; } } if (Value32 & MI_COM_BUSY) { Value32 = 0; } *pData32 = Value32; if (pDevice->PhyIntMode == T3_PHY_INT_MODE_AUTO_POLLING) { REG_WR (pDevice, MacCtrl.MiMode, pDevice->MiMode); MM_Wait (40); } } /* LM_ReadPhy */ /******************************************************************************/ /* Description: */ /* */ /* Return: */ /******************************************************************************/ LM_VOID LM_WritePhy (PLM_DEVICE_BLOCK pDevice, LM_UINT32 PhyReg, LM_UINT32 Data32) { LM_UINT32 Value32; LM_UINT32 j; if (pDevice->PhyIntMode == T3_PHY_INT_MODE_AUTO_POLLING) { REG_WR (pDevice, MacCtrl.MiMode, pDevice->MiMode & ~MI_MODE_AUTO_POLLING_ENABLE); MM_Wait (40); } Value32 = (pDevice->PhyAddr << MI_COM_FIRST_PHY_ADDR_BIT) | ((PhyReg & MI_COM_PHY_REG_ADDR_MASK) << MI_COM_FIRST_PHY_REG_ADDR_BIT) | (Data32 & MI_COM_PHY_DATA_MASK) | MI_COM_CMD_WRITE | MI_COM_START; REG_WR (pDevice, MacCtrl.MiCom, Value32); for (j = 0; j < 20; j++) { MM_Wait (25); Value32 = REG_RD (pDevice, MacCtrl.MiCom); if (!(Value32 & MI_COM_BUSY)) { MM_Wait (5); break; } } if (pDevice->PhyIntMode == T3_PHY_INT_MODE_AUTO_POLLING) { REG_WR (pDevice, MacCtrl.MiMode, pDevice->MiMode); MM_Wait (40); } } /* LM_WritePhy */ /******************************************************************************/ /* Description: */ /* */ /* Return: */ /******************************************************************************/ LM_STATUS LM_SetPowerState (PLM_DEVICE_BLOCK pDevice, LM_POWER_STATE PowerLevel) { LM_UINT32 PmeSupport; LM_UINT32 Value32; LM_UINT32 PmCtrl; /* make sureindirect accesses are enabled */ MM_WriteConfig32 (pDevice, T3_PCI_MISC_HOST_CTRL_REG, pDevice->MiscHostCtrl); /* Clear the PME_ASSERT bit and the power state bits. Also enable */ /* the PME bit. */ MM_ReadConfig32 (pDevice, T3_PCI_PM_STATUS_CTRL_REG, &PmCtrl); PmCtrl |= T3_PM_PME_ASSERTED; PmCtrl &= ~T3_PM_POWER_STATE_MASK; /* Set the appropriate power state. 
*/ if (PowerLevel == LM_POWER_STATE_D0) { /* Bring the card out of low power mode. */ PmCtrl |= T3_PM_POWER_STATE_D0; MM_WriteConfig32 (pDevice, T3_PCI_PM_STATUS_CTRL_REG, PmCtrl); REG_WR (pDevice, Grc.LocalCtrl, pDevice->GrcLocalCtrl); MM_Wait (40); #if 0 /* Bugfix by jmb...can't call WritePhy here because pDevice not fully initialized */ LM_WritePhy (pDevice, BCM5401_AUX_CTRL, 0x02); #endif return LM_STATUS_SUCCESS; } else if (PowerLevel == LM_POWER_STATE_D1) { PmCtrl |= T3_PM_POWER_STATE_D1; } else if (PowerLevel == LM_POWER_STATE_D2) { PmCtrl |= T3_PM_POWER_STATE_D2; } else if (PowerLevel == LM_POWER_STATE_D3) { PmCtrl |= T3_PM_POWER_STATE_D3; } else { return LM_STATUS_FAILURE; } PmCtrl |= T3_PM_PME_ENABLE; /* Mask out all interrupts so LM_SetupPhy won't be called while we are */ /* setting new line speed. */ Value32 = REG_RD (pDevice, PciCfg.MiscHostCtrl); REG_WR (pDevice, PciCfg.MiscHostCtrl, Value32 | MISC_HOST_CTRL_MASK_PCI_INT); if (!pDevice->RestoreOnWakeUp) { pDevice->RestoreOnWakeUp = TRUE; pDevice->WakeUpDisableAutoNeg = pDevice->DisableAutoNeg; pDevice->WakeUpRequestedMediaType = pDevice->RequestedMediaType; } /* Force auto-negotiation to 10 line speed. */ pDevice->DisableAutoNeg = FALSE; pDevice->RequestedMediaType = LM_REQUESTED_MEDIA_TYPE_UTP_10MBPS; LM_SetupPhy (pDevice); /* Put the driver in the initial state, and go through the power down */ /* sequence. */ LM_Halt (pDevice); MM_ReadConfig32 (pDevice, T3_PCI_PM_CAP_REG, &PmeSupport); if (pDevice->WakeUpModeCap != LM_WAKE_UP_MODE_NONE) { /* Enable WOL. */ LM_WritePhy (pDevice, BCM5401_AUX_CTRL, 0x5a); MM_Wait (40); /* Set LED mode. 
*/ if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700 || T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5701) { Value32 = LED_CTRL_PHY_MODE_1; } else { if (pDevice->LedMode == LED_MODE_OUTPUT) { Value32 = LED_CTRL_PHY_MODE_2; } else { Value32 = LED_CTRL_PHY_MODE_1; } } Value32 = MAC_MODE_PORT_MODE_MII; if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700) { if (pDevice->LedMode == LED_MODE_LINK10 || pDevice->WolSpeed == WOL_SPEED_10MB) { Value32 |= MAC_MODE_LINK_POLARITY; } } else { Value32 |= MAC_MODE_LINK_POLARITY; } REG_WR (pDevice, MacCtrl.Mode, Value32); MM_Wait (40); MM_Wait (40); MM_Wait (40); /* Always enable magic packet wake-up if we have vaux. */ if ((PmeSupport & T3_PCI_PM_CAP_PME_D3COLD) && (pDevice->WakeUpModeCap & LM_WAKE_UP_MODE_MAGIC_PACKET)) { Value32 |= MAC_MODE_DETECT_MAGIC_PACKET_ENABLE; } REG_WR (pDevice, MacCtrl.Mode, Value32); /* Enable the receiver. */ REG_WR (pDevice, MacCtrl.RxMode, RX_MODE_ENABLE); } /* Disable tx/rx clocks, and seletect an alternate clock. 
*/ if (pDevice->WolSpeed == WOL_SPEED_100MB) { if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700 || T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5701) { Value32 = T3_PCI_DISABLE_RX_CLOCK | T3_PCI_DISABLE_TX_CLOCK | T3_PCI_SELECT_ALTERNATE_CLOCK; } else { Value32 = T3_PCI_SELECT_ALTERNATE_CLOCK; } REG_WR (pDevice, PciCfg.ClockCtrl, Value32); MM_Wait (40); if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700 || T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5701) { Value32 = T3_PCI_DISABLE_RX_CLOCK | T3_PCI_DISABLE_TX_CLOCK | T3_PCI_SELECT_ALTERNATE_CLOCK | T3_PCI_44MHZ_CORE_CLOCK; } else { Value32 = T3_PCI_SELECT_ALTERNATE_CLOCK | T3_PCI_44MHZ_CORE_CLOCK; } REG_WR (pDevice, PciCfg.ClockCtrl, Value32); MM_Wait (40); if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700 || T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5701) { Value32 = T3_PCI_DISABLE_RX_CLOCK | T3_PCI_DISABLE_TX_CLOCK | T3_PCI_44MHZ_CORE_CLOCK; } else { Value32 = T3_PCI_44MHZ_CORE_CLOCK; } REG_WR (pDevice, PciCfg.ClockCtrl, Value32); } else { if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700 || T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5701) { Value32 = T3_PCI_DISABLE_RX_CLOCK | T3_PCI_DISABLE_TX_CLOCK | T3_PCI_SELECT_ALTERNATE_CLOCK | T3_PCI_POWER_DOWN_PCI_PLL133; } else { Value32 = T3_PCI_SELECT_ALTERNATE_CLOCK | T3_PCI_POWER_DOWN_PCI_PLL133; } REG_WR (pDevice, PciCfg.ClockCtrl, Value32); } MM_Wait (40); if (!pDevice->EepromWp && (pDevice->WakeUpModeCap != LM_WAKE_UP_MODE_NONE)) { /* Switch adapter to auxilliary power. */ if (T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5700 || T3_ASIC_REV (pDevice->ChipRevId) == T3_ASIC_REV_5701) { /* GPIO0 = 1, GPIO1 = 1, GPIO2 = 0. */ REG_WR (pDevice, Grc.LocalCtrl, pDevice->GrcLocalCtrl | GRC_MISC_LOCAL_CTRL_GPIO_OE0 | GRC_MISC_LOCAL_CTRL_GPIO_OE1 | GRC_MISC_LOCAL_CTRL_GPIO_OE2 | GRC_MISC_LOCAL_CTRL_GPIO_OUTPUT0 | GRC_MISC_LOCAL_CTRL_GPIO_OUTPUT1); MM_Wait (40); } else { /* GPIO0 = 0, GPIO1 = 1, GPIO2 = 1. 
*/ REG_WR (pDevice, Grc.LocalCtrl, pDevice->GrcLocalCtrl | GRC_MISC_LOCAL_CTRL_GPIO_OE0 | GRC_MISC_LOCAL_CTRL_GPIO_OE1 | GRC_MISC_LOCAL_CTRL_GPIO_OE2 | GRC_MISC_LOCAL_CTRL_GPIO_OUTPUT1 | GRC_MISC_LOCAL_CTRL_GPIO_OUTPUT2); MM_Wait (40); /* GPIO0 = 1, GPIO1 = 1, GPIO2 = 1. */ REG_WR (pDevice, Grc.LocalCtrl, pDevice->GrcLocalCtrl | GRC_MISC_LOCAL_CTRL_GPIO_OE0 | GRC_MISC_LOCAL_CTRL_GPIO_OE1 | GRC_MISC_LOCAL_CTRL_GPIO_OE2 | GRC_MISC_LOCAL_CTRL_GPIO_OUTPUT0 | GRC_MISC_LOCAL_CTRL_GPIO_OUTPUT1 | GRC_MISC_LOCAL_CTRL_GPIO_OUTPUT2); MM_Wait (40); /* GPIO0 = 1, GPIO1 = 1, GPIO2 = 0. */ REG_WR (pDevice, Grc.LocalCtrl, pDevice->GrcLocalCtrl | GRC_MISC_LOCAL_CTRL_GPIO_OE0 | GRC_MISC_LOCAL_CTRL_GPIO_OE1 | GRC_MISC_LOCAL_CTRL_GPIO_OE2 | GRC_MISC_LOCAL_CTRL_GPIO_OUTPUT0 | GRC_MISC_LOCAL_CTRL_GPIO_OUTPUT1); MM_Wait (40); } } /* Set the phy to low power mode. */ /* Put the the hardware in low power mode. */ MM_WriteConfig32 (pDevice, T3_PCI_PM_STATUS_CTRL_REG, PmCtrl); return LM_STATUS_SUCCESS; } /* LM_SetPowerState */ /******************************************************************************/ /* Description: */ /* */ /* Return: */ /******************************************************************************/ static LM_UINT32 GetPhyAdFlowCntrlSettings (PLM_DEVICE_BLOCK pDevice) { LM_UINT32 Value32; Value32 = 0; /* Auto negotiation flow control only when autonegotiation is enabled. */ if (pDevice->DisableAutoNeg == FALSE || pDevice->RequestedMediaType == LM_REQUESTED_MEDIA_TYPE_AUTO || pDevice->RequestedMediaType == LM_REQUESTED_MEDIA_TYPE_UTP_AUTO) { /* Please refer to Table 28B-3 of the 802.3ab-1999 spec. 
*/ if ((pDevice->FlowControlCap == LM_FLOW_CONTROL_AUTO_PAUSE) || ((pDevice->FlowControlCap & LM_FLOW_CONTROL_RECEIVE_PAUSE) && (pDevice-> FlowControlCap & LM_FLOW_CONTROL_TRANSMIT_PAUSE))) { Value32 |= PHY_AN_AD_PAUSE_CAPABLE; } else if (pDevice-> FlowControlCap & LM_FLOW_CONTROL_TRANSMIT_PAUSE) { Value32 |= PHY_AN_AD_ASYM_PAUSE; } else if (pDevice-> FlowControlCap & LM_FLOW_CONTROL_RECEIVE_PAUSE) { Value32 |= PHY_AN_AD_PAUSE_CAPABLE | PHY_AN_AD_ASYM_PAUSE; } } return Value32; } /******************************************************************************/ /* Description: */ /* */ /* Return: */ /* LM_STATUS_FAILURE */ /* LM_STATUS_SUCCESS */ /* */ /******************************************************************************/ static LM_STATUS LM_ForceAutoNegBcm540xPhy (PLM_DEVICE_BLOCK pDevice, LM_REQUESTED_MEDIA_TYPE RequestedMediaType) { LM_MEDIA_TYPE MediaType; LM_LINE_SPEED LineSpeed; LM_DUPLEX_MODE DuplexMode; LM_UINT32 NewPhyCtrl; LM_UINT32 Value32; LM_UINT32 Cnt; /* Get the interface type, line speed, and duplex mode. */ LM_TranslateRequestedMediaType (RequestedMediaType, &MediaType, &LineSpeed, &DuplexMode); if (pDevice->RestoreOnWakeUp) { LM_WritePhy (pDevice, BCM540X_1000BASET_CTRL_REG, 0); pDevice->advertising1000 = 0; Value32 = PHY_AN_AD_10BASET_FULL | PHY_AN_AD_10BASET_HALF; if (pDevice->WolSpeed == WOL_SPEED_100MB) { Value32 |= PHY_AN_AD_100BASETX_FULL | PHY_AN_AD_100BASETX_HALF; } Value32 |= PHY_AN_AD_PROTOCOL_802_3_CSMA_CD; Value32 |= GetPhyAdFlowCntrlSettings (pDevice); LM_WritePhy (pDevice, PHY_AN_AD_REG, Value32); pDevice->advertising = Value32; } /* Setup the auto-negotiation advertisement register. */ else if (LineSpeed == LM_LINE_SPEED_UNKNOWN) { /* Setup the 10/100 Mbps auto-negotiation advertisement register. 
*/ Value32 = PHY_AN_AD_PROTOCOL_802_3_CSMA_CD | PHY_AN_AD_10BASET_HALF | PHY_AN_AD_10BASET_FULL | PHY_AN_AD_100BASETX_FULL | PHY_AN_AD_100BASETX_HALF; Value32 |= GetPhyAdFlowCntrlSettings (pDevice); LM_WritePhy (pDevice, PHY_AN_AD_REG, Value32); pDevice->advertising = Value32; /* Advertise 1000Mbps */ Value32 = BCM540X_AN_AD_1000BASET_HALF | BCM540X_AN_AD_1000BASET_FULL; #if INCLUDE_5701_AX_FIX /* Bug: workaround for CRC error in gigabit mode when we are in */ /* slave mode. This will force the PHY to operate in */ /* master mode. */ if (pDevice->ChipRevId == T3_CHIP_ID_5701_A0 || pDevice->ChipRevId == T3_CHIP_ID_5701_B0) { Value32 |= BCM540X_CONFIG_AS_MASTER | BCM540X_ENABLE_CONFIG_AS_MASTER; } #endif LM_WritePhy (pDevice, BCM540X_1000BASET_CTRL_REG, Value32); pDevice->advertising1000 = Value32; } else { if (LineSpeed == LM_LINE_SPEED_1000MBPS) { Value32 = PHY_AN_AD_PROTOCOL_802_3_CSMA_CD; Value32 |= GetPhyAdFlowCntrlSettings (pDevice); LM_WritePhy (pDevice, PHY_AN_AD_REG, Value32); pDevice->advertising = Value32; if (DuplexMode != LM_DUPLEX_MODE_FULL) { Value32 = BCM540X_AN_AD_1000BASET_HALF; } else { Value32 = BCM540X_AN_AD_1000BASET_FULL; } LM_WritePhy (pDevice, BCM540X_1000BASET_CTRL_REG, Value32); pDevice->advertising1000 = Value32; } else if (LineSpeed == LM_LINE_SPEED_100MBPS) { LM_WritePhy (pDevice, BCM540X_1000BASET_CTRL_REG, 0); pDevice->advertising1000 = 0; if (DuplexMode != LM_DUPLEX_MODE_FULL) { Value32 = PHY_AN_AD_100BASETX_HALF; } else { Value32 = PHY_AN_AD_100BASETX_FULL; } Value32 |= PHY_AN_AD_PROTOCOL_802_3_CSMA_CD; Value32 |= GetPhyAdFlowCntrlSettings (pDevice); LM_WritePhy (pDevice, PHY_AN_AD_REG, Value32); pDevice->advertising = Value32; } else if (LineSpeed == LM_LINE_SPEED_10MBPS) { LM_WritePhy (pDevice, BCM540X_1000BASET_CTRL_REG, 0); pDevice->advertising1000 = 0; if (DuplexMode != LM_DUPLEX_MODE_FULL) { Value32 = PHY_AN_AD_10BASET_HALF; } else { Value32 = PHY_AN_AD_10BASET_FULL; } Value32 |= PHY_AN_AD_PROTOCOL_802_3_CSMA_CD; Value32 |= 
GetPhyAdFlowCntrlSettings (pDevice); LM_WritePhy (pDevice, PHY_AN_AD_REG, Value32); pDevice->advertising = Value32; } } /* Force line speed if auto-negotiation is disabled. */ if (pDevice->DisableAutoNeg && LineSpeed != LM_LINE_SPEED_UNKNOWN) { /* This code path is executed only when there is link. */ pDevice->MediaType = MediaType; pDevice->LineSpeed = LineSpeed; pDevice->DuplexMode = DuplexMode; /* Force line seepd. */ NewPhyCtrl = 0; switch (LineSpeed) { case LM_LINE_SPEED_10MBPS: NewPhyCtrl |= PHY_CTRL_SPEED_SELECT_10MBPS; break; case LM_LINE_SPEED_100MBPS: NewPhyCtrl |= PHY_CTRL_SPEED_SELECT_100MBPS; break; case LM_LINE_SPEED_1000MBPS: NewPhyCtrl |= PHY_CTRL_SPEED_SELECT_1000MBPS; break; default: NewPhyCtrl |= PHY_CTRL_SPEED_SELECT_1000MBPS; break; } if (DuplexMode == LM_DUPLEX_MODE_FULL) { NewPhyCtrl |= PHY_CTRL_FULL_DUPLEX_MODE; } /* Don't do anything if the PHY_CTRL is already what we wanted. */ LM_ReadPhy (pDevice, PHY_CTRL_REG, &Value32); if (Value32 != NewPhyCtrl) { /* Temporary bring the link down before forcing line speed. */ LM_WritePhy (pDevice, PHY_CTRL_REG, PHY_CTRL_LOOPBACK_MODE); /* Wait for link to go down. */ for (Cnt = 0; Cnt < 15000; Cnt++) { MM_Wait (10); LM_ReadPhy (pDevice, PHY_STATUS_REG, &Value32); LM_ReadPhy (pDevice, PHY_STATUS_REG, &Value32); if (!(Value32 & PHY_STATUS_LINK_PASS)) { MM_Wait (40); break; } } LM_WritePhy (pDevice, PHY_CTRL_REG, NewPhyCtrl); MM_Wait (40); } } else { LM_WritePhy (pDevice, PHY_CTRL_REG, PHY_CTRL_AUTO_NEG_ENABLE | PHY_CTRL_RESTART_AUTO_NEG); } return LM_STATUS_SUCCESS; } /* LM_ForceAutoNegBcm540xPhy */ /******************************************************************************/ /* Description: */ /* */ /* Return: */ /******************************************************************************/ static LM_STATUS LM_ForceAutoNeg (PLM_DEVICE_BLOCK pDevice, LM_REQUESTED_MEDIA_TYPE RequestedMediaType) { LM_STATUS LmStatus; /* Initialize the phy chip. 
*/ switch (pDevice->PhyId & PHY_ID_MASK) { case PHY_BCM5400_PHY_ID: case PHY_BCM5401_PHY_ID: case PHY_BCM5411_PHY_ID: case PHY_BCM5701_PHY_ID: case PHY_BCM5703_PHY_ID: case PHY_BCM5704_PHY_ID: LmStatus = LM_ForceAutoNegBcm540xPhy (pDevice, RequestedMediaType); break; default: LmStatus = LM_STATUS_FAILURE; break; } return LmStatus; } /* LM_ForceAutoNeg */ /******************************************************************************/ /* Description: */ /* */ /* Return: */ /******************************************************************************/ LM_STATUS LM_LoadFirmware (PLM_DEVICE_BLOCK pDevice, PT3_FWIMG_INFO pFwImg, LM_UINT32 LoadCpu, LM_UINT32 StartCpu) { LM_UINT32 i; LM_UINT32 address; if (LoadCpu & T3_RX_CPU_ID) { if (LM_HaltCpu (pDevice, T3_RX_CPU_ID) != LM_STATUS_SUCCESS) { return LM_STATUS_FAILURE; } /* First of all clear scrach pad memory */ for (i = 0; i < T3_RX_CPU_SPAD_SIZE; i += 4) { LM_RegWrInd (pDevice, T3_RX_CPU_SPAD_ADDR + i, 0); } /* Copy code first */ address = T3_RX_CPU_SPAD_ADDR + (pFwImg->Text.Offset & 0xffff); for (i = 0; i <= pFwImg->Text.Length; i += 4) { LM_RegWrInd (pDevice, address + i, ((LM_UINT32 *) pFwImg->Text.Buffer)[i / 4]); } address = T3_RX_CPU_SPAD_ADDR + (pFwImg->ROnlyData.Offset & 0xffff); for (i = 0; i <= pFwImg->ROnlyData.Length; i += 4) { LM_RegWrInd (pDevice, address + i, ((LM_UINT32 *) pFwImg->ROnlyData. 
Buffer)[i / 4]); } address = T3_RX_CPU_SPAD_ADDR + (pFwImg->Data.Offset & 0xffff); for (i = 0; i <= pFwImg->Data.Length; i += 4) { LM_RegWrInd (pDevice, address + i, ((LM_UINT32 *) pFwImg->Data.Buffer)[i / 4]); } } if (LoadCpu & T3_TX_CPU_ID) { if (LM_HaltCpu (pDevice, T3_TX_CPU_ID) != LM_STATUS_SUCCESS) { return LM_STATUS_FAILURE; } /* First of all clear scrach pad memory */ for (i = 0; i < T3_TX_CPU_SPAD_SIZE; i += 4) { LM_RegWrInd (pDevice, T3_TX_CPU_SPAD_ADDR + i, 0); } /* Copy code first */ address = T3_TX_CPU_SPAD_ADDR + (pFwImg->Text.Offset & 0xffff); for (i = 0; i <= pFwImg->Text.Length; i += 4) { LM_RegWrInd (pDevice, address + i, ((LM_UINT32 *) pFwImg->Text.Buffer)[i / 4]); } address = T3_TX_CPU_SPAD_ADDR + (pFwImg->ROnlyData.Offset & 0xffff); for (i = 0; i <= pFwImg->ROnlyData.Length; i += 4) { LM_RegWrInd (pDevice, address + i, ((LM_UINT32 *) pFwImg->ROnlyData. Buffer)[i / 4]); } address = T3_TX_CPU_SPAD_ADDR + (pFwImg->Data.Offset & 0xffff); for (i = 0; i <= pFwImg->Data.Length; i += 4) { LM_RegWrInd (pDevice, address + i, ((LM_UINT32 *) pFwImg->Data.Buffer)[i / 4]); } } if (StartCpu & T3_RX_CPU_ID) { /* Start Rx CPU */ REG_WR (pDevice, rxCpu.reg.state, 0xffffffff); REG_WR (pDevice, rxCpu.reg.PC, pFwImg->StartAddress); for (i = 0; i < 5; i++) { if (pFwImg->StartAddress == REG_RD (pDevice, rxCpu.reg.PC)) break; REG_WR (pDevice, rxCpu.reg.state, 0xffffffff); REG_WR (pDevice, rxCpu.reg.mode, CPU_MODE_HALT); REG_WR (pDevice, rxCpu.reg.PC, pFwImg->StartAddress); MM_Wait (1000); } REG_WR (pDevice, rxCpu.reg.state, 0xffffffff); REG_WR (pDevice, rxCpu.reg.mode, 0); } if (StartCpu & T3_TX_CPU_ID) { /* Start Tx CPU */ REG_WR (pDevice, txCpu.reg.state, 0xffffffff); REG_WR (pDevice, txCpu.reg.PC, pFwImg->StartAddress); for (i = 0; i < 5; i++) { if (pFwImg->StartAddress == REG_RD (pDevice, txCpu.reg.PC)) break; REG_WR (pDevice, txCpu.reg.state, 0xffffffff); REG_WR (pDevice, txCpu.reg.mode, CPU_MODE_HALT); REG_WR (pDevice, txCpu.reg.PC, pFwImg->StartAddress); 
MM_Wait (1000); } REG_WR (pDevice, txCpu.reg.state, 0xffffffff); REG_WR (pDevice, txCpu.reg.mode, 0); } return LM_STATUS_SUCCESS; } STATIC LM_STATUS LM_HaltCpu (PLM_DEVICE_BLOCK pDevice, LM_UINT32 cpu_number) { LM_UINT32 i; if (cpu_number == T3_RX_CPU_ID) { for (i = 0; i < 10000; i++) { REG_WR (pDevice, rxCpu.reg.state, 0xffffffff); REG_WR (pDevice, rxCpu.reg.mode, CPU_MODE_HALT); if (REG_RD (pDevice, rxCpu.reg.mode) & CPU_MODE_HALT) break; } REG_WR (pDevice, rxCpu.reg.state, 0xffffffff); REG_WR (pDevice, rxCpu.reg.mode, CPU_MODE_HALT); MM_Wait (10); } else { for (i = 0; i < 10000; i++) { REG_WR (pDevice, txCpu.reg.state, 0xffffffff); REG_WR (pDevice, txCpu.reg.mode, CPU_MODE_HALT); if (REG_RD (pDevice, txCpu.reg.mode) & CPU_MODE_HALT) break; } } return ((i == 10000) ? LM_STATUS_FAILURE : LM_STATUS_SUCCESS); } int LM_BlinkLED (PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlinkDurationSec) { LM_UINT32 Oldcfg; int j; int ret = 0; if (BlinkDurationSec == 0) { return 0; } if (BlinkDurationSec > 120) { BlinkDurationSec = 120; } Oldcfg = REG_RD (pDevice, MacCtrl.LedCtrl); for (j = 0; j < BlinkDurationSec * 2; j++) { if (j % 2) { /* Turn on the LEDs. */ REG_WR (pDevice, MacCtrl.LedCtrl, LED_CTRL_OVERRIDE_LINK_LED | LED_CTRL_1000MBPS_LED_ON | LED_CTRL_100MBPS_LED_ON | LED_CTRL_10MBPS_LED_ON | LED_CTRL_OVERRIDE_TRAFFIC_LED | LED_CTRL_BLINK_TRAFFIC_LED | LED_CTRL_TRAFFIC_LED); } else { /* Turn off the LEDs. 
*/ REG_WR (pDevice, MacCtrl.LedCtrl, LED_CTRL_OVERRIDE_LINK_LED | LED_CTRL_OVERRIDE_TRAFFIC_LED); } #ifndef EMBEDDED current->state = TASK_INTERRUPTIBLE; if (schedule_timeout (HZ / 2) != 0) { ret = -EINTR; break; } #else udelay (100000); /* 1s sleep */ #endif } REG_WR (pDevice, MacCtrl.LedCtrl, Oldcfg); return ret; } int t3_do_dma (PLM_DEVICE_BLOCK pDevice, LM_PHYSICAL_ADDRESS host_addr_phy, int length, int dma_read) { T3_DMA_DESC dma_desc; int i; LM_UINT32 dma_desc_addr; LM_UINT32 value32; REG_WR (pDevice, BufMgr.Mode, 0); REG_WR (pDevice, Ftq.Reset, 0); dma_desc.host_addr.High = host_addr_phy.High; dma_desc.host_addr.Low = host_addr_phy.Low; dma_desc.nic_mbuf = 0x2100; dma_desc.len = length; dma_desc.flags = 0x00000004; /* Generate Rx-CPU event */ if (dma_read) { dma_desc.cqid_sqid = (T3_QID_RX_BD_COMP << 8) | T3_QID_DMA_HIGH_PRI_READ; REG_WR (pDevice, DmaRead.Mode, DMA_READ_MODE_ENABLE); } else { dma_desc.cqid_sqid = (T3_QID_RX_DATA_COMP << 8) | T3_QID_DMA_HIGH_PRI_WRITE; REG_WR (pDevice, DmaWrite.Mode, DMA_WRITE_MODE_ENABLE); } dma_desc_addr = T3_NIC_DMA_DESC_POOL_ADDR; /* Writing this DMA descriptor to DMA memory */ for (i = 0; i < sizeof (T3_DMA_DESC); i += 4) { value32 = *((PLM_UINT32) (((PLM_UINT8) & dma_desc) + i)); MM_WriteConfig32 (pDevice, T3_PCI_MEM_WIN_ADDR_REG, dma_desc_addr + i); MM_WriteConfig32 (pDevice, T3_PCI_MEM_WIN_DATA_REG, cpu_to_le32 (value32)); } MM_WriteConfig32 (pDevice, T3_PCI_MEM_WIN_ADDR_REG, 0); if (dma_read) REG_WR (pDevice, Ftq.DmaHighReadFtqFifoEnqueueDequeue, dma_desc_addr); else REG_WR (pDevice, Ftq.DmaHighWriteFtqFifoEnqueueDequeue, dma_desc_addr); for (i = 0; i < 40; i++) { if (dma_read) value32 = REG_RD (pDevice, Ftq.RcvBdCompFtqFifoEnqueueDequeue); else value32 = REG_RD (pDevice, Ftq.RcvDataCompFtqFifoEnqueueDequeue); if ((value32 & 0xffff) == dma_desc_addr) break; MM_Wait (10); } return LM_STATUS_SUCCESS; } STATIC LM_STATUS LM_DmaTest (PLM_DEVICE_BLOCK pDevice, PLM_UINT8 pBufferVirt, LM_PHYSICAL_ADDRESS BufferPhy, LM_UINT32 
BufferSize) { int j; LM_UINT32 *ptr; int dma_success = 0; if (T3_ASIC_REV (pDevice->ChipRevId) != T3_ASIC_REV_5700 && T3_ASIC_REV (pDevice->ChipRevId) != T3_ASIC_REV_5701) { return LM_STATUS_SUCCESS; } while (!dma_success) { /* Fill data with incremental patterns */ ptr = (LM_UINT32 *) pBufferVirt; for (j = 0; j < BufferSize / 4; j++) *ptr++ = j; if (t3_do_dma (pDevice, BufferPhy, BufferSize, 1) == LM_STATUS_FAILURE) { return LM_STATUS_FAILURE; } MM_Wait (40); ptr = (LM_UINT32 *) pBufferVirt; /* Fill data with zero */ for (j = 0; j < BufferSize / 4; j++) *ptr++ = 0; if (t3_do_dma (pDevice, BufferPhy, BufferSize, 0) == LM_STATUS_FAILURE) { return LM_STATUS_FAILURE; } MM_Wait (40); /* Check for data */ ptr = (LM_UINT32 *) pBufferVirt; for (j = 0; j < BufferSize / 4; j++) { if (*ptr++ != j) { if ((pDevice-> DmaReadWriteCtrl & DMA_CTRL_WRITE_BOUNDARY_MASK) == DMA_CTRL_WRITE_BOUNDARY_DISABLE) { pDevice->DmaReadWriteCtrl = (pDevice-> DmaReadWriteCtrl & ~DMA_CTRL_WRITE_BOUNDARY_MASK) | DMA_CTRL_WRITE_BOUNDARY_16; REG_WR (pDevice, PciCfg.DmaReadWriteCtrl, pDevice->DmaReadWriteCtrl); break; } else { return LM_STATUS_FAILURE; } } } if (j == (BufferSize / 4)) dma_success = 1; } return LM_STATUS_SUCCESS; }
gpl-2.0
SlimLPXperia/android_kernel_sony_u8500
net/compat-wireless/compat/compat-2.6.39.c
156
3333
/* * Copyright 2011 Hauke Mehrtens <hauke@hauke-m.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Compatibility file for Linux wireless for kernels 2.6.39. */ #include <linux/compat.h> #include <linux/tty.h> #include <linux/sched.h> #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) /* * Termios Helper Methods */ static void unset_locked_termios(struct ktermios *termios, struct ktermios *old, struct ktermios *locked) { int i; #define NOSET_MASK(x, y, z) (x = ((x) & ~(z)) | ((y) & (z))) if (!locked) { printk(KERN_WARNING "Warning?!? termios_locked is NULL.\n"); return; } NOSET_MASK(termios->c_iflag, old->c_iflag, locked->c_iflag); NOSET_MASK(termios->c_oflag, old->c_oflag, locked->c_oflag); NOSET_MASK(termios->c_cflag, old->c_cflag, locked->c_cflag); NOSET_MASK(termios->c_lflag, old->c_lflag, locked->c_lflag); termios->c_line = locked->c_line ? old->c_line : termios->c_line; for (i = 0; i < NCCS; i++) termios->c_cc[i] = locked->c_cc[i] ? old->c_cc[i] : termios->c_cc[i]; /* FIXME: What should we do for i/ospeed */ } /** * tty_set_termios - update termios values * @tty: tty to update * @new_termios: desired new value * * Perform updates to the termios values set on this terminal. There * is a bit of layering violation here with n_tty in terms of the * internal knowledge of this function. * * Locking: termios_mutex */ int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) { struct ktermios old_termios; struct tty_ldisc *ld; unsigned long flags; /* * Perform the actual termios internal changes under lock. 
*/ /* FIXME: we need to decide on some locking/ordering semantics for the set_termios notification eventually */ mutex_lock(&tty->termios_mutex); old_termios = *tty->termios; *tty->termios = *new_termios; unset_locked_termios(tty->termios, &old_termios, tty->termios_locked); /* See if packet mode change of state. */ if (tty->link && tty->link->packet) { int extproc = (old_termios.c_lflag & EXTPROC) | (tty->termios->c_lflag & EXTPROC); int old_flow = ((old_termios.c_iflag & IXON) && (old_termios.c_cc[VSTOP] == '\023') && (old_termios.c_cc[VSTART] == '\021')); int new_flow = (I_IXON(tty) && STOP_CHAR(tty) == '\023' && START_CHAR(tty) == '\021'); if ((old_flow != new_flow) || extproc) { spin_lock_irqsave(&tty->ctrl_lock, flags); if (old_flow != new_flow) { tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP); if (new_flow) tty->ctrl_status |= TIOCPKT_DOSTOP; else tty->ctrl_status |= TIOCPKT_NOSTOP; } if (extproc) tty->ctrl_status |= TIOCPKT_IOCTL; spin_unlock_irqrestore(&tty->ctrl_lock, flags); wake_up_interruptible(&tty->link->read_wait); } } if (tty->ops->set_termios) (*tty->ops->set_termios)(tty, &old_termios); else tty_termios_copy_hw(tty->termios, &old_termios); ld = tty_ldisc_ref(tty); if (ld != NULL) { if (ld->ops->set_termios) (ld->ops->set_termios)(tty, &old_termios); tty_ldisc_deref(ld); } mutex_unlock(&tty->termios_mutex); return 0; } EXPORT_SYMBOL_GPL(tty_set_termios); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) */
gpl-2.0
dkhoi1997/android_kernel_samsung_aries
fs/jbd/commit.c
924
28937
/*
 * linux/fs/jbd/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 * Propagates the IO result into the uptodate flag and unlocks the buffer.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}

/*
 * When an ext3-ordered file is truncated, it is possible that many pages are
 * not successfully freed, because they are attached to a committing
 * transaction.  After the transaction commits, these pages are left on the
 * LRU, with no ->mapping, and with attached buffers.  These pages are
 * trivially reclaimable by the VM, but their apparent absence upsets the VM
 * accounting, and it makes the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under journal->j_list_lock.  The caller provided us with a ref
 * against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	/* Only a clean, sole-referenced buffer on an anonymous page
	 * (no ->mapping) is a candidate for stripping. */
	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	/* Take our own page ref so the page survives the __brelse(). */
	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	/* Drop the caller's reference and give up. */
	__brelse(bh);
}

/*
 * Decrement reference counter for data buffer. If it has been marked
 * 'BH_Freed', release it and the page to which it belongs if possible.
 */
static void release_data_buffer(struct buffer_head *bh)
{
	if (buffer_freed(bh)) {
		WARN_ON_ONCE(buffer_dirty(bh));
		/* Scrub the buffer's state so it cannot be mistaken for a
		 * live mapped buffer after the page is torn down. */
		clear_buffer_freed(bh);
		clear_buffer_mapped(bh);
		clear_buffer_new(bh);
		clear_buffer_req(bh);
		bh->b_bdev = NULL;
		release_buffer_page(bh);
	} else
		put_bh(bh);
}

/*
 * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
 * held.  For ranking reasons we must trylock.  If we lose, schedule away and
 * return 0.  j_list_lock is dropped in this case.
 */
static int inverted_lock(journal_t *journal, struct buffer_head *bh)
{
	if (!jbd_trylock_bh_state(bh)) {
		spin_unlock(&journal->j_list_lock);
		schedule();
		return 0;
	}
	return 1;
}

/* Done it all: now write the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_write_commit_record(journal_t *journal,
					transaction_t *commit_transaction)
{
	struct journal_head *descriptor;
	struct buffer_head *bh;
	journal_header_t *header;
	int ret;

	if (is_journal_aborted(journal))
		return 0;

	descriptor = journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	/* Fill in the commit block header for this transaction. */
	header = (journal_header_t *)(bh->b_data);
	header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
	header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
	header->h_sequence = cpu_to_be32(commit_transaction->t_tid);

	JBUFFER_TRACE(descriptor, "write commit block");
	set_buffer_dirty(bh);

	/* With barriers enabled, the commit block must force a cache flush
	 * (FUA) so that it hits stable storage after the journaled data. */
	if (journal->j_flags & JFS_BARRIER)
		ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_FLUSH_FUA);
	else
		ret = sync_dirty_buffer(bh);

	put_bh(bh);		/* One for getblk() */
	journal_put_journal_head(descriptor);

	return (ret == -EIO);
}

/*
 * Submit the array of buffers collected so far; each carries its own
 * reference which submit_bh() consumes.
 */
static void journal_do_submit_data(struct buffer_head **wbuf, int bufs,
				   int write_op)
{
	int i;

	for (i = 0; i < bufs; i++) {
		wbuf[i]->b_end_io = end_buffer_write_sync;
		/* We use-up our safety reference in submit_bh() */
		submit_bh(write_op, wbuf[i]);
	}
}

/*
 *  Submit all the data buffers to disk
 */
static int journal_submit_data_buffers(journal_t *journal,
				       transaction_t *commit_transaction,
				       int write_op)
{
	struct journal_head *jh;
	struct buffer_head *bh;
	int locked;
	int bufs = 0;
	struct buffer_head **wbuf = journal->j_wbuf;
	int err = 0;

	/*
	 * Whenever we unlock the journal and sleep, things can get added
	 * onto ->t_sync_datalist, so we have to keep looping back to
	 * write_out_data until we *know* that the list is empty.
	 *
	 * Cleanup any flushed data buffers from the data list.  Even in
	 * abort mode, we want to flush this out as soon as possible.
	 */
write_out_data:
	cond_resched();
	spin_lock(&journal->j_list_lock);

	while (commit_transaction->t_sync_datalist) {
		jh = commit_transaction->t_sync_datalist;
		bh = jh2bh(jh);
		locked = 0;

		/* Get reference just to make sure buffer does not disappear
		 * when we are forced to drop various locks */
		get_bh(bh);
		/* If the buffer is dirty, we need to submit IO and hence
		 * we need the buffer lock. We try to lock the buffer without
		 * blocking. If we fail, we need to drop j_list_lock and do
		 * blocking lock_buffer().
		 */
		if (buffer_dirty(bh)) {
			if (!trylock_buffer(bh)) {
				BUFFER_TRACE(bh, "needs blocking lock");
				spin_unlock(&journal->j_list_lock);
				/* Write out all data to prevent deadlocks */
				journal_do_submit_data(wbuf, bufs, write_op);
				bufs = 0;
				lock_buffer(bh);
				spin_lock(&journal->j_list_lock);
			}
			locked = 1;
		}
		/* We have to get bh_state lock. Again out of order, sigh. */
		if (!inverted_lock(journal, bh)) {
			/* j_list_lock was dropped; take both in safe order. */
			jbd_lock_bh_state(bh);
			spin_lock(&journal->j_list_lock);
		}
		/* Someone already cleaned up the buffer? */
		if (!buffer_jbd(bh) || bh2jh(bh) != jh
			|| jh->b_transaction != commit_transaction
			|| jh->b_jlist != BJ_SyncData) {
			jbd_unlock_bh_state(bh);
			if (locked)
				unlock_buffer(bh);
			BUFFER_TRACE(bh, "already cleaned up");
			release_data_buffer(bh);
			continue;
		}
		if (locked && test_clear_buffer_dirty(bh)) {
			/* Still dirty: queue it for writeout and park it on
			 * BJ_Locked until the IO completes. */
			BUFFER_TRACE(bh, "needs writeout, adding to array");
			wbuf[bufs++] = bh;
			__journal_file_buffer(jh, commit_transaction,
						BJ_Locked);
			jbd_unlock_bh_state(bh);
			if (bufs == journal->j_wbufsize) {
				spin_unlock(&journal->j_list_lock);
				journal_do_submit_data(wbuf, bufs, write_op);
				bufs = 0;
				goto write_out_data;
			}
		} else if (!locked && buffer_locked(bh)) {
			/* IO already in flight elsewhere; just track it. */
			__journal_file_buffer(jh, commit_transaction,
						BJ_Locked);
			jbd_unlock_bh_state(bh);
			put_bh(bh);
		} else {
			BUFFER_TRACE(bh, "writeout complete: unfile");
			if (unlikely(!buffer_uptodate(bh)))
				err = -EIO;
			__journal_unfile_buffer(jh);
			jbd_unlock_bh_state(bh);
			if (locked)
				unlock_buffer(bh);
			journal_remove_journal_head(bh);
			/* One for our safety reference, other for
			 * journal_remove_journal_head() */
			put_bh(bh);
			release_data_buffer(bh);
		}

		if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
			spin_unlock(&journal->j_list_lock);
			goto write_out_data;
		}
	}
	spin_unlock(&journal->j_list_lock);
	/* Flush whatever is still batched in wbuf[]. */
	journal_do_submit_data(wbuf, bufs, write_op);

	return err;
}

/*
 * journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void journal_commit_transaction(journal_t *journal)
{
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned int blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	struct blk_plug plug;

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

	/* Do we need to erase the effects of a prior journal_flush? */
	if (journal->j_flags & JFS_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	/* Wait until every handle attached to this transaction has
	 * finished (t_updates drops to zero). */
	spin_lock(&commit_transaction->t_handle_lock);
	while (commit_transaction->t_updates) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (commit_transaction->t_updates) {
			spin_unlock(&commit_transaction->t_handle_lock);
			spin_unlock(&journal->j_state_lock);
			schedule();
			spin_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (commit_transaction->t_outstanding_credits <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A journal_get_undo_access()+journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug (3, "JBD: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	journal_switch_revoke_table(journal);

	/* Transaction is now committing; open a new running transaction
	 * slot and release anyone waiting for the lock-down. */
	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	spin_unlock(&journal->j_state_lock);

	jbd_debug (3, "JBD: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	blk_start_plug(&plug);
	err = journal_submit_data_buffers(journal, commit_transaction,
					  WRITE_SYNC);
	blk_finish_plug(&plug);

	/*
	 * Wait for all previously submitted IO to complete.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_locked_list) {
		struct buffer_head *bh;

		jh = commit_transaction->t_locked_list->b_tprev;
		bh = jh2bh(jh);
		get_bh(bh);
		if (buffer_locked(bh)) {
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			spin_lock(&journal->j_list_lock);
		}
		if (unlikely(!buffer_uptodate(bh))) {
			/* IO error on a data buffer: record it on the page's
			 * mapping so fsync() can see it, and remember -EIO. */
			if (!trylock_page(bh->b_page)) {
				spin_unlock(&journal->j_list_lock);
				lock_page(bh->b_page);
				spin_lock(&journal->j_list_lock);
			}
			if (bh->b_page->mapping)
				set_bit(AS_EIO, &bh->b_page->mapping->flags);

			unlock_page(bh->b_page);
			SetPageError(bh->b_page);
			err = -EIO;
		}
		if (!inverted_lock(journal, bh)) {
			put_bh(bh);
			spin_lock(&journal->j_list_lock);
			continue;
		}
		if (buffer_jbd(bh) && bh2jh(bh) == jh &&
		    jh->b_transaction == commit_transaction &&
		    jh->b_jlist == BJ_Locked) {
			__journal_unfile_buffer(jh);
			jbd_unlock_bh_state(bh);
			journal_remove_journal_head(bh);
			put_bh(bh);
		} else {
			jbd_unlock_bh_state(bh);
		}
		release_data_buffer(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);

	if (err) {
		char b[BDEVNAME_SIZE];

		printk(KERN_WARNING
			"JBD: Detected IO errors while flushing file data "
			"on %s\n", bdevname(journal->j_fs_dev, b));
		if (journal->j_flags & JFS_ABORT_ON_SYNCDATA_ERR)
			journal_abort(journal, err);
		err = 0;
	}

	blk_start_plug(&plug);

	journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC);

	/*
	 * If we found any dirty or locked buffers, then we should have
	 * looped back up to the write_out_data label.  If there weren't
	 * any then journal_clean_data_list should have wiped the list
	 * clean by now, so check that it is in fact empty.
	 */
	J_ASSERT (commit_transaction->t_sync_datalist == NULL);

	jbd_debug (3, "JBD: commit phase 3\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	spin_unlock(&journal->j_state_lock);

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 commit_transaction->t_outstanding_credits);

	descriptor = NULL;
	bufs = 0;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by journal_next_log_block() also.
		 */
		commit_transaction->t_outstanding_credits--;

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		get_bh(jh2bh(jh));

		/* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO*/

		set_buffer_jwrite(jh2bh(jh));
		/*
		 * akpm: journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = journal_write_metadata_buffer(commit_transaction,
						      jh, &new_jh, blocknr);
		set_buffer_jwrite(jh2bh(new_jh));
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
                   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JFS_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JFS_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += sizeof(journal_block_tag_t);
		space_left -= sizeof(journal_block_tag_t);

		if (first_tag) {
			/* The first tag is followed by the 16-byte
			 * journal UUID. */
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < sizeof(journal_block_tag_t) + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE_SYNC, bh);
			}
			cond_resched();

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	blk_finish_plug(&plug);

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	 */

	jbd_debug(3, "JBD: commit phase 4\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
                   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_buffer_jwrite(bh);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/*
		 * Wake up any transactions which were waiting for this
		 * IO to complete. The barrier must be here so that changes
		 * by journal_file_buffer() take effect before wake_up_bit()
		 * does the waitqueue check.
		 */
		smp_mb();
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD: commit phase 5\n");

	/* Here we wait for the revoke record and descriptor record buffers */
 wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		journal_unfile_buffer(journal, jh);
		journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		journal_abort(journal, err);

	jbd_debug(3, "JBD: commit phase 6\n");

	/* All metadata is written, now write commit record and do cleanup */
	spin_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_RECORD;
	spin_unlock(&journal->j_state_lock);

	if (journal_write_commit_record(journal, commit_transaction))
		err = -EIO;

	if (err)
		journal_abort(journal, err);

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_sync_datalist == NULL);
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction ||
			jh->b_transaction == journal->j_running_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 */
		if (jh->b_committed_data) {
			jbd_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			__journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/*
		 * A buffer which has been freed while still being journaled by
		 * a previous transaction.
		 */
		if (buffer_freed(bh)) {
			/*
			 * If the running transaction is the one containing
			 * "add to orphan" operation (b_next_transaction !=
			 * NULL), we have to wait for that transaction to
			 * commit before we can really get rid of the buffer.
			 * So just clear b_modified to not confuse transaction
			 * credit accounting and refile the buffer to
			 * BJ_Forget of the running transaction. If the just
			 * committed transaction contains "add to orphan"
			 * operation, we can completely invalidate the buffer
			 * now. We are rather throughout in that since the
			 * buffer may be still accessible when blocksize <
			 * pagesize and it is attached to the last partial
			 * page.
			 */
			jh->b_modified = 0;
			if (!jh->b_next_transaction) {
				clear_buffer_freed(bh);
				clear_buffer_jbddirty(bh);
				clear_buffer_mapped(bh);
				clear_buffer_new(bh);
				clear_buffer_req(bh);
				bh->b_bdev = NULL;
			}
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
			__journal_refile_buffer(jh);
			jbd_unlock_bh_state(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/* The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list. */
			JBUFFER_TRACE(jh, "refile or unfile freed buffer");
			__journal_refile_buffer(jh);
			if (!jh->b_transaction) {
				jbd_unlock_bh_state(bh);
				 /* needs a brelse */
				journal_remove_journal_head(bh);
				release_buffer_page(bh);
			} else
				jbd_unlock_bh_state(bh);
		}
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 8\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);

	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in commit time
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time*3 +
				journal->j_average_commit_time) / 4;
	else
		journal->j_average_commit_time = commit_time;

	spin_unlock(&journal->j_state_lock);

	/* Link the committed transaction onto the checkpoint list,
	 * unless it has nothing left to checkpoint, in which case it
	 * can be dropped right away. */
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__journal_drop_transaction(journal, commit_transaction);
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	wake_up(&journal->j_wait_done_commit);
}
gpl-2.0
aospan/linux-netup-1.4
drivers/ide/ide-pnp.c
1180
2517
/*
 * This file provides autodetection for ISA PnP IDE interfaces.
 * It was tested with "ESS ES1868 Plug and Play AudioDrive" IDE interface.
 *
 * Copyright (C) 2000 Andrey Panin <pazke@donpac.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/pnp.h>
#include <linux/ide.h>
#include <linux/module.h>

#define DRV_NAME "ide-pnp"

/* Add your devices here :)) */
static struct pnp_device_id idepnp_devices[] = {
	/* Generic ESDI/IDE/ATA compatible hard disk controller */
	{.id = "PNP0600", .driver_data = 0},
	{.id = ""}
};

static const struct ide_port_info ide_pnp_port_info = {
	.host_flags		= IDE_HFLAG_NO_DMA,
	.chipset		= ide_generic,
};

/*
 * Probe one PnP-reported IDE interface: validate its two I/O ranges and
 * IRQ, claim the regions, and register an IDE host on them.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
	struct ide_host *host;
	unsigned long base, ctl;
	int rc;
	struct ide_hw hw, *hws[] = { &hw };

	printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");

	/* Need a command block, a control block and an IRQ; otherwise the
	 * device is not usable as an IDE interface.
	 * (Return a proper errno, not a bare -1 which aliases -EPERM.) */
	if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
			pnp_irq_valid(dev, 0)))
		return -ENODEV;

	base = pnp_port_start(dev, 0);	/* command block: 8 ports */
	ctl = pnp_port_start(dev, 1);	/* control block: 1 port */

	if (!request_region(base, 8, DRV_NAME)) {
		printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
				DRV_NAME, base, base + 7);
		return -EBUSY;
	}

	if (!request_region(ctl, 1, DRV_NAME)) {
		printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
				DRV_NAME, ctl);
		release_region(base, 8);
		return -EBUSY;
	}

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, base, ctl);
	hw.irq = pnp_irq(dev, 0);

	rc = ide_host_add(&ide_pnp_port_info, hws, 1, &host);
	if (rc)
		goto out;

	pnp_set_drvdata(dev, host);

	return 0;
out:
	/* Undo the region claims on ide_host_add() failure. */
	release_region(ctl, 1);
	release_region(base, 8);
	return rc;
}

/*
 * Tear down the IDE host registered by idepnp_probe() and release the
 * I/O regions it claimed.
 */
static void idepnp_remove(struct pnp_dev *dev)
{
	struct ide_host *host = pnp_get_drvdata(dev);

	ide_host_remove(host);

	release_region(pnp_port_start(dev, 1), 1);
	release_region(pnp_port_start(dev, 0), 8);
}

static struct pnp_driver idepnp_driver = {
	.name		= "ide",
	.id_table	= idepnp_devices,
	.probe		= idepnp_probe,
	.remove		= idepnp_remove,
};

module_pnp_driver(idepnp_driver);

MODULE_LICENSE("GPL");
gpl-2.0
KylinUI/android_kernel_oppo_find5
arch/arm/mach-msm/msm-buspm-dev.c
1180
6050
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* #define DEBUG */ #include <linux/module.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <linux/memory_alloc.h> #include "msm-buspm-dev.h" #define MSM_BUSPM_DRV_NAME "msm-buspm-dev" /* * Allocate kernel buffer. * Currently limited to one buffer per file descriptor. If alloc() is * called twice for the same descriptor, the original buffer is freed. * There is also no locking protection so the same descriptor can not be shared. */ static inline void *msm_buspm_dev_get_vaddr(struct file *filp) { struct msm_buspm_map_dev *dev = filp->private_data; return (dev) ? dev->vaddr : NULL; } static inline unsigned int msm_buspm_dev_get_buflen(struct file *filp) { struct msm_buspm_map_dev *dev = filp->private_data; return dev ? dev->buflen : 0; } static inline unsigned long msm_buspm_dev_get_paddr(struct file *filp) { struct msm_buspm_map_dev *dev = filp->private_data; return (dev) ? 
dev->paddr : 0L; } static void msm_buspm_dev_free(struct file *filp) { struct msm_buspm_map_dev *dev = filp->private_data; if (dev) { pr_debug("freeing memory at 0x%p\n", dev->vaddr); free_contiguous_memory(dev->vaddr); dev->paddr = 0L; dev->vaddr = NULL; } } static int msm_buspm_dev_open(struct inode *inode, struct file *filp) { struct msm_buspm_map_dev *dev; if (capable(CAP_SYS_ADMIN)) { dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev) filp->private_data = dev; else return -ENOMEM; } else { return -EPERM; } return 0; } static int msm_buspm_dev_alloc(struct file *filp, struct buspm_alloc_params data) { unsigned long paddr; void *vaddr; struct msm_buspm_map_dev *dev = filp->private_data; /* If buffer already allocated, then free it */ if (dev->vaddr) msm_buspm_dev_free(filp); /* Allocate uncached memory */ vaddr = allocate_contiguous_ebi(data.size, PAGE_SIZE, 0); paddr = (vaddr) ? memory_pool_node_paddr(vaddr) : 0L; if (vaddr == NULL) { pr_err("allocation of 0x%x bytes failed", data.size); return -ENOMEM; } dev->vaddr = vaddr; dev->paddr = paddr; dev->buflen = data.size; filp->f_pos = 0; pr_debug("virt addr = 0x%p\n", dev->vaddr); pr_debug("phys addr = 0x%lx\n", dev->paddr); return 0; } static long msm_buspm_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct buspm_xfer_req xfer; struct buspm_alloc_params alloc_data; unsigned long paddr; int retval = 0; void *buf = msm_buspm_dev_get_vaddr(filp); unsigned int buflen = msm_buspm_dev_get_buflen(filp); unsigned char *dbgbuf = buf; switch (cmd) { case MSM_BUSPM_IOC_FREE: pr_debug("cmd = 0x%x (FREE)\n", cmd); msm_buspm_dev_free(filp); break; case MSM_BUSPM_IOC_ALLOC: pr_debug("cmd = 0x%x (ALLOC)\n", cmd); retval = __get_user(alloc_data.size, (size_t __user *)arg); if (retval == 0) retval = msm_buspm_dev_alloc(filp, alloc_data); break; case MSM_BUSPM_IOC_RD_PHYS_ADDR: pr_debug("Read Physical Address\n"); paddr = msm_buspm_dev_get_paddr(filp); if (paddr == 0L) { retval = -EINVAL; } else { 
pr_debug("phys addr = 0x%lx\n", paddr); retval = __put_user(paddr, (unsigned long __user *)arg); } break; case MSM_BUSPM_IOC_RDBUF: pr_debug("Read Buffer: 0x%x%x%x%x\n", dbgbuf[0], dbgbuf[1], dbgbuf[2], dbgbuf[3]); if (!buf) { retval = -EINVAL; break; } if (copy_from_user(&xfer, (void __user *)arg, sizeof(xfer))) { retval = -EFAULT; break; } if ((xfer.size <= buflen) && (copy_to_user((void __user *)xfer.data, buf, xfer.size))) { retval = -EFAULT; break; } break; case MSM_BUSPM_IOC_WRBUF: pr_debug("Write Buffer\n"); if (!buf) { retval = -EINVAL; break; } if (copy_from_user(&xfer, (void __user *)arg, sizeof(xfer))) { retval = -EFAULT; break; } if ((buflen <= xfer.size) && (copy_from_user(buf, (void __user *)xfer.data, xfer.size))) { retval = -EFAULT; break; } break; default: pr_debug("Unknown command 0x%x\n", cmd); retval = -EINVAL; break; } return retval; } static int msm_buspm_dev_release(struct inode *inode, struct file *filp) { struct msm_buspm_map_dev *dev = filp->private_data; msm_buspm_dev_free(filp); kfree(dev); filp->private_data = NULL; return 0; } static int msm_buspm_dev_mmap(struct file *filp, struct vm_area_struct *vma) { pr_debug("vma = 0x%p\n", vma); /* Mappings are uncached */ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EFAULT; return 0; } static const struct file_operations msm_buspm_dev_fops = { .owner = THIS_MODULE, .mmap = msm_buspm_dev_mmap, .open = msm_buspm_dev_open, .unlocked_ioctl = msm_buspm_dev_ioctl, .llseek = noop_llseek, .release = msm_buspm_dev_release, }; struct miscdevice msm_buspm_misc = { .minor = MISC_DYNAMIC_MINOR, .name = MSM_BUSPM_DRV_NAME, .fops = &msm_buspm_dev_fops, }; static int __init msm_buspm_dev_init(void) { int ret = 0; ret = misc_register(&msm_buspm_misc); if (ret < 0) pr_err("%s: Cannot register misc device\n", __func__); return ret; } static void __exit msm_buspm_dev_exit(void) { 
misc_deregister(&msm_buspm_misc); } module_init(msm_buspm_dev_init); module_exit(msm_buspm_dev_exit); MODULE_LICENSE("GPL v2"); MODULE_VERSION("1.0"); MODULE_ALIAS("platform:"MSM_BUSPM_DRV_NAME);
gpl-2.0
WaRP7/linux-fslc
drivers/gpu/drm/radeon/r420.c
1692
14347
/* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher * Jerome Glisse */ #include <linux/seq_file.h> #include <linux/slab.h> #include <drm/drmP.h> #include "radeon_reg.h" #include "radeon.h" #include "radeon_asic.h" #include "atom.h" #include "r100d.h" #include "r420d.h" #include "r420_reg_safe.h" void r420_pm_init_profile(struct radeon_device *rdev) { /* default */ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; /* low sh */ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; /* mid sh */ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; /* high sh */ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; /* low mh */ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; /* mid mh */ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; /* high mh */ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; } static void r420_set_reg_safe(struct radeon_device *rdev) { rdev->config.r300.reg_safe_bm = r420_reg_safe_bm; rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm); } void r420_pipes_init(struct radeon_device *rdev) { unsigned tmp; unsigned gb_pipe_select; unsigned num_pipes; /* GA_ENHANCE workaround TCL deadlock issue */ WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL | (1 << 2) | (1 << 3)); /* add idle wait as per freedesktop.org bug 24041 */ if (r100_gui_wait_for_idle(rdev)) { printk(KERN_WARNING "Failed to wait GUI idle while " "programming pipes. Bad things might happen.\n"); } /* get max number of pipes */ gb_pipe_select = RREG32(R400_GB_PIPE_SELECT); num_pipes = ((gb_pipe_select >> 12) & 3) + 1; /* SE chips have 1 pipe */ if ((rdev->pdev->device == 0x5e4c) || (rdev->pdev->device == 0x5e4f)) num_pipes = 1; rdev->num_gb_pipes = num_pipes; tmp = 0; switch (num_pipes) { default: /* force to 1 pipe */ num_pipes = 1; case 1: tmp = (0 << 1); break; case 2: tmp = (3 << 1); break; case 3: tmp = (6 << 1); break; case 4: tmp = (7 << 1); break; } WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1); /* Sub pixel 1/12 so we can have 4K rendering according to doc */ tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING; WREG32(R300_GB_TILE_CONFIG, tmp); if (r100_gui_wait_for_idle(rdev)) { printk(KERN_WARNING "Failed to wait GUI idle while " "programming pipes. 
Bad things might happen.\n"); } tmp = RREG32(R300_DST_PIPE_CONFIG); WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG); WREG32(R300_RB2D_DSTCACHE_MODE, RREG32(R300_RB2D_DSTCACHE_MODE) | R300_DC_AUTOFLUSH_ENABLE | R300_DC_DC_DISABLE_IGNORE_PE); if (r100_gui_wait_for_idle(rdev)) { printk(KERN_WARNING "Failed to wait GUI idle while " "programming pipes. Bad things might happen.\n"); } if (rdev->family == CHIP_RV530) { tmp = RREG32(RV530_GB_PIPE_SELECT2); if ((tmp & 3) == 3) rdev->num_z_pipes = 2; else rdev->num_z_pipes = 1; } else rdev->num_z_pipes = 1; DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n", rdev->num_gb_pipes, rdev->num_z_pipes); } u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg) { unsigned long flags; u32 r; spin_lock_irqsave(&rdev->mc_idx_lock, flags); WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg)); r = RREG32(R_0001FC_MC_IND_DATA); spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); return r; } void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v) { unsigned long flags; spin_lock_irqsave(&rdev->mc_idx_lock, flags); WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) | S_0001F8_MC_IND_WR_EN(1)); WREG32(R_0001FC_MC_IND_DATA, v); spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); } static void r420_debugfs(struct radeon_device *rdev) { if (r100_debugfs_rbbm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for RBBM !\n"); } if (r420_debugfs_pipes_info_init(rdev)) { DRM_ERROR("Failed to register debugfs file for pipes !\n"); } } static void r420_clock_resume(struct radeon_device *rdev) { u32 sclk_cntl; if (radeon_dynclks != -1 && radeon_dynclks) radeon_atom_set_clock_gating(rdev, 1); sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); if (rdev->family == CHIP_R420) sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1); WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl); } static void r420_cp_errata_init(struct radeon_device *rdev) { struct radeon_ring *ring = 
&rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; /* RV410 and R420 can lock up if CP DMA to host memory happens * while the 2D engine is busy. * * The proper workaround is to queue a RESYNC at the beginning * of the CP init, apparently. */ radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch); radeon_ring_lock(rdev, ring, 8); radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1)); radeon_ring_write(ring, rdev->config.r300.resync_scratch); radeon_ring_write(ring, 0xDEADBEEF); radeon_ring_unlock_commit(rdev, ring, false); } static void r420_cp_errata_fini(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; /* Catch the RESYNC we dispatched all the way back, * at the very beginning of the CP init. */ radeon_ring_lock(rdev, ring, 8); radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); radeon_ring_write(ring, R300_RB3D_DC_FINISH); radeon_ring_unlock_commit(rdev, ring, false); radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); } static int r420_startup(struct radeon_device *rdev) { int r; /* set common regs */ r100_set_common_regs(rdev); /* program mc */ r300_mc_program(rdev); /* Resume clock */ r420_clock_resume(rdev); /* Initialize GART (initialize after TTM so we can allocate * memory through TTM but finalize after TTM) */ if (rdev->flags & RADEON_IS_PCIE) { r = rv370_pcie_gart_enable(rdev); if (r) return r; } if (rdev->flags & RADEON_IS_PCI) { r = r100_pci_gart_enable(rdev); if (r) return r; } r420_pipes_init(rdev); /* allocate wb buffer */ r = radeon_wb_init(rdev); if (r) return r; r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); if (r) { dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); return r; } /* Enable IRQ */ if (!rdev->irq.installed) { r = radeon_irq_kms_init(rdev); if (r) return r; } r100_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { dev_err(rdev->dev, "failed 
initializing CP (%d).\n", r); return r; } r420_cp_errata_init(rdev); r = radeon_ib_pool_init(rdev); if (r) { dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; } return 0; } int r420_resume(struct radeon_device *rdev) { int r; /* Make sur GART are not working */ if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_disable(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_disable(rdev); /* Resume clock before doing reset */ r420_clock_resume(rdev); /* Reset gpu before posting otherwise ATOM will enter infinite loop */ if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), RREG32(R_0007C0_CP_STAT)); } /* check if cards are posted or not */ if (rdev->is_atom_bios) { atom_asic_init(rdev->mode_info.atom_context); } else { radeon_combios_asic_init(rdev->ddev); } /* Resume clock after posting */ r420_clock_resume(rdev); /* Initialize surface registers */ radeon_surface_init(rdev); rdev->accel_working = true; r = r420_startup(rdev); if (r) { rdev->accel_working = false; } return r; } int r420_suspend(struct radeon_device *rdev) { radeon_pm_suspend(rdev); r420_cp_errata_fini(rdev); r100_cp_disable(rdev); radeon_wb_disable(rdev); r100_irq_disable(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_disable(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_disable(rdev); return 0; } void r420_fini(struct radeon_device *rdev) { radeon_pm_fini(rdev); r100_cp_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_gem_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); radeon_agp_fini(rdev); radeon_irq_kms_fini(rdev); radeon_fence_driver_fini(rdev); radeon_bo_fini(rdev); if (rdev->is_atom_bios) { radeon_atombios_fini(rdev); } else { radeon_combios_fini(rdev); } kfree(rdev->bios); rdev->bios = NULL; } int r420_init(struct radeon_device *rdev) { int r; /* Initialize scratch registers 
*/ radeon_scratch_init(rdev); /* Initialize surface registers */ radeon_surface_init(rdev); /* TODO: disable VGA need to use VGA request */ /* restore some register to sane defaults */ r100_restore_sanity(rdev); /* BIOS*/ if (!radeon_get_bios(rdev)) { if (ASIC_IS_AVIVO(rdev)) return -EINVAL; } if (rdev->is_atom_bios) { r = radeon_atombios_init(rdev); if (r) { return r; } } else { r = radeon_combios_init(rdev); if (r) { return r; } } /* Reset gpu before posting otherwise ATOM will enter infinite loop */ if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), RREG32(R_0007C0_CP_STAT)); } /* check if cards are posted or not */ if (radeon_boot_test_post_card(rdev) == false) return -EINVAL; /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); /* initialize AGP */ if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); if (r) { radeon_agp_disable(rdev); } } /* initialize memory controller */ r300_mc_init(rdev); r420_debugfs(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); if (r) { return r; } /* Memory manager */ r = radeon_bo_init(rdev); if (r) { return r; } if (rdev->family == CHIP_R420) r100_enable_bm(rdev); if (rdev->flags & RADEON_IS_PCIE) { r = rv370_pcie_gart_init(rdev); if (r) return r; } if (rdev->flags & RADEON_IS_PCI) { r = r100_pci_gart_init(rdev); if (r) return r; } r420_set_reg_safe(rdev); /* Initialize power management */ radeon_pm_init(rdev); rdev->accel_working = true; r = r420_startup(rdev); if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); radeon_agp_fini(rdev); rdev->accel_working = false; } return 0; } /* * Debugfs info */ #if defined(CONFIG_DEBUG_FS) static int 
r420_debugfs_pipes_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; uint32_t tmp; tmp = RREG32(R400_GB_PIPE_SELECT); seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp); tmp = RREG32(R300_GB_TILE_CONFIG); seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp); tmp = RREG32(R300_DST_PIPE_CONFIG); seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp); return 0; } static struct drm_info_list r420_pipes_info_list[] = { {"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL}, }; #endif int r420_debugfs_pipes_info_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1); #else return 0; #endif }
gpl-2.0
thypon/android_kernel_samsung_tuna
drivers/acpi/acpica/utosi.c
3228
12320
/****************************************************************************** * * Module Name: utosi - Support for the _OSI predefined control method * *****************************************************************************/ /* * Copyright (C) 2000 - 2011, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utosi") /* * Strings supported by the _OSI predefined control method (which is * implemented internally within this module.) * * March 2009: Removed "Linux" as this host no longer wants to respond true * for this string. Basically, the only safe OS strings are windows-related * and in many or most cases represent the only test path within the * BIOS-provided ASL code. * * The last element of each entry is used to track the newest version of * Windows that the BIOS has requested. 
*/ static struct acpi_interface_info acpi_default_supported_interfaces[] = { /* Operating System Vendor Strings */ {"Windows 2000", NULL, 0, ACPI_OSI_WIN_2000}, /* Windows 2000 */ {"Windows 2001", NULL, 0, ACPI_OSI_WIN_XP}, /* Windows XP */ {"Windows 2001 SP1", NULL, 0, ACPI_OSI_WIN_XP_SP1}, /* Windows XP SP1 */ {"Windows 2001.1", NULL, 0, ACPI_OSI_WINSRV_2003}, /* Windows Server 2003 */ {"Windows 2001 SP2", NULL, 0, ACPI_OSI_WIN_XP_SP2}, /* Windows XP SP2 */ {"Windows 2001.1 SP1", NULL, 0, ACPI_OSI_WINSRV_2003_SP1}, /* Windows Server 2003 SP1 - Added 03/2006 */ {"Windows 2006", NULL, 0, ACPI_OSI_WIN_VISTA}, /* Windows Vista - Added 03/2006 */ {"Windows 2006.1", NULL, 0, ACPI_OSI_WINSRV_2008}, /* Windows Server 2008 - Added 09/2009 */ {"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1}, /* Windows Vista SP1 - Added 09/2009 */ {"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2}, /* Windows Vista SP2 - Added 09/2010 */ {"Windows 2009", NULL, 0, ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */ /* Feature Group Strings */ {"Extended Address Space Descriptor", NULL, 0, 0} /* * All "optional" feature group strings (features that are implemented * by the host) should be dynamically added by the host via * acpi_install_interface and should not be manually added here. 
* * Examples of optional feature group strings: * * "Module Device" * "Processor Device" * "3.0 Thermal Model" * "3.0 _SCP Extensions" * "Processor Aggregator Device" */ }; /******************************************************************************* * * FUNCTION: acpi_ut_initialize_interfaces * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Initialize the global _OSI supported interfaces list * ******************************************************************************/ acpi_status acpi_ut_initialize_interfaces(void) { u32 i; (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER); acpi_gbl_supported_interfaces = acpi_default_supported_interfaces; /* Link the static list of supported interfaces */ for (i = 0; i < (ACPI_ARRAY_LENGTH(acpi_default_supported_interfaces) - 1); i++) { acpi_default_supported_interfaces[i].next = &acpi_default_supported_interfaces[(acpi_size) i + 1]; } acpi_os_release_mutex(acpi_gbl_osi_mutex); return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ut_interface_terminate * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Delete all interfaces in the global list. Sets * acpi_gbl_supported_interfaces to NULL. 
* ******************************************************************************/ void acpi_ut_interface_terminate(void) { struct acpi_interface_info *next_interface; (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER); next_interface = acpi_gbl_supported_interfaces; while (next_interface) { acpi_gbl_supported_interfaces = next_interface->next; /* Only interfaces added at runtime can be freed */ if (next_interface->flags & ACPI_OSI_DYNAMIC) { ACPI_FREE(next_interface->name); ACPI_FREE(next_interface); } next_interface = acpi_gbl_supported_interfaces; } acpi_os_release_mutex(acpi_gbl_osi_mutex); } /******************************************************************************* * * FUNCTION: acpi_ut_install_interface * * PARAMETERS: interface_name - The interface to install * * RETURN: Status * * DESCRIPTION: Install the interface into the global interface list. * Caller MUST hold acpi_gbl_osi_mutex * ******************************************************************************/ acpi_status acpi_ut_install_interface(acpi_string interface_name) { struct acpi_interface_info *interface_info; /* Allocate info block and space for the name string */ interface_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_interface_info)); if (!interface_info) { return (AE_NO_MEMORY); } interface_info->name = ACPI_ALLOCATE_ZEROED(ACPI_STRLEN(interface_name) + 1); if (!interface_info->name) { ACPI_FREE(interface_info); return (AE_NO_MEMORY); } /* Initialize new info and insert at the head of the global list */ ACPI_STRCPY(interface_info->name, interface_name); interface_info->flags = ACPI_OSI_DYNAMIC; interface_info->next = acpi_gbl_supported_interfaces; acpi_gbl_supported_interfaces = interface_info; return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ut_remove_interface * * PARAMETERS: interface_name - The interface to remove * * RETURN: Status * * DESCRIPTION: Remove the interface from the global interface 
list. * Caller MUST hold acpi_gbl_osi_mutex * ******************************************************************************/ acpi_status acpi_ut_remove_interface(acpi_string interface_name) { struct acpi_interface_info *previous_interface; struct acpi_interface_info *next_interface; previous_interface = next_interface = acpi_gbl_supported_interfaces; while (next_interface) { if (!ACPI_STRCMP(interface_name, next_interface->name)) { /* Found: name is in either the static list or was added at runtime */ if (next_interface->flags & ACPI_OSI_DYNAMIC) { /* Interface was added dynamically, remove and free it */ if (previous_interface == next_interface) { acpi_gbl_supported_interfaces = next_interface->next; } else { previous_interface->next = next_interface->next; } ACPI_FREE(next_interface->name); ACPI_FREE(next_interface); } else { /* * Interface is in static list. If marked invalid, then it * does not actually exist. Else, mark it invalid. */ if (next_interface->flags & ACPI_OSI_INVALID) { return (AE_NOT_EXIST); } next_interface->flags |= ACPI_OSI_INVALID; } return (AE_OK); } previous_interface = next_interface; next_interface = next_interface->next; } /* Interface was not found */ return (AE_NOT_EXIST); } /******************************************************************************* * * FUNCTION: acpi_ut_get_interface * * PARAMETERS: interface_name - The interface to find * * RETURN: struct acpi_interface_info if found. NULL if not found. * * DESCRIPTION: Search for the specified interface name in the global list. 
* Caller MUST hold acpi_gbl_osi_mutex * ******************************************************************************/ struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name) { struct acpi_interface_info *next_interface; next_interface = acpi_gbl_supported_interfaces; while (next_interface) { if (!ACPI_STRCMP(interface_name, next_interface->name)) { return (next_interface); } next_interface = next_interface->next; } return (NULL); } /******************************************************************************* * * FUNCTION: acpi_ut_osi_implementation * * PARAMETERS: walk_state - Current walk state * * RETURN: Status * * DESCRIPTION: Implementation of the _OSI predefined control method. When * an invocation of _OSI is encountered in the system AML, * control is transferred to this function. * ******************************************************************************/ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state) { union acpi_operand_object *string_desc; union acpi_operand_object *return_desc; struct acpi_interface_info *interface_info; acpi_interface_handler interface_handler; u32 return_value; ACPI_FUNCTION_TRACE(ut_osi_implementation); /* Validate the string input argument (from the AML caller) */ string_desc = walk_state->arguments[0].object; if (!string_desc || (string_desc->common.type != ACPI_TYPE_STRING)) { return_ACPI_STATUS(AE_TYPE); } /* Create a return object */ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER); if (!return_desc) { return_ACPI_STATUS(AE_NO_MEMORY); } /* Default return value is 0, NOT SUPPORTED */ return_value = 0; (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER); /* Lookup the interface in the global _OSI list */ interface_info = acpi_ut_get_interface(string_desc->string.pointer); if (interface_info && !(interface_info->flags & ACPI_OSI_INVALID)) { /* * The interface is supported. * Update the osi_data if necessary. 
We keep track of the latest * version of Windows that has been requested by the BIOS. */ if (interface_info->value > acpi_gbl_osi_data) { acpi_gbl_osi_data = interface_info->value; } return_value = ACPI_UINT32_MAX; } acpi_os_release_mutex(acpi_gbl_osi_mutex); /* * Invoke an optional _OSI interface handler. The host OS may wish * to do some interface-specific handling. For example, warn about * certain interfaces or override the true/false support value. */ interface_handler = acpi_gbl_interface_handler; if (interface_handler) { return_value = interface_handler(string_desc->string.pointer, return_value); } ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO, "ACPI: BIOS _OSI(\"%s\") is %ssupported\n", string_desc->string.pointer, return_value == 0 ? "not " : "")); /* Complete the return object */ return_desc->integer.value = return_value; walk_state->return_desc = return_desc; return_ACPI_STATUS(AE_OK); }
gpl-2.0
mialwe/mngb
drivers/leds/leds-ams-delta.c
3740
2963
/* * LEDs driver for Amstrad Delta (E3) * * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/leds.h> #include <plat/board-ams-delta.h> /* * Our context */ struct ams_delta_led { struct led_classdev cdev; u8 bitmask; }; static void ams_delta_led_set(struct led_classdev *led_cdev, enum led_brightness value) { struct ams_delta_led *led_dev = container_of(led_cdev, struct ams_delta_led, cdev); if (value) ams_delta_latch1_write(led_dev->bitmask, led_dev->bitmask); else ams_delta_latch1_write(led_dev->bitmask, 0); } static struct ams_delta_led ams_delta_leds[] = { { .cdev = { .name = "ams-delta::camera", .brightness_set = ams_delta_led_set, }, .bitmask = AMS_DELTA_LATCH1_LED_CAMERA, }, { .cdev = { .name = "ams-delta::advert", .brightness_set = ams_delta_led_set, }, .bitmask = AMS_DELTA_LATCH1_LED_ADVERT, }, { .cdev = { .name = "ams-delta::email", .brightness_set = ams_delta_led_set, }, .bitmask = AMS_DELTA_LATCH1_LED_EMAIL, }, { .cdev = { .name = "ams-delta::handsfree", .brightness_set = ams_delta_led_set, }, .bitmask = AMS_DELTA_LATCH1_LED_HANDSFREE, }, { .cdev = { .name = "ams-delta::voicemail", .brightness_set = ams_delta_led_set, }, .bitmask = AMS_DELTA_LATCH1_LED_VOICEMAIL, }, { .cdev = { .name = "ams-delta::voice", .brightness_set = ams_delta_led_set, }, .bitmask = AMS_DELTA_LATCH1_LED_VOICE, }, }; static int ams_delta_led_probe(struct platform_device *pdev) { int i, ret; for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++) { ams_delta_leds[i].cdev.flags |= LED_CORE_SUSPENDRESUME; ret = led_classdev_register(&pdev->dev, &ams_delta_leds[i].cdev); if (ret < 0) goto fail; } return 0; fail: while (--i >= 0) led_classdev_unregister(&ams_delta_leds[i].cdev); return ret; } static int 
ams_delta_led_remove(struct platform_device *pdev) { int i; for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++) led_classdev_unregister(&ams_delta_leds[i].cdev); return 0; } static struct platform_driver ams_delta_led_driver = { .probe = ams_delta_led_probe, .remove = ams_delta_led_remove, .driver = { .name = "ams-delta-led", .owner = THIS_MODULE, }, }; static int __init ams_delta_led_init(void) { return platform_driver_register(&ams_delta_led_driver); } static void __exit ams_delta_led_exit(void) { platform_driver_unregister(&ams_delta_led_driver); } module_init(ams_delta_led_init); module_exit(ams_delta_led_exit); MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>"); MODULE_DESCRIPTION("Amstrad Delta LED driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ams-delta-led");
gpl-2.0
yuzaipiaofei/android_kernel_cyanogen_msm8916
net/core/stream.c
10396
5229
/* * SUCS NET3: * * Generic stream handling routines. These are generic for most * protocols. Even IP. Tonight 8-). * This is used because TCP, LLC (others too) layer all have mostly * identical sendmsg() and recvmsg() code. * So we (will) share it here. * * Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br> * (from old tcp.c code) * Alan Cox <alan@lxorguk.ukuu.org.uk> (Borrowed comments 8-)) */ #include <linux/module.h> #include <linux/net.h> #include <linux/signal.h> #include <linux/tcp.h> #include <linux/wait.h> #include <net/sock.h> /** * sk_stream_write_space - stream socket write_space callback. * @sk: socket * * FIXME: write proper description */ void sk_stream_write_space(struct sock *sk) { struct socket *sock = sk->sk_socket; struct socket_wq *wq; if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) { clear_bit(SOCK_NOSPACE, &sock->flags); rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_poll(&wq->wait, POLLOUT | POLLWRNORM | POLLWRBAND); if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); } } EXPORT_SYMBOL(sk_stream_write_space); /** * sk_stream_wait_connect - Wait for a socket to get into the connected state * @sk: sock to wait on * @timeo_p: for how long to wait * * Must be called with the socket locked. 
*/ int sk_stream_wait_connect(struct sock *sk, long *timeo_p) { struct task_struct *tsk = current; DEFINE_WAIT(wait); int done; do { int err = sock_error(sk); if (err) return err; if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) return -EPIPE; if (!*timeo_p) return -EAGAIN; if (signal_pending(tsk)) return sock_intr_errno(*timeo_p); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sk->sk_write_pending++; done = sk_wait_event(sk, timeo_p, !sk->sk_err && !((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))); finish_wait(sk_sleep(sk), &wait); sk->sk_write_pending--; } while (!done); return 0; } EXPORT_SYMBOL(sk_stream_wait_connect); /** * sk_stream_closing - Return 1 if we still have things to send in our buffers. * @sk: socket to verify */ static inline int sk_stream_closing(struct sock *sk) { return (1 << sk->sk_state) & (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK); } void sk_stream_wait_close(struct sock *sk, long timeout) { if (timeout) { DEFINE_WAIT(wait); do { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk))) break; } while (!signal_pending(current) && timeout); finish_wait(sk_sleep(sk), &wait); } } EXPORT_SYMBOL(sk_stream_wait_close); /** * sk_stream_wait_memory - Wait for more memory for a socket * @sk: socket to wait for memory * @timeo_p: for how long */ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) { int err = 0; long vm_wait = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); if (sk_stream_memory_free(sk)) current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2; while (1) { set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) goto do_error; if (!*timeo_p) goto do_nonblock; if (signal_pending(current)) goto do_interrupted; clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); if (sk_stream_memory_free(sk) && !vm_wait) break; 
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk->sk_write_pending++; sk_wait_event(sk, &current_timeo, sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN) || (sk_stream_memory_free(sk) && !vm_wait)); sk->sk_write_pending--; if (vm_wait) { vm_wait -= current_timeo; current_timeo = *timeo_p; if (current_timeo != MAX_SCHEDULE_TIMEOUT && (current_timeo -= vm_wait) < 0) current_timeo = 0; vm_wait = 0; } *timeo_p = current_timeo; } out: finish_wait(sk_sleep(sk), &wait); return err; do_error: err = -EPIPE; goto out; do_nonblock: err = -EAGAIN; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; } EXPORT_SYMBOL(sk_stream_wait_memory); int sk_stream_error(struct sock *sk, int flags, int err) { if (err == -EPIPE) err = sock_error(sk) ? : -EPIPE; if (err == -EPIPE && !(flags & MSG_NOSIGNAL)) send_sig(SIGPIPE, current, 0); return err; } EXPORT_SYMBOL(sk_stream_error); void sk_stream_kill_queues(struct sock *sk) { /* First the read buffer. */ __skb_queue_purge(&sk->sk_receive_queue); /* Next, the error queue. */ __skb_queue_purge(&sk->sk_error_queue); /* Next, the write queue. */ WARN_ON(!skb_queue_empty(&sk->sk_write_queue)); /* Account for returned memory. */ sk_mem_reclaim(sk); WARN_ON(sk->sk_wmem_queued); WARN_ON(sk->sk_forward_alloc); /* It is _impossible_ for the backlog to contain anything * when we get here. All user references to this socket * have gone away, only the net layer knows can touch it. */ } EXPORT_SYMBOL(sk_stream_kill_queues);
gpl-2.0
pantech-msm8960/android_kernel_pantech_msm8960
drivers/video/msm/mdp4_wfd_writeback.c
157
2455
/* Copyright (c) 2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/types.h> #include <linux/list.h> #include <linux/ioctl.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/version.h> #include "mdp4_wfd_writeback_util.h" #include "msm_fb.h" static int writeback_on(struct platform_device *pdev) { return 0; } static int writeback_off(struct platform_device *pdev) { return 0; } static int writeback_probe(struct platform_device *pdev) { struct msm_fb_data_type *mfd; struct platform_device *mdp_dev = NULL; struct msm_fb_panel_data *pdata = NULL; int rc = 0; WRITEBACK_MSG_ERR("Inside writeback_probe\n"); mfd = platform_get_drvdata(pdev); if (!mfd) return -ENODEV; if (mfd->key != MFD_KEY) return -EINVAL; mdp_dev = platform_device_alloc("mdp", pdev->id); if (!mdp_dev) return -ENOMEM; /* * link to the latest pdev */ mfd->pdev = mdp_dev; mfd->dest = DISPLAY_LCD; if (platform_device_add_data (mdp_dev, pdev->dev.platform_data, sizeof(struct msm_fb_panel_data))) { pr_err("writeback_probe: " "platform_device_add_data failed!\n"); platform_device_put(mdp_dev); return -ENOMEM; } pdata = (struct msm_fb_panel_data *)mdp_dev->dev.platform_data; pdata->on = writeback_on; pdata->off = writeback_off; pdata->next = pdev; /* * get/set panel specific fb info */ mfd->panel_info = pdata->panel_info; mfd->fb_imgType = MDP_RGB_565; platform_set_drvdata(mdp_dev, mfd); rc = platform_device_add(mdp_dev); if (rc) { WRITEBACK_MSG_ERR("failed to add device"); 
platform_device_put(mdp_dev); return rc; } return rc; } static struct platform_driver writeback_driver = { .probe = writeback_probe, .driver = { .name = "writeback", }, }; static int __init writeback_driver_init(void) { int rc = 0; WRITEBACK_MSG_ERR("Inside writeback_driver_init\n"); rc = platform_driver_register(&writeback_driver); return rc; } module_init(writeback_driver_init);
gpl-2.0
perkarom/Shark-E
drivers/video/backlight/ea8061.c
413
21497
/* linux/drivers/video/backlight/ea8061.c * * MIPI-DSI based ea8061 AMOLED lcd 5.55 inch panel driver. * * Joongmock Shin <jmock.shin@samsung.com> * Eunchul Kim <chulspro.kim@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/mutex.h> #include <linux/wait.h> #include <linux/ctype.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/lcd.h> #include <linux/lcd-property.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/regulator/consumer.h> #include <linux/firmware.h> #include <video/mipi_display.h> #ifdef CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ #include <linux/devfreq/exynos4_display.h> #endif #include <plat/mipi_dsim2.h> #include "ea8061.h" #include "ea8061_gamma.h" #ifdef CONFIG_BACKLIGHT_SMART_DIMMING #include "smart_dimming.h" #endif #define VER_161 (0xA1) /* MACH_SLP_T0_LTE */ #define LDI_FW_PATH "ea8061/reg_%s.bin" #define MAX_STR 255 #define LDI_MTP_LENGTH 24 #define MAX_READ_LENGTH 64 #define MIN_BRIGHTNESS (0) #define MAX_BRIGHTNESS (24) #define POWER_IS_ON(pwr) ((pwr) == FB_BLANK_UNBLANK) #define POWER_IS_OFF(pwr) ((pwr) == FB_BLANK_POWERDOWN) #define POWER_IS_NRM(pwr) ((pwr) == FB_BLANK_NORMAL) #define lcd_to_master(a) (a->dsim_dev->master) #define lcd_to_master_ops(a) ((lcd_to_master(a))->master_ops) struct panel_model { int ver; char *name; }; struct ea8061 { struct device *dev; struct lcd_device *ld; struct backlight_device *bd; struct mipi_dsim_lcd_device *dsim_dev; struct lcd_platform_data *ddi_pd; struct lcd_property *property; struct regulator *reg_vdd3; struct regulator *reg_vci; #ifdef CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ struct notifier_block nb_disp; #endif struct mutex lock; unsigned int id; unsigned int aid; unsigned int ver; 
unsigned int power; unsigned int acl_enable; unsigned int cur_addr; const struct panel_model *model; unsigned int model_count; #ifdef CONFIG_BACKLIGHT_SMART_DIMMING unsigned int support_elvss; struct str_smart_dim smart_dim; #endif }; static void ea8061_delay(unsigned int msecs) { /* refer from documentation/timers/timers-howto.txt */ if (msecs < 20) usleep_range(msecs*1000, (msecs+1)*1000); else msleep(msecs); } static void ea8061_sleep_in(struct ea8061 *lcd) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_SHORT_WRITE, 0x10, 0x00); } static void ea8061_sleep_out(struct ea8061 *lcd) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_SHORT_WRITE, 0x11, 0x00); } static void ea8061_apply_level_1_key(struct ea8061 *lcd) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); const unsigned char data_to_send[] = { 0xF0, 0x5A, 0x5A }; ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, (unsigned int)data_to_send, ARRAY_SIZE(data_to_send)); } static void ea8061_apply_level_2_key(struct ea8061 *lcd) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); const unsigned char data_to_send[] = { 0xFC, 0x5A, 0x5A }; ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, (unsigned int)data_to_send, ARRAY_SIZE(data_to_send)); } static void ea8061_acl_on(struct ea8061 *lcd) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); /* FIXME: off, 33%, 40%, 50% */ ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_SHORT_WRITE_PARAM, 0x55, 0x03); } static void ea8061_acl_off(struct ea8061 *lcd) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); /* FIXME: off, 33%, 40%, 50% */ ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_SHORT_WRITE_PARAM, 0x55, 0x00); } static void ea8061_enable_mtp_register(struct ea8061 *lcd) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); const unsigned char data_to_send[] = { 0xF1, 0x5A, 0x5A }; 
ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, (unsigned int)data_to_send, ARRAY_SIZE(data_to_send)); } static void ea8061_disable_mtp_register(struct ea8061 *lcd) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); const unsigned char data_to_send[] = { 0xF1, 0xA5, 0xA5 }; ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, (unsigned int)data_to_send, ARRAY_SIZE(data_to_send)); } static void ea8061_read_id(struct ea8061 *lcd, u8 *mtp_id) { unsigned int ret; unsigned int addr = 0xD1; /* MTP ID */ struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); ret = ops->cmd_read(lcd_to_master(lcd), MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM, addr, 3, mtp_id); } static unsigned int ea8061_read_mtp(struct ea8061 *lcd, u8 *mtp_data) { unsigned int ret; unsigned int addr = 0xD3; /* MTP addr */ struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); ea8061_enable_mtp_register(lcd); ret = ops->cmd_read(lcd_to_master(lcd), MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM, addr, LDI_MTP_LENGTH, mtp_data); ea8061_disable_mtp_register(lcd); return ret; } static void ea8061_disp_cond(struct ea8061 *lcd) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_SHORT_WRITE_PARAM, 0x36, 0x02); } static void ea8061_panel_cond(struct ea8061 *lcd, int high_freq) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); const unsigned char data_to_send[] = { 0xc4, 0x4E, 0xBD, 0x00, 0x00, 0x58, 0xA7, 0x0B, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0B, 0x92, 0x0B, 0x92, 0x08, 0x08, 0x07, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x04, 0x04 }; /* ToDo : Low requency control */ ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, (unsigned int)data_to_send, ARRAY_SIZE(data_to_send)); } unsigned int convert_brightness_to_gamma(int brightness) { const unsigned int gamma_table[] = { 30, 30, 50, 70, 80, 90, 100, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, 280, 300 }; return 
gamma_table[brightness] - 1; } static int ea8061_gamma_ctrl(struct ea8061 *lcd, int brightness) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); #ifdef CONFIG_BACKLIGHT_SMART_DIMMING unsigned int gamma; unsigned char gamma_set[GAMMA_TABLE_COUNT] = {0,}; gamma = convert_brightness_to_gamma(brightness); gamma_set[0] = 0xfa; gamma_set[1] = 0x01; calc_gamma_table(&lcd->smart_dim, gamma, gamma_set + 2); ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, (unsigned int)gamma_set, GAMMA_TABLE_COUNT); #else ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, (unsigned int)ea8061_gamma22_table[brightness], GAMMA_TABLE_COUNT); #endif /* update gamma table. */ ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_SHORT_WRITE_PARAM, 0xf7, 0x03); ea8061_acl_on(lcd); return 0; } static void ea8061_elvss_nvm_set(struct ea8061 *lcd) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); unsigned char data_to_send[] = { 0xB2, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x0B, 0x0C, 0x0E, 0x10, 0x12, 0x13, 0x15, 0x17, 0x18, 0x1A, 0x1A, 0x1B, 0x1B, 0x1B, 0x1C, 0x1C, 0x1C, 0xB4, 0xA0, 0x00, 0x00, 0x00, 0x00 }; ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, (unsigned int)data_to_send, ARRAY_SIZE(data_to_send)); } static void ea8061_slew_ctl(struct ea8061 *lcd) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); unsigned char data_to_send[] = { 0xB4, 0x33, 0x0D, 0x00 }; ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, (unsigned int)data_to_send, ARRAY_SIZE(data_to_send)); } static int ea8061_panel_init(struct ea8061 *lcd) { struct backlight_device *bd = lcd->bd; int brightness = bd->props.brightness; ea8061_delay(5); ea8061_apply_level_1_key(lcd); ea8061_apply_level_2_key(lcd); ea8061_panel_cond(lcd, 1); ea8061_disp_cond(lcd); ea8061_gamma_ctrl(lcd, brightness); ea8061_elvss_nvm_set(lcd); ea8061_acl_on(lcd); ea8061_slew_ctl(lcd); ea8061_sleep_out(lcd); /* wait more than 120ms */ 
ea8061_delay(lcd->ddi_pd->power_on_delay); dev_info(lcd->dev, "panel init sequence done.\n"); return 0; } static void ea8061_display_on(struct ea8061 *lcd) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_SHORT_WRITE, 0x29, 0x00); } static void ea8061_display_off(struct ea8061 *lcd) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_SHORT_WRITE, 0x28, 0x00); } static int ea8061_early_set_power(struct lcd_device *ld, int power) { struct ea8061 *lcd = lcd_get_data(ld); struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); int ret = 0; if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN && power != FB_BLANK_NORMAL) { dev_err(lcd->dev, "power value should be 0, 1 or 4.\n"); return -EINVAL; } if (lcd->power == power) { dev_err(lcd->dev, "power mode is same as previous one.\n"); return -EINVAL; } if (ops->set_early_blank_mode) { /* LCD power off */ if ((POWER_IS_OFF(power) && POWER_IS_ON(lcd->power)) || (POWER_IS_ON(lcd->power) && POWER_IS_NRM(power))) { ret = ops->set_early_blank_mode(lcd_to_master(lcd), power); if (!ret && lcd->power != power) lcd->power = power; } } return ret; } static int ea8061_set_power(struct lcd_device *ld, int power) { struct ea8061 *lcd = lcd_get_data(ld); struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); int ret = 0; if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN && power != FB_BLANK_NORMAL) { dev_err(lcd->dev, "power value should be 0, 1 or 4.\n"); return -EINVAL; } if (lcd->power == power) { dev_err(lcd->dev, "power mode is same as previous one.\n"); return -EINVAL; } if (ops->set_blank_mode) { ret = ops->set_blank_mode(lcd_to_master(lcd), power); if (!ret && lcd->power != power) lcd->power = power; } return ret; } static int ea8061_get_power(struct lcd_device *ld) { struct ea8061 *lcd = lcd_get_data(ld); return lcd->power; } static struct lcd_ops ea8061_lcd_ops = { .early_set_power = 
ea8061_early_set_power, .set_power = ea8061_set_power, .get_power = ea8061_get_power, }; static int ea8061_get_brightness(struct backlight_device *bd) { return bd->props.brightness; } static int ea8061_set_brightness(struct backlight_device *bd) { int ret = 0, brightness = bd->props.brightness; struct ea8061 *lcd = bl_get_data(bd); if (lcd->power == FB_BLANK_POWERDOWN) { dev_err(lcd->dev, "lcd off: brightness set failed.\n"); return -EINVAL; } if (brightness < MIN_BRIGHTNESS || brightness > bd->props.max_brightness) { dev_err(lcd->dev, "lcd brightness should be %d to %d.\n", MIN_BRIGHTNESS, MAX_BRIGHTNESS); return -EINVAL; } ret = ea8061_gamma_ctrl(lcd, brightness); if (ret) { dev_err(&bd->dev, "lcd brightness setting failed.\n"); return -EIO; } return ret; } static const struct backlight_ops ea8061_backlight_ops = { .get_brightness = ea8061_get_brightness, .update_status = ea8061_set_brightness, }; static ssize_t acl_control_show(struct device *dev, struct device_attribute * attr, char *buf) { struct ea8061 *lcd = dev_get_drvdata(dev); char temp[3]; sprintf(temp, "%d\n", lcd->acl_enable); strcpy(buf, temp); return strlen(buf); } static ssize_t acl_control_store(struct device *dev, struct device_attribute * attr, const char *buf, size_t size) { struct ea8061 *lcd = dev_get_drvdata(dev); unsigned int value; int rc; rc = strict_strtoul(buf, (unsigned int)0, (unsigned long *)&value); if (rc < 0) return rc; if (lcd->acl_enable != value) { dev_info(dev, "acl control changed from %d to %d\n", lcd->acl_enable, value); lcd->acl_enable = value; if (lcd->acl_enable) ea8061_acl_on(lcd); else ea8061_acl_off(lcd); } return size; } static ssize_t lcd_type_show(struct device *dev, struct device_attribute * attr, char *buf) { struct ea8061 *lcd = dev_get_drvdata(dev); char temp[32]; int i; for (i = 0; i < lcd->model_count; i++) { if (lcd->ver == lcd->model[i].ver) break; } if (i == lcd->model_count) return -EINVAL; sprintf(temp, "%s\n", lcd->model[i].name); strcpy(buf, temp); 
return strlen(buf); } static int ea8061_read_reg(struct ea8061 *lcd, unsigned int addr, char *buf) { unsigned char data[MAX_READ_LENGTH]; unsigned int size; int i; int pos = 0; struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); memset(data, 0x0, ARRAY_SIZE(data)); size = ops->cmd_read(lcd_to_master(lcd), MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM, addr, MAX_READ_LENGTH, data); if (!size) { dev_err(lcd->dev, "failed to read 0x%.2x register.\n", addr); return size; } pos += sprintf(buf, "0x%.2x, ", addr); for (i = 1; i < size+1; i++) { if (i % 9 == 0) pos += sprintf(buf+pos, "\n"); pos += sprintf(buf+pos, "0x%.2x, ", data[i-1]); } pos += sprintf(buf+pos, "\n"); return pos; } static int ea8061_write_reg(struct ea8061 *lcd, char *name) { struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd); const struct firmware *fw; char fw_path[MAX_STR+1]; int ret = 0; mutex_lock(&lcd->lock); snprintf(fw_path, MAX_STR, LDI_FW_PATH, name); ret = request_firmware(&fw, fw_path, lcd->dev); if (ret) { dev_err(lcd->dev, "failed to request firmware.\n"); mutex_unlock(&lcd->lock); return ret; } if (fw->size == 1) ret = ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_SHORT_WRITE, (unsigned int)fw->data[0], 0); else if (fw->size == 2) ret = ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_SHORT_WRITE_PARAM, (unsigned int)fw->data[0], fw->data[1]); else ret = ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, (unsigned int)fw->data, fw->size); if (ret) dev_err(lcd->dev, "failed to write 0x%.2x register and %d error.\n", fw->data[0], ret); release_firmware(fw); mutex_unlock(&lcd->lock); return ret; } static ssize_t read_reg_show(struct device *dev, struct device_attribute * attr, char *buf) { struct ea8061 *lcd = dev_get_drvdata(dev); if (lcd->cur_addr == 0) { dev_err(dev, "failed to set current lcd register.\n"); return -EINVAL; } return ea8061_read_reg(lcd, lcd->cur_addr, buf); } static ssize_t read_reg_store(struct device *dev, struct device_attribute * attr, const char *buf, 
size_t size) { struct ea8061 *lcd = dev_get_drvdata(dev); unsigned int value; int ret; ret = sscanf(buf, "0x%x", &value); if (ret < 0) return ret; dev_info(dev, "success to set 0x%x address.\n", value); lcd->cur_addr = value; return size; } static ssize_t write_reg_store(struct device *dev, struct device_attribute * attr, const char *buf, size_t size) { struct ea8061 *lcd = dev_get_drvdata(dev); char name[32]; int ret; ret = sscanf(buf, "%s", name); if (ret < 0) return ret; ret = ea8061_write_reg(lcd, name); if (ret < 0) return ret; dev_info(dev, "success to set %s address.\n", name); return size; } static struct device_attribute device_attrs[] = { __ATTR(acl_control, S_IRUGO|S_IWUSR|S_IWGRP, acl_control_show, acl_control_store), __ATTR(lcd_type, S_IRUGO, lcd_type_show, NULL), __ATTR(read_reg, S_IRUGO|S_IWUSR|S_IWGRP, read_reg_show, read_reg_store), __ATTR(write_reg, S_IWUSR|S_IWGRP, NULL, write_reg_store), }; static struct panel_model ea8061_model[] = { { .ver = VER_161, /* MACH_SLP_T0_LTE */ .name = "SMD_AMS555HBxx-0", } }; #ifdef CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ static int ea8061_notifier_callback(struct notifier_block *this, unsigned long event, void *_data) { struct ea8061 *lcd = container_of(this, struct ea8061, nb_disp); if (lcd->power == FB_BLANK_POWERDOWN) return NOTIFY_DONE; switch (event) { case EXYNOS4_DISPLAY_LV_HF: ea8061_panel_cond(lcd, 1); break; case EXYNOS4_DISPLAY_LV_LF: ea8061_panel_cond(lcd, 0); break; default: return NOTIFY_BAD; } return NOTIFY_DONE; } #endif static void ea8061_regulator_ctl(struct ea8061 *lcd, bool enable) { mutex_lock(&lcd->lock); if (enable) { if (lcd->reg_vdd3) regulator_enable(lcd->reg_vdd3); if (lcd->reg_vci) regulator_enable(lcd->reg_vci); } else { if (lcd->reg_vci) regulator_disable(lcd->reg_vci); if (lcd->reg_vdd3) regulator_disable(lcd->reg_vdd3); } mutex_unlock(&lcd->lock); } static void ea8061_power_on(struct mipi_dsim_lcd_device *dsim_dev, unsigned int enable) { struct ea8061 *lcd = 
dev_get_drvdata(&dsim_dev->dev); dev_dbg(lcd->dev, "%s:enable[%d]\n", __func__, enable); if (enable) { /* lcd power on */ ea8061_regulator_ctl(lcd, true); ea8061_delay(lcd->ddi_pd->reset_delay); /* lcd reset high */ if (lcd->ddi_pd->reset) lcd->ddi_pd->reset(lcd->ld); /* wait more than 5ms */ ea8061_delay(5); } else { /* lcd reset low */ if (lcd->ddi_pd->reset) lcd->ddi_pd->reset(lcd->ld); /* lcd power off */ ea8061_regulator_ctl(lcd, false); } } static int ea8061_check_mtp(struct mipi_dsim_lcd_device *dsim_dev) { /* FIXME:! read id mtp failed */ return 0; } static void ea8061_set_sequence(struct mipi_dsim_lcd_device *dsim_dev) { struct ea8061 *lcd = dev_get_drvdata(&dsim_dev->dev); ea8061_panel_init(lcd); ea8061_display_on(lcd); } static int ea8061_probe(struct mipi_dsim_lcd_device *dsim_dev) { struct ea8061 *lcd; int ret; int i; lcd = kzalloc(sizeof(struct ea8061), GFP_KERNEL); if (!lcd) { dev_err(&dsim_dev->dev, "failed to allocate ea8061 structure.\n"); return -ENOMEM; } lcd->dsim_dev = dsim_dev; lcd->ddi_pd = (struct lcd_platform_data *)dsim_dev->platform_data; lcd->dev = &dsim_dev->dev; mutex_init(&lcd->lock); lcd->reg_vdd3 = regulator_get(lcd->dev, "VDD3"); if (IS_ERR(lcd->reg_vdd3)) { ret = PTR_ERR(lcd->reg_vdd3); dev_err(lcd->dev, "failed to get %s regulator (%d)\n", "VDD3", ret); lcd->reg_vdd3 = NULL; } lcd->reg_vci = regulator_get(lcd->dev, "VCI"); if (IS_ERR(lcd->reg_vci)) { ret = PTR_ERR(lcd->reg_vci); dev_err(lcd->dev, "failed to get %s regulator (%d)\n", "VCI", ret); lcd->reg_vci = NULL; } lcd->ld = lcd_device_register("ea8061", lcd->dev, lcd, &ea8061_lcd_ops); if (IS_ERR(lcd->ld)) { dev_err(lcd->dev, "failed to register lcd ops.\n"); ret = PTR_ERR(lcd->ld); goto err_regulator; } lcd->bd = backlight_device_register("ea8061-bl", lcd->dev, lcd, &ea8061_backlight_ops, NULL); if (IS_ERR(lcd->bd)) { dev_err(lcd->dev, "failed to register backlight ops.\n"); ret = PTR_ERR(lcd->bd); goto err_unregister_lcd; } ea8061_regulator_ctl(lcd, true); if (lcd->ddi_pd) 
lcd->property = lcd->ddi_pd->pdata; #ifdef CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ if (lcd->property && lcd->property->dynamic_refresh) { lcd->nb_disp.notifier_call = ea8061_notifier_callback; ret = exynos4_display_register_client(&lcd->nb_disp); if (ret < 0) dev_warn(&lcd->ld->dev, "failed to register exynos-display notifier\n"); } #endif lcd->bd->props.max_brightness = MAX_BRIGHTNESS; lcd->bd->props.brightness = MAX_BRIGHTNESS; lcd->power = FB_BLANK_UNBLANK; lcd->model = ea8061_model; lcd->model_count = ARRAY_SIZE(ea8061_model); for (i = 0; i < ARRAY_SIZE(device_attrs); i++) { ret = device_create_file(&lcd->ld->dev, &device_attrs[i]); if (ret < 0) { dev_err(&lcd->ld->dev, "failed to add sysfs entries\n"); break; } } dev_set_drvdata(&dsim_dev->dev, lcd); dev_info(lcd->dev, "probed ea8061 panel driver(%s).\n", dev_name(&lcd->ld->dev)); return 0; err_unregister_lcd: lcd_device_unregister(lcd->ld); err_regulator: regulator_put(lcd->reg_vci); regulator_put(lcd->reg_vdd3); kfree(lcd); return ret; } static void ea8061_remove(struct mipi_dsim_lcd_device *dsim_dev) { struct ea8061 *lcd = dev_get_drvdata(&dsim_dev->dev); backlight_device_unregister(lcd->bd); lcd_device_unregister(lcd->ld); regulator_put(lcd->reg_vci); regulator_put(lcd->reg_vdd3); #ifdef CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ if (lcd->property && lcd->property->dynamic_refresh) exynos4_display_unregister_client(&lcd->nb_disp); #endif kfree(lcd); } #ifdef CONFIG_PM static int ea8061_suspend(struct mipi_dsim_lcd_device *dsim_dev) { struct ea8061 *lcd = dev_get_drvdata(&dsim_dev->dev); ea8061_display_off(lcd); ea8061_sleep_in(lcd); ea8061_delay(lcd->ddi_pd->power_off_delay); return 0; } static int ea8061_resume(struct mipi_dsim_lcd_device *dsim_dev) { struct ea8061 *lcd = dev_get_drvdata(&dsim_dev->dev); ea8061_sleep_out(lcd); ea8061_delay(lcd->ddi_pd->power_on_delay); return 0; } #else #define ea8061_suspend NULL #define ea8061_resume NULL #endif static struct mipi_dsim_lcd_driver ea8061_dsim_ddi_driver = { .name = 
"ea8061", .id = -1, .power_on = ea8061_power_on, .check_mtp = ea8061_check_mtp, .set_sequence = ea8061_set_sequence, .probe = ea8061_probe, .remove = ea8061_remove, .suspend = ea8061_suspend, .resume = ea8061_resume, }; static int ea8061_init(void) { s5p_mipi_dsi_register_lcd_driver(&ea8061_dsim_ddi_driver); return 0; } static void ea8061_exit(void) { return; } module_init(ea8061_init); module_exit(ea8061_exit); MODULE_AUTHOR("Joongmock Shin <jmock.shin@samsung.com>"); MODULE_AUTHOR("Eunchul Kim <chulspro.kim@samsung.com>"); MODULE_DESCRIPTION("MIPI-DSI based ea8061 AMOLED Panel Driver"); MODULE_LICENSE("GPL");
gpl-2.0
01org/KVMGT-kernel
drivers/char/hw_random/picoxcell-rng.c
413
4785
/* * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * All enquiries to support@picochip.com */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #define DATA_REG_OFFSET 0x0200 #define CSR_REG_OFFSET 0x0278 #define CSR_OUT_EMPTY_MASK (1 << 24) #define CSR_FAULT_MASK (1 << 1) #define TRNG_BLOCK_RESET_MASK (1 << 0) #define TAI_REG_OFFSET 0x0380 /* * The maximum amount of time in microseconds to spend waiting for data if the * core wants us to wait. The TRNG should generate 32 bits every 320ns so a * timeout of 20us seems reasonable. The TRNG does builtin tests of the data * for randomness so we can't always assume there is data present. */ #define PICO_TRNG_TIMEOUT 20 static void __iomem *rng_base; static struct clk *rng_clk; static struct device *rng_dev; static inline u32 picoxcell_trng_read_csr(void) { return __raw_readl(rng_base + CSR_REG_OFFSET); } static inline bool picoxcell_trng_is_empty(void) { return picoxcell_trng_read_csr() & CSR_OUT_EMPTY_MASK; } /* * Take the random number generator out of reset and make sure the interrupts * are masked. We shouldn't need to get large amounts of random bytes so just * poll the status register. The hardware generates 32 bits every 320ns so we * shouldn't have to wait long enough to warrant waiting for an IRQ. 
*/ static void picoxcell_trng_start(void) { __raw_writel(0, rng_base + TAI_REG_OFFSET); __raw_writel(0, rng_base + CSR_REG_OFFSET); } static void picoxcell_trng_reset(void) { __raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + CSR_REG_OFFSET); __raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + TAI_REG_OFFSET); picoxcell_trng_start(); } /* * Get some random data from the random number generator. The hw_random core * layer provides us with locking. */ static int picoxcell_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { int i; /* Wait for some data to become available. */ for (i = 0; i < PICO_TRNG_TIMEOUT && picoxcell_trng_is_empty(); ++i) { if (!wait) return 0; udelay(1); } if (picoxcell_trng_read_csr() & CSR_FAULT_MASK) { dev_err(rng_dev, "fault detected, resetting TRNG\n"); picoxcell_trng_reset(); return -EIO; } if (i == PICO_TRNG_TIMEOUT) return 0; *(u32 *)buf = __raw_readl(rng_base + DATA_REG_OFFSET); return sizeof(u32); } static struct hwrng picoxcell_trng = { .name = "picoxcell", .read = picoxcell_trng_read, }; static int picoxcell_trng_probe(struct platform_device *pdev) { int ret; struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_warn(&pdev->dev, "no memory resource\n"); return -ENOMEM; } if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), "picoxcell_trng")) { dev_warn(&pdev->dev, "unable to request io mem\n"); return -EBUSY; } rng_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); if (!rng_base) { dev_warn(&pdev->dev, "unable to remap io mem\n"); return -ENOMEM; } rng_clk = clk_get(&pdev->dev, NULL); if (IS_ERR(rng_clk)) { dev_warn(&pdev->dev, "no clk\n"); return PTR_ERR(rng_clk); } ret = clk_enable(rng_clk); if (ret) { dev_warn(&pdev->dev, "unable to enable clk\n"); goto err_enable; } picoxcell_trng_start(); ret = hwrng_register(&picoxcell_trng); if (ret) goto err_register; rng_dev = &pdev->dev; dev_info(&pdev->dev, "pixoxcell random number generator active\n"); return 0; 
err_register: clk_disable(rng_clk); err_enable: clk_put(rng_clk); return ret; } static int picoxcell_trng_remove(struct platform_device *pdev) { hwrng_unregister(&picoxcell_trng); clk_disable(rng_clk); clk_put(rng_clk); return 0; } #ifdef CONFIG_PM static int picoxcell_trng_suspend(struct device *dev) { clk_disable(rng_clk); return 0; } static int picoxcell_trng_resume(struct device *dev) { return clk_enable(rng_clk); } static const struct dev_pm_ops picoxcell_trng_pm_ops = { .suspend = picoxcell_trng_suspend, .resume = picoxcell_trng_resume, }; #endif /* CONFIG_PM */ static struct platform_driver picoxcell_trng_driver = { .probe = picoxcell_trng_probe, .remove = picoxcell_trng_remove, .driver = { .name = "picoxcell-trng", .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &picoxcell_trng_pm_ops, #endif /* CONFIG_PM */ }, }; module_platform_driver(picoxcell_trng_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jamie Iles"); MODULE_DESCRIPTION("Picochip picoXcell TRNG driver");
gpl-2.0
dutchanddutch/ti81xx-linux
drivers/virtio/virtio_pci.c
669
20105
/* * Virtio PCI driver * * This module allows virtio devices to be used over a virtual PCI device. * This can be used with QEMU based VMMs like KVM or Xen. * * Copyright IBM Corp. 2007 * * Authors: * Anthony Liguori <aliguori@us.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * */ #include <linux/module.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/virtio.h> #include <linux/virtio_config.h> #include <linux/virtio_ring.h> #include <linux/virtio_pci.h> #include <linux/highmem.h> #include <linux/spinlock.h> MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>"); MODULE_DESCRIPTION("virtio-pci"); MODULE_LICENSE("GPL"); MODULE_VERSION("1"); /* Our device structure */ struct virtio_pci_device { struct virtio_device vdev; struct pci_dev *pci_dev; /* the IO mapping for the PCI config space */ void __iomem *ioaddr; /* a list of queues so we can dispatch IRQs */ spinlock_t lock; struct list_head virtqueues; /* MSI-X support */ int msix_enabled; int intx_enabled; struct msix_entry *msix_entries; /* Name strings for interrupts. This size should be enough, * and I'm too lazy to allocate each name separately. */ char (*msix_names)[256]; /* Number of available vectors */ unsigned msix_vectors; /* Vectors allocated, excluding per-vq vectors if any */ unsigned msix_used_vectors; /* Whether we have vector per vq */ bool per_vq_vectors; }; /* Constants for MSI-X */ /* Use first vector for configuration changes, second and the rest for * virtqueues Thus, we need at least 2 vectors for MSI. 
*/ enum { VP_MSIX_CONFIG_VECTOR = 0, VP_MSIX_VQ_VECTOR = 1, }; struct virtio_pci_vq_info { /* the actual virtqueue */ struct virtqueue *vq; /* the number of entries in the queue */ int num; /* the index of the queue */ int queue_index; /* the virtual address of the ring queue */ void *queue; /* the list node for the virtqueues list */ struct list_head node; /* MSI-X vector (or none) */ unsigned msix_vector; }; /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ static struct pci_device_id virtio_pci_id_table[] = { { 0x1af4, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0 }, }; MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); /* A PCI device has it's own struct device and so does a virtio device so * we create a place for the virtio devices to show up in sysfs. I think it * would make more sense for virtio to not insist on having it's own device. */ static struct device *virtio_pci_root; /* Convert a generic virtio device to our structure */ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) { return container_of(vdev, struct virtio_pci_device, vdev); } /* virtio config->get_features() implementation */ static u32 vp_get_features(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* When someone needs more than 32 feature bits, we'll need to * steal a bit to indicate that the rest are somewhere else. */ return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); } /* virtio config->finalize_features() implementation */ static void vp_finalize_features(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); /* We only support 32 feature bits. 
*/ BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1); iowrite32(vdev->features[0], vp_dev->ioaddr+VIRTIO_PCI_GUEST_FEATURES); } /* virtio config->get() implementation */ static void vp_get(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG(vp_dev) + offset; u8 *ptr = buf; int i; for (i = 0; i < len; i++) ptr[i] = ioread8(ioaddr + i); } /* the config->set() implementation. it's symmetric to the config->get() * implementation */ static void vp_set(struct virtio_device *vdev, unsigned offset, const void *buf, unsigned len) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG(vp_dev) + offset; const u8 *ptr = buf; int i; for (i = 0; i < len; i++) iowrite8(ptr[i], ioaddr + i); } /* config->{get,set}_status() implementations */ static u8 vp_get_status(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); } static void vp_set_status(struct virtio_device *vdev, u8 status) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* We should never be setting status to 0. */ BUG_ON(status == 0); iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); } static void vp_reset(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* 0 status means a reset. */ iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); } /* the notify function used when creating a virt queue */ static void vp_notify(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); struct virtio_pci_vq_info *info = vq->priv; /* we write the queue's selector into the notification register to * signal the other end */ iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); } /* Handle a configuration change: Tell driver if it wants to know. 
*/ static irqreturn_t vp_config_changed(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; struct virtio_driver *drv; drv = container_of(vp_dev->vdev.dev.driver, struct virtio_driver, driver); if (drv && drv->config_changed) drv->config_changed(&vp_dev->vdev); return IRQ_HANDLED; } /* Notify all virtqueues on an interrupt. */ static irqreturn_t vp_vring_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; struct virtio_pci_vq_info *info; irqreturn_t ret = IRQ_NONE; unsigned long flags; spin_lock_irqsave(&vp_dev->lock, flags); list_for_each_entry(info, &vp_dev->virtqueues, node) { if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) ret = IRQ_HANDLED; } spin_unlock_irqrestore(&vp_dev->lock, flags); return ret; } /* A small wrapper to also acknowledge the interrupt when it's handled. * I really need an EIO hook for the vring so I can ack the interrupt once we * know that we'll be handling the IRQ but before we invoke the callback since * the callback may notify the host which results in the host attempting to * raise an interrupt that we would then mask once we acknowledged the * interrupt. */ static irqreturn_t vp_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; u8 isr; /* reading the ISR has the effect of also clearing it so it's very * important to save off the value. */ isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); /* It's definitely not us if the ISR was not high */ if (!isr) return IRQ_NONE; /* Configuration change? Tell driver if it wants to know. 
*/ if (isr & VIRTIO_PCI_ISR_CONFIG) vp_config_changed(irq, opaque); return vp_vring_interrupt(irq, opaque); } static void vp_free_vectors(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; if (vp_dev->intx_enabled) { free_irq(vp_dev->pci_dev->irq, vp_dev); vp_dev->intx_enabled = 0; } for (i = 0; i < vp_dev->msix_used_vectors; ++i) free_irq(vp_dev->msix_entries[i].vector, vp_dev); if (vp_dev->msix_enabled) { /* Disable the vector used for configuration */ iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); /* Flush the write out to device */ ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); pci_disable_msix(vp_dev->pci_dev); vp_dev->msix_enabled = 0; vp_dev->msix_vectors = 0; } vp_dev->msix_used_vectors = 0; kfree(vp_dev->msix_names); vp_dev->msix_names = NULL; kfree(vp_dev->msix_entries); vp_dev->msix_entries = NULL; } static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, bool per_vq_vectors) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); const char *name = dev_name(&vp_dev->vdev.dev); unsigned i, v; int err = -ENOMEM; vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, GFP_KERNEL); if (!vp_dev->msix_entries) goto error; vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, GFP_KERNEL); if (!vp_dev->msix_names) goto error; for (i = 0; i < nvectors; ++i) vp_dev->msix_entries[i].entry = i; /* pci_enable_msix returns positive if we can't get this many. 
*/ err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors); if (err > 0) err = -ENOSPC; if (err) goto error; vp_dev->msix_vectors = nvectors; vp_dev->msix_enabled = 1; /* Set the vector used for configuration */ v = vp_dev->msix_used_vectors; snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, "%s-config", name); err = request_irq(vp_dev->msix_entries[v].vector, vp_config_changed, 0, vp_dev->msix_names[v], vp_dev); if (err) goto error; ++vp_dev->msix_used_vectors; iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); /* Verify we had enough resources to assign the vector */ v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); if (v == VIRTIO_MSI_NO_VECTOR) { err = -EBUSY; goto error; } if (!per_vq_vectors) { /* Shared vector for all VQs */ v = vp_dev->msix_used_vectors; snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, "%s-virtqueues", name); err = request_irq(vp_dev->msix_entries[v].vector, vp_vring_interrupt, 0, vp_dev->msix_names[v], vp_dev); if (err) goto error; ++vp_dev->msix_used_vectors; } return 0; error: vp_free_vectors(vdev); return err; } static int vp_request_intx(struct virtio_device *vdev) { int err; struct virtio_pci_device *vp_dev = to_vp_device(vdev); err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vp_dev); if (!err) vp_dev->intx_enabled = 1; return err; } static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, u16 msix_vec) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info; struct virtqueue *vq; unsigned long flags, size; u16 num; int err; /* Select the queue we're interested in */ iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); /* Check if queue is either not available or already active. 
*/ num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM); if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) return ERR_PTR(-ENOENT); /* allocate and fill out our structure the represents an active * queue */ info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); info->queue_index = index; info->num = num; info->msix_vector = msix_vec; size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); if (info->queue == NULL) { err = -ENOMEM; goto out_info; } /* activate the queue */ iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); /* create the vring */ vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, vdev, info->queue, vp_notify, callback, name); if (!vq) { err = -ENOMEM; goto out_activate_queue; } vq->priv = info; info->vq = vq; if (msix_vec != VIRTIO_MSI_NO_VECTOR) { iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); if (msix_vec == VIRTIO_MSI_NO_VECTOR) { err = -EBUSY; goto out_assign; } } spin_lock_irqsave(&vp_dev->lock, flags); list_add(&info->node, &vp_dev->virtqueues); spin_unlock_irqrestore(&vp_dev->lock, flags); return vq; out_assign: vring_del_virtqueue(vq); out_activate_queue: iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); free_pages_exact(info->queue, size); out_info: kfree(info); return ERR_PTR(err); } static void vp_del_vq(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); struct virtio_pci_vq_info *info = vq->priv; unsigned long flags, size; spin_lock_irqsave(&vp_dev->lock, flags); list_del(&info->node); spin_unlock_irqrestore(&vp_dev->lock, flags); iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); if (vp_dev->msix_enabled) { iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); /* Flush the write out to device */ 
ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); } vring_del_virtqueue(vq); /* Select and deactivate the queue */ iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); free_pages_exact(info->queue, size); kfree(info); } /* the config->del_vqs() implementation */ static void vp_del_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq, *n; struct virtio_pci_vq_info *info; list_for_each_entry_safe(vq, n, &vdev->vqs, list) { info = vq->priv; if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR) free_irq(vp_dev->msix_entries[info->msix_vector].vector, vq); vp_del_vq(vq); } vp_dev->per_vq_vectors = false; vp_free_vectors(vdev); } static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[], bool use_msix, bool per_vq_vectors) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); u16 msix_vec; int i, err, nvectors, allocated_vectors; if (!use_msix) { /* Old style: one normal interrupt for change and all vqs. */ err = vp_request_intx(vdev); if (err) goto error_request; } else { if (per_vq_vectors) { /* Best option: one for change interrupt, one per vq. */ nvectors = 1; for (i = 0; i < nvqs; ++i) if (callbacks[i]) ++nvectors; } else { /* Second best: one for change, shared for all vqs. 
*/ nvectors = 2; } err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors); if (err) goto error_request; } vp_dev->per_vq_vectors = per_vq_vectors; allocated_vectors = vp_dev->msix_used_vectors; for (i = 0; i < nvqs; ++i) { if (!callbacks[i] || !vp_dev->msix_enabled) msix_vec = VIRTIO_MSI_NO_VECTOR; else if (vp_dev->per_vq_vectors) msix_vec = allocated_vectors++; else msix_vec = VP_MSIX_VQ_VECTOR; vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); goto error_find; } if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) continue; /* allocate per-vq irq if available and necessary */ snprintf(vp_dev->msix_names[msix_vec], sizeof *vp_dev->msix_names, "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]); err = request_irq(vp_dev->msix_entries[msix_vec].vector, vring_interrupt, 0, vp_dev->msix_names[msix_vec], vqs[i]); if (err) { vp_del_vq(vqs[i]); goto error_find; } } return 0; error_find: vp_del_vqs(vdev); error_request: return err; } /* the config->find_vqs() implementation */ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) { int err; /* Try MSI-X with one vector per queue. */ err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true); if (!err) return 0; /* Fallback: MSI-X with one vector for config, one shared for queues. */ err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, false); if (!err) return 0; /* Finally fall back to regular interrupts. 
*/ return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, false, false); } static struct virtio_config_ops virtio_pci_config_ops = { .get = vp_get, .set = vp_set, .get_status = vp_get_status, .set_status = vp_set_status, .reset = vp_reset, .find_vqs = vp_find_vqs, .del_vqs = vp_del_vqs, .get_features = vp_get_features, .finalize_features = vp_finalize_features, }; static void virtio_pci_release_dev(struct device *_d) { struct virtio_device *dev = container_of(_d, struct virtio_device, dev); struct virtio_pci_device *vp_dev = to_vp_device(dev); struct pci_dev *pci_dev = vp_dev->pci_dev; vp_del_vqs(dev); pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); pci_release_regions(pci_dev); pci_disable_device(pci_dev); kfree(vp_dev); } /* the PCI probing function */ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { struct virtio_pci_device *vp_dev; int err; /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) return -ENODEV; if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", VIRTIO_PCI_ABI_VERSION, pci_dev->revision); return -ENODEV; } /* allocate our structure and fill it out */ vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); if (vp_dev == NULL) return -ENOMEM; vp_dev->vdev.dev.parent = virtio_pci_root; vp_dev->vdev.dev.release = virtio_pci_release_dev; vp_dev->vdev.config = &virtio_pci_config_ops; vp_dev->pci_dev = pci_dev; INIT_LIST_HEAD(&vp_dev->virtqueues); spin_lock_init(&vp_dev->lock); /* Disable MSI/MSIX to bring device to a known good state. 
*/ pci_msi_off(pci_dev); /* enable the device */ err = pci_enable_device(pci_dev); if (err) goto out; err = pci_request_regions(pci_dev, "virtio-pci"); if (err) goto out_enable_device; vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); if (vp_dev->ioaddr == NULL) goto out_req_regions; pci_set_drvdata(pci_dev, vp_dev); pci_set_master(pci_dev); /* we use the subsystem vendor/device id as the virtio vendor/device * id. this allows us to use the same PCI vendor/device id for all * virtio devices and to identify the particular virtio driver by * the subsystem ids */ vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; vp_dev->vdev.id.device = pci_dev->subsystem_device; /* finally register the virtio device */ err = register_virtio_device(&vp_dev->vdev); if (err) goto out_set_drvdata; return 0; out_set_drvdata: pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); out_req_regions: pci_release_regions(pci_dev); out_enable_device: pci_disable_device(pci_dev); out: kfree(vp_dev); return err; } static void __devexit virtio_pci_remove(struct pci_dev *pci_dev) { struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); unregister_virtio_device(&vp_dev->vdev); } #ifdef CONFIG_PM static int virtio_pci_suspend(struct pci_dev *pci_dev, pm_message_t state) { pci_save_state(pci_dev); pci_set_power_state(pci_dev, PCI_D3hot); return 0; } static int virtio_pci_resume(struct pci_dev *pci_dev) { pci_restore_state(pci_dev); pci_set_power_state(pci_dev, PCI_D0); return 0; } #endif static struct pci_driver virtio_pci_driver = { .name = "virtio-pci", .id_table = virtio_pci_id_table, .probe = virtio_pci_probe, .remove = __devexit_p(virtio_pci_remove), #ifdef CONFIG_PM .suspend = virtio_pci_suspend, .resume = virtio_pci_resume, #endif }; static int __init virtio_pci_init(void) { int err; virtio_pci_root = root_device_register("virtio-pci"); if (IS_ERR(virtio_pci_root)) return PTR_ERR(virtio_pci_root); err = pci_register_driver(&virtio_pci_driver); if (err) 
root_device_unregister(virtio_pci_root); return err; } module_init(virtio_pci_init); static void __exit virtio_pci_exit(void) { pci_unregister_driver(&virtio_pci_driver); root_device_unregister(virtio_pci_root); } module_exit(virtio_pci_exit);
gpl-2.0
hsarkanen/linux-imx6
fs/ubifs/super.c
669
61332
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Artem Bityutskiy (Битюцкий Артём) * Adrian Hunter */ /* * This file implements UBIFS initialization and VFS superblock operations. Some * initialization stuff which is rather large and complex is placed at * corresponding subsystems, but most of it is here. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/ctype.h> #include <linux/kthread.h> #include <linux/parser.h> #include <linux/seq_file.h> #include <linux/mount.h> #include <linux/math64.h> #include <linux/writeback.h> #include "ubifs.h" /* * Maximum amount of memory we may 'kmalloc()' without worrying that we are * allocating too much. */ #define UBIFS_KMALLOC_OK (128*1024) /* Slab cache for UBIFS inodes */ struct kmem_cache *ubifs_inode_slab; /* UBIFS TNC shrinker description */ static struct shrinker ubifs_shrinker_info = { .shrink = ubifs_shrinker, .seeks = DEFAULT_SEEKS, }; /** * validate_inode - validate inode. * @c: UBIFS file-system description object * @inode: the inode to validate * * This is a helper function for 'ubifs_iget()' which validates various fields * of a newly built inode to make sure they contain sane values and prevent * possible vulnerabilities. Returns zero if the inode is all right and * a non-zero error code if not. 
*/ static int validate_inode(struct ubifs_info *c, const struct inode *inode) { int err; const struct ubifs_inode *ui = ubifs_inode(inode); if (inode->i_size > c->max_inode_sz) { ubifs_err("inode is too large (%lld)", (long long)inode->i_size); return 1; } if (ui->compr_type < 0 || ui->compr_type >= UBIFS_COMPR_TYPES_CNT) { ubifs_err("unknown compression type %d", ui->compr_type); return 2; } if (ui->xattr_names + ui->xattr_cnt > XATTR_LIST_MAX) return 3; if (ui->data_len < 0 || ui->data_len > UBIFS_MAX_INO_DATA) return 4; if (ui->xattr && !S_ISREG(inode->i_mode)) return 5; if (!ubifs_compr_present(ui->compr_type)) { ubifs_warn("inode %lu uses '%s' compression, but it was not compiled in", inode->i_ino, ubifs_compr_name(ui->compr_type)); } err = dbg_check_dir(c, inode); return err; } struct inode *ubifs_iget(struct super_block *sb, unsigned long inum) { int err; union ubifs_key key; struct ubifs_ino_node *ino; struct ubifs_info *c = sb->s_fs_info; struct inode *inode; struct ubifs_inode *ui; dbg_gen("inode %lu", inum); inode = iget_locked(sb, inum); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; ui = ubifs_inode(inode); ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS); if (!ino) { err = -ENOMEM; goto out; } ino_key_init(c, &key, inode->i_ino); err = ubifs_tnc_lookup(c, &key, ino); if (err) goto out_ino; inode->i_flags |= (S_NOCMTIME | S_NOATIME); set_nlink(inode, le32_to_cpu(ino->nlink)); i_uid_write(inode, le32_to_cpu(ino->uid)); i_gid_write(inode, le32_to_cpu(ino->gid)); inode->i_atime.tv_sec = (int64_t)le64_to_cpu(ino->atime_sec); inode->i_atime.tv_nsec = le32_to_cpu(ino->atime_nsec); inode->i_mtime.tv_sec = (int64_t)le64_to_cpu(ino->mtime_sec); inode->i_mtime.tv_nsec = le32_to_cpu(ino->mtime_nsec); inode->i_ctime.tv_sec = (int64_t)le64_to_cpu(ino->ctime_sec); inode->i_ctime.tv_nsec = le32_to_cpu(ino->ctime_nsec); inode->i_mode = le32_to_cpu(ino->mode); inode->i_size = le64_to_cpu(ino->size); ui->data_len = 
le32_to_cpu(ino->data_len); ui->flags = le32_to_cpu(ino->flags); ui->compr_type = le16_to_cpu(ino->compr_type); ui->creat_sqnum = le64_to_cpu(ino->creat_sqnum); ui->xattr_cnt = le32_to_cpu(ino->xattr_cnt); ui->xattr_size = le32_to_cpu(ino->xattr_size); ui->xattr_names = le32_to_cpu(ino->xattr_names); ui->synced_i_size = ui->ui_size = inode->i_size; ui->xattr = (ui->flags & UBIFS_XATTR_FL) ? 1 : 0; err = validate_inode(c, inode); if (err) goto out_invalid; /* Disable read-ahead */ inode->i_mapping->backing_dev_info = &c->bdi; switch (inode->i_mode & S_IFMT) { case S_IFREG: inode->i_mapping->a_ops = &ubifs_file_address_operations; inode->i_op = &ubifs_file_inode_operations; inode->i_fop = &ubifs_file_operations; if (ui->xattr) { ui->data = kmalloc(ui->data_len + 1, GFP_NOFS); if (!ui->data) { err = -ENOMEM; goto out_ino; } memcpy(ui->data, ino->data, ui->data_len); ((char *)ui->data)[ui->data_len] = '\0'; } else if (ui->data_len != 0) { err = 10; goto out_invalid; } break; case S_IFDIR: inode->i_op = &ubifs_dir_inode_operations; inode->i_fop = &ubifs_dir_operations; if (ui->data_len != 0) { err = 11; goto out_invalid; } break; case S_IFLNK: inode->i_op = &ubifs_symlink_inode_operations; if (ui->data_len <= 0 || ui->data_len > UBIFS_MAX_INO_DATA) { err = 12; goto out_invalid; } ui->data = kmalloc(ui->data_len + 1, GFP_NOFS); if (!ui->data) { err = -ENOMEM; goto out_ino; } memcpy(ui->data, ino->data, ui->data_len); ((char *)ui->data)[ui->data_len] = '\0'; break; case S_IFBLK: case S_IFCHR: { dev_t rdev; union ubifs_dev_desc *dev; ui->data = kmalloc(sizeof(union ubifs_dev_desc), GFP_NOFS); if (!ui->data) { err = -ENOMEM; goto out_ino; } dev = (union ubifs_dev_desc *)ino->data; if (ui->data_len == sizeof(dev->new)) rdev = new_decode_dev(le32_to_cpu(dev->new)); else if (ui->data_len == sizeof(dev->huge)) rdev = huge_decode_dev(le64_to_cpu(dev->huge)); else { err = 13; goto out_invalid; } memcpy(ui->data, ino->data, ui->data_len); inode->i_op = 
&ubifs_file_inode_operations; init_special_inode(inode, inode->i_mode, rdev); break; } case S_IFSOCK: case S_IFIFO: inode->i_op = &ubifs_file_inode_operations; init_special_inode(inode, inode->i_mode, 0); if (ui->data_len != 0) { err = 14; goto out_invalid; } break; default: err = 15; goto out_invalid; } kfree(ino); ubifs_set_inode_flags(inode); unlock_new_inode(inode); return inode; out_invalid: ubifs_err("inode %lu validation failed, error %d", inode->i_ino, err); ubifs_dump_node(c, ino); ubifs_dump_inode(c, inode); err = -EINVAL; out_ino: kfree(ino); out: ubifs_err("failed to read inode %lu, error %d", inode->i_ino, err); iget_failed(inode); return ERR_PTR(err); } static struct inode *ubifs_alloc_inode(struct super_block *sb) { struct ubifs_inode *ui; ui = kmem_cache_alloc(ubifs_inode_slab, GFP_NOFS); if (!ui) return NULL; memset((void *)ui + sizeof(struct inode), 0, sizeof(struct ubifs_inode) - sizeof(struct inode)); mutex_init(&ui->ui_mutex); spin_lock_init(&ui->ui_lock); return &ui->vfs_inode; }; static void ubifs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); struct ubifs_inode *ui = ubifs_inode(inode); kmem_cache_free(ubifs_inode_slab, ui); } static void ubifs_destroy_inode(struct inode *inode) { struct ubifs_inode *ui = ubifs_inode(inode); kfree(ui->data); call_rcu(&inode->i_rcu, ubifs_i_callback); } /* * Note, Linux write-back code calls this without 'i_mutex'. */ static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc) { int err = 0; struct ubifs_info *c = inode->i_sb->s_fs_info; struct ubifs_inode *ui = ubifs_inode(inode); ubifs_assert(!ui->xattr); if (is_bad_inode(inode)) return 0; mutex_lock(&ui->ui_mutex); /* * Due to races between write-back forced by budgeting * (see 'sync_some_inodes()') and background write-back, the inode may * have already been synchronized, do not do this again. This might * also happen if it was synchronized in an VFS operation, e.g. * 'ubifs_link()'. 
*/ if (!ui->dirty) { mutex_unlock(&ui->ui_mutex); return 0; } /* * As an optimization, do not write orphan inodes to the media just * because this is not needed. */ dbg_gen("inode %lu, mode %#x, nlink %u", inode->i_ino, (int)inode->i_mode, inode->i_nlink); if (inode->i_nlink) { err = ubifs_jnl_write_inode(c, inode); if (err) ubifs_err("can't write inode %lu, error %d", inode->i_ino, err); else err = dbg_check_inode_size(c, inode, ui->ui_size); } ui->dirty = 0; mutex_unlock(&ui->ui_mutex); ubifs_release_dirty_inode_budget(c, ui); return err; } static void ubifs_evict_inode(struct inode *inode) { int err; struct ubifs_info *c = inode->i_sb->s_fs_info; struct ubifs_inode *ui = ubifs_inode(inode); if (ui->xattr) /* * Extended attribute inode deletions are fully handled in * 'ubifs_removexattr()'. These inodes are special and have * limited usage, so there is nothing to do here. */ goto out; dbg_gen("inode %lu, mode %#x", inode->i_ino, (int)inode->i_mode); ubifs_assert(!atomic_read(&inode->i_count)); truncate_inode_pages(&inode->i_data, 0); if (inode->i_nlink) goto done; if (is_bad_inode(inode)) goto out; ui->ui_size = inode->i_size = 0; err = ubifs_jnl_delete_inode(c, inode); if (err) /* * Worst case we have a lost orphan inode wasting space, so a * simple error message is OK here. 
*/ ubifs_err("can't delete inode %lu, error %d", inode->i_ino, err); out: if (ui->dirty) ubifs_release_dirty_inode_budget(c, ui); else { /* We've deleted something - clean the "no space" flags */ c->bi.nospace = c->bi.nospace_rp = 0; smp_wmb(); } done: clear_inode(inode); } static void ubifs_dirty_inode(struct inode *inode, int flags) { struct ubifs_inode *ui = ubifs_inode(inode); ubifs_assert(mutex_is_locked(&ui->ui_mutex)); if (!ui->dirty) { ui->dirty = 1; dbg_gen("inode %lu", inode->i_ino); } } static int ubifs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct ubifs_info *c = dentry->d_sb->s_fs_info; unsigned long long free; __le32 *uuid = (__le32 *)c->uuid; free = ubifs_get_free_space(c); dbg_gen("free space %lld bytes (%lld blocks)", free, free >> UBIFS_BLOCK_SHIFT); buf->f_type = UBIFS_SUPER_MAGIC; buf->f_bsize = UBIFS_BLOCK_SIZE; buf->f_blocks = c->block_cnt; buf->f_bfree = free >> UBIFS_BLOCK_SHIFT; if (free > c->report_rp_size) buf->f_bavail = (free - c->report_rp_size) >> UBIFS_BLOCK_SHIFT; else buf->f_bavail = 0; buf->f_files = 0; buf->f_ffree = 0; buf->f_namelen = UBIFS_MAX_NLEN; buf->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[2]); buf->f_fsid.val[1] = le32_to_cpu(uuid[1]) ^ le32_to_cpu(uuid[3]); ubifs_assert(buf->f_bfree <= c->block_cnt); return 0; } static int ubifs_show_options(struct seq_file *s, struct dentry *root) { struct ubifs_info *c = root->d_sb->s_fs_info; if (c->mount_opts.unmount_mode == 2) seq_printf(s, ",fast_unmount"); else if (c->mount_opts.unmount_mode == 1) seq_printf(s, ",norm_unmount"); if (c->mount_opts.bulk_read == 2) seq_printf(s, ",bulk_read"); else if (c->mount_opts.bulk_read == 1) seq_printf(s, ",no_bulk_read"); if (c->mount_opts.chk_data_crc == 2) seq_printf(s, ",chk_data_crc"); else if (c->mount_opts.chk_data_crc == 1) seq_printf(s, ",no_chk_data_crc"); if (c->mount_opts.override_compr) { seq_printf(s, ",compr=%s", ubifs_compr_name(c->mount_opts.compr_type)); } return 0; } static int 
ubifs_sync_fs(struct super_block *sb, int wait) { int i, err; struct ubifs_info *c = sb->s_fs_info; /* * Zero @wait is just an advisory thing to help the file system shove * lots of data into the queues, and there will be the second * '->sync_fs()' call, with non-zero @wait. */ if (!wait) return 0; /* * Synchronize write buffers, because 'ubifs_run_commit()' does not * do this if it waits for an already running commit. */ for (i = 0; i < c->jhead_cnt; i++) { err = ubifs_wbuf_sync(&c->jheads[i].wbuf); if (err) return err; } /* * Strictly speaking, it is not necessary to commit the journal here, * synchronizing write-buffers would be enough. But committing makes * UBIFS free space predictions much more accurate, so we want to let * the user be able to get more accurate results of 'statfs()' after * they synchronize the file system. */ err = ubifs_run_commit(c); if (err) return err; return ubi_sync(c->vi.ubi_num); } /** * init_constants_early - initialize UBIFS constants. * @c: UBIFS file-system description object * * This function initialize UBIFS constants which do not need the superblock to * be read. It also checks that the UBI volume satisfies basic UBIFS * requirements. Returns zero in case of success and a negative error code in * case of failure. */ static int init_constants_early(struct ubifs_info *c) { if (c->vi.corrupted) { ubifs_warn("UBI volume is corrupted - read-only mode"); c->ro_media = 1; } if (c->di.ro_mode) { ubifs_msg("read-only UBI device"); c->ro_media = 1; } if (c->vi.vol_type == UBI_STATIC_VOLUME) { ubifs_msg("static UBI volume - read-only mode"); c->ro_media = 1; } c->leb_cnt = c->vi.size; c->leb_size = c->vi.usable_leb_size; c->leb_start = c->di.leb_start; c->half_leb_size = c->leb_size / 2; c->min_io_size = c->di.min_io_size; c->min_io_shift = fls(c->min_io_size) - 1; c->max_write_size = c->di.max_write_size; c->max_write_shift = fls(c->max_write_size) - 1; if (c->leb_size < UBIFS_MIN_LEB_SZ) { ubifs_err("too small LEBs (%d bytes), min. 
is %d bytes", c->leb_size, UBIFS_MIN_LEB_SZ); return -EINVAL; } if (c->leb_cnt < UBIFS_MIN_LEB_CNT) { ubifs_err("too few LEBs (%d), min. is %d", c->leb_cnt, UBIFS_MIN_LEB_CNT); return -EINVAL; } if (!is_power_of_2(c->min_io_size)) { ubifs_err("bad min. I/O size %d", c->min_io_size); return -EINVAL; } /* * Maximum write size has to be greater or equivalent to min. I/O * size, and be multiple of min. I/O size. */ if (c->max_write_size < c->min_io_size || c->max_write_size % c->min_io_size || !is_power_of_2(c->max_write_size)) { ubifs_err("bad write buffer size %d for %d min. I/O unit", c->max_write_size, c->min_io_size); return -EINVAL; } /* * UBIFS aligns all node to 8-byte boundary, so to make function in * io.c simpler, assume minimum I/O unit size to be 8 bytes if it is * less than 8. */ if (c->min_io_size < 8) { c->min_io_size = 8; c->min_io_shift = 3; if (c->max_write_size < c->min_io_size) { c->max_write_size = c->min_io_size; c->max_write_shift = c->min_io_shift; } } c->ref_node_alsz = ALIGN(UBIFS_REF_NODE_SZ, c->min_io_size); c->mst_node_alsz = ALIGN(UBIFS_MST_NODE_SZ, c->min_io_size); /* * Initialize node length ranges which are mostly needed for node * length validation. 
*/ c->ranges[UBIFS_PAD_NODE].len = UBIFS_PAD_NODE_SZ; c->ranges[UBIFS_SB_NODE].len = UBIFS_SB_NODE_SZ; c->ranges[UBIFS_MST_NODE].len = UBIFS_MST_NODE_SZ; c->ranges[UBIFS_REF_NODE].len = UBIFS_REF_NODE_SZ; c->ranges[UBIFS_TRUN_NODE].len = UBIFS_TRUN_NODE_SZ; c->ranges[UBIFS_CS_NODE].len = UBIFS_CS_NODE_SZ; c->ranges[UBIFS_INO_NODE].min_len = UBIFS_INO_NODE_SZ; c->ranges[UBIFS_INO_NODE].max_len = UBIFS_MAX_INO_NODE_SZ; c->ranges[UBIFS_ORPH_NODE].min_len = UBIFS_ORPH_NODE_SZ + sizeof(__le64); c->ranges[UBIFS_ORPH_NODE].max_len = c->leb_size; c->ranges[UBIFS_DENT_NODE].min_len = UBIFS_DENT_NODE_SZ; c->ranges[UBIFS_DENT_NODE].max_len = UBIFS_MAX_DENT_NODE_SZ; c->ranges[UBIFS_XENT_NODE].min_len = UBIFS_XENT_NODE_SZ; c->ranges[UBIFS_XENT_NODE].max_len = UBIFS_MAX_XENT_NODE_SZ; c->ranges[UBIFS_DATA_NODE].min_len = UBIFS_DATA_NODE_SZ; c->ranges[UBIFS_DATA_NODE].max_len = UBIFS_MAX_DATA_NODE_SZ; /* * Minimum indexing node size is amended later when superblock is * read and the key length is known. */ c->ranges[UBIFS_IDX_NODE].min_len = UBIFS_IDX_NODE_SZ + UBIFS_BRANCH_SZ; /* * Maximum indexing node size is amended later when superblock is * read and the fanout is known. */ c->ranges[UBIFS_IDX_NODE].max_len = INT_MAX; /* * Initialize dead and dark LEB space watermarks. See gc.c for comments * about these values. */ c->dead_wm = ALIGN(MIN_WRITE_SZ, c->min_io_size); c->dark_wm = ALIGN(UBIFS_MAX_NODE_SZ, c->min_io_size); /* * Calculate how many bytes would be wasted at the end of LEB if it was * fully filled with data nodes of maximum size. This is used in * calculations when reporting free space. */ c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ; /* Buffer size for bulk-reads */ c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ; if (c->max_bu_buf_len > c->leb_size) c->max_bu_buf_len = c->leb_size; return 0; } /** * bud_wbuf_callback - bud LEB write-buffer synchronization call-back. 
 * @c: UBIFS file-system description object
 * @lnum: LEB the write-buffer was synchronized to
 * @free: how many free bytes left in this LEB
 * @pad: how many bytes were padded
 *
 * This is a callback function which is called by the I/O unit when the
 * write-buffer is synchronized. We need this to correctly maintain space
 * accounting in bud logical eraseblocks. This function returns zero in case of
 * success and a negative error code in case of failure.
 *
 * This function actually belongs to the journal, but we keep it here because
 * we want to keep it static.
 */
static int bud_wbuf_callback(struct ubifs_info *c, int lnum, int free, int pad)
{
	return ubifs_update_one_lp(c, lnum, free, pad, 0, 0);
}

/**
 * init_constants_sb - initialize UBIFS constants.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which initializes various UBIFS constants after
 * the superblock has been read. It also checks various UBIFS parameters and
 * makes sure they are all right. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int init_constants_sb(struct ubifs_info *c)
{
	int tmp, err;
	long long tmp64;

	c->main_bytes = (long long)c->main_lebs * c->leb_size;

	/* Worst-case in-memory znode size, bounded by the tree fanout */
	c->max_znode_sz = sizeof(struct ubifs_znode) +
				c->fanout * sizeof(struct ubifs_zbranch);

	/* Smallest possible indexing node: a single branch */
	tmp = ubifs_idx_node_sz(c, 1);
	c->ranges[UBIFS_IDX_NODE].min_len = tmp;
	c->min_idx_node_sz = ALIGN(tmp, 8);

	/* Largest possible indexing node: a full fanout of branches */
	tmp = ubifs_idx_node_sz(c, c->fanout);
	c->ranges[UBIFS_IDX_NODE].max_len = tmp;
	c->max_idx_node_sz = ALIGN(tmp, 8);

	/* Make sure LEB size is large enough to fit full commit */
	tmp = UBIFS_CS_NODE_SZ + UBIFS_REF_NODE_SZ * c->jhead_cnt;
	tmp = ALIGN(tmp, c->min_io_size);
	if (tmp > c->leb_size) {
		ubifs_err("too small LEB size %d, at least %d needed",
			  c->leb_size, tmp);
		return -EINVAL;
	}

	/*
	 * Make sure that the log is large enough to fit reference nodes for
	 * all buds plus one reserved LEB.
	 */
	tmp64 = c->max_bud_bytes + c->leb_size - 1;
	c->max_bud_cnt = div_u64(tmp64, c->leb_size);
	tmp = (c->ref_node_alsz * c->max_bud_cnt + c->leb_size - 1);
	tmp /= c->leb_size;
	tmp += 1;
	if (c->log_lebs < tmp) {
		ubifs_err("too small log %d LEBs, required min. %d LEBs",
			  c->log_lebs, tmp);
		return -EINVAL;
	}

	/*
	 * When budgeting we assume worst-case scenarios: the pages are not
	 * compressed and direntries are of the maximum size.
	 *
	 * Note, data which may be stored in inodes is budgeted separately, so
	 * it is not included into 'c->bi.inode_budget'.
	 */
	c->bi.page_budget = UBIFS_MAX_DATA_NODE_SZ * UBIFS_BLOCKS_PER_PAGE;
	c->bi.inode_budget = UBIFS_INO_NODE_SZ;
	c->bi.dent_budget = UBIFS_MAX_DENT_NODE_SZ;

	/*
	 * When the amount of flash space used by buds becomes
	 * 'c->max_bud_bytes', UBIFS just blocks all writers and starts commit.
	 * The writers are unblocked when the commit is finished. To avoid
	 * writers to be blocked UBIFS initiates background commit in advance,
	 * when number of bud bytes becomes above the limit defined below
	 * (13/16 of the maximum).
	 */
	c->bg_bud_bytes = (c->max_bud_bytes * 13) >> 4;

	/*
	 * Ensure minimum journal size. All the bytes in the journal heads are
	 * considered to be used, when calculating the current journal usage.
	 * Consequently, if the journal is too small, UBIFS will treat it as
	 * always full.
	 */
	tmp64 = (long long)(c->jhead_cnt + 1) * c->leb_size + 1;
	if (c->bg_bud_bytes < tmp64)
		c->bg_bud_bytes = tmp64;
	if (c->max_bud_bytes < tmp64 + c->leb_size)
		c->max_bud_bytes = tmp64 + c->leb_size;

	err = ubifs_calc_lpt_geom(c);
	if (err)
		return err;

	/* Initialize effective LEB size used in budgeting calculations */
	c->idx_leb_size = c->leb_size - c->max_idx_node_sz;
	return 0;
}

/**
 * init_constants_master - initialize UBIFS constants.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which initializes various UBIFS constants after
 * the master node has been read. It also checks various UBIFS parameters and
 * makes sure they are all right.
*/ static void init_constants_master(struct ubifs_info *c) { long long tmp64; c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c); c->report_rp_size = ubifs_reported_space(c, c->rp_size); /* * Calculate total amount of FS blocks. This number is not used * internally because it does not make much sense for UBIFS, but it is * necessary to report something for the 'statfs()' call. * * Subtract the LEB reserved for GC, the LEB which is reserved for * deletions, minimum LEBs for the index, and assume only one journal * head is available. */ tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt + 1; tmp64 *= (long long)c->leb_size - c->leb_overhead; tmp64 = ubifs_reported_space(c, tmp64); c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT; } /** * take_gc_lnum - reserve GC LEB. * @c: UBIFS file-system description object * * This function ensures that the LEB reserved for garbage collection is marked * as "taken" in lprops. We also have to set free space to LEB size and dirty * space to zero, because lprops may contain out-of-date information if the * file-system was un-mounted before it has been committed. This function * returns zero in case of success and a negative error code in case of * failure. */ static int take_gc_lnum(struct ubifs_info *c) { int err; if (c->gc_lnum == -1) { ubifs_err("no LEB for GC"); return -EINVAL; } /* And we have to tell lprops that this LEB is taken */ err = ubifs_change_one_lp(c, c->gc_lnum, c->leb_size, 0, LPROPS_TAKEN, 0, 0); return err; } /** * alloc_wbufs - allocate write-buffers. * @c: UBIFS file-system description object * * This helper function allocates and initializes UBIFS write-buffers. Returns * zero in case of success and %-ENOMEM in case of failure. 
*/ static int alloc_wbufs(struct ubifs_info *c) { int i, err; c->jheads = kzalloc(c->jhead_cnt * sizeof(struct ubifs_jhead), GFP_KERNEL); if (!c->jheads) return -ENOMEM; /* Initialize journal heads */ for (i = 0; i < c->jhead_cnt; i++) { INIT_LIST_HEAD(&c->jheads[i].buds_list); err = ubifs_wbuf_init(c, &c->jheads[i].wbuf); if (err) return err; c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback; c->jheads[i].wbuf.jhead = i; c->jheads[i].grouped = 1; } /* * Garbage Collector head does not need to be synchronized by timer. * Also GC head nodes are not grouped. */ c->jheads[GCHD].wbuf.no_timer = 1; c->jheads[GCHD].grouped = 0; return 0; } /** * free_wbufs - free write-buffers. * @c: UBIFS file-system description object */ static void free_wbufs(struct ubifs_info *c) { int i; if (c->jheads) { for (i = 0; i < c->jhead_cnt; i++) { kfree(c->jheads[i].wbuf.buf); kfree(c->jheads[i].wbuf.inodes); } kfree(c->jheads); c->jheads = NULL; } } /** * free_orphans - free orphans. * @c: UBIFS file-system description object */ static void free_orphans(struct ubifs_info *c) { struct ubifs_orphan *orph; while (c->orph_dnext) { orph = c->orph_dnext; c->orph_dnext = orph->dnext; list_del(&orph->list); kfree(orph); } while (!list_empty(&c->orph_list)) { orph = list_entry(c->orph_list.next, struct ubifs_orphan, list); list_del(&orph->list); kfree(orph); ubifs_err("orphan list not empty at unmount"); } vfree(c->orph_buf); c->orph_buf = NULL; } /** * free_buds - free per-bud objects. * @c: UBIFS file-system description object */ static void free_buds(struct ubifs_info *c) { struct rb_node *this = c->buds.rb_node; struct ubifs_bud *bud; while (this) { if (this->rb_left) this = this->rb_left; else if (this->rb_right) this = this->rb_right; else { bud = rb_entry(this, struct ubifs_bud, rb); this = rb_parent(this); if (this) { if (this->rb_left == &bud->rb) this->rb_left = NULL; else this->rb_right = NULL; } kfree(bud); } } } /** * check_volume_empty - check if the UBI volume is empty. 
* @c: UBIFS file-system description object * * This function checks if the UBIFS volume is empty by looking if its LEBs are * mapped or not. The result of checking is stored in the @c->empty variable. * Returns zero in case of success and a negative error code in case of * failure. */ static int check_volume_empty(struct ubifs_info *c) { int lnum, err; c->empty = 1; for (lnum = 0; lnum < c->leb_cnt; lnum++) { err = ubifs_is_mapped(c, lnum); if (unlikely(err < 0)) return err; if (err == 1) { c->empty = 0; break; } cond_resched(); } return 0; } /* * UBIFS mount options. * * Opt_fast_unmount: do not run a journal commit before un-mounting * Opt_norm_unmount: run a journal commit before un-mounting * Opt_bulk_read: enable bulk-reads * Opt_no_bulk_read: disable bulk-reads * Opt_chk_data_crc: check CRCs when reading data nodes * Opt_no_chk_data_crc: do not check CRCs when reading data nodes * Opt_override_compr: override default compressor * Opt_err: just end of array marker */ enum { Opt_fast_unmount, Opt_norm_unmount, Opt_bulk_read, Opt_no_bulk_read, Opt_chk_data_crc, Opt_no_chk_data_crc, Opt_override_compr, Opt_err, }; static const match_table_t tokens = { {Opt_fast_unmount, "fast_unmount"}, {Opt_norm_unmount, "norm_unmount"}, {Opt_bulk_read, "bulk_read"}, {Opt_no_bulk_read, "no_bulk_read"}, {Opt_chk_data_crc, "chk_data_crc"}, {Opt_no_chk_data_crc, "no_chk_data_crc"}, {Opt_override_compr, "compr=%s"}, {Opt_err, NULL}, }; /** * parse_standard_option - parse a standard mount option. * @option: the option to parse * * Normally, standard mount options like "sync" are passed to file-systems as * flags. However, when a "rootflags=" kernel boot parameter is used, they may * be present in the options string. This function tries to deal with this * situation and parse standard options. Returns 0 if the option was not * recognized, and the corresponding integer flag if it was. * * UBIFS is only interested in the "sync" option, so do not check for anything * else. 
*/ static int parse_standard_option(const char *option) { ubifs_msg("parse %s", option); if (!strcmp(option, "sync")) return MS_SYNCHRONOUS; return 0; } /** * ubifs_parse_options - parse mount parameters. * @c: UBIFS file-system description object * @options: parameters to parse * @is_remount: non-zero if this is FS re-mount * * This function parses UBIFS mount options and returns zero in case success * and a negative error code in case of failure. */ static int ubifs_parse_options(struct ubifs_info *c, char *options, int is_remount) { char *p; substring_t args[MAX_OPT_ARGS]; if (!options) return 0; while ((p = strsep(&options, ","))) { int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { /* * %Opt_fast_unmount and %Opt_norm_unmount options are ignored. * We accept them in order to be backward-compatible. But this * should be removed at some point. */ case Opt_fast_unmount: c->mount_opts.unmount_mode = 2; break; case Opt_norm_unmount: c->mount_opts.unmount_mode = 1; break; case Opt_bulk_read: c->mount_opts.bulk_read = 2; c->bulk_read = 1; break; case Opt_no_bulk_read: c->mount_opts.bulk_read = 1; c->bulk_read = 0; break; case Opt_chk_data_crc: c->mount_opts.chk_data_crc = 2; c->no_chk_data_crc = 0; break; case Opt_no_chk_data_crc: c->mount_opts.chk_data_crc = 1; c->no_chk_data_crc = 1; break; case Opt_override_compr: { char *name = match_strdup(&args[0]); if (!name) return -ENOMEM; if (!strcmp(name, "none")) c->mount_opts.compr_type = UBIFS_COMPR_NONE; else if (!strcmp(name, "lzo")) c->mount_opts.compr_type = UBIFS_COMPR_LZO; else if (!strcmp(name, "zlib")) c->mount_opts.compr_type = UBIFS_COMPR_ZLIB; else { ubifs_err("unknown compressor \"%s\"", name); kfree(name); return -EINVAL; } kfree(name); c->mount_opts.override_compr = 1; c->default_compr = c->mount_opts.compr_type; break; } default: { unsigned long flag; struct super_block *sb = c->vfs_sb; flag = parse_standard_option(p); if (!flag) { ubifs_err("unrecognized mount option 
\"%s\" or missing value", p); return -EINVAL; } sb->s_flags |= flag; break; } } } return 0; } /** * destroy_journal - destroy journal data structures. * @c: UBIFS file-system description object * * This function destroys journal data structures including those that may have * been created by recovery functions. */ static void destroy_journal(struct ubifs_info *c) { while (!list_empty(&c->unclean_leb_list)) { struct ubifs_unclean_leb *ucleb; ucleb = list_entry(c->unclean_leb_list.next, struct ubifs_unclean_leb, list); list_del(&ucleb->list); kfree(ucleb); } while (!list_empty(&c->old_buds)) { struct ubifs_bud *bud; bud = list_entry(c->old_buds.next, struct ubifs_bud, list); list_del(&bud->list); kfree(bud); } ubifs_destroy_idx_gc(c); ubifs_destroy_size_tree(c); ubifs_tnc_close(c); free_buds(c); } /** * bu_init - initialize bulk-read information. * @c: UBIFS file-system description object */ static void bu_init(struct ubifs_info *c) { ubifs_assert(c->bulk_read == 1); if (c->bu.buf) return; /* Already initialized */ again: c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN); if (!c->bu.buf) { if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) { c->max_bu_buf_len = UBIFS_KMALLOC_OK; goto again; } /* Just disable bulk-read */ ubifs_warn("cannot allocate %d bytes of memory for bulk-read, disabling it", c->max_bu_buf_len); c->mount_opts.bulk_read = 1; c->bulk_read = 0; return; } } /** * check_free_space - check if there is enough free space to mount. * @c: UBIFS file-system description object * * This function makes sure UBIFS has enough free space to be mounted in * read/write mode. UBIFS must always have some free space to allow deletions. */ static int check_free_space(struct ubifs_info *c) { ubifs_assert(c->dark_wm > 0); if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) { ubifs_err("insufficient free space to mount in R/W mode"); ubifs_dump_budg(c, &c->bi); ubifs_dump_lprops(c); return -ENOSPC; } return 0; } /** * mount_ubifs - mount UBIFS file-system. 
* @c: UBIFS file-system description object * * This function mounts UBIFS file system. Returns zero in case of success and * a negative error code in case of failure. */ static int mount_ubifs(struct ubifs_info *c) { int err; long long x, y; size_t sz; c->ro_mount = !!(c->vfs_sb->s_flags & MS_RDONLY); err = init_constants_early(c); if (err) return err; err = ubifs_debugging_init(c); if (err) return err; err = check_volume_empty(c); if (err) goto out_free; if (c->empty && (c->ro_mount || c->ro_media)) { /* * This UBI volume is empty, and read-only, or the file system * is mounted read-only - we cannot format it. */ ubifs_err("can't format empty UBI volume: read-only %s", c->ro_media ? "UBI volume" : "mount"); err = -EROFS; goto out_free; } if (c->ro_media && !c->ro_mount) { ubifs_err("cannot mount read-write - read-only media"); err = -EROFS; goto out_free; } /* * The requirement for the buffer is that it should fit indexing B-tree * height amount of integers. We assume the height if the TNC tree will * never exceed 64. */ err = -ENOMEM; c->bottom_up_buf = kmalloc(BOTTOM_UP_HEIGHT * sizeof(int), GFP_KERNEL); if (!c->bottom_up_buf) goto out_free; c->sbuf = vmalloc(c->leb_size); if (!c->sbuf) goto out_free; if (!c->ro_mount) { c->ileb_buf = vmalloc(c->leb_size); if (!c->ileb_buf) goto out_free; } if (c->bulk_read == 1) bu_init(c); if (!c->ro_mount) { c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ, GFP_KERNEL); if (!c->write_reserve_buf) goto out_free; } c->mounting = 1; err = ubifs_read_superblock(c); if (err) goto out_free; /* * Make sure the compressor which is set as default in the superblock * or overridden by mount options is actually compiled in. 
*/ if (!ubifs_compr_present(c->default_compr)) { ubifs_err("'compressor \"%s\" is not compiled in", ubifs_compr_name(c->default_compr)); err = -ENOTSUPP; goto out_free; } err = init_constants_sb(c); if (err) goto out_free; sz = ALIGN(c->max_idx_node_sz, c->min_io_size); sz = ALIGN(sz + c->max_idx_node_sz, c->min_io_size); c->cbuf = kmalloc(sz, GFP_NOFS); if (!c->cbuf) { err = -ENOMEM; goto out_free; } err = alloc_wbufs(c); if (err) goto out_cbuf; sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id); if (!c->ro_mount) { /* Create background thread */ c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name); if (IS_ERR(c->bgt)) { err = PTR_ERR(c->bgt); c->bgt = NULL; ubifs_err("cannot spawn \"%s\", error %d", c->bgt_name, err); goto out_wbufs; } wake_up_process(c->bgt); } err = ubifs_read_master(c); if (err) goto out_master; init_constants_master(c); if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) { ubifs_msg("recovery needed"); c->need_recovery = 1; } if (c->need_recovery && !c->ro_mount) { err = ubifs_recover_inl_heads(c, c->sbuf); if (err) goto out_master; } err = ubifs_lpt_init(c, 1, !c->ro_mount); if (err) goto out_master; if (!c->ro_mount && c->space_fixup) { err = ubifs_fixup_free_space(c); if (err) goto out_lpt; } if (!c->ro_mount) { /* * Set the "dirty" flag so that if we reboot uncleanly we * will notice this immediately on the next mount. 
*/ c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY); err = ubifs_write_master(c); if (err) goto out_lpt; } err = dbg_check_idx_size(c, c->bi.old_idx_sz); if (err) goto out_lpt; err = ubifs_replay_journal(c); if (err) goto out_journal; /* Calculate 'min_idx_lebs' after journal replay */ c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c); err = ubifs_mount_orphans(c, c->need_recovery, c->ro_mount); if (err) goto out_orphans; if (!c->ro_mount) { int lnum; err = check_free_space(c); if (err) goto out_orphans; /* Check for enough log space */ lnum = c->lhead_lnum + 1; if (lnum >= UBIFS_LOG_LNUM + c->log_lebs) lnum = UBIFS_LOG_LNUM; if (lnum == c->ltail_lnum) { err = ubifs_consolidate_log(c); if (err) goto out_orphans; } if (c->need_recovery) { err = ubifs_recover_size(c); if (err) goto out_orphans; err = ubifs_rcvry_gc_commit(c); if (err) goto out_orphans; } else { err = take_gc_lnum(c); if (err) goto out_orphans; /* * GC LEB may contain garbage if there was an unclean * reboot, and it should be un-mapped. */ err = ubifs_leb_unmap(c, c->gc_lnum); if (err) goto out_orphans; } err = dbg_check_lprops(c); if (err) goto out_orphans; } else if (c->need_recovery) { err = ubifs_recover_size(c); if (err) goto out_orphans; } else { /* * Even if we mount read-only, we have to set space in GC LEB * to proper value because this affects UBIFS free space * reporting. We do not want to have a situation when * re-mounting from R/O to R/W changes amount of free space. */ err = take_gc_lnum(c); if (err) goto out_orphans; } spin_lock(&ubifs_infos_lock); list_add_tail(&c->infos_list, &ubifs_infos); spin_unlock(&ubifs_infos_lock); if (c->need_recovery) { if (c->ro_mount) ubifs_msg("recovery deferred"); else { c->need_recovery = 0; ubifs_msg("recovery completed"); /* * GC LEB has to be empty and taken at this point. But * the journal head LEBs may also be accounted as * "empty taken" if they are empty. 
*/ ubifs_assert(c->lst.taken_empty_lebs > 0); } } else ubifs_assert(c->lst.taken_empty_lebs > 0); err = dbg_check_filesystem(c); if (err) goto out_infos; err = dbg_debugfs_init_fs(c); if (err) goto out_infos; c->mounting = 0; ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"%s", c->vi.ubi_num, c->vi.vol_id, c->vi.name, c->ro_mount ? ", R/O mode" : ""); x = (long long)c->main_lebs * c->leb_size; y = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes; ubifs_msg("LEB size: %d bytes (%d KiB), min./max. I/O unit sizes: %d bytes/%d bytes", c->leb_size, c->leb_size >> 10, c->min_io_size, c->max_write_size); ubifs_msg("FS size: %lld bytes (%lld MiB, %d LEBs), journal size %lld bytes (%lld MiB, %d LEBs)", x, x >> 20, c->main_lebs, y, y >> 20, c->log_lebs + c->max_bud_cnt); ubifs_msg("reserved for root: %llu bytes (%llu KiB)", c->report_rp_size, c->report_rp_size >> 10); ubifs_msg("media format: w%d/r%d (latest is w%d/r%d), UUID %pUB%s", c->fmt_version, c->ro_compat_version, UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION, c->uuid, c->big_lpt ? ", big LPT model" : ", small LPT model"); dbg_gen("default compressor: %s", ubifs_compr_name(c->default_compr)); dbg_gen("data journal heads: %d", c->jhead_cnt - NONDATA_JHEADS_CNT); dbg_gen("log LEBs: %d (%d - %d)", c->log_lebs, UBIFS_LOG_LNUM, c->log_last); dbg_gen("LPT area LEBs: %d (%d - %d)", c->lpt_lebs, c->lpt_first, c->lpt_last); dbg_gen("orphan area LEBs: %d (%d - %d)", c->orph_lebs, c->orph_first, c->orph_last); dbg_gen("main area LEBs: %d (%d - %d)", c->main_lebs, c->main_first, c->leb_cnt - 1); dbg_gen("index LEBs: %d", c->lst.idx_lebs); dbg_gen("total index bytes: %lld (%lld KiB, %lld MiB)", c->bi.old_idx_sz, c->bi.old_idx_sz >> 10, c->bi.old_idx_sz >> 20); dbg_gen("key hash type: %d", c->key_hash_type); dbg_gen("tree fanout: %d", c->fanout); dbg_gen("reserved GC LEB: %d", c->gc_lnum); dbg_gen("max. znode size %d", c->max_znode_sz); dbg_gen("max. 
index node size %d", c->max_idx_node_sz); dbg_gen("node sizes: data %zu, inode %zu, dentry %zu", UBIFS_DATA_NODE_SZ, UBIFS_INO_NODE_SZ, UBIFS_DENT_NODE_SZ); dbg_gen("node sizes: trun %zu, sb %zu, master %zu", UBIFS_TRUN_NODE_SZ, UBIFS_SB_NODE_SZ, UBIFS_MST_NODE_SZ); dbg_gen("node sizes: ref %zu, cmt. start %zu, orph %zu", UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ); dbg_gen("max. node sizes: data %zu, inode %zu dentry %zu, idx %d", UBIFS_MAX_DATA_NODE_SZ, UBIFS_MAX_INO_NODE_SZ, UBIFS_MAX_DENT_NODE_SZ, ubifs_idx_node_sz(c, c->fanout)); dbg_gen("dead watermark: %d", c->dead_wm); dbg_gen("dark watermark: %d", c->dark_wm); dbg_gen("LEB overhead: %d", c->leb_overhead); x = (long long)c->main_lebs * c->dark_wm; dbg_gen("max. dark space: %lld (%lld KiB, %lld MiB)", x, x >> 10, x >> 20); dbg_gen("maximum bud bytes: %lld (%lld KiB, %lld MiB)", c->max_bud_bytes, c->max_bud_bytes >> 10, c->max_bud_bytes >> 20); dbg_gen("BG commit bud bytes: %lld (%lld KiB, %lld MiB)", c->bg_bud_bytes, c->bg_bud_bytes >> 10, c->bg_bud_bytes >> 20); dbg_gen("current bud bytes %lld (%lld KiB, %lld MiB)", c->bud_bytes, c->bud_bytes >> 10, c->bud_bytes >> 20); dbg_gen("max. seq. number: %llu", c->max_sqnum); dbg_gen("commit number: %llu", c->cmt_no); return 0; out_infos: spin_lock(&ubifs_infos_lock); list_del(&c->infos_list); spin_unlock(&ubifs_infos_lock); out_orphans: free_orphans(c); out_journal: destroy_journal(c); out_lpt: ubifs_lpt_free(c, 0); out_master: kfree(c->mst_node); kfree(c->rcvrd_mst_node); if (c->bgt) kthread_stop(c->bgt); out_wbufs: free_wbufs(c); out_cbuf: kfree(c->cbuf); out_free: kfree(c->write_reserve_buf); kfree(c->bu.buf); vfree(c->ileb_buf); vfree(c->sbuf); kfree(c->bottom_up_buf); ubifs_debugging_exit(c); return err; } /** * ubifs_umount - un-mount UBIFS file-system. 
* @c: UBIFS file-system description object * * Note, this function is called to free allocated resourced when un-mounting, * as well as free resources when an error occurred while we were half way * through mounting (error path cleanup function). So it has to make sure the * resource was actually allocated before freeing it. */ static void ubifs_umount(struct ubifs_info *c) { dbg_gen("un-mounting UBI device %d, volume %d", c->vi.ubi_num, c->vi.vol_id); dbg_debugfs_exit_fs(c); spin_lock(&ubifs_infos_lock); list_del(&c->infos_list); spin_unlock(&ubifs_infos_lock); if (c->bgt) kthread_stop(c->bgt); destroy_journal(c); free_wbufs(c); free_orphans(c); ubifs_lpt_free(c, 0); kfree(c->cbuf); kfree(c->rcvrd_mst_node); kfree(c->mst_node); kfree(c->write_reserve_buf); kfree(c->bu.buf); vfree(c->ileb_buf); vfree(c->sbuf); kfree(c->bottom_up_buf); ubifs_debugging_exit(c); } /** * ubifs_remount_rw - re-mount in read-write mode. * @c: UBIFS file-system description object * * UBIFS avoids allocating many unnecessary resources when mounted in read-only * mode. This function allocates the needed resources and re-mounts UBIFS in * read-write mode. 
*/ static int ubifs_remount_rw(struct ubifs_info *c) { int err, lnum; if (c->rw_incompat) { ubifs_err("the file-system is not R/W-compatible"); ubifs_msg("on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d", c->fmt_version, c->ro_compat_version, UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION); return -EROFS; } mutex_lock(&c->umount_mutex); dbg_save_space_info(c); c->remounting_rw = 1; c->ro_mount = 0; if (c->space_fixup) { err = ubifs_fixup_free_space(c); if (err) return err; } err = check_free_space(c); if (err) goto out; if (c->old_leb_cnt != c->leb_cnt) { struct ubifs_sb_node *sup; sup = ubifs_read_sb_node(c); if (IS_ERR(sup)) { err = PTR_ERR(sup); goto out; } sup->leb_cnt = cpu_to_le32(c->leb_cnt); err = ubifs_write_sb_node(c, sup); kfree(sup); if (err) goto out; } if (c->need_recovery) { ubifs_msg("completing deferred recovery"); err = ubifs_write_rcvrd_mst_node(c); if (err) goto out; err = ubifs_recover_size(c); if (err) goto out; err = ubifs_clean_lebs(c, c->sbuf); if (err) goto out; err = ubifs_recover_inl_heads(c, c->sbuf); if (err) goto out; } else { /* A readonly mount is not allowed to have orphans */ ubifs_assert(c->tot_orphans == 0); err = ubifs_clear_orphans(c); if (err) goto out; } if (!(c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY))) { c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY); err = ubifs_write_master(c); if (err) goto out; } c->ileb_buf = vmalloc(c->leb_size); if (!c->ileb_buf) { err = -ENOMEM; goto out; } c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ, GFP_KERNEL); if (!c->write_reserve_buf) goto out; err = ubifs_lpt_init(c, 0, 1); if (err) goto out; /* Create background thread */ c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name); if (IS_ERR(c->bgt)) { err = PTR_ERR(c->bgt); c->bgt = NULL; ubifs_err("cannot spawn \"%s\", error %d", c->bgt_name, err); goto out; } wake_up_process(c->bgt); c->orph_buf = vmalloc(c->leb_size); if (!c->orph_buf) { err = -ENOMEM; goto out; } /* 
Check for enough log space */ lnum = c->lhead_lnum + 1; if (lnum >= UBIFS_LOG_LNUM + c->log_lebs) lnum = UBIFS_LOG_LNUM; if (lnum == c->ltail_lnum) { err = ubifs_consolidate_log(c); if (err) goto out; } if (c->need_recovery) err = ubifs_rcvry_gc_commit(c); else err = ubifs_leb_unmap(c, c->gc_lnum); if (err) goto out; dbg_gen("re-mounted read-write"); c->remounting_rw = 0; if (c->need_recovery) { c->need_recovery = 0; ubifs_msg("deferred recovery completed"); } else { /* * Do not run the debugging space check if the were doing * recovery, because when we saved the information we had the * file-system in a state where the TNC and lprops has been * modified in memory, but all the I/O operations (including a * commit) were deferred. So the file-system was in * "non-committed" state. Now the file-system is in committed * state, and of course the amount of free space will change * because, for example, the old index size was imprecise. */ err = dbg_check_space_info(c); } mutex_unlock(&c->umount_mutex); return err; out: c->ro_mount = 1; vfree(c->orph_buf); c->orph_buf = NULL; if (c->bgt) { kthread_stop(c->bgt); c->bgt = NULL; } free_wbufs(c); kfree(c->write_reserve_buf); c->write_reserve_buf = NULL; vfree(c->ileb_buf); c->ileb_buf = NULL; ubifs_lpt_free(c, 1); c->remounting_rw = 0; mutex_unlock(&c->umount_mutex); return err; } /** * ubifs_remount_ro - re-mount in read-only mode. * @c: UBIFS file-system description object * * We assume VFS has stopped writing. Possibly the background thread could be * running a commit, however kthread_stop will wait in that case. 
*/ static void ubifs_remount_ro(struct ubifs_info *c) { int i, err; ubifs_assert(!c->need_recovery); ubifs_assert(!c->ro_mount); mutex_lock(&c->umount_mutex); if (c->bgt) { kthread_stop(c->bgt); c->bgt = NULL; } dbg_save_space_info(c); for (i = 0; i < c->jhead_cnt; i++) ubifs_wbuf_sync(&c->jheads[i].wbuf); c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY); c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS); c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum); err = ubifs_write_master(c); if (err) ubifs_ro_mode(c, err); vfree(c->orph_buf); c->orph_buf = NULL; kfree(c->write_reserve_buf); c->write_reserve_buf = NULL; vfree(c->ileb_buf); c->ileb_buf = NULL; ubifs_lpt_free(c, 1); c->ro_mount = 1; err = dbg_check_space_info(c); if (err) ubifs_ro_mode(c, err); mutex_unlock(&c->umount_mutex); } static void ubifs_put_super(struct super_block *sb) { int i; struct ubifs_info *c = sb->s_fs_info; ubifs_msg("un-mount UBI device %d, volume %d", c->vi.ubi_num, c->vi.vol_id); /* * The following asserts are only valid if there has not been a failure * of the media. For example, there will be dirty inodes if we failed * to write them back because of I/O errors. */ if (!c->ro_error) { ubifs_assert(c->bi.idx_growth == 0); ubifs_assert(c->bi.dd_growth == 0); ubifs_assert(c->bi.data_growth == 0); } /* * The 'c->umount_lock' prevents races between UBIFS memory shrinker * and file system un-mount. Namely, it prevents the shrinker from * picking this superblock for shrinking - it will be just skipped if * the mutex is locked. */ mutex_lock(&c->umount_mutex); if (!c->ro_mount) { /* * First of all kill the background thread to make sure it does * not interfere with un-mounting and freeing resources. */ if (c->bgt) { kthread_stop(c->bgt); c->bgt = NULL; } /* * On fatal errors c->ro_error is set to 1, in which case we do * not write the master node. 
*/ if (!c->ro_error) { int err; /* Synchronize write-buffers */ for (i = 0; i < c->jhead_cnt; i++) ubifs_wbuf_sync(&c->jheads[i].wbuf); /* * We are being cleanly unmounted which means the * orphans were killed - indicate this in the master * node. Also save the reserved GC LEB number. */ c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY); c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS); c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum); err = ubifs_write_master(c); if (err) /* * Recovery will attempt to fix the master area * next mount, so we just print a message and * continue to unmount normally. */ ubifs_err("failed to write master node, error %d", err); } else { for (i = 0; i < c->jhead_cnt; i++) /* Make sure write-buffer timers are canceled */ hrtimer_cancel(&c->jheads[i].wbuf.timer); } } ubifs_umount(c); bdi_destroy(&c->bdi); ubi_close_volume(c->ubi); mutex_unlock(&c->umount_mutex); } static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) { int err; struct ubifs_info *c = sb->s_fs_info; dbg_gen("old flags %#lx, new flags %#x", sb->s_flags, *flags); err = ubifs_parse_options(c, data, 1); if (err) { ubifs_err("invalid or unknown remount parameter"); return err; } if (c->ro_mount && !(*flags & MS_RDONLY)) { if (c->ro_error) { ubifs_msg("cannot re-mount R/W due to prior errors"); return -EROFS; } if (c->ro_media) { ubifs_msg("cannot re-mount R/W - UBI volume is R/O"); return -EROFS; } err = ubifs_remount_rw(c); if (err) return err; } else if (!c->ro_mount && (*flags & MS_RDONLY)) { if (c->ro_error) { ubifs_msg("cannot re-mount R/O due to prior errors"); return -EROFS; } ubifs_remount_ro(c); } if (c->bulk_read == 1) bu_init(c); else { dbg_gen("disable bulk-read"); kfree(c->bu.buf); c->bu.buf = NULL; } ubifs_assert(c->lst.taken_empty_lebs > 0); return 0; } const struct super_operations ubifs_super_operations = { .alloc_inode = ubifs_alloc_inode, .destroy_inode = ubifs_destroy_inode, .put_super = ubifs_put_super, .write_inode = 
ubifs_write_inode, .evict_inode = ubifs_evict_inode, .statfs = ubifs_statfs, .dirty_inode = ubifs_dirty_inode, .remount_fs = ubifs_remount_fs, .show_options = ubifs_show_options, .sync_fs = ubifs_sync_fs, }; /** * open_ubi - parse UBI device name string and open the UBI device. * @name: UBI volume name * @mode: UBI volume open mode * * The primary method of mounting UBIFS is by specifying the UBI volume * character device node path. However, UBIFS may also be mounted withoug any * character device node using one of the following methods: * * o ubiX_Y - mount UBI device number X, volume Y; * o ubiY - mount UBI device number 0, volume Y; * o ubiX:NAME - mount UBI device X, volume with name NAME; * o ubi:NAME - mount UBI device 0, volume with name NAME. * * Alternative '!' separator may be used instead of ':' (because some shells * like busybox may interpret ':' as an NFS host name separator). This function * returns UBI volume description object in case of success and a negative * error code in case of failure. 
*/ static struct ubi_volume_desc *open_ubi(const char *name, int mode) { struct ubi_volume_desc *ubi; int dev, vol; char *endptr; /* First, try to open using the device node path method */ ubi = ubi_open_volume_path(name, mode); if (!IS_ERR(ubi)) return ubi; /* Try the "nodev" method */ if (name[0] != 'u' || name[1] != 'b' || name[2] != 'i') return ERR_PTR(-EINVAL); /* ubi:NAME method */ if ((name[3] == ':' || name[3] == '!') && name[4] != '\0') return ubi_open_volume_nm(0, name + 4, mode); if (!isdigit(name[3])) return ERR_PTR(-EINVAL); dev = simple_strtoul(name + 3, &endptr, 0); /* ubiY method */ if (*endptr == '\0') return ubi_open_volume(0, dev, mode); /* ubiX_Y method */ if (*endptr == '_' && isdigit(endptr[1])) { vol = simple_strtoul(endptr + 1, &endptr, 0); if (*endptr != '\0') return ERR_PTR(-EINVAL); return ubi_open_volume(dev, vol, mode); } /* ubiX:NAME method */ if ((*endptr == ':' || *endptr == '!') && endptr[1] != '\0') return ubi_open_volume_nm(dev, ++endptr, mode); return ERR_PTR(-EINVAL); } static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi) { struct ubifs_info *c; c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL); if (c) { spin_lock_init(&c->cnt_lock); spin_lock_init(&c->cs_lock); spin_lock_init(&c->buds_lock); spin_lock_init(&c->space_lock); spin_lock_init(&c->orphan_lock); init_rwsem(&c->commit_sem); mutex_init(&c->lp_mutex); mutex_init(&c->tnc_mutex); mutex_init(&c->log_mutex); mutex_init(&c->mst_mutex); mutex_init(&c->umount_mutex); mutex_init(&c->bu_mutex); mutex_init(&c->write_reserve_mutex); init_waitqueue_head(&c->cmt_wq); c->buds = RB_ROOT; c->old_idx = RB_ROOT; c->size_tree = RB_ROOT; c->orph_tree = RB_ROOT; INIT_LIST_HEAD(&c->infos_list); INIT_LIST_HEAD(&c->idx_gc); INIT_LIST_HEAD(&c->replay_list); INIT_LIST_HEAD(&c->replay_buds); INIT_LIST_HEAD(&c->uncat_list); INIT_LIST_HEAD(&c->empty_list); INIT_LIST_HEAD(&c->freeable_list); INIT_LIST_HEAD(&c->frdi_idx_list); INIT_LIST_HEAD(&c->unclean_leb_list); 
INIT_LIST_HEAD(&c->old_buds); INIT_LIST_HEAD(&c->orph_list); INIT_LIST_HEAD(&c->orph_new); c->no_chk_data_crc = 1; c->highest_inum = UBIFS_FIRST_INO; c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM; ubi_get_volume_info(ubi, &c->vi); ubi_get_device_info(c->vi.ubi_num, &c->di); } return c; } static int ubifs_fill_super(struct super_block *sb, void *data, int silent) { struct ubifs_info *c = sb->s_fs_info; struct inode *root; int err; c->vfs_sb = sb; /* Re-open the UBI device in read-write mode */ c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE); if (IS_ERR(c->ubi)) { err = PTR_ERR(c->ubi); goto out; } /* * UBIFS provides 'backing_dev_info' in order to disable read-ahead. For * UBIFS, I/O is not deferred, it is done immediately in readpage, * which means the user would have to wait not just for their own I/O * but the read-ahead I/O as well i.e. completely pointless. * * Read-ahead will be disabled because @c->bdi.ra_pages is 0. */ c->bdi.name = "ubifs", c->bdi.capabilities = BDI_CAP_MAP_COPY; err = bdi_init(&c->bdi); if (err) goto out_close; err = bdi_register(&c->bdi, NULL, "ubifs_%d_%d", c->vi.ubi_num, c->vi.vol_id); if (err) goto out_bdi; err = ubifs_parse_options(c, data, 0); if (err) goto out_bdi; sb->s_bdi = &c->bdi; sb->s_fs_info = c; sb->s_magic = UBIFS_SUPER_MAGIC; sb->s_blocksize = UBIFS_BLOCK_SIZE; sb->s_blocksize_bits = UBIFS_BLOCK_SHIFT; sb->s_maxbytes = c->max_inode_sz = key_max_inode_size(c); if (c->max_inode_sz > MAX_LFS_FILESIZE) sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE; sb->s_op = &ubifs_super_operations; mutex_lock(&c->umount_mutex); err = mount_ubifs(c); if (err) { ubifs_assert(err < 0); goto out_unlock; } /* Read the root inode */ root = ubifs_iget(sb, UBIFS_ROOT_INO); if (IS_ERR(root)) { err = PTR_ERR(root); goto out_umount; } sb->s_root = d_make_root(root); if (!sb->s_root) goto out_umount; mutex_unlock(&c->umount_mutex); return 0; out_umount: ubifs_umount(c); out_unlock: mutex_unlock(&c->umount_mutex); out_bdi: 
bdi_destroy(&c->bdi); out_close: ubi_close_volume(c->ubi); out: return err; } static int sb_test(struct super_block *sb, void *data) { struct ubifs_info *c1 = data; struct ubifs_info *c = sb->s_fs_info; return c->vi.cdev == c1->vi.cdev; } static int sb_set(struct super_block *sb, void *data) { sb->s_fs_info = data; return set_anon_super(sb, NULL); } static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags, const char *name, void *data) { struct ubi_volume_desc *ubi; struct ubifs_info *c; struct super_block *sb; int err; dbg_gen("name %s, flags %#x", name, flags); /* * Get UBI device number and volume ID. Mount it read-only so far * because this might be a new mount point, and UBI allows only one * read-write user at a time. */ ubi = open_ubi(name, UBI_READONLY); if (IS_ERR(ubi)) { ubifs_err("cannot open \"%s\", error %d", name, (int)PTR_ERR(ubi)); return ERR_CAST(ubi); } c = alloc_ubifs_info(ubi); if (!c) { err = -ENOMEM; goto out_close; } dbg_gen("opened ubi%d_%d", c->vi.ubi_num, c->vi.vol_id); sb = sget(fs_type, sb_test, sb_set, flags, c); if (IS_ERR(sb)) { err = PTR_ERR(sb); kfree(c); goto out_close; } if (sb->s_root) { struct ubifs_info *c1 = sb->s_fs_info; kfree(c); /* A new mount point for already mounted UBIFS */ dbg_gen("this ubi volume is already mounted"); if (!!(flags & MS_RDONLY) != c1->ro_mount) { err = -EBUSY; goto out_deact; } } else { err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 
1 : 0); if (err) goto out_deact; /* We do not support atime */ sb->s_flags |= MS_ACTIVE | MS_NOATIME; } /* 'fill_super()' opens ubi again so we must close it here */ ubi_close_volume(ubi); return dget(sb->s_root); out_deact: deactivate_locked_super(sb); out_close: ubi_close_volume(ubi); return ERR_PTR(err); } static void kill_ubifs_super(struct super_block *s) { struct ubifs_info *c = s->s_fs_info; kill_anon_super(s); kfree(c); } static struct file_system_type ubifs_fs_type = { .name = "ubifs", .owner = THIS_MODULE, .mount = ubifs_mount, .kill_sb = kill_ubifs_super, }; MODULE_ALIAS_FS("ubifs"); /* * Inode slab cache constructor. */ static void inode_slab_ctor(void *obj) { struct ubifs_inode *ui = obj; inode_init_once(&ui->vfs_inode); } static int __init ubifs_init(void) { int err; BUILD_BUG_ON(sizeof(struct ubifs_ch) != 24); /* Make sure node sizes are 8-byte aligned */ BUILD_BUG_ON(UBIFS_CH_SZ & 7); BUILD_BUG_ON(UBIFS_INO_NODE_SZ & 7); BUILD_BUG_ON(UBIFS_DENT_NODE_SZ & 7); BUILD_BUG_ON(UBIFS_XENT_NODE_SZ & 7); BUILD_BUG_ON(UBIFS_DATA_NODE_SZ & 7); BUILD_BUG_ON(UBIFS_TRUN_NODE_SZ & 7); BUILD_BUG_ON(UBIFS_SB_NODE_SZ & 7); BUILD_BUG_ON(UBIFS_MST_NODE_SZ & 7); BUILD_BUG_ON(UBIFS_REF_NODE_SZ & 7); BUILD_BUG_ON(UBIFS_CS_NODE_SZ & 7); BUILD_BUG_ON(UBIFS_ORPH_NODE_SZ & 7); BUILD_BUG_ON(UBIFS_MAX_DENT_NODE_SZ & 7); BUILD_BUG_ON(UBIFS_MAX_XENT_NODE_SZ & 7); BUILD_BUG_ON(UBIFS_MAX_DATA_NODE_SZ & 7); BUILD_BUG_ON(UBIFS_MAX_INO_NODE_SZ & 7); BUILD_BUG_ON(UBIFS_MAX_NODE_SZ & 7); BUILD_BUG_ON(MIN_WRITE_SZ & 7); /* Check min. 
node size */ BUILD_BUG_ON(UBIFS_INO_NODE_SZ < MIN_WRITE_SZ); BUILD_BUG_ON(UBIFS_DENT_NODE_SZ < MIN_WRITE_SZ); BUILD_BUG_ON(UBIFS_XENT_NODE_SZ < MIN_WRITE_SZ); BUILD_BUG_ON(UBIFS_TRUN_NODE_SZ < MIN_WRITE_SZ); BUILD_BUG_ON(UBIFS_MAX_DENT_NODE_SZ > UBIFS_MAX_NODE_SZ); BUILD_BUG_ON(UBIFS_MAX_XENT_NODE_SZ > UBIFS_MAX_NODE_SZ); BUILD_BUG_ON(UBIFS_MAX_DATA_NODE_SZ > UBIFS_MAX_NODE_SZ); BUILD_BUG_ON(UBIFS_MAX_INO_NODE_SZ > UBIFS_MAX_NODE_SZ); /* Defined node sizes */ BUILD_BUG_ON(UBIFS_SB_NODE_SZ != 4096); BUILD_BUG_ON(UBIFS_MST_NODE_SZ != 512); BUILD_BUG_ON(UBIFS_INO_NODE_SZ != 160); BUILD_BUG_ON(UBIFS_REF_NODE_SZ != 64); /* * We use 2 bit wide bit-fields to store compression type, which should * be amended if more compressors are added. The bit-fields are: * @compr_type in 'struct ubifs_inode', @default_compr in * 'struct ubifs_info' and @compr_type in 'struct ubifs_mount_opts'. */ BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4); /* * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2. 
*/ if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) { ubifs_err("VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes", (unsigned int)PAGE_CACHE_SIZE); return -EINVAL; } ubifs_inode_slab = kmem_cache_create("ubifs_inode_slab", sizeof(struct ubifs_inode), 0, SLAB_MEM_SPREAD | SLAB_RECLAIM_ACCOUNT, &inode_slab_ctor); if (!ubifs_inode_slab) return -ENOMEM; register_shrinker(&ubifs_shrinker_info); err = ubifs_compressors_init(); if (err) goto out_shrinker; err = dbg_debugfs_init(); if (err) goto out_compr; err = register_filesystem(&ubifs_fs_type); if (err) { ubifs_err("cannot register file system, error %d", err); goto out_dbg; } return 0; out_dbg: dbg_debugfs_exit(); out_compr: ubifs_compressors_exit(); out_shrinker: unregister_shrinker(&ubifs_shrinker_info); kmem_cache_destroy(ubifs_inode_slab); return err; } /* late_initcall to let compressors initialize first */ late_initcall(ubifs_init); static void __exit ubifs_exit(void) { ubifs_assert(list_empty(&ubifs_infos)); ubifs_assert(atomic_long_read(&ubifs_clean_zn_cnt) == 0); dbg_debugfs_exit(); ubifs_compressors_exit(); unregister_shrinker(&ubifs_shrinker_info); /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(ubifs_inode_slab); unregister_filesystem(&ubifs_fs_type); } module_exit(ubifs_exit); MODULE_LICENSE("GPL"); MODULE_VERSION(__stringify(UBIFS_VERSION)); MODULE_AUTHOR("Artem Bityutskiy, Adrian Hunter"); MODULE_DESCRIPTION("UBIFS - UBI File System");
gpl-2.0
googyanas/GoogyMax-G4
net/netfilter/interceptor_v33/debug_strf.c
925
3835
/** @copyright Copyright (c) 2011 - 2013, INSIDE Secure Oy. All rights reserved. */

#include "implementation_defs.h"

/* Forward declaration: format_ipaddress() is defined at the bottom of this
   file but called by debug_strbuf_ipaddress() above it.  Without this
   prototype the call relied on an implicit declaration.  (It may also be
   declared in implementation_defs.h -- harmless either way.) */
int format_ipaddress(char *buf, int len, const unsigned char address[16]);

/*
 * Render up to 'len' bytes of 'data' as a hex string into the debug
 * string buffer 'buf'.  If the whole dump does not fit, the last four
 * characters of the output are replaced with "..." to mark truncation.
 *
 * Returns a pointer to the rendered string, owned by the strbuf.
 */
const char *
debug_str_hexbuf(
        DEBUG_STRBUF_TYPE buf,
        const void *data,
        int len)
{
    int p_len;
    char *p;
    const unsigned char *u_data = data;
    int offset = 0;
    int i;

    DEBUG_STRBUF_BUFFER_GET(buf, &p, &p_len);

    /* Each byte needs 2 hex digits plus a NUL, hence the "+ 3" guard. */
    for (i = 0; i < len && offset + 3 < p_len; i++)
    {
        /* BUGFIX: the size argument must be the remaining *buffer* space
           (p_len - offset), not the remaining input length (len - offset)
           as the original code passed. */
        offset += snprintf(p + offset, p_len - offset, "%02x", u_data[i]);
    }

    if (i < len && offset >= 4)
    {
        /* Output was truncated: back up and append the "..." marker. */
        offset -= 4;
        offset += snprintf(p + offset, 4, "...");
    }

    DEBUG_STRBUF_BUFFER_COMMIT(buf, offset);

    return p;
}

/*
 * Render an SSH-style object into a freshly allocated 32-byte strbuf
 * chunk using the caller-supplied renderer callback.
 *
 * Returns the rendered string, or DEBUG_STRBUF_ERROR if allocation of
 * the 32-byte chunk failed.
 */
const char *
debug_strbuf_sshrender(
        DEBUG_STRBUF_TYPE dsb,
        int (*ssh_renderer)(unsigned char *, int, int, void *),
        void *render_datum)
{
    char *p = DEBUG_STRBUF_ALLOC(dsb, 32);

    if (p != NULL)
    {
        ssh_renderer((unsigned char *) p, 32, -1, render_datum);
        return p;
    }

    return DEBUG_STRBUF_ERROR;
}

/*
 * Find the longest run of zero 16-bit groups in a 16-byte IPv6 address,
 * for "::" compression.  *start_p receives the byte offset of the run
 * and *length_p its length in bytes (both 0 if the address has no zero
 * group).
 */
static void
longest_zero_pairs(
        const unsigned char address[16],
        int *start_p,
        int *length_p)
{
    int longest_length = 0;
    int longest_start = 0;
    int start = -1;      /* byte offset of the current zero run, -1 if none */
    int length = 0;      /* length in bytes of the current zero run */
    int i;

    for (i = 0; i < 16; i += 2)
    {
        const bool is_zero = (address[i] == 0 && address[i + 1] == 0);

        if (is_zero)
        {
            if (start < 0)
            {
                start = i;
            }
            length += 2;
        }

        if (start >= 0)
        {
            if (length > longest_length)
            {
                longest_length = length;
                longest_start = start;
            }
        }

        if (is_zero == false)
        {
            start = -1;
            length = 0;
        }
    }

    *start_p = longest_start;
    *length_p = longest_length;
}

/*
 * Format a 16-byte IPv6 address into 'p' (capacity 'p_len'), compressing
 * the zero run described by zero_start/zero_length as "::".
 *
 * Returns the number of characters written (excluding the NUL).
 */
static int
format_ip6_address(
        const unsigned char address[16],
        int zero_start,
        int zero_length,
        char *p,
        int p_len)
{
    int address_index;
    int i = 0;

    for (address_index = 0; address_index <= 14; )
    {
        if (address_index != 0)
        {
            i += snprintf(p + i, p_len - i, ":");
        }

        if (address_index == zero_start && zero_length != 0)
        {
            /* Emit the "::" compression for the elided zero groups. */
            if (address_index == 0)
            {
                i += snprintf(p + i, p_len - i, ":");
            }
            address_index += zero_length;
            if (address_index == 16)
            {
                i += snprintf(p + i, p_len - i, ":");
            }
        }
        else
        {
            const int a1 = address[address_index];
            const int a2 = address[address_index + 1];

            /* BUGFIX: both snprintf calls below previously passed
               p_len - 1 as the size argument.  Once i > 1 that allows
               writing past the end of the buffer; the correct bound is
               the remaining space, p_len - i. */
            if (a1 != 0)
            {
                i += snprintf(p + i, p_len - i, "%x%.2x", a1, a2);
            }
            else
            {
                i += snprintf(p + i, p_len - i, "%x", a2);
            }
            address_index += 2;
        }
    }

    return i;
}

/*
 * Render a 16-byte IPv6 (or IPv4-mapped) address into the debug string
 * buffer and return the resulting string.
 */
const char *
debug_strbuf_ipaddress(
        DEBUG_STRBUF_TYPE buf,
        const unsigned char address[16])
{
    int used;
    char *p;
    int p_len;

    DEBUG_STRBUF_BUFFER_GET(buf, &p, &p_len);

    used = format_ipaddress(p, p_len, address);

    /* +1 accounts for the terminating NUL written by snprintf. */
    DEBUG_STRBUF_BUFFER_COMMIT(buf, used + 1);

    return p;
}

/*
 * Format a 16-byte address into 'buf' (capacity 'len').  Addresses whose
 * first 12 bytes are zero are rendered as IPv4-mapped dotted quads
 * ("::a.b.c.d"); everything else is rendered as compressed IPv6 text.
 *
 * Returns the number of characters written (excluding the NUL).
 */
int
format_ipaddress(
        char *buf,
        int len,
        const unsigned char address[16])
{
    int zero_start;
    int zero_length;
    int used;

    longest_zero_pairs(address, &zero_start, &zero_length);

    if (zero_start == 0 && zero_length == 12)
    {
        const int a1 = address[12 + 0];
        const int a2 = address[12 + 1];
        const int a3 = address[12 + 2];
        const int a4 = address[12 + 3];

        used = snprintf(buf, len, "::%d.%d.%d.%d", a1, a2, a3, a4);
    }
    else
    {
        used = format_ip6_address(address, zero_start, zero_length,
                                  buf, len);
    }

    return used;
}
gpl-2.0
abhisit/TechNexion-linux
sound/oss/sb_midi.c
1181
4350
/*
 * sound/oss/sb_midi.c
 *
 * The low level driver for the Sound Blaster DS chips.
 *
 *
 * Copyright (C) by Hannu Savolainen 1993-1997
 *
 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
 * Version 2 (June 1991). See the "COPYING" file distributed with this software
 * for more info.
 */

#include <linux/spinlock.h>
#include <linux/slab.h>

#include "sound_config.h"

#include "sb.h"
#undef SB_TEST_IRQ

/*
 * The DSP channel can be used either for input or output. Variable
 * 'sb_irq_mode' will be set when the program calls read or write first time
 * after open. Current version doesn't support mode changes without closing
 * and reopening the device. Support for this feature may be implemented in a
 * future version of this driver.
 */

/*
 * Open the MIDI device: put the DSP into UART MIDI mode and, for read
 * opens, hook up the input callback.  Returns 0 on success, -ENXIO if
 * there is no device, -EBUSY if already open, -EIO if the DSP refuses
 * the UART-mode command.
 */
static int sb_midi_open(int dev, int mode,
	     void            (*input) (int dev, unsigned char data),
	     void            (*output) (int dev)
)
{
	sb_devc *devc = midi_devs[dev]->devc;
	unsigned long flags;

	if (devc == NULL)
		return -ENXIO;

	spin_lock_irqsave(&devc->lock, flags);
	if (devc->opened)
	{
		spin_unlock_irqrestore(&devc->lock, flags);
		return -EBUSY;
	}
	devc->opened = 1;
	spin_unlock_irqrestore(&devc->lock, flags);

	devc->irq_mode = IMODE_MIDI;
	devc->midi_broken = 0;

	sb_dsp_reset(devc);

	if (!sb_dsp_command(devc, 0x35))	/* Start MIDI UART mode */
	{
		devc->opened = 0;
		return -EIO;
	}
	devc->intr_active = 1;

	if (mode & OPEN_READ)
	{
		devc->input_opened = 1;
		devc->midi_input_intr = input;
	}
	return 0;
}

/*
 * Close the MIDI device: reset the DSP (leaves UART mode) and clear the
 * open/interrupt state under the device lock.
 */
static void sb_midi_close(int dev)
{
	sb_devc *devc = midi_devs[dev]->devc;
	unsigned long flags;

	if (devc == NULL)
		return;

	spin_lock_irqsave(&devc->lock, flags);
	sb_dsp_reset(devc);
	devc->intr_active = 0;
	devc->input_opened = 0;
	devc->opened = 0;
	spin_unlock_irqrestore(&devc->lock, flags);
}

/*
 * Transmit one MIDI byte.  Always returns 1 ("byte consumed"), even on
 * failure: once a command is rejected the device is flagged midi_broken
 * and further bytes are silently dropped rather than retried.
 */
static int sb_midi_out(int dev, unsigned char midi_byte)
{
	sb_devc *devc = midi_devs[dev]->devc;

	if (devc == NULL)
		return 1;

	if (devc->midi_broken)
		return 1;

	if (!sb_dsp_command(devc, midi_byte))
	{
		devc->midi_broken = 1;
		return 1;
	}
	return 1;
}

/* Start of a read session: nothing to do in UART mode. */
static int sb_midi_start_read(int dev)
{
	return 0;
}

/* End of a read session: reset the DSP and stop interrupt delivery. */
static int sb_midi_end_read(int dev)
{
	sb_devc *devc = midi_devs[dev]->devc;

	if (devc == NULL)
		return -ENXIO;

	sb_dsp_reset(devc);
	devc->intr_active = 0;
	return 0;
}

/* No device-specific ioctls are supported. */
static int sb_midi_ioctl(int dev, unsigned cmd, void __user *arg)
{
	return -EINVAL;
}

/*
 * MIDI receive interrupt: read one byte from the DSP and pass it to the
 * registered input callback, if any.  Called from the SB IRQ path.
 */
void sb_midi_interrupt(sb_devc * devc)
{
	unsigned long flags;
	unsigned char data;

	if (devc == NULL)
		return;

	spin_lock_irqsave(&devc->lock, flags);

	data = inb(DSP_READ);
	if (devc->input_opened)
		devc->midi_input_intr(devc->my_mididev, data);

	spin_unlock_irqrestore(&devc->lock, flags);
}

#define MIDI_SYNTH_NAME	"Sound Blaster Midi"
#define MIDI_SYNTH_CAPS	0
#include "midi_synth.h"

static struct midi_operations sb_midi_operations =
{
	.owner		= THIS_MODULE,
	.info		= {"Sound Blaster", 0, 0, SNDCARD_SB},
	.converter	= &std_midi_synth,
	.in_info	= {0},
	.open		= sb_midi_open,
	.close		= sb_midi_close,
	.ioctl		= sb_midi_ioctl,
	.outputc	= sb_midi_out,
	.start_read	= sb_midi_start_read,
	.end_read	= sb_midi_end_read,
};

/*
 * Register the MIDI device for a Sound Blaster: allocate a mididev slot
 * and install per-device copies of the operations and synth converter
 * templates.  SB 1.x chips (model < 2) have no MIDI support.
 *
 * Uses kmemdup() instead of the original kmalloc()+memcpy() pairs --
 * identical behavior, one call instead of two.
 */
void sb_dsp_midi_init(sb_devc * devc, struct module *owner)
{
	int dev;

	if (devc->model < 2)	/* No MIDI support for SB 1.x */
		return;

	dev = sound_alloc_mididev();

	if (dev == -1)
	{
		printk(KERN_ERR "sb_midi: too many MIDI devices detected\n");
		return;
	}
	std_midi_synth.midi_dev = devc->my_mididev = dev;
	midi_devs[dev] = kmemdup(&sb_midi_operations,
				 sizeof(struct midi_operations), GFP_KERNEL);
	if (midi_devs[dev] == NULL)
	{
		printk(KERN_WARNING "Sound Blaster: failed to allocate MIDI memory.\n");
		sound_unload_mididev(dev);
		return;
	}
	if (owner)
		midi_devs[dev]->owner = owner;

	midi_devs[dev]->devc = devc;

	midi_devs[dev]->converter = kmemdup(&std_midi_synth,
					    sizeof(struct synth_operations),
					    GFP_KERNEL);
	if (midi_devs[dev]->converter == NULL)
	{
		printk(KERN_WARNING "Sound Blaster: failed to allocate MIDI memory.\n");
		kfree(midi_devs[dev]);
		sound_unload_mididev(dev);
		return;
	}
	midi_devs[dev]->converter->id = "SBMIDI";
	sequencer_init();
}
gpl-2.0
VegaDevTeam/android_kernel_n9009
arch/arm/mach-msm/hotplug.c
1693
4797
/*
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/ratelimit.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <asm/vfp.h>

#include <mach/jtag.h>
#include <mach/msm_rtb.h>

#include "pm.h"
#include "spm.h"

/* Set by the boot path when a CPU is released from the holding pen;
 * platform_do_lowpower() polls it to detect a genuine wakeup. */
extern volatile int pen_release;

/* CPUs currently going through CPU_DYING; consumed by platform_cpu_kill(). */
static cpumask_t cpu_dying_mask;

/* Per-CPU flag distinguishing a CPU's first (cold) boot from warm boots. */
static DEFINE_PER_CPU(unsigned int, warm_boot_flag);

static inline void cpu_enter_lowpower(void)
{
	/* Just flush the cache. Changing the coherency is not yet
	 * available on msm. */
	flush_cache_all();
}

static inline void cpu_leave_lowpower(void)
{
	/* Nothing to undo: cpu_enter_lowpower() only flushed the cache. */
}

/*
 * Park the dying CPU in low power until it is properly woken via
 * pen_release.  Counts spurious (non-pen_release) wakeups in *spurious.
 */
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	/* Just enter wfi for now. TODO: Properly shut off the cpu. */
	for (;;) {

		msm_pm_cpu_enter_lowpower(cpu);
		if (pen_release == cpu_logical_map(cpu)) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * The trouble is, letting people know about this is not really
		 * possible, since we are currently running incoherently, and
		 * therefore cannot safely call printk() or anything else
		 */
		(*spurious)++;
	}
}

/*
 * Confirm the target CPU has shut down.  Returns 1 on success, 0 on
 * failure (inverted from msm_pm_wait_cpu_shutdown()'s 0-on-success).
 */
int platform_cpu_kill(unsigned int cpu)
{
	int ret = 0;

	if (cpumask_test_and_clear_cpu(cpu, &cpu_dying_mask))
		ret = msm_pm_wait_cpu_shutdown(cpu);

	return ret ? 0 : 1;
}

/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
void platform_cpu_die(unsigned int cpu)
{
	int spurious = 0;

	/* Must run on the CPU being shut down; anything else is fatal. */
	if (unlikely(cpu != smp_processor_id())) {
		pr_crit("%s: running on %u, should be %u\n",
			__func__, smp_processor_id(), cpu);
		BUG();
	}
	/*
	 * we're ready for shutdown now, so do it
	 */
	cpu_enter_lowpower();
	platform_do_lowpower(cpu, &spurious);

	pr_debug("CPU%u: %s: normal wakeup\n", cpu, __func__);
	cpu_leave_lowpower();

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}

int platform_cpu_disable(unsigned int cpu)
{
	/*
	 * we don't allow CPU 0 to be shutdown (it is still too special
	 * e.g. clock tick interrupts)
	 */
	return cpu == 0 ? -EPERM : 0;
}

/* Encoding of the RTB hotplug log word: bits [3:0] are the CPU number,
 * bits [19:4] are the online-CPU bitmask. */
#define CPU_SHIFT	0
#define CPU_MASK	0xF
#define CPU_OF(n)	(((n) & CPU_MASK) << CPU_SHIFT)
#define CPUSET_SHIFT	4
#define CPUSET_MASK	0xFFFF
#define CPUSET_OF(n)	(((n) & CPUSET_MASK) << CPUSET_SHIFT)

/*
 * Hotplug notifier that records CPU online/offline transitions in the
 * uncached RTB log, and marks dying CPUs in cpu_dying_mask for
 * platform_cpu_kill().
 */
static int hotplug_rtb_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	/*
	 * Bits [19:4] of the data are the online mask, lower 4 bits are the
	 * cpu number that is being changed. Additionally, changes to the
	 * online_mask that will be done by the current hotplug will be made
	 * even though they aren't necessarily in the online mask yet.
	 *
	 * XXX: This design is limited to supporting at most 16 cpus
	 */
	int this_cpumask = CPUSET_OF(1 << (int)hcpu);
	int cpumask = CPUSET_OF(cpumask_bits(cpu_online_mask)[0]);
	int cpudata = CPU_OF((int)hcpu) | cpumask;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		uncached_logk(LOGK_HOTPLUG, (void *)(cpudata | this_cpumask));
		break;
	case CPU_DYING:
		cpumask_set_cpu((unsigned long)hcpu, &cpu_dying_mask);
		uncached_logk(LOGK_HOTPLUG, (void *)(cpudata & ~this_cpumask));
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block hotplug_rtb_notifier = {
	.notifier_call = hotplug_rtb_callback,
};

/*
 * Veto any attempt to offline CPU0 (it handles clock tick interrupts).
 * Runs at INT_MAX priority so the veto happens before other notifiers.
 */
static int hotplug_cpu_check_callback(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	int cpu = (int)hcpu;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_DOWN_PREPARE:
		if (cpu == 0) {
			pr_err_ratelimited("CPU0 hotplug is not supported\n");
			return NOTIFY_BAD;
		}
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block hotplug_cpu_check_notifier = {
	.notifier_call = hotplug_cpu_check_callback,
	.priority = INT_MAX,
};

/*
 * Per-CPU bring-up hook.  On the first (cold) boot of a CPU only the
 * warm-boot flag is set; on warm boots the JTAG, VFP, and SPM state is
 * restored.  Returns the result of the SPM mode set (0 on cold boot).
 */
int msm_platform_secondary_init(unsigned int cpu)
{
	int ret;
	unsigned int *warm_boot = &__get_cpu_var(warm_boot_flag);

	if (!(*warm_boot)) {
		*warm_boot = 1;
		return 0;
	}
	msm_jtag_restore_state();
#if defined(CONFIG_VFP) && defined (CONFIG_CPU_PM)
	vfp_pm_resume();
#endif
	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);

	return ret;
}

/* Register both hotplug notifiers at early_initcall time. */
static int __init init_hotplug(void)
{
	int rc;

	rc = register_hotcpu_notifier(&hotplug_rtb_notifier);
	if (rc)
		return rc;

	return register_hotcpu_notifier(&hotplug_cpu_check_notifier);
}
early_initcall(init_hotplug);
gpl-2.0
Howpathetic/3.4_Shooter_U_kernel
mm/ksm.c
2717
56027
/* * Memory merging support. * * This code enables dynamic sharing of identical pages found in different * memory areas, even if they are not shared by fork() * * Copyright (C) 2008-2009 Red Hat, Inc. * Authors: * Izik Eidus * Andrea Arcangeli * Chris Wright * Hugh Dickins * * This work is licensed under the terms of the GNU GPL, version 2. */ #include <linux/errno.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/mman.h> #include <linux/sched.h> #include <linux/rwsem.h> #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/spinlock.h> #include <linux/jhash.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/rbtree.h> #include <linux/memory.h> #include <linux/mmu_notifier.h> #include <linux/swap.h> #include <linux/ksm.h> #include <linux/hash.h> #include <linux/freezer.h> #include <linux/oom.h> #include <asm/tlbflush.h> #include "internal.h" /* * A few notes about the KSM scanning process, * to make it easier to understand the data structures below: * * In order to reduce excessive scanning, KSM sorts the memory pages by their * contents into a data structure that holds pointers to the pages' locations. * * Since the contents of the pages may change at any moment, KSM cannot just * insert the pages into a normal sorted tree and expect it to find anything. * Therefore KSM uses two data structures - the stable and the unstable tree. * * The stable tree holds pointers to all the merged pages (ksm pages), sorted * by their contents. Because each such page is write-protected, searching on * this tree is fully assured to be working (except when pages are unmapped), * and therefore this tree is called the stable tree. * * In addition to the stable tree, KSM uses a second data structure called the * unstable tree: this tree holds pointers to pages which have been found to * be "unchanged for a period of time". 
The unstable tree sorts these pages * by their contents, but since they are not write-protected, KSM cannot rely * upon the unstable tree to work correctly - the unstable tree is liable to * be corrupted as its contents are modified, and so it is called unstable. * * KSM solves this problem by several techniques: * * 1) The unstable tree is flushed every time KSM completes scanning all * memory areas, and then the tree is rebuilt again from the beginning. * 2) KSM will only insert into the unstable tree, pages whose hash value * has not changed since the previous scan of all memory areas. * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the * colors of the nodes and not on their contents, assuring that even when * the tree gets "corrupted" it won't get out of balance, so scanning time * remains the same (also, searching and inserting nodes in an rbtree uses * the same algorithm, so we have no overhead when we flush and rebuild). * 4) KSM never flushes the stable tree, which means that even if it were to * take 10 attempts to find a page in the unstable tree, once it is found, * it is secured in the stable tree. (When we scan a new page, we first * compare it against the stable tree, and then against the unstable tree.) 
*/ /** * struct mm_slot - ksm information per mm that is being scanned * @link: link to the mm_slots hash list * @mm_list: link into the mm_slots list, rooted in ksm_mm_head * @rmap_list: head for this mm_slot's singly-linked list of rmap_items * @mm: the mm that this information is valid for */ struct mm_slot { struct hlist_node link; struct list_head mm_list; struct rmap_item *rmap_list; struct mm_struct *mm; }; /** * struct ksm_scan - cursor for scanning * @mm_slot: the current mm_slot we are scanning * @address: the next address inside that to be scanned * @rmap_list: link to the next rmap to be scanned in the rmap_list * @seqnr: count of completed full scans (needed when removing unstable node) * * There is only the one ksm_scan instance of this cursor structure. */ struct ksm_scan { struct mm_slot *mm_slot; unsigned long address; struct rmap_item **rmap_list; unsigned long seqnr; }; /** * struct stable_node - node of the stable rbtree * @node: rb node of this ksm page in the stable tree * @hlist: hlist head of rmap_items using this ksm page * @kpfn: page frame number of this ksm page */ struct stable_node { struct rb_node node; struct hlist_head hlist; unsigned long kpfn; }; /** * struct rmap_item - reverse mapping item for virtual addresses * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree * @mm: the memory structure this rmap_item is pointing into * @address: the virtual address this rmap_item tracks (+ flags in low bits) * @oldchecksum: previous checksum of the page at that virtual address * @node: rb node of this rmap_item in the unstable tree * @head: pointer to stable_node heading this list in the stable tree * @hlist: link into hlist of rmap_items hanging off that stable_node */ struct rmap_item { struct rmap_item *rmap_list; struct anon_vma *anon_vma; /* when stable */ struct mm_struct *mm; unsigned long address; /* + low bits used for flags below */ unsigned int 
oldchecksum; /* when unstable */ union { struct rb_node node; /* when node of unstable tree */ struct { /* when listed from stable tree */ struct stable_node *head; struct hlist_node hlist; }; }; }; #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ #define STABLE_FLAG 0x200 /* is listed from the stable tree */ /* The stable and unstable tree heads */ static struct rb_root root_stable_tree = RB_ROOT; static struct rb_root root_unstable_tree = RB_ROOT; #define MM_SLOTS_HASH_SHIFT 10 #define MM_SLOTS_HASH_HEADS (1 << MM_SLOTS_HASH_SHIFT) static struct hlist_head mm_slots_hash[MM_SLOTS_HASH_HEADS]; static struct mm_slot ksm_mm_head = { .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list), }; static struct ksm_scan ksm_scan = { .mm_slot = &ksm_mm_head, }; static struct kmem_cache *rmap_item_cache; static struct kmem_cache *stable_node_cache; static struct kmem_cache *mm_slot_cache; /* The number of nodes in the stable tree */ static unsigned long ksm_pages_shared; /* The number of page slots additionally sharing those nodes */ static unsigned long ksm_pages_sharing; /* The number of nodes in the unstable tree */ static unsigned long ksm_pages_unshared; /* The number of rmap_items in use: to calculate pages_volatile */ static unsigned long ksm_rmap_items; /* Number of pages ksmd should scan in one batch */ static unsigned int ksm_thread_pages_to_scan = 100; /* Milliseconds ksmd should sleep between batches */ static unsigned int ksm_thread_sleep_millisecs = 20; #define KSM_RUN_STOP 0 #define KSM_RUN_MERGE 1 #define KSM_RUN_UNMERGE 2 static unsigned int ksm_run = KSM_RUN_STOP; static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); static DEFINE_MUTEX(ksm_thread_mutex); static DEFINE_SPINLOCK(ksm_mmlist_lock); #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\ sizeof(struct __struct), __alignof__(struct __struct),\ (__flags), NULL) static int __init ksm_slab_init(void) { 
rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0); if (!rmap_item_cache) goto out; stable_node_cache = KSM_KMEM_CACHE(stable_node, 0); if (!stable_node_cache) goto out_free1; mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0); if (!mm_slot_cache) goto out_free2; return 0; out_free2: kmem_cache_destroy(stable_node_cache); out_free1: kmem_cache_destroy(rmap_item_cache); out: return -ENOMEM; } static void __init ksm_slab_free(void) { kmem_cache_destroy(mm_slot_cache); kmem_cache_destroy(stable_node_cache); kmem_cache_destroy(rmap_item_cache); mm_slot_cache = NULL; } static inline struct rmap_item *alloc_rmap_item(void) { struct rmap_item *rmap_item; rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL); if (rmap_item) ksm_rmap_items++; return rmap_item; } static inline void free_rmap_item(struct rmap_item *rmap_item) { ksm_rmap_items--; rmap_item->mm = NULL; /* debug safety */ kmem_cache_free(rmap_item_cache, rmap_item); } static inline struct stable_node *alloc_stable_node(void) { return kmem_cache_alloc(stable_node_cache, GFP_KERNEL); } static inline void free_stable_node(struct stable_node *stable_node) { kmem_cache_free(stable_node_cache, stable_node); } static inline struct mm_slot *alloc_mm_slot(void) { if (!mm_slot_cache) /* initialization failed */ return NULL; return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); } static inline void free_mm_slot(struct mm_slot *mm_slot) { kmem_cache_free(mm_slot_cache, mm_slot); } static struct mm_slot *get_mm_slot(struct mm_struct *mm) { struct mm_slot *mm_slot; struct hlist_head *bucket; struct hlist_node *node; bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)]; hlist_for_each_entry(mm_slot, node, bucket, link) { if (mm == mm_slot->mm) return mm_slot; } return NULL; } static void insert_to_mm_slots_hash(struct mm_struct *mm, struct mm_slot *mm_slot) { struct hlist_head *bucket; bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)]; mm_slot->mm = mm; hlist_add_head(&mm_slot->link, bucket); } static inline int 
in_stable_tree(struct rmap_item *rmap_item) { return rmap_item->address & STABLE_FLAG; } /* * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's * page tables after it has passed through ksm_exit() - which, if necessary, * takes mmap_sem briefly to serialize against them. ksm_exit() does not set * a special flag: they can just back out as soon as mm_users goes to zero. * ksm_test_exit() is used throughout to make this test for exit: in some * places for correctness, in some places just to avoid unnecessary work. */ static inline bool ksm_test_exit(struct mm_struct *mm) { return atomic_read(&mm->mm_users) == 0; } /* * We use break_ksm to break COW on a ksm page: it's a stripped down * * if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1) * put_page(page); * * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma, * in case the application has unmapped and remapped mm,addr meanwhile. * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP * mmap of /dev/mem or /dev/kmem, where we would not want to touch it. */ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) { struct page *page; int ret = 0; do { cond_resched(); page = follow_page(vma, addr, FOLL_GET); if (IS_ERR_OR_NULL(page)) break; if (PageKsm(page)) ret = handle_mm_fault(vma->vm_mm, vma, addr, FAULT_FLAG_WRITE); else ret = VM_FAULT_WRITE; put_page(page); } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM))); /* * We must loop because handle_mm_fault() may back out if there's * any difficulty e.g. if pte accessed bit gets updated concurrently. * * VM_FAULT_WRITE is what we have been hoping for: it indicates that * COW has been broken, even if the vma does not permit VM_WRITE; * but note that a concurrent fault might break PageKsm for us. 
* * VM_FAULT_SIGBUS could occur if we race with truncation of the * backing file, which also invalidates anonymous pages: that's * okay, that truncation will have unmapped the PageKsm for us. * * VM_FAULT_OOM: at the time of writing (late July 2009), setting * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the * current task has TIF_MEMDIE set, and will be OOM killed on return * to user; and ksmd, having no mm, would never be chosen for that. * * But if the mm is in a limited mem_cgroup, then the fault may fail * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and * even ksmd can fail in this way - though it's usually breaking ksm * just to undo a merge it made a moment before, so unlikely to oom. * * That's a pity: we might therefore have more kernel pages allocated * than we're counting as nodes in the stable tree; but ksm_do_scan * will retry to break_cow on each pass, so should recover the page * in due course. The important thing is to not let VM_MERGEABLE * be cleared while any such pages might remain in the area. */ return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; } static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; if (ksm_test_exit(mm)) return NULL; vma = find_vma(mm, addr); if (!vma || vma->vm_start > addr) return NULL; if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) return NULL; return vma; } static void break_cow(struct rmap_item *rmap_item) { struct mm_struct *mm = rmap_item->mm; unsigned long addr = rmap_item->address; struct vm_area_struct *vma; /* * It is not an accident that whenever we want to break COW * to undo, we also need to drop a reference to the anon_vma. 
*/
	put_anon_vma(rmap_item->anon_vma);

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr);
	up_read(&mm->mmap_sem);
}

/*
 * Return the anon head page if @page is part of a transparent huge
 * anonymous compound page, otherwise NULL.
 */
static struct page *page_trans_compound_anon(struct page *page)
{
	if (PageTransCompound(page)) {
		struct page *head = compound_trans_head(page);
		/*
		 * head may actually be splitted and freed from under
		 * us but it's ok here.
		 */
		if (PageAnon(head))
			return head;
	}
	return NULL;
}

/*
 * Look up the page at rmap_item's recorded mm/address, taking a page
 * reference (FOLL_GET).  The page is returned only if it is anonymous
 * (or part of an anon transparent hugepage); otherwise NULL is returned
 * and the reference is dropped.  Caller must put_page() a non-NULL
 * result.
 */
static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page) || page_trans_compound_anon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:
		page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;
}

/*
 * Unlink every rmap_item hanging off @stable_node, adjusting the
 * ksm_pages_sharing/ksm_pages_shared counts and dropping each item's
 * anon_vma reference, then erase the node from the stable rbtree and
 * free it.
 */
static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
	struct rmap_item *rmap_item;
	struct hlist_node *hlist;

	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	rb_erase(&stable_node->node, &root_stable_tree);
	free_stable_node(stable_node);
}

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
* But if it increments the page's count, swapping out has to wait for * ksmd to come around again before it can free the page, which may take * seconds or even minutes: much too unresponsive. So instead we use a * "keyhole reference": access to the ksm page from the stable node peeps * out through its keyhole to see if that page still holds the right key, * pointing back to this stable node. This relies on freeing a PageAnon * page to reset its page->mapping to NULL, and relies on no other use of * a page to put something that might look like our key in page->mapping. * * include/linux/pagemap.h page_cache_get_speculative() is a good reference, * but this is different - made simpler by ksm_thread_mutex being held, but * interesting for assuming that no other use of the struct page could ever * put our expected_mapping into page->mapping (or a field of the union which * coincides with page->mapping). The RCU calls are not for KSM at all, but * to keep the page_count protocol described with page_cache_get_speculative. * * Note: it is possible that get_ksm_page() will return NULL one moment, * then page the next, if the page is in between page_freeze_refs() and * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page * is on its way to being freed; but it is an anomaly to bear in mind. */ static struct page *get_ksm_page(struct stable_node *stable_node) { struct page *page; void *expected_mapping; page = pfn_to_page(stable_node->kpfn); expected_mapping = (void *)stable_node + (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); rcu_read_lock(); if (page->mapping != expected_mapping) goto stale; if (!get_page_unless_zero(page)) goto stale; if (page->mapping != expected_mapping) { put_page(page); goto stale; } rcu_read_unlock(); return page; stale: rcu_read_unlock(); remove_node_from_stable_tree(stable_node); return NULL; } /* * Removing rmap_item from stable or unstable tree. * This function will clean the information from the stable/unstable tree. 
*/
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node);
		if (!page)
			goto out;

		/* hlist_del under page lock, see stable_tree_append() */
		lock_page(page);
		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		if (stable_node->hlist.first)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node, &root_unstable_tree);

		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

/* Remove and free the tail of an mm's rmap_list, from *rmap_list onwards. */
static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
				       struct rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
* * Similarly, although we'd like to remove rmap_items (so updating counts * and freeing memory) when unmerging an area, it's easier to leave that * to the next pass of ksmd - consider, for example, how ksmd might be * in cmp_and_merge_page on one of the rmap_items we would be removing. */ static int unmerge_ksm_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end) { unsigned long addr; int err = 0; for (addr = start; addr < end && !err; addr += PAGE_SIZE) { if (ksm_test_exit(vma->vm_mm)) break; if (signal_pending(current)) err = -ERESTARTSYS; else err = break_ksm(vma, addr); } return err; } #ifdef CONFIG_SYSFS /* * Only called through the sysfs control interface: */ static int unmerge_and_remove_all_rmap_items(void) { struct mm_slot *mm_slot; struct mm_struct *mm; struct vm_area_struct *vma; int err = 0; spin_lock(&ksm_mmlist_lock); ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next, struct mm_slot, mm_list); spin_unlock(&ksm_mmlist_lock); for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) { mm = mm_slot->mm; down_read(&mm->mmap_sem); for (vma = mm->mmap; vma; vma = vma->vm_next) { if (ksm_test_exit(mm)) break; if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) continue; err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end); if (err) goto error; } remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list); spin_lock(&ksm_mmlist_lock); ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next, struct mm_slot, mm_list); if (ksm_test_exit(mm)) { hlist_del(&mm_slot->link); list_del(&mm_slot->mm_list); spin_unlock(&ksm_mmlist_lock); free_mm_slot(mm_slot); clear_bit(MMF_VM_MERGEABLE, &mm->flags); up_read(&mm->mmap_sem); mmdrop(mm); } else { spin_unlock(&ksm_mmlist_lock); up_read(&mm->mmap_sem); } } ksm_scan.seqnr = 0; return 0; error: up_read(&mm->mmap_sem); spin_lock(&ksm_mmlist_lock); ksm_scan.mm_slot = &ksm_mm_head; spin_unlock(&ksm_mmlist_lock); return err; } #endif /* CONFIG_SYSFS */ static u32 
calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page);
	/* jhash2 over the whole page contents, with an arbitrary fixed seed */
	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
	kunmap_atomic(addr);
	return checksum;
}

/* memcmp() the full contents of two pages: 0 means identical. */
static int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

/*
 * Write-protect the pte mapping @page in @vma so that a later write
 * must fault and COW.  On success the prior pte value is stored in
 * *orig_pte (for replace_page() to recheck with pte_same later) and 0
 * is returned; -EFAULT is returned if the page is no longer mapped at
 * the expected address, or if extra page references (e.g. concurrent
 * O_DIRECT) make write-protection unsafe.
 */
static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	pte_t *ptep;
	spinlock_t *ptl;
	int swapped;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	BUG_ON(PageTransCompound(page));
	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto out;

	if (pte_write(*ptep) || pte_dirty(*ptep)) {
		pte_t entry;

		swapped = PageSwapCache(page);
		flush_cache_page(vma, addr, page_to_pfn(page));
		/*
		 * Ok this is tricky, when get_user_pages_fast() run it doesn't
		 * take any lock, therefore the check that we are going to make
		 * with the pagecount against the mapcount is racey and
		 * O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check
		 * this assure us that no O_DIRECT can happen after the check
		 * or in the middle of the check.
*/ entry = ptep_clear_flush(vma, addr, ptep); /* * Check that no O_DIRECT or similar I/O is in progress on the * page */ if (page_mapcount(page) + 1 + swapped != page_count(page)) { set_pte_at(mm, addr, ptep, entry); goto out_unlock; } if (pte_dirty(entry)) set_page_dirty(page); entry = pte_mkclean(pte_wrprotect(entry)); set_pte_at_notify(mm, addr, ptep, entry); } *orig_pte = *ptep; err = 0; out_unlock: pte_unmap_unlock(ptep, ptl); out: return err; } /** * replace_page - replace page in vma by new ksm page * @vma: vma that holds the pte pointing to page * @page: the page we are replacing by kpage * @kpage: the ksm page we replace page by * @orig_pte: the original value of the pte * * Returns 0 on success, -EFAULT on failure. */ static int replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage, pte_t orig_pte) { struct mm_struct *mm = vma->vm_mm; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *ptep; spinlock_t *ptl; unsigned long addr; int err = -EFAULT; addr = page_address_in_vma(page, vma); if (addr == -EFAULT) goto out; pgd = pgd_offset(mm, addr); if (!pgd_present(*pgd)) goto out; pud = pud_offset(pgd, addr); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, addr); BUG_ON(pmd_trans_huge(*pmd)); if (!pmd_present(*pmd)) goto out; ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!pte_same(*ptep, orig_pte)) { pte_unmap_unlock(ptep, ptl); goto out; } get_page(kpage); page_add_anon_rmap(kpage, vma, addr); flush_cache_page(vma, addr, pte_pfn(*ptep)); ptep_clear_flush(vma, addr, ptep); set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); page_remove_rmap(page); if (!page_mapped(page)) try_to_free_swap(page); put_page(page); pte_unmap_unlock(ptep, ptl); err = 0; out: return err; } static int page_trans_compound_anon_split(struct page *page) { int ret = 0; struct page *transhuge_head = page_trans_compound_anon(page); if (transhuge_head) { /* Get the reference on the head to split it. 
*/
		if (get_page_unless_zero(transhuge_head)) {
			/*
			 * Recheck we got the reference while the head
			 * was still anonymous.
			 */
			if (PageAnon(transhuge_head))
				ret = split_huge_page(transhuge_head);
			else
				/*
				 * Retry later if split_huge_page run
				 * from under us.
				 */
				ret = 1;
			put_page(transhuge_head);
		} else
			/* Retry later if split_huge_page run from under us. */
			ret = 1;
	}
	return ret;
}

/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page that we want to map instead of page,
 *         or NULL the first time when we want to use page as kpage.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!(vma->vm_flags & VM_MERGEABLE))
		goto out;
	/* A compound page must be split before it can be merged. */
	if (PageTransCompound(page) && page_trans_compound_anon_split(page))
		goto out;
	BUG_ON(PageTransCompound(page));
	if (!PageAnon(page))
		goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page().  We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(page))
		goto out;
	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, page, &orig_pte) == 0) {
		if (!kpage) {
			/*
			 * While we hold page lock, upgrade page from
			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
			 * stable_tree_insert() will update stable_node.
*/ set_page_stable_node(page, NULL); mark_page_accessed(page); err = 0; } else if (pages_identical(page, kpage)) err = replace_page(vma, page, kpage, orig_pte); } if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { munlock_vma_page(page); if (!PageMlocked(kpage)) { unlock_page(page); lock_page(kpage); mlock_vma_page(kpage); page = kpage; /* for final unlock */ } } unlock_page(page); out: return err; } /* * try_to_merge_with_ksm_page - like try_to_merge_two_pages, * but no new kernel page is allocated: kpage must already be a ksm page. * * This function returns 0 if the pages were merged, -EFAULT otherwise. */ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item, struct page *page, struct page *kpage) { struct mm_struct *mm = rmap_item->mm; struct vm_area_struct *vma; int err = -EFAULT; down_read(&mm->mmap_sem); if (ksm_test_exit(mm)) goto out; vma = find_vma(mm, rmap_item->address); if (!vma || vma->vm_start > rmap_item->address) goto out; err = try_to_merge_one_page(vma, page, kpage); if (err) goto out; /* Must get reference to anon_vma while still holding mmap_sem */ rmap_item->anon_vma = vma->anon_vma; get_anon_vma(vma->anon_vma); out: up_read(&mm->mmap_sem); return err; } /* * try_to_merge_two_pages - take two identical pages and prepare them * to be merged into one page. * * This function returns the kpage if we successfully merged two identical * pages into one ksm page, NULL otherwise. * * Note that this function upgrades page to ksm page: if one of the pages * is already a ksm page, try_to_merge_with_ksm_page should be used. */ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, struct page *page, struct rmap_item *tree_rmap_item, struct page *tree_page) { int err; err = try_to_merge_with_ksm_page(rmap_item, page, NULL); if (!err) { err = try_to_merge_with_ksm_page(tree_rmap_item, tree_page, page); /* * If that fails, we have a ksm page with only one pte * pointing to it: so break it. 
*/ if (err) break_cow(rmap_item); } return err ? NULL : page; } /* * stable_tree_search - search for page inside the stable tree * * This function checks if there is a page inside the stable tree * with identical content to the page that we are scanning right now. * * This function returns the stable tree node of identical content if found, * NULL otherwise. */ static struct page *stable_tree_search(struct page *page) { struct rb_node *node = root_stable_tree.rb_node; struct stable_node *stable_node; stable_node = page_stable_node(page); if (stable_node) { /* ksm page forked */ get_page(page); return page; } while (node) { struct page *tree_page; int ret; cond_resched(); stable_node = rb_entry(node, struct stable_node, node); tree_page = get_ksm_page(stable_node); if (!tree_page) return NULL; ret = memcmp_pages(page, tree_page); if (ret < 0) { put_page(tree_page); node = node->rb_left; } else if (ret > 0) { put_page(tree_page); node = node->rb_right; } else return tree_page; } return NULL; } /* * stable_tree_insert - insert rmap_item pointing to new ksm page * into the stable tree. * * This function returns the stable tree node just allocated on success, * NULL otherwise. */ static struct stable_node *stable_tree_insert(struct page *kpage) { struct rb_node **new = &root_stable_tree.rb_node; struct rb_node *parent = NULL; struct stable_node *stable_node; while (*new) { struct page *tree_page; int ret; cond_resched(); stable_node = rb_entry(*new, struct stable_node, node); tree_page = get_ksm_page(stable_node); if (!tree_page) return NULL; ret = memcmp_pages(kpage, tree_page); put_page(tree_page); parent = *new; if (ret < 0) new = &parent->rb_left; else if (ret > 0) new = &parent->rb_right; else { /* * It is not a bug that stable_tree_search() didn't * find this node: because at that time our page was * not yet write-protected, so may have changed since. 
*/
			return NULL;
		}
	}

	stable_node = alloc_stable_node();
	if (!stable_node)
		return NULL;

	rb_link_node(&stable_node->node, parent, new);
	rb_insert_color(&stable_node->node, &root_stable_tree);

	INIT_HLIST_HEAD(&stable_node->hlist);

	stable_node->kpfn = page_to_pfn(kpage);
	set_page_stable_node(kpage, stable_node);

	return stable_node;
}

/*
 * unstable_tree_search_insert - search for identical page,
 * else insert rmap_item into the unstable tree.
 *
 * This function searches for a page in the unstable tree identical to the
 * page currently being scanned; and if no identical page is found in the
 * tree, we insert rmap_item as a new object into the unstable tree.
 *
 * This function returns pointer to rmap_item found to be identical
 * to the currently scanned page, NULL otherwise.
 *
 * This function does both searching and inserting, because they share
 * the same walking algorithm in an rbtree.
 */
static
struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
					      struct page *page,
					      struct page **tree_pagep)
{
	struct rb_node **new = &root_unstable_tree.rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct rmap_item *tree_rmap_item;
		struct page *tree_page;
		int ret;

		cond_resched();
		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
		tree_page = get_mergeable_page(tree_rmap_item);
		if (IS_ERR_OR_NULL(tree_page))
			return NULL;

		/*
		 * Don't substitute a ksm page for a forked page.
*/ if (page == tree_page) { put_page(tree_page); return NULL; } ret = memcmp_pages(page, tree_page); parent = *new; if (ret < 0) { put_page(tree_page); new = &parent->rb_left; } else if (ret > 0) { put_page(tree_page); new = &parent->rb_right; } else { *tree_pagep = tree_page; return tree_rmap_item; } } rmap_item->address |= UNSTABLE_FLAG; rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); rb_link_node(&rmap_item->node, parent, new); rb_insert_color(&rmap_item->node, &root_unstable_tree); ksm_pages_unshared++; return NULL; } /* * stable_tree_append - add another rmap_item to the linked list of * rmap_items hanging off a given node of the stable tree, all sharing * the same ksm page. */ static void stable_tree_append(struct rmap_item *rmap_item, struct stable_node *stable_node) { rmap_item->head = stable_node; rmap_item->address |= STABLE_FLAG; hlist_add_head(&rmap_item->hlist, &stable_node->hlist); if (rmap_item->hlist.next) ksm_pages_sharing++; else ksm_pages_shared++; } /* * cmp_and_merge_page - first see if page can be merged into the stable tree; * if not, compare checksum to previous and if it's the same, see if page can * be inserted into the unstable tree, or merged with a page already there and * both transferred to the stable tree. * * @page: the page that we are searching identical page to. * @rmap_item: the reverse mapping into the virtual address of this page */ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) { struct rmap_item *tree_rmap_item; struct page *tree_page = NULL; struct stable_node *stable_node; struct page *kpage; unsigned int checksum; int err; remove_rmap_item_from_tree(rmap_item); /* We first start with searching the page inside the stable tree */ kpage = stable_tree_search(page); if (kpage) { err = try_to_merge_with_ksm_page(rmap_item, page, kpage); if (!err) { /* * The page was successfully merged: * add its rmap_item to the stable tree. 
*/ lock_page(kpage); stable_tree_append(rmap_item, page_stable_node(kpage)); unlock_page(kpage); } put_page(kpage); return; } /* * If the hash value of the page has changed from the last time * we calculated it, this page is changing frequently: therefore we * don't want to insert it in the unstable tree, and we don't want * to waste our time searching for something identical to it there. */ checksum = calc_checksum(page); if (rmap_item->oldchecksum != checksum) { rmap_item->oldchecksum = checksum; return; } tree_rmap_item = unstable_tree_search_insert(rmap_item, page, &tree_page); if (tree_rmap_item) { kpage = try_to_merge_two_pages(rmap_item, page, tree_rmap_item, tree_page); put_page(tree_page); /* * As soon as we merge this page, we want to remove the * rmap_item of the page we have merged with from the unstable * tree, and insert it instead as new node in the stable tree. */ if (kpage) { remove_rmap_item_from_tree(tree_rmap_item); lock_page(kpage); stable_node = stable_tree_insert(kpage); if (stable_node) { stable_tree_append(tree_rmap_item, stable_node); stable_tree_append(rmap_item, stable_node); } unlock_page(kpage); /* * If we fail to insert the page into the stable tree, * we will have 2 virtual addresses that are pointing * to a ksm page left outside the stable tree, * in which case we need to break_cow on both. 
*/
			if (!stable_node) {
				break_cow(tree_rmap_item);
				break_cow(rmap_item);
			}
		}
	}
}

/*
 * Walk this mm's rmap_list forward to find (or allocate) the rmap_item
 * for address @addr, unlinking and freeing any stale items passed over
 * on the way (their addresses are below @addr, so no longer relevant).
 */
static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
					    struct rmap_item **rmap_list,
					    unsigned long addr)
{
	struct rmap_item *rmap_item;

	while (*rmap_list) {
		rmap_item = *rmap_list;
		if ((rmap_item->address & PAGE_MASK) == addr)
			return rmap_item;
		if (rmap_item->address > addr)
			break;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}

	rmap_item = alloc_rmap_item();
	if (rmap_item) {
		/* It has already been zeroed */
		rmap_item->mm = mm_slot->mm;
		rmap_item->address = addr;
		rmap_item->rmap_list = *rmap_list;
		*rmap_list = rmap_item;
	}
	return rmap_item;
}

/*
 * Advance the scan cursor to the next anonymous page of a VM_MERGEABLE
 * vma and return its rmap_item; the page itself is returned through
 * @page with a reference held.  Returns NULL when a full pass over all
 * registered mms has completed (bumping ksm_scan.seqnr).
 */
static struct rmap_item *scan_get_next_rmap_item(struct page **page)
{
	struct mm_struct *mm;
	struct mm_slot *slot;
	struct vm_area_struct *vma;
	struct rmap_item *rmap_item;

	if (list_empty(&ksm_mm_head.mm_list))
		return NULL;

	slot = ksm_scan.mm_slot;
	if (slot == &ksm_mm_head) {
		/*
		 * A number of pages can hang around indefinitely on per-cpu
		 * pagevecs, raised page count preventing write_protect_page
		 * from merging them.  Though it doesn't really matter much,
		 * it is puzzling to see some stuck in pages_volatile until
		 * other activity jostles them out, and they also prevented
		 * LTP's KSM test from succeeding deterministically; so drain
		 * them here (here rather than on entry to ksm_do_scan(),
		 * so we don't IPI too often when pages_to_scan is set low).
		 */
		lru_add_drain_all();

		root_unstable_tree = RB_ROOT;

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
		ksm_scan.mm_slot = slot;
		spin_unlock(&ksm_mmlist_lock);
		/*
		 * Although we tested list_empty() above, a racing __ksm_exit
		 * of the last mm on the list may have removed it since then.
*/ if (slot == &ksm_mm_head) return NULL; next_mm: ksm_scan.address = 0; ksm_scan.rmap_list = &slot->rmap_list; } mm = slot->mm; down_read(&mm->mmap_sem); if (ksm_test_exit(mm)) vma = NULL; else vma = find_vma(mm, ksm_scan.address); for (; vma; vma = vma->vm_next) { if (!(vma->vm_flags & VM_MERGEABLE)) continue; if (ksm_scan.address < vma->vm_start) ksm_scan.address = vma->vm_start; if (!vma->anon_vma) ksm_scan.address = vma->vm_end; while (ksm_scan.address < vma->vm_end) { if (ksm_test_exit(mm)) break; *page = follow_page(vma, ksm_scan.address, FOLL_GET); if (IS_ERR_OR_NULL(*page)) { ksm_scan.address += PAGE_SIZE; cond_resched(); continue; } if (PageAnon(*page) || page_trans_compound_anon(*page)) { flush_anon_page(vma, *page, ksm_scan.address); flush_dcache_page(*page); rmap_item = get_next_rmap_item(slot, ksm_scan.rmap_list, ksm_scan.address); if (rmap_item) { ksm_scan.rmap_list = &rmap_item->rmap_list; ksm_scan.address += PAGE_SIZE; } else put_page(*page); up_read(&mm->mmap_sem); return rmap_item; } put_page(*page); ksm_scan.address += PAGE_SIZE; cond_resched(); } } if (ksm_test_exit(mm)) { ksm_scan.address = 0; ksm_scan.rmap_list = &slot->rmap_list; } /* * Nuke all the rmap_items that are above this current rmap: * because there were no VM_MERGEABLE vmas with such addresses. */ remove_trailing_rmap_items(slot, ksm_scan.rmap_list); spin_lock(&ksm_mmlist_lock); ksm_scan.mm_slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); if (ksm_scan.address == 0) { /* * We've completed a full scan of all vmas, holding mmap_sem * throughout, and found no VM_MERGEABLE: so do the same as * __ksm_exit does to remove this mm from all our lists now. * This applies either when cleaning up after __ksm_exit * (but beware: we can reach here even before __ksm_exit), * or when all VM_MERGEABLE areas have been unmapped (and * mmap_sem then protects against race with MADV_MERGEABLE). 
*/ hlist_del(&slot->link); list_del(&slot->mm_list); spin_unlock(&ksm_mmlist_lock); free_mm_slot(slot); clear_bit(MMF_VM_MERGEABLE, &mm->flags); up_read(&mm->mmap_sem); mmdrop(mm); } else { spin_unlock(&ksm_mmlist_lock); up_read(&mm->mmap_sem); } /* Repeat until we've completed scanning the whole list */ slot = ksm_scan.mm_slot; if (slot != &ksm_mm_head) goto next_mm; ksm_scan.seqnr++; return NULL; } /** * ksm_do_scan - the ksm scanner main worker function. * @scan_npages - number of pages we want to scan before we return. */ static void ksm_do_scan(unsigned int scan_npages) { struct rmap_item *rmap_item; struct page *uninitialized_var(page); while (scan_npages-- && likely(!freezing(current))) { cond_resched(); rmap_item = scan_get_next_rmap_item(&page); if (!rmap_item) return; if (!PageKsm(page) || !in_stable_tree(rmap_item)) cmp_and_merge_page(page, rmap_item); put_page(page); } } static int ksmd_should_run(void) { return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list); } static int ksm_scan_thread(void *nothing) { set_freezable(); set_user_nice(current, 5); while (!kthread_should_stop()) { mutex_lock(&ksm_thread_mutex); if (ksmd_should_run()) ksm_do_scan(ksm_thread_pages_to_scan); mutex_unlock(&ksm_thread_mutex); try_to_freeze(); if (ksmd_should_run()) { schedule_timeout_interruptible( msecs_to_jiffies(ksm_thread_sleep_millisecs)); } else { wait_event_freezable(ksm_thread_wait, ksmd_should_run() || kthread_should_stop()); } } return 0; } int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags) { struct mm_struct *mm = vma->vm_mm; int err; switch (advice) { case MADV_MERGEABLE: /* * Be somewhat over-protective for now! 
*/ if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | VM_NONLINEAR | VM_MIXEDMAP | VM_SAO)) return 0; /* just ignore the advice */ if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { err = __ksm_enter(mm); if (err) return err; } *vm_flags |= VM_MERGEABLE; break; case MADV_UNMERGEABLE: if (!(*vm_flags & VM_MERGEABLE)) return 0; /* just ignore the advice */ if (vma->anon_vma) { err = unmerge_ksm_pages(vma, start, end); if (err) return err; } *vm_flags &= ~VM_MERGEABLE; break; } return 0; } int __ksm_enter(struct mm_struct *mm) { struct mm_slot *mm_slot; int needs_wakeup; mm_slot = alloc_mm_slot(); if (!mm_slot) return -ENOMEM; /* Check ksm_run too? Would need tighter locking */ needs_wakeup = list_empty(&ksm_mm_head.mm_list); spin_lock(&ksm_mmlist_lock); insert_to_mm_slots_hash(mm, mm_slot); /* * Insert just behind the scanning cursor, to let the area settle * down a little; when fork is followed by immediate exec, we don't * want ksmd to waste time setting up and tearing down an rmap_list. */ list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); spin_unlock(&ksm_mmlist_lock); set_bit(MMF_VM_MERGEABLE, &mm->flags); atomic_inc(&mm->mm_count); if (needs_wakeup) wake_up_interruptible(&ksm_thread_wait); return 0; } void __ksm_exit(struct mm_struct *mm) { struct mm_slot *mm_slot; int easy_to_free = 0; /* * This process is exiting: if it's straightforward (as is the * case when ksmd was never running), free mm_slot immediately. * But if it's at the cursor or has rmap_items linked to it, use * mmap_sem to synchronize with any break_cows before pagetables * are freed, and leave the mm_slot on the list for ksmd to free. * Beware: ksm may already have noticed it exiting and freed the slot. 
*/
	spin_lock(&ksm_mmlist_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
		if (!mm_slot->rmap_list) {
			hlist_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			easy_to_free = 1;
		} else {
			list_move(&mm_slot->mm_list,
				  &ksm_scan.mm_slot->mm_list);
		}
	}
	spin_unlock(&ksm_mmlist_lock);

	if (easy_to_free) {
		free_mm_slot(mm_slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * Take and release mmap_sem for write to synchronize with
		 * any break_cows before the pagetables are freed (see the
		 * function comment above); ksmd will free the slot later.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}

/*
 * Allocate a fresh anonymous page and copy @page into it, for a faulting
 * process that must not reuse the KSM page.  The new page is returned
 * dirty, uptodate, swap-backed, locked, and already added to the
 * appropriate LRU (or unevictable) list; NULL if allocation fails.
 */
struct page *ksm_does_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	struct page *new_page;

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (new_page) {
		copy_user_highpage(new_page, page, address, vma);

		SetPageDirty(new_page);
		__SetPageUptodate(new_page);
		SetPageSwapBacked(new_page);
		__set_page_locked(new_page);

		if (page_evictable(new_page, vma))
			lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
		else
			add_page_to_unevictable_list(new_page);
	}

	return new_page;
}

/*
 * rmap walk for a PageKsm page: accumulate page_referenced_one() over
 * every vma mapping this page, via the rmap_items on its stable node.
 */
int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
			unsigned long *vm_flags)
{
	struct stable_node *stable_node;
	struct rmap_item *rmap_item;
	struct hlist_node *hlist;
	unsigned int mapcount = page_mapcount(page);
	int referenced = 0;
	int search_new_forks = 0;

	VM_BUG_ON(!PageKsm(page));
	VM_BUG_ON(!PageLocked(page));

	stable_node = page_stable_node(page);
	if (!stable_node)
		return 0;
again:
	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct anon_vma_chain *vmac;
		struct vm_area_struct *vma;

		anon_vma_lock(anon_vma);
		list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
			vma = vmac->vma;
			if (rmap_item->address < vma->vm_start ||
			    rmap_item->address >= vma->vm_end)
				continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
*/ if ((rmap_item->mm == vma->vm_mm) == search_new_forks) continue; if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) continue; referenced += page_referenced_one(page, vma, rmap_item->address, &mapcount, vm_flags); if (!search_new_forks || !mapcount) break; } anon_vma_unlock(anon_vma); if (!mapcount) goto out; } if (!search_new_forks++) goto again; out: return referenced; } int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) { struct stable_node *stable_node; struct hlist_node *hlist; struct rmap_item *rmap_item; int ret = SWAP_AGAIN; int search_new_forks = 0; VM_BUG_ON(!PageKsm(page)); VM_BUG_ON(!PageLocked(page)); stable_node = page_stable_node(page); if (!stable_node) return SWAP_FAIL; again: hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { struct anon_vma *anon_vma = rmap_item->anon_vma; struct anon_vma_chain *vmac; struct vm_area_struct *vma; anon_vma_lock(anon_vma); list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) { vma = vmac->vma; if (rmap_item->address < vma->vm_start || rmap_item->address >= vma->vm_end) continue; /* * Initially we examine only the vma which covers this * rmap_item; but later, if there is still work to do, * we examine covering vmas in other mms: in case they * were forked from the original since ksmd passed. 
*/ if ((rmap_item->mm == vma->vm_mm) == search_new_forks) continue; ret = try_to_unmap_one(page, vma, rmap_item->address, flags); if (ret != SWAP_AGAIN || !page_mapped(page)) { anon_vma_unlock(anon_vma); goto out; } } anon_vma_unlock(anon_vma); } if (!search_new_forks++) goto again; out: return ret; } #ifdef CONFIG_MIGRATION int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, struct vm_area_struct *, unsigned long, void *), void *arg) { struct stable_node *stable_node; struct hlist_node *hlist; struct rmap_item *rmap_item; int ret = SWAP_AGAIN; int search_new_forks = 0; VM_BUG_ON(!PageKsm(page)); VM_BUG_ON(!PageLocked(page)); stable_node = page_stable_node(page); if (!stable_node) return ret; again: hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { struct anon_vma *anon_vma = rmap_item->anon_vma; struct anon_vma_chain *vmac; struct vm_area_struct *vma; anon_vma_lock(anon_vma); list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) { vma = vmac->vma; if (rmap_item->address < vma->vm_start || rmap_item->address >= vma->vm_end) continue; /* * Initially we examine only the vma which covers this * rmap_item; but later, if there is still work to do, * we examine covering vmas in other mms: in case they * were forked from the original since ksmd passed. 
*/ if ((rmap_item->mm == vma->vm_mm) == search_new_forks) continue; ret = rmap_one(page, vma, rmap_item->address, arg); if (ret != SWAP_AGAIN) { anon_vma_unlock(anon_vma); goto out; } } anon_vma_unlock(anon_vma); } if (!search_new_forks++) goto again; out: return ret; } void ksm_migrate_page(struct page *newpage, struct page *oldpage) { struct stable_node *stable_node; VM_BUG_ON(!PageLocked(oldpage)); VM_BUG_ON(!PageLocked(newpage)); VM_BUG_ON(newpage->mapping != oldpage->mapping); stable_node = page_stable_node(newpage); if (stable_node) { VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage)); stable_node->kpfn = page_to_pfn(newpage); } } #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_MEMORY_HOTREMOVE static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn, unsigned long end_pfn) { struct rb_node *node; for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) { struct stable_node *stable_node; stable_node = rb_entry(node, struct stable_node, node); if (stable_node->kpfn >= start_pfn && stable_node->kpfn < end_pfn) return stable_node; } return NULL; } static int ksm_memory_callback(struct notifier_block *self, unsigned long action, void *arg) { struct memory_notify *mn = arg; struct stable_node *stable_node; switch (action) { case MEM_GOING_OFFLINE: /* * Keep it very simple for now: just lock out ksmd and * MADV_UNMERGEABLE while any memory is going offline. * mutex_lock_nested() is necessary because lockdep was alarmed * that here we take ksm_thread_mutex inside notifier chain * mutex, and later take notifier chain mutex inside * ksm_thread_mutex to unlock it. But that's safe because both * are inside mem_hotplug_mutex. */ mutex_lock_nested(&ksm_thread_mutex, SINGLE_DEPTH_NESTING); break; case MEM_OFFLINE: /* * Most of the work is done by page migration; but there might * be a few stable_nodes left over, still pointing to struct * pages which have been offlined: prune those from the tree. 
*/ while ((stable_node = ksm_check_stable_tree(mn->start_pfn, mn->start_pfn + mn->nr_pages)) != NULL) remove_node_from_stable_tree(stable_node); /* fallthrough */ case MEM_CANCEL_OFFLINE: mutex_unlock(&ksm_thread_mutex); break; } return NOTIFY_OK; } #endif /* CONFIG_MEMORY_HOTREMOVE */ #ifdef CONFIG_SYSFS /* * This all compiles without CONFIG_SYSFS, but is a waste of space. */ #define KSM_ATTR_RO(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) #define KSM_ATTR(_name) \ static struct kobj_attribute _name##_attr = \ __ATTR(_name, 0644, _name##_show, _name##_store) static ssize_t sleep_millisecs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs); } static ssize_t sleep_millisecs_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned long msecs; int err; err = strict_strtoul(buf, 10, &msecs); if (err || msecs > UINT_MAX) return -EINVAL; ksm_thread_sleep_millisecs = msecs; return count; } KSM_ATTR(sleep_millisecs); static ssize_t pages_to_scan_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", ksm_thread_pages_to_scan); } static ssize_t pages_to_scan_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long nr_pages; err = strict_strtoul(buf, 10, &nr_pages); if (err || nr_pages > UINT_MAX) return -EINVAL; ksm_thread_pages_to_scan = nr_pages; return count; } KSM_ATTR(pages_to_scan); static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", ksm_run); } static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long flags; err = strict_strtoul(buf, 10, &flags); if (err || flags > UINT_MAX) return -EINVAL; if (flags > KSM_RUN_UNMERGE) return -EINVAL; /* * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. 
/* sysfs read of /sys/kernel/mm/ksm/pages_unshared: report the counter */
static ssize_t pages_unshared_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_unshared);
}
/*
 * Module init: create the KSM slab caches, start the ksmd scanner thread,
 * expose the sysfs controls (when CONFIG_SYSFS) and register for memory
 * hot-remove notifications (when CONFIG_MEMORY_HOTREMOVE).
 * On any failure the slab caches are torn down and the error returned.
 */
static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		printk(KERN_ERR "ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		printk(KERN_ERR "ksm: register sysfs failed\n");
		/* Stop ksmd before unwinding: sysfs is the only way to run it */
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
	/*
	 * Choose a high priority since the callback takes ksm_thread_mutex:
	 * later callbacks could only be taking locks which nest within that.
	 */
	hotplug_memory_notifier(ksm_memory_callback, 100);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
gpl-2.0
drod2169/Linux-3.12.x
drivers/i2c/busses/i2c-sis5595.c
2717
11844
/* Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and Philip Edelbrock <phil@netroedge.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Note: we assume there can only be one SIS5595 with one SMBus interface */ /* Note: all have mfr. ID 0x1039. SUPPORTED PCI ID 5595 0008 Note: these chips contain a 0008 device which is incompatible with the 5595. We recognize these by the presence of the listed "blacklist" PCI ID and refuse to load. 
NOT SUPPORTED PCI ID BLACKLIST PCI ID 540 0008 0540 550 0008 0550 5513 0008 5511 5581 0008 5597 5582 0008 5597 5597 0008 5597 5598 0008 5597/5598 630 0008 0630 645 0008 0645 646 0008 0646 648 0008 0648 650 0008 0650 651 0008 0651 730 0008 0730 735 0008 0735 745 0008 0745 746 0008 0746 */ /* TO DO: * Add Block Transfers (ugly, but supported by the adapter) * Add adapter resets */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/acpi.h> #include <linux/io.h> static int blacklist[] = { PCI_DEVICE_ID_SI_540, PCI_DEVICE_ID_SI_550, PCI_DEVICE_ID_SI_630, PCI_DEVICE_ID_SI_645, PCI_DEVICE_ID_SI_646, PCI_DEVICE_ID_SI_648, PCI_DEVICE_ID_SI_650, PCI_DEVICE_ID_SI_651, PCI_DEVICE_ID_SI_730, PCI_DEVICE_ID_SI_735, PCI_DEVICE_ID_SI_745, PCI_DEVICE_ID_SI_746, PCI_DEVICE_ID_SI_5511, /* 5513 chip has the 0008 device but that ID shows up in other chips so we use the 5511 ID for recognition */ PCI_DEVICE_ID_SI_5597, PCI_DEVICE_ID_SI_5598, 0, /* terminates the list */ }; /* Length of ISA address segment */ #define SIS5595_EXTENT 8 /* SIS5595 SMBus registers */ #define SMB_STS_LO 0x00 #define SMB_STS_HI 0x01 #define SMB_CTL_LO 0x02 #define SMB_CTL_HI 0x03 #define SMB_ADDR 0x04 #define SMB_CMD 0x05 #define SMB_PCNT 0x06 #define SMB_CNT 0x07 #define SMB_BYTE 0x08 #define SMB_DEV 0x10 #define SMB_DB0 0x11 #define SMB_DB1 0x12 #define SMB_HAA 0x13 /* PCI Address Constants */ #define SMB_INDEX 0x38 #define SMB_DAT 0x39 #define SIS5595_ENABLE_REG 0x40 #define ACPI_BASE 0x90 /* Other settings */ #define MAX_TIMEOUT 500 /* SIS5595 constants */ #define SIS5595_QUICK 0x00 #define SIS5595_BYTE 0x02 #define SIS5595_BYTE_DATA 0x04 #define SIS5595_WORD_DATA 0x06 #define SIS5595_PROC_CALL 0x08 #define SIS5595_BLOCK_DATA 0x0A /* insmod parameters */ /* If force_addr is set to anything different from 0, we forcibly enable the device at the given address. 
/*
 * Read one SMBus register via the chip's indexed access pair: write the
 * register number to the SMB_INDEX port, then read its value back from
 * the SMB_DAT port.
 */
static u8 sis5595_read(u8 reg)
{
	outb(reg, sis5595_base + SMB_INDEX);
	return inb(sis5595_base + SMB_DAT);
}
1)) != sis5595_base) { /* doesn't work for some chips! */ dev_err(&SIS5595_dev->dev, "force address failed - not supported?\n"); goto error; } } if (pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val) != PCIBIOS_SUCCESSFUL) goto error; if ((val & 0x80) == 0) { dev_info(&SIS5595_dev->dev, "enabling ACPI\n"); if (pci_write_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, val | 0x80) != PCIBIOS_SUCCESSFUL) goto error; if (pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val) != PCIBIOS_SUCCESSFUL) goto error; if ((val & 0x80) == 0) { /* doesn't work for some chips? */ dev_err(&SIS5595_dev->dev, "ACPI enable failed - not supported?\n"); goto error; } } /* Everything is happy */ return 0; error: release_region(sis5595_base + SMB_INDEX, 2); return -ENODEV; } static int sis5595_transaction(struct i2c_adapter *adap) { int temp; int result = 0; int timeout = 0; /* Make sure the SMBus host is ready to start transmitting */ temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8); if (temp != 0x00) { dev_dbg(&adap->dev, "SMBus busy (%04x). Resetting...\n", temp); sis5595_write(SMB_STS_LO, temp & 0xff); sis5595_write(SMB_STS_HI, temp >> 8); if ((temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8)) != 0x00) { dev_dbg(&adap->dev, "Failed! (%02x)\n", temp); return -EBUSY; } else { dev_dbg(&adap->dev, "Successful!\n"); } } /* start the transaction by setting bit 4 */ sis5595_write(SMB_CTL_LO, sis5595_read(SMB_CTL_LO) | 0x10); /* We will always wait for a fraction of a second! */ do { msleep(1); temp = sis5595_read(SMB_STS_LO); } while (!(temp & 0x40) && (timeout++ < MAX_TIMEOUT)); /* If the SMBus is still busy, we give up */ if (timeout > MAX_TIMEOUT) { dev_dbg(&adap->dev, "SMBus Timeout!\n"); result = -ETIMEDOUT; } if (temp & 0x10) { dev_dbg(&adap->dev, "Error: Failed bus transaction\n"); result = -ENXIO; } if (temp & 0x20) { dev_err(&adap->dev, "Bus collision! 
SMBus may be locked until " "next hard reset (or not...)\n"); /* Clock stops and slave is stuck in mid-transmission */ result = -EIO; } temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8); if (temp != 0x00) { sis5595_write(SMB_STS_LO, temp & 0xff); sis5595_write(SMB_STS_HI, temp >> 8); } temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8); if (temp != 0x00) dev_dbg(&adap->dev, "Failed reset at end of transaction (%02x)\n", temp); return result; } /* Return negative errno on error. */ static s32 sis5595_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int status; switch (size) { case I2C_SMBUS_QUICK: sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); size = SIS5595_QUICK; break; case I2C_SMBUS_BYTE: sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); if (read_write == I2C_SMBUS_WRITE) sis5595_write(SMB_CMD, command); size = SIS5595_BYTE; break; case I2C_SMBUS_BYTE_DATA: sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); sis5595_write(SMB_CMD, command); if (read_write == I2C_SMBUS_WRITE) sis5595_write(SMB_BYTE, data->byte); size = SIS5595_BYTE_DATA; break; case I2C_SMBUS_PROC_CALL: case I2C_SMBUS_WORD_DATA: sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); sis5595_write(SMB_CMD, command); if (read_write == I2C_SMBUS_WRITE) { sis5595_write(SMB_BYTE, data->word & 0xff); sis5595_write(SMB_BYTE + 1, (data->word & 0xff00) >> 8); } size = (size == I2C_SMBUS_PROC_CALL) ? 
/* Report the set of SMBus transaction types this adapter implements */
static u32 sis5595_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
	    I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
	    I2C_FUNC_SMBUS_PROC_CALL;
}
*/ sis5595_pdev = pci_dev_get(dev); return -ENODEV; } static struct pci_driver sis5595_driver = { .name = "sis5595_smbus", .id_table = sis5595_ids, .probe = sis5595_probe, }; static int __init i2c_sis5595_init(void) { return pci_register_driver(&sis5595_driver); } static void __exit i2c_sis5595_exit(void) { pci_unregister_driver(&sis5595_driver); if (sis5595_pdev) { i2c_del_adapter(&sis5595_adapter); release_region(sis5595_base + SMB_INDEX, 2); pci_dev_put(sis5595_pdev); sis5595_pdev = NULL; } } MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"); MODULE_DESCRIPTION("SIS5595 SMBus driver"); MODULE_LICENSE("GPL"); module_init(i2c_sis5595_init); module_exit(i2c_sis5595_exit);
gpl-2.0
TheMeddlingMonk/android_kernel_toshiba_tostab03
arch/arm/mach-sa1100/hackkit.c
2973
4721
/* * linux/arch/arm/mach-sa1100/hackkit.c * * Copyright (C) 2002 Stefan Eletzhofer <stefan.eletzhofer@eletztrick.de> * * This file contains all HackKit tweaks. Based on original work from * Nicolas Pitre's assabet fixes * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/tty.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/cpufreq.h> #include <linux/serial_core.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/setup.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <asm/mach/serial_sa1100.h> #include "generic.h" /********************************************************************** * prototypes */ /* init funcs */ static void __init hackkit_map_io(void); static u_int hackkit_get_mctrl(struct uart_port *port); static void hackkit_set_mctrl(struct uart_port *port, u_int mctrl); static void hackkit_uart_pm(struct uart_port *port, u_int state, u_int oldstate); /********************************************************************** * global data */ /********************************************************************** * static data */ static struct map_desc hackkit_io_desc[] __initdata = { { /* Flash bank 0 */ .virtual = 0xe8000000, .pfn = __phys_to_pfn(0x00000000), .length = 0x01000000, .type = MT_DEVICE }, }; static struct sa1100_port_fns hackkit_port_fns __initdata = { .set_mctrl = hackkit_set_mctrl, .get_mctrl = hackkit_get_mctrl, .pm = hackkit_uart_pm, }; /********************************************************************** * Static functions */ static void __init hackkit_map_io(void) { 
/*
 * Note! this can be called from IRQ context.
 * FIXME: No modem ctrl lines yet.
 *
 * The whole body is compiled out with #if 0, so this is currently a
 * no-op stub; the disabled code shows the intended RTS/DTR handling
 * for the Ser1 port.
 */
static void hackkit_set_mctrl(struct uart_port *port, u_int mctrl)
{
#if 0
	if (port->mapbase == _Ser1UTCR0) {
		u_int set = 0, clear = 0;

		if (mctrl & TIOCM_RTS)
			set |= PT_CTRL2_RS1_RTS;
		else
			clear |= PT_CTRL2_RS1_RTS;

		if (mctrl & TIOCM_DTR)
			set |= PT_CTRL2_RS1_DTR;
		else
			clear |= PT_CTRL2_RS1_DTR;

		PTCTRL2_clear(clear);
		PTCTRL2_set(set);
	}
#endif
}
/* Board init: register the flash device and its partition table with
 * the SA11x0 MTD layer. */
static void __init hackkit_init(void)
{
	sa11x0_register_mtd(&hackkit_flash_data, &hackkit_flash_resource, 1);
}
gpl-2.0
upndwn4par/kernel_hammerhead_lollipop
drivers/cpufreq/powernow-k6.c
4253
6787
/**
 * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
 *
 * Returns the current setting of the frequency multiplier. Core clock
 * speed is frequency of the Front-Side Bus multiplied with this value.
 */
static int powernow_k6_get_cpu_multiplier(void)
{
	u64 invalue = 0;
	u32 msrval;

	/* Writing port+1 to EPMR opens the PowerNow! I/O port window */
	msrval = POWERNOW_IOPORT + 0x1;
	wrmsr(MSR_K6_EPMR, msrval, 0);	/* enable the PowerNow port */
	invalue = inl(POWERNOW_IOPORT + 0x8);
	msrval = POWERNOW_IOPORT + 0x0;
	wrmsr(MSR_K6_EPMR, msrval, 0);	/* disable it again */

	/* Bits 5..7 of the read value index the 8-entry clock_ratio[] table */
	return clock_ratio[(invalue >> 5)&7].index;
}
/**
 * powernow_k6_set_state - set the PowerNow! multiplier
 * @best_i: clock_ratio[best_i] is the target multiplier
 *
 * Tries to change the PowerNow! multiplier, notifying the cpufreq core
 * before and after the hardware write.
 */
static void powernow_k6_set_state(unsigned int best_i)
{
	unsigned long outvalue = 0, invalue = 0;
	unsigned long msrval;
	struct cpufreq_freqs freqs;

	/* Refuse to go above the maximum multiplier found at init time */
	if (clock_ratio[best_i].index > max_multiplier) {
		printk(KERN_ERR PFX "invalid target frequency\n");
		return;
	}

	freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
	freqs.new = busfreq * clock_ratio[best_i].index;
	freqs.cpu = 0; /* powernow-k6.c is UP only driver */

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	/* we now need to transform best_i to the BVC format, see AMD#23446 */

	outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);

	msrval = POWERNOW_IOPORT + 0x1;
	wrmsr(MSR_K6_EPMR, msrval, 0);	/* enable the PowerNow port */
	invalue = inl(POWERNOW_IOPORT + 0x8);
	/* keep the register's low nibble, merge in the new ratio bits */
	invalue = invalue & 0xf;
	outvalue = outvalue | invalue;
	outl(outvalue , (POWERNOW_IOPORT + 0x8));
	msrval = POWERNOW_IOPORT + 0x0;
	wrmsr(MSR_K6_EPMR, msrval, 0);	/* disable it again */

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return;
}
*/ static int powernow_k6_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &clock_ratio[0]); } /** * powernow_k6_setpolicy - sets a new CPUFreq policy * @policy: new policy * @target_freq: the target frequency * @relation: how that frequency relates to achieved frequency * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) * * sets a new CPUFreq policy */ static int powernow_k6_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int newstate = 0; if (cpufreq_frequency_table_target(policy, &clock_ratio[0], target_freq, relation, &newstate)) return -EINVAL; powernow_k6_set_state(newstate); return 0; } static int powernow_k6_cpu_init(struct cpufreq_policy *policy) { unsigned int i, f; int result; if (policy->cpu != 0) return -ENODEV; /* get frequencies */ max_multiplier = powernow_k6_get_cpu_multiplier(); busfreq = cpu_khz / max_multiplier; /* table init */ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { f = clock_ratio[i].index; if (f > max_multiplier) clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; else clock_ratio[i].frequency = busfreq * f; } /* cpuinfo and default policy values */ policy->cpuinfo.transition_latency = 200000; policy->cur = busfreq * max_multiplier; result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio); if (result) return result; cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu); return 0; } static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) { unsigned int i; for (i = 0; i < 8; i++) { if (i == max_multiplier) powernow_k6_set_state(i); } cpufreq_frequency_table_put_attr(policy->cpu); return 0; } static unsigned int powernow_k6_get(unsigned int cpu) { unsigned int ret; ret = (busfreq * powernow_k6_get_cpu_multiplier()); return ret; } static struct freq_attr *powernow_k6_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver powernow_k6_driver = { .verify = powernow_k6_verify, .target = 
/**
 * powernow_k6_init - initializes the k6 PowerNow! CPUFreq driver
 *
 * Initializes the K6 PowerNow! support. Returns -ENODEV on unsupported
 * devices, -EIO if the PowerNow! I/O region is already in use, -EINVAL
 * if driver registration fails, and zero on success.
 */
static int __init powernow_k6_init(void)
{
	if (!x86_match_cpu(powernow_k6_ids))
		return -ENODEV;

	/* Reserve the PowerNow! I/O ports before touching them */
	if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) {
		printk(KERN_INFO PFX "PowerNow IOPORT region already used.\n");
		return -EIO;
	}

	if (cpufreq_register_driver(&powernow_k6_driver)) {
		/* Registration failed: give the I/O region back */
		release_region(POWERNOW_IOPORT, 16);
		return -EINVAL;
	}

	return 0;
}
gpl-2.0
jamison904/Nexus7_kernel
fs/gfs2/rgrp.c
4765
45838
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/fs.h> #include <linux/gfs2_ondisk.h> #include <linux/prefetch.h> #include <linux/blkdev.h> #include <linux/rbtree.h> #include "gfs2.h" #include "incore.h" #include "glock.h" #include "glops.h" #include "lops.h" #include "meta_io.h" #include "quota.h" #include "rgrp.h" #include "super.h" #include "trans.h" #include "util.h" #include "log.h" #include "inode.h" #include "trace_gfs2.h" #define BFITNOENT ((u32)~0) #define NO_BLOCK ((u64)~0) #if BITS_PER_LONG == 32 #define LBITMASK (0x55555555UL) #define LBITSKIP55 (0x55555555UL) #define LBITSKIP00 (0x00000000UL) #else #define LBITMASK (0x5555555555555555UL) #define LBITSKIP55 (0x5555555555555555UL) #define LBITSKIP00 (0x0000000000000000UL) #endif /* * These routines are used by the resource group routines (rgrp.c) * to keep track of block allocation. Each block is represented by two * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks. 
/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rgd: the resource group descriptor (used for error reporting)
 * @buf1: the primary buffer that holds the bitmaps
 * @buf2: optional second (clone) buffer updated in step, may be NULL
 * @offset: byte offset of the bitmap data within the buffers
 * @bi: the bitmap descriptor supplying the length (bi_len)
 * @block: the block (bitmap-relative) whose two-bit state to set
 * @new_state: the new GFS2_BLKST_* state of the block
 *
 * Each block is represented by two bits, GFS2_NBBY blocks per byte.
 * If the old->new transition is not permitted by valid_change[], the
 * rgrp is flagged inconsistent and the bitmap is left untouched.
 */
static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf1,
			       unsigned char *buf2, unsigned int offset,
			       struct gfs2_bitmap *bi, u32 block,
			       unsigned char new_state)
{
	unsigned char *byte1, *byte2, *end, cur_state;
	unsigned int buflen = bi->bi_len;
	/* bit position of this block's two-bit field within its byte */
	const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;

	byte1 = buf1 + offset + (block / GFS2_NBBY);
	end = buf1 + offset + buflen;

	BUG_ON(byte1 >= end);

	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;

	/* valid_change[] is indexed by (new_state, cur_state) pairs */
	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
		printk(KERN_WARNING "GFS2: buf_blk = 0x%llx old_state=%d, "
		       "new_state=%d\n",
		       (unsigned long long)block, cur_state, new_state);
		printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%lx\n",
		       (unsigned long long)rgd->rd_addr,
		       (unsigned long)bi->bi_start);
		printk(KERN_WARNING "GFS2: bi_offset=0x%lx bi_len=0x%lx\n",
		       (unsigned long)bi->bi_offset,
		       (unsigned long)bi->bi_len);
		dump_stack();
		gfs2_consist_rgrpd(rgd);
		return;
	}
	/* XOR with (old ^ new) flips exactly the bits that differ */
	*byte1 ^= (cur_state ^ new_state) << bit;

	if (buf2) {
		/* mirror the change into the clone bitmap */
		byte2 = buf2 + offset + (block / GFS2_NBBY);
		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
		*byte2 ^= (cur_state ^ new_state) << bit;
	}
}
*byte, *end; unsigned char cur_state; unsigned int bit; byte = buffer + (block / GFS2_NBBY); bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE; end = buffer + buflen; gfs2_assert(rgd->rd_sbd, byte < end); cur_state = (*byte >> bit) & GFS2_BIT_MASK; return cur_state; } /** * gfs2_bit_search * @ptr: Pointer to bitmap data * @mask: Mask to use (normally 0x55555.... but adjusted for search start) * @state: The state we are searching for * * We xor the bitmap data with a patter which is the bitwise opposite * of what we are looking for, this gives rise to a pattern of ones * wherever there is a match. Since we have two bits per entry, we * take this pattern, shift it down by one place and then and it with * the original. All the even bit positions (0,2,4, etc) then represent * successful matches, so we mask with 0x55555..... to remove the unwanted * odd bit positions. * * This allows searching of a whole u64 at once (32 blocks) with a * single test (on 64 bit arches). */ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state) { u64 tmp; static const u64 search[] = { [0] = 0xffffffffffffffffULL, [1] = 0xaaaaaaaaaaaaaaaaULL, [2] = 0x5555555555555555ULL, [3] = 0x0000000000000000ULL, }; tmp = le64_to_cpu(*ptr) ^ search[state]; tmp &= (tmp >> 1); tmp &= mask; return tmp; } /** * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing * a block in a given allocation state. * @buffer: the buffer that holds the bitmaps * @len: the length (in bytes) of the buffer * @goal: start search at this block's bit-pair (within @buffer) * @state: GFS2_BLKST_XXX the state of the block we're looking for. * * Scope of @goal and returned block number is only within this bitmap buffer, * not entire rgrp or filesystem. @buffer will be offset from the actual * beginning of a bitmap block buffer, skipping any header structures, but * headers are always a multiple of 64 bits long so that the buffer is * always aligned to a 64 bit boundary. 
* * The size of the buffer is in bytes, but is it assumed that it is * always ok to read a complete multiple of 64 bits at the end * of the block in case the end is no aligned to a natural boundary. * * Return: the block number (bitmap buffer scope) that was found */ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len, u32 goal, u8 state) { u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1); const __le64 *ptr = ((__le64 *)buf) + (goal >> 5); const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64))); u64 tmp; u64 mask = 0x5555555555555555ULL; u32 bit; BUG_ON(state > 3); /* Mask off bits we don't care about at the start of the search */ mask <<= spoint; tmp = gfs2_bit_search(ptr, mask, state); ptr++; while(tmp == 0 && ptr < end) { tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state); ptr++; } /* Mask off any bits which are more than len bytes from the start */ if (ptr == end && (len & (sizeof(u64) - 1))) tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1)))); /* Didn't find anything, so return */ if (tmp == 0) return BFITNOENT; ptr--; bit = __ffs64(tmp); bit /= 2; /* two bits per entry in the bitmap */ return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit; } /** * gfs2_bitcount - count the number of bits in a certain state * @buffer: the buffer that holds the bitmaps * @buflen: the length (in bytes) of the buffer * @state: the state of the block we're looking for * * Returns: The number of bits */ static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer, unsigned int buflen, u8 state) { const u8 *byte = buffer; const u8 *end = buffer + buflen; const u8 state1 = state << 2; const u8 state2 = state << 4; const u8 state3 = state << 6; u32 count = 0; for (; byte < end; byte++) { if (((*byte) & 0x03) == state) count++; if (((*byte) & 0x0C) == state1) count++; if (((*byte) & 0x30) == state2) count++; if (((*byte) & 0xC0) == state3) count++; } return count; } /** * gfs2_rgrp_verify - Verify that a resource group is consistent * @sdp: the 
filesystem * @rgd: the rgrp * */ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd) { struct gfs2_sbd *sdp = rgd->rd_sbd; struct gfs2_bitmap *bi = NULL; u32 length = rgd->rd_length; u32 count[4], tmp; int buf, x; memset(count, 0, 4 * sizeof(u32)); /* Count # blocks in each of 4 possible allocation states */ for (buf = 0; buf < length; buf++) { bi = rgd->rd_bits + buf; for (x = 0; x < 4; x++) count[x] += gfs2_bitcount(rgd, bi->bi_bh->b_data + bi->bi_offset, bi->bi_len, x); } if (count[0] != rgd->rd_free) { if (gfs2_consist_rgrpd(rgd)) fs_err(sdp, "free data mismatch: %u != %u\n", count[0], rgd->rd_free); return; } tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes; if (count[1] != tmp) { if (gfs2_consist_rgrpd(rgd)) fs_err(sdp, "used data mismatch: %u != %u\n", count[1], tmp); return; } if (count[2] + count[3] != rgd->rd_dinodes) { if (gfs2_consist_rgrpd(rgd)) fs_err(sdp, "used metadata mismatch: %u != %u\n", count[2] + count[3], rgd->rd_dinodes); return; } } static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block) { u64 first = rgd->rd_data0; u64 last = first + rgd->rd_data; return first <= block && block < last; } /** * gfs2_blk2rgrpd - Find resource group for a given data/meta block number * @sdp: The GFS2 superblock * @n: The data block number * * Returns: The resource group, or NULL if not found */ struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact) { struct rb_node *n, *next; struct gfs2_rgrpd *cur; spin_lock(&sdp->sd_rindex_spin); n = sdp->sd_rindex_tree.rb_node; while (n) { cur = rb_entry(n, struct gfs2_rgrpd, rd_node); next = NULL; if (blk < cur->rd_addr) next = n->rb_left; else if (blk >= cur->rd_data0 + cur->rd_data) next = n->rb_right; if (next == NULL) { spin_unlock(&sdp->sd_rindex_spin); if (exact) { if (blk < cur->rd_addr) return NULL; if (blk >= cur->rd_data0 + cur->rd_data) return NULL; } return cur; } n = next; } spin_unlock(&sdp->sd_rindex_spin); return NULL; } /** * gfs2_rgrpd_get_first - get the first 
Resource Group in the filesystem * @sdp: The GFS2 superblock * * Returns: The first rgrp in the filesystem */ struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp) { const struct rb_node *n; struct gfs2_rgrpd *rgd; spin_lock(&sdp->sd_rindex_spin); n = rb_first(&sdp->sd_rindex_tree); rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); spin_unlock(&sdp->sd_rindex_spin); return rgd; } /** * gfs2_rgrpd_get_next - get the next RG * @rgd: A RG * * Returns: The next rgrp */ struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd) { struct gfs2_sbd *sdp = rgd->rd_sbd; const struct rb_node *n; spin_lock(&sdp->sd_rindex_spin); n = rb_next(&rgd->rd_node); if (n == NULL) n = rb_first(&sdp->sd_rindex_tree); if (unlikely(&rgd->rd_node == n)) { spin_unlock(&sdp->sd_rindex_spin); return NULL; } rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); spin_unlock(&sdp->sd_rindex_spin); return rgd; } void gfs2_free_clones(struct gfs2_rgrpd *rgd) { int x; for (x = 0; x < rgd->rd_length; x++) { struct gfs2_bitmap *bi = rgd->rd_bits + x; kfree(bi->bi_clone); bi->bi_clone = NULL; } } void gfs2_clear_rgrpd(struct gfs2_sbd *sdp) { struct rb_node *n; struct gfs2_rgrpd *rgd; struct gfs2_glock *gl; while ((n = rb_first(&sdp->sd_rindex_tree))) { rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); gl = rgd->rd_gl; rb_erase(n, &sdp->sd_rindex_tree); if (gl) { spin_lock(&gl->gl_spin); gl->gl_object = NULL; spin_unlock(&gl->gl_spin); gfs2_glock_add_to_lru(gl); gfs2_glock_put(gl); } gfs2_free_clones(rgd); kfree(rgd->rd_bits); kmem_cache_free(gfs2_rgrpd_cachep, rgd); } } static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd) { printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)rgd->rd_addr); printk(KERN_INFO " ri_length = %u\n", rgd->rd_length); printk(KERN_INFO " ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0); printk(KERN_INFO " ri_data = %u\n", rgd->rd_data); printk(KERN_INFO " ri_bitbytes = %u\n", rgd->rd_bitbytes); } /** * gfs2_compute_bitstructs - Compute the bitmap sizes * @rgd: 
The resource group descriptor * * Calculates bitmap descriptors, one for each block that contains bitmap data * * Returns: errno */ static int compute_bitstructs(struct gfs2_rgrpd *rgd) { struct gfs2_sbd *sdp = rgd->rd_sbd; struct gfs2_bitmap *bi; u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */ u32 bytes_left, bytes; int x; if (!length) return -EINVAL; rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS); if (!rgd->rd_bits) return -ENOMEM; bytes_left = rgd->rd_bitbytes; for (x = 0; x < length; x++) { bi = rgd->rd_bits + x; bi->bi_flags = 0; /* small rgrp; bitmap stored completely in header block */ if (length == 1) { bytes = bytes_left; bi->bi_offset = sizeof(struct gfs2_rgrp); bi->bi_start = 0; bi->bi_len = bytes; /* header block */ } else if (x == 0) { bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp); bi->bi_offset = sizeof(struct gfs2_rgrp); bi->bi_start = 0; bi->bi_len = bytes; /* last block */ } else if (x + 1 == length) { bytes = bytes_left; bi->bi_offset = sizeof(struct gfs2_meta_header); bi->bi_start = rgd->rd_bitbytes - bytes_left; bi->bi_len = bytes; /* other blocks */ } else { bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header); bi->bi_offset = sizeof(struct gfs2_meta_header); bi->bi_start = rgd->rd_bitbytes - bytes_left; bi->bi_len = bytes; } bytes_left -= bytes; } if (bytes_left) { gfs2_consist_rgrpd(rgd); return -EIO; } bi = rgd->rd_bits + (length - 1); if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) { if (gfs2_consist_rgrpd(rgd)) { gfs2_rindex_print(rgd); fs_err(sdp, "start=%u len=%u offset=%u\n", bi->bi_start, bi->bi_len, bi->bi_offset); } return -EIO; } return 0; } /** * gfs2_ri_total - Total up the file system space, according to the rindex. 
 * @sdp: the filesystem
 *
 */
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
{
	u64 total_data = 0;
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	char buf[sizeof(struct gfs2_rindex)];
	struct file_ra_state ra_state;
	int error, rgrps;

	file_ra_state_init(&ra_state, inode->i_mapping);
	/* Walk every rindex entry, summing the per-rgrp data block counts */
	for (rgrps = 0;; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);

		if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
			break;
		error = gfs2_internal_read(ip, &ra_state, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (error != sizeof(struct gfs2_rindex))
			break;
		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
	}
	return total_data;
}

/**
 * rgd_insert - insert an rgrp into the rindex rb-tree, ordered by address
 * @rgd: the rgrp to insert
 *
 * Returns: 0 on success, -EEXIST if the address is already in the tree.
 * Caller must hold sd_rindex_spin.
 */
static int rgd_insert(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*newn) {
		struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
						  rd_node);

		parent = *newn;
		if (rgd->rd_addr < cur->rd_addr)
			newn = &((*newn)->rb_left);
		else if (rgd->rd_addr > cur->rd_addr)
			newn = &((*newn)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&rgd->rd_node, parent, newn);
	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
	sdp->sd_rgrps++;
	return 0;
}

/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: the rindex inode
 * @ra_state: readahead state for the rindex inode's mapping
 *
 * Returns: 0 on success, > 0 on EOF, error code otherwise
 */
static int read_rindex_entry(struct gfs2_inode *ip,
			     struct file_ra_state *ra_state)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	/* sd_rgrps counts entries already read; continue after them */
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	struct gfs2_rindex buf;
	int error;
	struct gfs2_rgrpd *rgd;

	if (pos >= i_size_read(&ip->i_inode))
		return 1;

	error = gfs2_internal_read(ip, ra_state, (char *)&buf, &pos,
				   sizeof(struct gfs2_rindex));

	if (error != sizeof(struct gfs2_rindex))
		return (error == 0) ? 1 : error;

	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	/* Decode the big-endian on-disk entry into the in-core rgrp */
	rgd->rd_sbd = sdp;
	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
	rgd->rd_length = be32_to_cpu(buf.ri_length);
	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
	rgd->rd_data = be32_to_cpu(buf.ri_data);
	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);

	error = compute_bitstructs(rgd);
	if (error)
		goto fail;

	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		goto fail;

	rgd->rd_gl->gl_object = rgd;
	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;
	spin_lock(&sdp->sd_rindex_spin);
	error = rgd_insert(rgd);
	spin_unlock(&sdp->sd_rindex_spin);
	if (!error)
		return 0;

	error = 0; /* someone else read in the rgrp; free it and ignore it */
	gfs2_glock_put(rgd->rd_gl);

fail:
	kfree(rgd->rd_bits);
	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	return error;
}

/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */
static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *inode = &ip->i_inode;
	struct file_ra_state ra_state;
	int error;

	file_ra_state_init(&ra_state, inode->i_mapping);
	/* Read entries until EOF (> 0) or a real error (< 0) */
	do {
		error = read_rindex_entry(ip, &ra_state);
	} while (error == 0);

	if (error < 0)
		return error;

	sdp->sd_rindex_uptodate = 1;
	return 0;
}

/**
 * gfs2_rindex_update - Update the rindex if required
 * @sdp: The GFS2 superblock
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
* * This makes sure that we're using the latest copy of the resource index * special file, which might have been updated if someone expanded the * filesystem (via gfs2_grow utility), which adds new resource groups. * * Returns: 0 on succeess, error code otherwise */ int gfs2_rindex_update(struct gfs2_sbd *sdp) { struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex); struct gfs2_glock *gl = ip->i_gl; struct gfs2_holder ri_gh; int error = 0; int unlock_required = 0; /* Read new copy from disk if we don't have the latest */ if (!sdp->sd_rindex_uptodate) { if (!gfs2_glock_is_locked_by_me(gl)) { error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh); if (error) return error; unlock_required = 1; } if (!sdp->sd_rindex_uptodate) error = gfs2_ri_update(ip); if (unlock_required) gfs2_glock_dq_uninit(&ri_gh); } return error; } static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf) { const struct gfs2_rgrp *str = buf; u32 rg_flags; rg_flags = be32_to_cpu(str->rg_flags); rg_flags &= ~GFS2_RDF_MASK; rgd->rd_flags &= GFS2_RDF_MASK; rgd->rd_flags |= rg_flags; rgd->rd_free = be32_to_cpu(str->rg_free); rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes); rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration); } static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf) { struct gfs2_rgrp *str = buf; str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK); str->rg_free = cpu_to_be32(rgd->rd_free); str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes); str->__pad = cpu_to_be32(0); str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration); memset(&str->rg_reserved, 0, sizeof(str->rg_reserved)); } /** * gfs2_rgrp_go_lock - Read in a RG's header and bitmaps * @rgd: the struct gfs2_rgrpd describing the RG to read in * * Read in all of a Resource Group's header and bitmap blocks. * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps. 
 *
 * Returns: errno
 */
int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	/* Submit reads for all of the rgrp's bitmap blocks... */
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	/* ...then wait for them (in reverse order) and check metatypes */
	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					      GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		/* First read since the glock was dropped: refresh the
		   in-core rgrp from the header block */
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
	}

	return 0;

fail:
	/* Release the buffers read so far; x counts submitted reads */
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}

	return error;
}

/**
 * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_go_lock()
 * @gh: the glock holder representing the rgrp
 *
 */
void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	int x, length = rgd->rd_length;

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
	}
}

/**
 * gfs2_rgrp_send_discards - issue block-device discards for free blocks
 * @sdp: the filesystem
 * @offset: fs block address corresponding to the start of this bitmap
 * @bh: previous bitmap state to diff against, or NULL to discard all free
 * @bi: the bitmap to scan
 * @minlen: minimum extent length (in sectors) worth discarding
 * @ptrimmed: if not NULL, returns the number of sectors discarded
 *
 * Returns: 0 on success, -EIO on discard failure (discards also get
 * switched off for the filesystem in that case)
 */
int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
			    struct buffer_head *bh,
			    const struct gfs2_bitmap *bi, unsigned minlen,
			    u64 *ptrimmed)
{
	struct super_block *sb = sdp->sd_vfs;
	struct block_device *bdev = sb->s_bdev;
	const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
					   bdev_logical_block_size(sb->s_bdev);
	u64 blk;
	sector_t start = 0;
	sector_t nr_sects = 0;
	int rv;
	unsigned int x;
	u32 trimmed = 0;
	u8 diff;

	for (x = 0; x < bi->bi_len; x++) {
		/* Prefer the clone (pending state) when one exists */
		const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
		clone += bi->bi_offset;
		clone += x;
		if (bh) {
			/* Diff: bits free now that were not free before */
			const u8 *orig = bh->b_data + bi->bi_offset + x;
			diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
		} else {
			diff = ~(*clone | (*clone >> 1));
		}
		diff &= 0x55;	/* keep only the low bit of each 2-bit pair */
		if (diff == 0)
			continue;
		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
		blk *= sects_per_blk; /* convert to sectors */
		while(diff) {
			if (diff & 1) {
				if (nr_sects == 0)
					goto start_new_extent;
				if ((start + nr_sects) != blk) {
					/* Extent broke: flush what we have */
					if (nr_sects >= minlen) {
						rv = blkdev_issue_discard(bdev,
							start, nr_sects,
							GFP_NOFS, 0);
						if (rv)
							goto fail;
						trimmed += nr_sects;
					}
					nr_sects = 0;
start_new_extent:
					start = blk;
				}
				nr_sects += sects_per_blk;
			}
			diff >>= 2;
			blk += sects_per_blk;
		}
	}
	/* Flush any final pending extent */
	if (nr_sects >= minlen) {
		rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
		if (rv)
			goto fail;
		trimmed += nr_sects;
	}
	if (ptrimmed)
		*ptrimmed = trimmed;
	return 0;

fail:
	if (sdp->sd_args.ar_discard)
		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
	sdp->sd_args.ar_discard = 0;
	return -EIO;
}

/**
 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
 * @filp: Any file on the filesystem
 * @argp: Pointer to the arguments (also used to pass result)
 *
 * Returns: 0 on success, otherwise error code
 */
int gfs2_fitrim(struct file *filp, void __user *argp)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
	struct buffer_head *bh;
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd *rgd_end;
	struct gfs2_holder gh;
	struct fstrim_range r;
	int ret = 0;
	u64 amt;
	u64 trimmed = 0;
	unsigned int x;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (argp == NULL) {
		/* No range given: trim the whole filesystem */
		r.start = 0;
		r.len = ULLONG_MAX;
		r.minlen = 0;
	} else if (copy_from_user(&r, argp, sizeof(r)))
		return -EFAULT;

	ret = gfs2_rindex_update(sdp);
	if (ret)
		return ret;

	/* NOTE(review): blk2rgrpd() can return NULL when the rindex tree
	   is empty; rgd is dereferenced below without a check — confirm
	   this cannot happen on a mounted fs */
	rgd = gfs2_blk2rgrpd(sdp, r.start, 0);
	rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0);

	while (1) {

		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret)
			goto out;

		if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
			/* Trim each bitmap in the rgrp */
			for (x = 0; x < rgd->rd_length; x++) {
				struct gfs2_bitmap *bi = rgd->rd_bits + x;
				ret = gfs2_rgrp_send_discards(sdp,
						rgd->rd_data0, NULL, bi,
						r.minlen, &amt);
				if (ret) {
					gfs2_glock_dq_uninit(&gh);
					goto out;
				}
				trimmed += amt;
			}

			/* Mark rgrp as having been trimmed */
			ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
			if (ret == 0) {
				bh = rgd->rd_bits[0].bi_bh;
				rgd->rd_flags |= GFS2_RGF_TRIMMED;
				gfs2_trans_add_bh(rgd->rd_gl, bh, 1);
				gfs2_rgrp_out(rgd, bh->b_data);
				gfs2_trans_end(sdp);
			}
		}
		gfs2_glock_dq_uninit(&gh);

		if (rgd == rgd_end)
			break;

		rgd = gfs2_rgrpd_get_next(rgd);
	}

out:
	r.len = trimmed << 9;	/* report back in bytes (512b sectors) */
	if (argp && copy_to_user(argp, &r, sizeof(r)))
		return -EFAULT;

	return ret;
}

/**
 * gfs2_qadata_get - get the struct gfs2_qadata structure for an inode
 * @ip: the incore GFS2 inode structure
 *
 * Returns: the struct gfs2_qadata (NULL if the allocation failed)
 */
struct gfs2_qadata *gfs2_qadata_get(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int error;
	BUG_ON(ip->i_qadata != NULL);
	ip->i_qadata = kzalloc(sizeof(struct gfs2_qadata), GFP_NOFS);
	/* Refresh the rindex as a side effect; failure is only logged */
	error = gfs2_rindex_update(sdp);
	if (error)
		fs_warn(sdp, "rindex update returns %d\n", error);
	return ip->i_qadata;
}

/**
 * gfs2_blkrsv_get - get the struct gfs2_blkreserv structure for an inode
 * @ip: the incore GFS2 inode structure
 *
 * Returns: the struct gfs2_blkreserv (NULL if the allocation failed)
 */
static struct gfs2_blkreserv *gfs2_blkrsv_get(struct gfs2_inode *ip)
{
	BUG_ON(ip->i_res != NULL);
	ip->i_res = kzalloc(sizeof(struct gfs2_blkreserv), GFP_NOFS);
	return ip->i_res;
}

/**
 * try_rgrp_fit - See if a given reservation will fit in a given RG
 * @rgd: the RG data
 * @ip: the inode
 *
 * If there's room for the requested blocks to be allocated from the RG:
 *
 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
 */
static int try_rgrp_fit(const struct gfs2_rgrpd *rgd,
			const struct gfs2_inode *ip)
{
	const struct gfs2_blkreserv *rs = ip->i_res;

	if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
		return 0;
	if (rgd->rd_free_clone >= rs->rs_requested)
		return 1;
	return 0;
}

/* Convert a bitmap-relative block number to an rgrp-relative one */
static inline u32 gfs2_bi2rgd_blk(struct gfs2_bitmap *bi, u32 blk)
{
	return (bi->bi_start * GFS2_NBBY) + blk;
}

/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: in/out: highest unlinked block address handled so far
 * @skip: inode address to skip (the caller's own inode)
 *
 * Queues glock delete work for each unlinked inode found, so that the
 * blocks can eventually be reclaimed.
 */
static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked,
			    u64 skip)
{
	u32 goal = 0, block;
	u64 no_addr;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;
	int found = 0;
	struct gfs2_bitmap *bi;

	while (goal < rgd->rd_data) {
		down_write(&sdp->sd_log_flush_lock);
		block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED, &bi);
		up_write(&sdp->sd_log_flush_lock);
		if (block == BFITNOENT)
			break;

		block = gfs2_bi2rgd_blk(bi, block);
		/* rgblk_search can return a block < goal, so we need to
		   keep it marching forward. */
		no_addr = block + rgd->rd_data0;
		goal = max(block + 1, goal + 1);
		if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
			continue;
		if (no_addr == skip)
			continue;
		*last_unlinked = no_addr;
		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops,
				       CREATE, &gl);
		if (error)
			continue;

		/* If the inode is already in cache, we can ignore it here
		 * because the existing inode disposal code will deal with
		 * it when all refs have gone away. Accessing gl_object like
		 * this is not safe in general. Here it is ok because we do
		 * not dereference the pointer, and we only need an approx
		 * answer to whether it is NULL or not.
		 */
		ip = gl->gl_object;

		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put(gl);
		else
			found++;

		/* Limit reclaim to sensible number of tasks */
		if (found > NR_CPUS)
			return;
	}

	rgd->rd_flags &= ~GFS2_RDF_CHECK;
	return;
}

/**
 * get_local_rgrp - Choose and lock a rgrp for allocation
 * @ip: the inode to reserve space for
 * @last_unlinked: in/out state for try_rgrp_unlink()
 *
 * Try to acquire rgrp in way which avoids contending with others.
 *
 * Returns: errno
 */
static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd, *begin = NULL;
	struct gfs2_blkreserv *rs = ip->i_res;
	/* First pass uses LM_FLAG_TRY to avoid blocking on contended rgrps */
	int error, rg_locked, flags = LM_FLAG_TRY;
	int loops = 0;

	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal))
		rgd = begin = ip->i_rgd;
	else
		rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);

	if (rgd == NULL)
		return -EBADSLT;

	while (loops < 3) {
		rg_locked = 0;

		if (gfs2_glock_is_locked_by_me(rgd->rd_gl)) {
			rg_locked = 1;
			error = 0;
		} else {
			error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
						   flags, &rs->rs_rgd_gh);
		}
		switch (error) {
		case 0:
			if (try_rgrp_fit(rgd, ip)) {
				ip->i_rgd = rgd;
				return 0;
			}
			if (rgd->rd_flags & GFS2_RDF_CHECK)
				try_rgrp_unlink(rgd, last_unlinked,
						ip->i_no_addr);
			if (!rg_locked)
				gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
			/* fall through */
		case GLR_TRYFAILED:
			rgd = gfs2_rgrpd_get_next(rgd);
			if (rgd == begin) {
				/* Full cycle: retry without LM_FLAG_TRY */
				flags = 0;
				loops++;
			}
			break;
		default:
			return error;
		}
	}

	return -ENOSPC;
}

/* Free the block reservation attached to @ip */
static void gfs2_blkrsv_put(struct gfs2_inode *ip)
{
	BUG_ON(ip->i_res == NULL);
	kfree(ip->i_res);
	ip->i_res = NULL;
}

/**
 * gfs2_inplace_reserve - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @requested: the number of blocks to be reserved
 *
 * Returns: errno
 */
int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_blkreserv *rs;
	int error = 0;
	u64 last_unlinked = NO_BLOCK;
	int tries = 0;

	rs = gfs2_blkrsv_get(ip);
	if (!rs)
		return -ENOMEM;

	rs->rs_requested = requested;
	if (gfs2_assert_warn(sdp, requested)) {
		error = -EINVAL;
		goto out;
	}

	do {
		error = get_local_rgrp(ip, &last_unlinked);
		if (error != -ENOSPC)
			break;
		/* Check that fs hasn't grown if writing to rindex */
		if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
			error = gfs2_ri_update(ip);
			if (error)
				break;
			continue;
		}
		/* Flushing the log may release space */
		gfs2_log_flush(sdp, NULL);
	} while (tries++ < 3);

out:
	if (error)
		gfs2_blkrsv_put(ip);
	return error;
}

/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */
void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs = ip->i_res;

	if (rs->rs_rgd_gh.gh_gl)
		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
	gfs2_blkrsv_put(ip);
}

/**
 * gfs2_get_block_type - Check a block in a RG is of given type
 * @rgd: the resource group holding the block
 * @block: the block number
 *
 * Returns: The block type (GFS2_BLKST_*)
 */
static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
{
	struct gfs2_bitmap *bi = NULL;
	u32 length, rgrp_block, buf_block;
	unsigned int buf;
	unsigned char type;

	length = rgd->rd_length;
	rgrp_block = block - rgd->rd_data0;

	/* Find which bitmap block covers this rgrp-relative block */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
			break;
	}

	gfs2_assert(rgd->rd_sbd, buf < length);
	buf_block = rgrp_block - bi->bi_start * GFS2_NBBY;

	type = gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
			    bi->bi_len, buf_block);

	return type;
}

/**
 * rgblk_search - find a block in @state
 * @rgd: the resource group descriptor
 * @goal: the goal block within the RG (start here to search for avail block)
 * @state: GFS2_BLKST_XXX the before-allocation state to find
 * @rbi: address of the pointer to the bitmap containing the block found
 *
 * Walk rgrp's bitmap to find bits that represent a block in @state.
 *
 * This function never fails, because we wouldn't call it unless we
 * know (from reservation results, etc.) that a block is available.
 *
 * Scope of @goal is just within rgrp, not the whole filesystem.
 * Scope of @returned block is just within bitmap, not the whole filesystem.
 *
 * Returns: the block number found relative to the bitmap rbi
 */
static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
			unsigned char state, struct gfs2_bitmap **rbi)
{
	struct gfs2_bitmap *bi = NULL;
	const u32 length = rgd->rd_length;
	u32 blk = BFITNOENT;
	unsigned int buf, x;
	const u8 *buffer = NULL;

	*rbi = NULL;
	/* Find bitmap block that contains bits for goal block */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		/* Convert scope of "goal" from rgrp-wide to within
		   found bit block */
		if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) {
			goal -= bi->bi_start * GFS2_NBBY;
			goto do_search;
		}
	}
	buf = 0;
	goal = 0;

do_search:
	/* Search (up to entire) bitmap in this rgrp for allocatable block.
	   "x <= length", instead of "x < length", because we typically start
	   the search in the middle of a bit block, but if we can't find an
	   allocatable block anywhere else, we want to be able wrap around and
	   search in the first part of our first-searched bit block.  */
	for (x = 0; x <= length; x++) {
		bi = rgd->rd_bits + buf;

		/* Skip bitmaps already known to contain no free blocks */
		if (test_bit(GBF_FULL, &bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
			goto skip;

		/* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
		   bitmaps, so we must search the originals for that. */
		buffer = bi->bi_bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bi->bi_bh));
		if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
			buffer = bi->bi_clone + bi->bi_offset;

		blk = gfs2_bitfit(buffer, bi->bi_len, goal, state);
		if (blk != BFITNOENT)
			break;

		/* A full scan from 0 found nothing: remember it is full */
		if ((goal == 0) && (state == GFS2_BLKST_FREE))
			set_bit(GBF_FULL, &bi->bi_flags);

		/* Try next bitmap block (wrap back to rgrp header if at end) */
skip:
		buf++;
		buf %= length;
		goal = 0;
	}

	if (blk != BFITNOENT)
		*rbi = bi;

	return blk;
}

/**
 * gfs2_alloc_extent - allocate an extent from a given bitmap
 * @rgd: the resource group descriptor
 * @bi: the bitmap within the rgrp
 * @blk: the block within the bitmap
 * @dinode: TRUE if the first block we allocate is for a dinode
 * @n: The extent length (in/out: requested on entry, allocated on exit)
 *
 * Add the found bitmap buffer to the transaction.
 * Set the found bits to the new state to change block's allocation state.
 * Returns: starting block number of the extent (fs scope)
 */
static u64 gfs2_alloc_extent(struct gfs2_rgrpd *rgd, struct gfs2_bitmap *bi,
			     u32 blk, bool dinode, unsigned int *n)
{
	const unsigned int elen = *n;
	u32 goal;
	const u8 *buffer = NULL;

	*n = 0;
	buffer = bi->bi_bh->b_data + bi->bi_offset;
	gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
	/* First block: dinode or data, as requested */
	gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
		    bi, blk, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	(*n)++;
	goal = blk;
	/* Extend with contiguous free blocks, up to elen in total */
	while (*n < elen) {
		goal++;
		if (goal >= (bi->bi_len * GFS2_NBBY))
			break;
		if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) !=
		    GFS2_BLKST_FREE)
			break;
		gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone,
			    bi->bi_offset, bi, goal, GFS2_BLKST_USED);
		(*n)++;
	}
	blk = gfs2_bi2rgd_blk(bi, blk);
	rgd->rd_last_alloc = blk + *n - 1;
	return rgd->rd_data0 + blk;
}

/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
* @new_state: GFS2_BLKST_XXX the after-allocation block state * * Returns: Resource group containing the block(s) */ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart, u32 blen, unsigned char new_state) { struct gfs2_rgrpd *rgd; struct gfs2_bitmap *bi = NULL; u32 length, rgrp_blk, buf_blk; unsigned int buf; rgd = gfs2_blk2rgrpd(sdp, bstart, 1); if (!rgd) { if (gfs2_consist(sdp)) fs_err(sdp, "block = %llu\n", (unsigned long long)bstart); return NULL; } length = rgd->rd_length; rgrp_blk = bstart - rgd->rd_data0; while (blen--) { for (buf = 0; buf < length; buf++) { bi = rgd->rd_bits + buf; if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY) break; } gfs2_assert(rgd->rd_sbd, buf < length); buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY; rgrp_blk++; if (!bi->bi_clone) { bi->bi_clone = kmalloc(bi->bi_bh->b_size, GFP_NOFS | __GFP_NOFAIL); memcpy(bi->bi_clone + bi->bi_offset, bi->bi_bh->b_data + bi->bi_offset, bi->bi_len); } gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1); gfs2_setbit(rgd, bi->bi_bh->b_data, NULL, bi->bi_offset, bi, buf_blk, new_state); } return rgd; } /** * gfs2_rgrp_dump - print out an rgrp * @seq: The iterator * @gl: The glock in question * */ int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl) { const struct gfs2_rgrpd *rgd = gl->gl_object; if (rgd == NULL) return 0; gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u\n", (unsigned long long)rgd->rd_addr, rgd->rd_flags, rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes); return 0; } static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd) { struct gfs2_sbd *sdp = rgd->rd_sbd; fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n", (unsigned long long)rgd->rd_addr); fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n"); gfs2_rgrp_dump(NULL, rgd->rd_gl); rgd->rd_flags |= GFS2_RDF_ERROR; } /** * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode * @ip: the inode to allocate the block for * @bn: Used to return the 
starting block number * @ndata: requested number of blocks/extent length (value/result) * @dinode: 1 if we're allocating a dinode block, else 0 * @generation: the generation number of the inode * * Returns: 0 or error */ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks, bool dinode, u64 *generation) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct buffer_head *dibh; struct gfs2_rgrpd *rgd; unsigned int ndata; u32 goal, blk; /* block, within the rgrp scope */ u64 block; /* block, within the file system scope */ int error; struct gfs2_bitmap *bi; /* Only happens if there is a bug in gfs2, return something distinctive * to ensure that it is noticed. */ if (ip->i_res == NULL) return -ECANCELED; rgd = ip->i_rgd; if (!dinode && rgrp_contains_block(rgd, ip->i_goal)) goal = ip->i_goal - rgd->rd_data0; else goal = rgd->rd_last_alloc; blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, &bi); /* Since all blocks are reserved in advance, this shouldn't happen */ if (blk == BFITNOENT) goto rgrp_error; block = gfs2_alloc_extent(rgd, bi, blk, dinode, nblocks); ndata = *nblocks; if (dinode) ndata--; if (!dinode) { ip->i_goal = block + ndata - 1; error = gfs2_meta_inode_buffer(ip, &dibh); if (error == 0) { struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data; gfs2_trans_add_bh(ip->i_gl, dibh, 1); di->di_goal_meta = di->di_goal_data = cpu_to_be64(ip->i_goal); brelse(dibh); } } if (rgd->rd_free < *nblocks) goto rgrp_error; rgd->rd_free -= *nblocks; if (dinode) { rgd->rd_dinodes++; *generation = rgd->rd_igeneration++; if (*generation == 0) *generation = rgd->rd_igeneration++; } gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0); if (dinode) gfs2_trans_add_unrevoke(sdp, block, 1); /* * This needs reviewing to see why we cannot do the quota change * at this point in the dinode case. 
*/ if (ndata) gfs2_quota_change(ip, ndata, ip->i_inode.i_uid, ip->i_inode.i_gid); rgd->rd_free_clone -= *nblocks; trace_gfs2_block_alloc(ip, block, *nblocks, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED); *bn = block; return 0; rgrp_error: gfs2_rgrp_error(rgd); return -EIO; } /** * __gfs2_free_blocks - free a contiguous run of block(s) * @ip: the inode these blocks are being freed from * @bstart: first block of a run of contiguous blocks * @blen: the length of the block run * @meta: 1 if the blocks represent metadata * */ void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrpd *rgd; rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE); if (!rgd) return; trace_gfs2_block_alloc(ip, bstart, blen, GFS2_BLKST_FREE); rgd->rd_free += blen; rgd->rd_flags &= ~GFS2_RGF_TRIMMED; gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); /* Directories keep their data in the metadata address space */ if (meta || ip->i_depth) gfs2_meta_wipe(ip, bstart, blen); } /** * gfs2_free_meta - free a contiguous run of data block(s) * @ip: the inode these blocks are being freed from * @bstart: first block of a run of contiguous blocks * @blen: the length of the block run * */ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); __gfs2_free_blocks(ip, bstart, blen, 1); gfs2_statfs_change(sdp, 0, +blen, 0); gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid); } void gfs2_unlink_di(struct inode *inode) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_rgrpd *rgd; u64 blkno = ip->i_no_addr; rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED); if (!rgd) return; trace_gfs2_block_alloc(ip, blkno, 1, GFS2_BLKST_UNLINKED); gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); } static 
void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno) { struct gfs2_sbd *sdp = rgd->rd_sbd; struct gfs2_rgrpd *tmp_rgd; tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE); if (!tmp_rgd) return; gfs2_assert_withdraw(sdp, rgd == tmp_rgd); if (!rgd->rd_dinodes) gfs2_consist_rgrpd(rgd); rgd->rd_dinodes--; rgd->rd_free++; gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); gfs2_statfs_change(sdp, 0, +1, -1); } void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) { gfs2_free_uninit_di(rgd, ip->i_no_addr); trace_gfs2_block_alloc(ip, ip->i_no_addr, 1, GFS2_BLKST_FREE); gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid); gfs2_meta_wipe(ip, ip->i_no_addr, 1); } /** * gfs2_check_blk_type - Check the type of a block * @sdp: The superblock * @no_addr: The block number to check * @type: The block type we are looking for * * Returns: 0 if the block type matches the expected type * -ESTALE if it doesn't match * or -ve errno if something went wrong while checking */ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type) { struct gfs2_rgrpd *rgd; struct gfs2_holder rgd_gh; int error = -EINVAL; rgd = gfs2_blk2rgrpd(sdp, no_addr, 1); if (!rgd) goto fail; error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh); if (error) goto fail; if (gfs2_get_block_type(rgd, no_addr) != type) error = -ESTALE; gfs2_glock_dq_uninit(&rgd_gh); fail: return error; } /** * gfs2_rlist_add - add a RG to a list of RGs * @ip: the inode * @rlist: the list of resource groups * @block: the block * * Figure out what RG a block belongs to and add that RG to the list * * FIXME: Don't use NOFAIL * */ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist, u64 block) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrpd *rgd; struct gfs2_rgrpd **tmp; unsigned int new_space; unsigned int x; if (gfs2_assert_warn(sdp, !rlist->rl_ghs)) return; if (ip->i_rgd && 
rgrp_contains_block(ip->i_rgd, block)) rgd = ip->i_rgd; else rgd = gfs2_blk2rgrpd(sdp, block, 1); if (!rgd) { fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block); return; } ip->i_rgd = rgd; for (x = 0; x < rlist->rl_rgrps; x++) if (rlist->rl_rgd[x] == rgd) return; if (rlist->rl_rgrps == rlist->rl_space) { new_space = rlist->rl_space + 10; tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *), GFP_NOFS | __GFP_NOFAIL); if (rlist->rl_rgd) { memcpy(tmp, rlist->rl_rgd, rlist->rl_space * sizeof(struct gfs2_rgrpd *)); kfree(rlist->rl_rgd); } rlist->rl_space = new_space; rlist->rl_rgd = tmp; } rlist->rl_rgd[rlist->rl_rgrps++] = rgd; } /** * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate * and initialize an array of glock holders for them * @rlist: the list of resource groups * @state: the lock state to acquire the RG lock in * @flags: the modifier flags for the holder structures * * FIXME: Don't use NOFAIL * */ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state) { unsigned int x; rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder), GFP_NOFS | __GFP_NOFAIL); for (x = 0; x < rlist->rl_rgrps; x++) gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, state, 0, &rlist->rl_ghs[x]); } /** * gfs2_rlist_free - free a resource group list * @list: the list of resource groups * */ void gfs2_rlist_free(struct gfs2_rgrp_list *rlist) { unsigned int x; kfree(rlist->rl_rgd); if (rlist->rl_ghs) { for (x = 0; x < rlist->rl_rgrps; x++) gfs2_holder_uninit(&rlist->rl_ghs[x]); kfree(rlist->rl_ghs); } }
gpl-2.0
Ezekeel/GLaDOS-nexus-prime
drivers/isdn/hisax/isurf.c
5021
7787
/* $Id: isurf.c,v 1.12.2.4 2004/01/13 21:46:03 keil Exp $ * * low level stuff for Siemens I-Surf/I-Talk cards * * Author Karsten Keil * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include "hisax.h" #include "isac.h" #include "isar.h" #include "isdnl1.h" #include <linux/isapnp.h> static const char *ISurf_revision = "$Revision: 1.12.2.4 $"; #define byteout(addr,val) outb(val,addr) #define bytein(addr) inb(addr) #define ISURF_ISAR_RESET 1 #define ISURF_ISAC_RESET 2 #define ISURF_ISAR_EA 4 #define ISURF_ARCOFI_RESET 8 #define ISURF_RESET (ISURF_ISAR_RESET | ISURF_ISAC_RESET | ISURF_ARCOFI_RESET) #define ISURF_ISAR_OFFSET 0 #define ISURF_ISAC_OFFSET 0x100 #define ISURF_IOMEM_SIZE 0x400 /* Interface functions */ static u_char ReadISAC(struct IsdnCardState *cs, u_char offset) { return (readb(cs->hw.isurf.isac + offset)); } static void WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value) { writeb(value, cs->hw.isurf.isac + offset); mb(); } static void ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size) { register int i; for (i = 0; i < size; i++) data[i] = readb(cs->hw.isurf.isac); } static void WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size) { register int i; for (i = 0; i < size; i++){ writeb(data[i], cs->hw.isurf.isac);mb(); } } /* ISAR access routines * mode = 0 access with IRQ on * mode = 1 access with IRQ off * mode = 2 access with IRQ off and using last offset */ static u_char ReadISAR(struct IsdnCardState *cs, int mode, u_char offset) { return(readb(cs->hw.isurf.isar + offset)); } static void WriteISAR(struct IsdnCardState *cs, int mode, u_char offset, u_char value) { writeb(value, cs->hw.isurf.isar + offset);mb(); } static irqreturn_t isurf_interrupt(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char val; int cnt = 5; u_long flags; 
spin_lock_irqsave(&cs->lock, flags); val = readb(cs->hw.isurf.isar + ISAR_IRQBIT); Start_ISAR: if (val & ISAR_IRQSTA) isar_int_main(cs); val = readb(cs->hw.isurf.isac + ISAC_ISTA); Start_ISAC: if (val) isac_interrupt(cs, val); val = readb(cs->hw.isurf.isar + ISAR_IRQBIT); if ((val & ISAR_IRQSTA) && --cnt) { if (cs->debug & L1_DEB_HSCX) debugl1(cs, "ISAR IntStat after IntRoutine"); goto Start_ISAR; } val = readb(cs->hw.isurf.isac + ISAC_ISTA); if (val && --cnt) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ISAC IntStat after IntRoutine"); goto Start_ISAC; } if (!cnt) printk(KERN_WARNING "ISurf IRQ LOOP\n"); writeb(0, cs->hw.isurf.isar + ISAR_IRQBIT); mb(); writeb(0xFF, cs->hw.isurf.isac + ISAC_MASK);mb(); writeb(0, cs->hw.isurf.isac + ISAC_MASK);mb(); writeb(ISAR_IRQMSK, cs->hw.isurf.isar + ISAR_IRQBIT); mb(); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void release_io_isurf(struct IsdnCardState *cs) { release_region(cs->hw.isurf.reset, 1); iounmap(cs->hw.isurf.isar); release_mem_region(cs->hw.isurf.phymem, ISURF_IOMEM_SIZE); } static void reset_isurf(struct IsdnCardState *cs, u_char chips) { printk(KERN_INFO "ISurf: resetting card\n"); byteout(cs->hw.isurf.reset, chips); /* Reset On */ mdelay(10); byteout(cs->hw.isurf.reset, ISURF_ISAR_EA); /* Reset Off */ mdelay(10); } static int ISurf_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_long flags; switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); reset_isurf(cs, ISURF_RESET); spin_unlock_irqrestore(&cs->lock, flags); return(0); case CARD_RELEASE: release_io_isurf(cs); return(0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); reset_isurf(cs, ISURF_RESET); clear_pending_isac_ints(cs); writeb(0, cs->hw.isurf.isar+ISAR_IRQBIT);mb(); initisac(cs); initisar(cs); /* Reenable ISAC IRQ */ cs->writeisac(cs, ISAC_MASK, 0); /* RESET Receiver and Transmitter */ cs->writeisac(cs, ISAC_CMDR, 0x41); spin_unlock_irqrestore(&cs->lock, flags); return(0); case CARD_TEST: 
return(0); } return(0); } static int isurf_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic) { int ret; u_long flags; if ((ic->command == ISDN_CMD_IOCTL) && (ic->arg == 9)) { ret = isar_auxcmd(cs, ic); spin_lock_irqsave(&cs->lock, flags); if (!ret) { reset_isurf(cs, ISURF_ISAR_EA | ISURF_ISAC_RESET | ISURF_ARCOFI_RESET); initisac(cs); cs->writeisac(cs, ISAC_MASK, 0); cs->writeisac(cs, ISAC_CMDR, 0x41); } spin_unlock_irqrestore(&cs->lock, flags); return(ret); } return(isar_auxcmd(cs, ic)); } #ifdef __ISAPNP__ static struct pnp_card *pnp_c __devinitdata = NULL; #endif int __devinit setup_isurf(struct IsdnCard *card) { int ver; struct IsdnCardState *cs = card->cs; char tmp[64]; strcpy(tmp, ISurf_revision); printk(KERN_INFO "HiSax: ISurf driver Rev. %s\n", HiSax_getrev(tmp)); if (cs->typ != ISDN_CTYPE_ISURF) return(0); if (card->para[1] && card->para[2]) { cs->hw.isurf.reset = card->para[1]; cs->hw.isurf.phymem = card->para[2]; cs->irq = card->para[0]; } else { #ifdef __ISAPNP__ if (isapnp_present()) { struct pnp_dev *pnp_d = NULL; int err; cs->subtyp = 0; if ((pnp_c = pnp_find_card( ISAPNP_VENDOR('S', 'I', 'E'), ISAPNP_FUNCTION(0x0010), pnp_c))) { if (!(pnp_d = pnp_find_dev(pnp_c, ISAPNP_VENDOR('S', 'I', 'E'), ISAPNP_FUNCTION(0x0010), pnp_d))) { printk(KERN_ERR "ISurfPnP: PnP error card found, no device\n"); return (0); } pnp_disable_dev(pnp_d); err = pnp_activate_dev(pnp_d); cs->hw.isurf.reset = pnp_port_start(pnp_d, 0); cs->hw.isurf.phymem = pnp_mem_start(pnp_d, 1); cs->irq = pnp_irq(pnp_d, 0); if (!cs->irq || !cs->hw.isurf.reset || !cs->hw.isurf.phymem) { printk(KERN_ERR "ISurfPnP:some resources are missing %d/%x/%lx\n", cs->irq, cs->hw.isurf.reset, cs->hw.isurf.phymem); pnp_disable_dev(pnp_d); return(0); } } else { printk(KERN_INFO "ISurfPnP: no ISAPnP card found\n"); return(0); } } else { printk(KERN_INFO "ISurfPnP: no ISAPnP bus found\n"); return(0); } #else printk(KERN_WARNING "HiSax: Siemens I-Surf port/mem not set\n"); return (0); #endif } if 
(!request_region(cs->hw.isurf.reset, 1, "isurf isdn")) { printk(KERN_WARNING "HiSax: Siemens I-Surf config port %x already in use\n", cs->hw.isurf.reset); return (0); } if (!request_region(cs->hw.isurf.phymem, ISURF_IOMEM_SIZE, "isurf iomem")) { printk(KERN_WARNING "HiSax: Siemens I-Surf memory region " "%lx-%lx already in use\n", cs->hw.isurf.phymem, cs->hw.isurf.phymem + ISURF_IOMEM_SIZE); release_region(cs->hw.isurf.reset, 1); return (0); } cs->hw.isurf.isar = ioremap(cs->hw.isurf.phymem, ISURF_IOMEM_SIZE); cs->hw.isurf.isac = cs->hw.isurf.isar + ISURF_ISAC_OFFSET; printk(KERN_INFO "ISurf: defined at 0x%x 0x%lx IRQ %d\n", cs->hw.isurf.reset, cs->hw.isurf.phymem, cs->irq); setup_isac(cs); cs->cardmsg = &ISurf_card_msg; cs->irq_func = &isurf_interrupt; cs->auxcmd = &isurf_auxcmd; cs->readisac = &ReadISAC; cs->writeisac = &WriteISAC; cs->readisacfifo = &ReadISACfifo; cs->writeisacfifo = &WriteISACfifo; cs->bcs[0].hw.isar.reg = &cs->hw.isurf.isar_r; cs->bcs[1].hw.isar.reg = &cs->hw.isurf.isar_r; test_and_set_bit(HW_ISAR, &cs->HW_Flags); ISACVersion(cs, "ISurf:"); cs->BC_Read_Reg = &ReadISAR; cs->BC_Write_Reg = &WriteISAR; cs->BC_Send_Data = &isar_fill_fifo; ver = ISARVersion(cs, "ISurf:"); if (ver < 0) { printk(KERN_WARNING "ISurf: wrong ISAR version (ret = %d)\n", ver); release_io_isurf(cs); return (0); } return (1); }
gpl-2.0
NuriJ/sony_kernel_msm8960
fs/hfsplus/brec.c
5277
13490
/* * linux/fs/hfsplus/brec.c * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) * (C) 2003 Ardis Technologies <roman@ardistech.com> * * Handle individual btree records */ #include "hfsplus_fs.h" #include "hfsplus_raw.h" static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd); static int hfs_brec_update_parent(struct hfs_find_data *fd); static int hfs_btree_inc_height(struct hfs_btree *); /* Get the length and offset of the given record in the given node */ u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off) { __be16 retval[2]; u16 dataoff; dataoff = node->tree->node_size - (rec + 2) * 2; hfs_bnode_read(node, retval, dataoff, 4); *off = be16_to_cpu(retval[1]); return be16_to_cpu(retval[0]) - *off; } /* Get the length of the key from a keyed record */ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec) { u16 retval, recoff; if (node->type != HFS_NODE_INDEX && node->type != HFS_NODE_LEAF) return 0; if ((node->type == HFS_NODE_INDEX) && !(node->tree->attributes & HFS_TREE_VARIDXKEYS)) { retval = node->tree->max_key_len + 2; } else { recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2); if (!recoff) return 0; if (recoff > node->tree->node_size - 2) { printk(KERN_ERR "hfs: recoff %d too large\n", recoff); return 0; } retval = hfs_bnode_read_u16(node, recoff) + 2; if (retval > node->tree->max_key_len + 2) { printk(KERN_ERR "hfs: keylen %d too large\n", retval); retval = 0; } } return retval; } int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node; int size, key_len, rec; int data_off, end_off; int idx_rec_off, data_rec_off, end_rec_off; __be32 cnid; tree = fd->tree; if (!fd->bnode) { if (!tree->root) hfs_btree_inc_height(tree); fd->bnode = hfs_bnode_find(tree, tree->leaf_head); if (IS_ERR(fd->bnode)) return PTR_ERR(fd->bnode); fd->record = -1; } new_node = NULL; key_len = be16_to_cpu(fd->search_key->key_len) + 2; again: /* new record idx and 
complete record size */ rec = fd->record + 1; size = key_len + entry_len; node = fd->bnode; hfs_bnode_dump(node); /* get last offset */ end_rec_off = tree->node_size - (node->num_recs + 1) * 2; end_off = hfs_bnode_read_u16(node, end_rec_off); end_rec_off -= 2; dprint(DBG_BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", rec, size, end_off, end_rec_off); if (size > end_rec_off - end_off) { if (new_node) panic("not enough room!\n"); new_node = hfs_bnode_split(fd); if (IS_ERR(new_node)) return PTR_ERR(new_node); goto again; } if (node->type == HFS_NODE_LEAF) { tree->leaf_count++; mark_inode_dirty(tree->inode); } node->num_recs++; /* write new last offset */ hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs); hfs_bnode_write_u16(node, end_rec_off, end_off + size); data_off = end_off; data_rec_off = end_rec_off + 2; idx_rec_off = tree->node_size - (rec + 1) * 2; if (idx_rec_off == data_rec_off) goto skip; /* move all following entries */ do { data_off = hfs_bnode_read_u16(node, data_rec_off + 2); hfs_bnode_write_u16(node, data_rec_off, data_off + size); data_rec_off += 2; } while (data_rec_off < idx_rec_off); /* move data away */ hfs_bnode_move(node, data_off + size, data_off, end_off - data_off); skip: hfs_bnode_write(node, fd->search_key, data_off, key_len); hfs_bnode_write(node, entry, data_off + key_len, entry_len); hfs_bnode_dump(node); if (new_node) { /* update parent key if we inserted a key * at the start of the first node */ if (!rec && new_node != node) hfs_brec_update_parent(fd); hfs_bnode_put(fd->bnode); if (!new_node->parent) { hfs_btree_inc_height(tree); new_node->parent = tree->root; } fd->bnode = hfs_bnode_find(tree, new_node->parent); /* create index data entry */ cnid = cpu_to_be32(new_node->this); entry = &cnid; entry_len = sizeof(cnid); /* get index key */ hfs_bnode_read_key(new_node, fd->search_key, 14); __hfs_brec_find(fd->bnode, fd); hfs_bnode_put(new_node); new_node = NULL; if (tree->attributes & HFS_TREE_VARIDXKEYS) 
key_len = be16_to_cpu(fd->search_key->key_len) + 2; else { fd->search_key->key_len = cpu_to_be16(tree->max_key_len); key_len = tree->max_key_len + 2; } goto again; } if (!rec) hfs_brec_update_parent(fd); return 0; } int hfs_brec_remove(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *parent; int end_off, rec_off, data_off, size; tree = fd->tree; node = fd->bnode; again: rec_off = tree->node_size - (fd->record + 2) * 2; end_off = tree->node_size - (node->num_recs + 1) * 2; if (node->type == HFS_NODE_LEAF) { tree->leaf_count--; mark_inode_dirty(tree->inode); } hfs_bnode_dump(node); dprint(DBG_BNODE_MOD, "remove_rec: %d, %d\n", fd->record, fd->keylength + fd->entrylength); if (!--node->num_recs) { hfs_bnode_unlink(node); if (!node->parent) return 0; parent = hfs_bnode_find(tree, node->parent); if (IS_ERR(parent)) return PTR_ERR(parent); hfs_bnode_put(node); node = fd->bnode = parent; __hfs_brec_find(node, fd); goto again; } hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs); if (rec_off == end_off) goto skip; size = fd->keylength + fd->entrylength; do { data_off = hfs_bnode_read_u16(node, rec_off); hfs_bnode_write_u16(node, rec_off + 2, data_off - size); rec_off -= 2; } while (rec_off >= end_off); /* fill hole */ hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size, data_off - fd->keyoffset - size); skip: hfs_bnode_dump(node); if (!fd->record) hfs_brec_update_parent(fd); return 0; } static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node, *next_node; struct hfs_bnode_desc node_desc; int num_recs, new_rec_off, new_off, old_rec_off; int data_start, data_end, size; tree = fd->tree; node = fd->bnode; new_node = hfs_bmap_alloc(tree); if (IS_ERR(new_node)) return new_node; hfs_bnode_get(node); dprint(DBG_BNODE_MOD, "split_nodes: %d - %d - %d\n", node->this, new_node->this, node->next); new_node->next = node->next; new_node->prev = 
node->this; new_node->parent = node->parent; new_node->type = node->type; new_node->height = node->height; if (node->next) next_node = hfs_bnode_find(tree, node->next); else next_node = NULL; if (IS_ERR(next_node)) { hfs_bnode_put(node); hfs_bnode_put(new_node); return next_node; } size = tree->node_size / 2 - node->num_recs * 2 - 14; old_rec_off = tree->node_size - 4; num_recs = 1; for (;;) { data_start = hfs_bnode_read_u16(node, old_rec_off); if (data_start > size) break; old_rec_off -= 2; if (++num_recs < node->num_recs) continue; /* panic? */ hfs_bnode_put(node); hfs_bnode_put(new_node); if (next_node) hfs_bnode_put(next_node); return ERR_PTR(-ENOSPC); } if (fd->record + 1 < num_recs) { /* new record is in the lower half, * so leave some more space there */ old_rec_off += 2; num_recs--; data_start = hfs_bnode_read_u16(node, old_rec_off); } else { hfs_bnode_put(node); hfs_bnode_get(new_node); fd->bnode = new_node; fd->record -= num_recs; fd->keyoffset -= data_start - 14; fd->entryoffset -= data_start - 14; } new_node->num_recs = node->num_recs - num_recs; node->num_recs = num_recs; new_rec_off = tree->node_size - 2; new_off = 14; size = data_start - new_off; num_recs = new_node->num_recs; data_end = data_start; while (num_recs) { hfs_bnode_write_u16(new_node, new_rec_off, new_off); old_rec_off -= 2; new_rec_off -= 2; data_end = hfs_bnode_read_u16(node, old_rec_off); new_off = data_end - size; num_recs--; } hfs_bnode_write_u16(new_node, new_rec_off, new_off); hfs_bnode_copy(new_node, 14, node, data_start, data_end - data_start); /* update new bnode header */ node_desc.next = cpu_to_be32(new_node->next); node_desc.prev = cpu_to_be32(new_node->prev); node_desc.type = new_node->type; node_desc.height = new_node->height; node_desc.num_recs = cpu_to_be16(new_node->num_recs); node_desc.reserved = 0; hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc)); /* update previous bnode header */ node->next = new_node->this; hfs_bnode_read(node, &node_desc, 0, 
sizeof(node_desc)); node_desc.next = cpu_to_be32(node->next); node_desc.num_recs = cpu_to_be16(node->num_recs); hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc)); /* update next bnode header */ if (next_node) { next_node->prev = new_node->this; hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc)); node_desc.prev = cpu_to_be32(next_node->prev); hfs_bnode_write(next_node, &node_desc, 0, sizeof(node_desc)); hfs_bnode_put(next_node); } else if (node->this == tree->leaf_tail) { /* if there is no next node, this might be the new tail */ tree->leaf_tail = new_node->this; mark_inode_dirty(tree->inode); } hfs_bnode_dump(node); hfs_bnode_dump(new_node); hfs_bnode_put(node); return new_node; } static int hfs_brec_update_parent(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node, *parent; int newkeylen, diff; int rec, rec_off, end_rec_off; int start_off, end_off; tree = fd->tree; node = fd->bnode; new_node = NULL; if (!node->parent) return 0; again: parent = hfs_bnode_find(tree, node->parent); if (IS_ERR(parent)) return PTR_ERR(parent); __hfs_brec_find(parent, fd); hfs_bnode_dump(parent); rec = fd->record; /* size difference between old and new key */ if (tree->attributes & HFS_TREE_VARIDXKEYS) newkeylen = hfs_bnode_read_u16(node, 14) + 2; else fd->keylength = newkeylen = tree->max_key_len + 2; dprint(DBG_BNODE_MOD, "update_rec: %d, %d, %d\n", rec, fd->keylength, newkeylen); rec_off = tree->node_size - (rec + 2) * 2; end_rec_off = tree->node_size - (parent->num_recs + 1) * 2; diff = newkeylen - fd->keylength; if (!diff) goto skip; if (diff > 0) { end_off = hfs_bnode_read_u16(parent, end_rec_off); if (end_rec_off - end_off < diff) { dprint(DBG_BNODE_MOD, "hfs: splitting index node.\n"); fd->bnode = parent; new_node = hfs_bnode_split(fd); if (IS_ERR(new_node)) return PTR_ERR(new_node); parent = fd->bnode; rec = fd->record; rec_off = tree->node_size - (rec + 2) * 2; end_rec_off = tree->node_size - (parent->num_recs + 1) * 2; } } 
end_off = start_off = hfs_bnode_read_u16(parent, rec_off); hfs_bnode_write_u16(parent, rec_off, start_off + diff); start_off -= 4; /* move previous cnid too */ while (rec_off > end_rec_off) { rec_off -= 2; end_off = hfs_bnode_read_u16(parent, rec_off); hfs_bnode_write_u16(parent, rec_off, end_off + diff); } hfs_bnode_move(parent, start_off + diff, start_off, end_off - start_off); skip: hfs_bnode_copy(parent, fd->keyoffset, node, 14, newkeylen); hfs_bnode_dump(parent); hfs_bnode_put(node); node = parent; if (new_node) { __be32 cnid; fd->bnode = hfs_bnode_find(tree, new_node->parent); /* create index key and entry */ hfs_bnode_read_key(new_node, fd->search_key, 14); cnid = cpu_to_be32(new_node->this); __hfs_brec_find(fd->bnode, fd); hfs_brec_insert(fd, &cnid, sizeof(cnid)); hfs_bnode_put(fd->bnode); hfs_bnode_put(new_node); if (!rec) { if (new_node == node) goto out; /* restore search_key */ hfs_bnode_read_key(node, fd->search_key, 14); } } if (!rec && node->parent) goto again; out: fd->bnode = node; return 0; } static int hfs_btree_inc_height(struct hfs_btree *tree) { struct hfs_bnode *node, *new_node; struct hfs_bnode_desc node_desc; int key_size, rec; __be32 cnid; node = NULL; if (tree->root) { node = hfs_bnode_find(tree, tree->root); if (IS_ERR(node)) return PTR_ERR(node); } new_node = hfs_bmap_alloc(tree); if (IS_ERR(new_node)) { hfs_bnode_put(node); return PTR_ERR(new_node); } tree->root = new_node->this; if (!tree->depth) { tree->leaf_head = tree->leaf_tail = new_node->this; new_node->type = HFS_NODE_LEAF; new_node->num_recs = 0; } else { new_node->type = HFS_NODE_INDEX; new_node->num_recs = 1; } new_node->parent = 0; new_node->next = 0; new_node->prev = 0; new_node->height = ++tree->depth; node_desc.next = cpu_to_be32(new_node->next); node_desc.prev = cpu_to_be32(new_node->prev); node_desc.type = new_node->type; node_desc.height = new_node->height; node_desc.num_recs = cpu_to_be16(new_node->num_recs); node_desc.reserved = 0; hfs_bnode_write(new_node, 
&node_desc, 0, sizeof(node_desc)); rec = tree->node_size - 2; hfs_bnode_write_u16(new_node, rec, 14); if (node) { /* insert old root idx into new root */ node->parent = tree->root; if (node->type == HFS_NODE_LEAF || tree->attributes & HFS_TREE_VARIDXKEYS) key_size = hfs_bnode_read_u16(node, 14) + 2; else key_size = tree->max_key_len + 2; hfs_bnode_copy(new_node, 14, node, 14, key_size); if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) { key_size = tree->max_key_len + 2; hfs_bnode_write_u16(new_node, 14, tree->max_key_len); } cnid = cpu_to_be32(node->this); hfs_bnode_write(new_node, &cnid, 14 + key_size, 4); rec -= 2; hfs_bnode_write_u16(new_node, rec, 14 + key_size + 4); hfs_bnode_put(node); } hfs_bnode_put(new_node); mark_inode_dirty(tree->inode); return 0; }
gpl-2.0
pocketbook-free/kernel_623
sound/core/pcm_timer.c
11677
3755
/* * Digital Audio (PCM) abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <linux/gcd.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/timer.h> /* * Timer functions */ void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream) { unsigned long rate, mult, fsize, l, post; struct snd_pcm_runtime *runtime = substream->runtime; mult = 1000000000; rate = runtime->rate; if (snd_BUG_ON(!rate)) return; l = gcd(mult, rate); mult /= l; rate /= l; fsize = runtime->period_size; if (snd_BUG_ON(!fsize)) return; l = gcd(rate, fsize); rate /= l; fsize /= l; post = 1; while ((mult * fsize) / fsize != mult) { mult /= 2; post *= 2; } if (rate == 0) { snd_printk(KERN_ERR "pcm timer resolution out of range (rate = %u, period_size = %lu)\n", runtime->rate, runtime->period_size); runtime->timer_resolution = -1; return; } runtime->timer_resolution = (mult * fsize / rate) * post; } static unsigned long snd_pcm_timer_resolution(struct snd_timer * timer) { struct snd_pcm_substream *substream; substream = timer->private_data; return substream->runtime ? 
substream->runtime->timer_resolution : 0; } static int snd_pcm_timer_start(struct snd_timer * timer) { struct snd_pcm_substream *substream; substream = snd_timer_chip(timer); substream->timer_running = 1; return 0; } static int snd_pcm_timer_stop(struct snd_timer * timer) { struct snd_pcm_substream *substream; substream = snd_timer_chip(timer); substream->timer_running = 0; return 0; } static struct snd_timer_hardware snd_pcm_timer = { .flags = SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_SLAVE, .resolution = 0, .ticks = 1, .c_resolution = snd_pcm_timer_resolution, .start = snd_pcm_timer_start, .stop = snd_pcm_timer_stop, }; /* * Init functions */ static void snd_pcm_timer_free(struct snd_timer *timer) { struct snd_pcm_substream *substream = timer->private_data; substream->timer = NULL; } void snd_pcm_timer_init(struct snd_pcm_substream *substream) { struct snd_timer_id tid; struct snd_timer *timer; tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE; tid.dev_class = SNDRV_TIMER_CLASS_PCM; tid.card = substream->pcm->card->number; tid.device = substream->pcm->device; tid.subdevice = (substream->number << 1) | (substream->stream & 1); if (snd_timer_new(substream->pcm->card, "PCM", &tid, &timer) < 0) return; sprintf(timer->name, "PCM %s %i-%i-%i", substream->stream == SNDRV_PCM_STREAM_CAPTURE ? "capture" : "playback", tid.card, tid.device, tid.subdevice); timer->hw = snd_pcm_timer; if (snd_device_register(timer->card, timer) < 0) { snd_device_free(timer->card, timer); return; } timer->private_data = substream; timer->private_free = snd_pcm_timer_free; substream->timer = timer; } void snd_pcm_timer_done(struct snd_pcm_substream *substream) { if (substream->timer) { snd_device_free(substream->pcm->card, substream->timer); substream->timer = NULL; } }
gpl-2.0
uberlaggydarwin/htc-bfam-caf
drivers/scsi/sym53c8xx_2/sym_malloc.c
12957
8844
/*
 *  Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
 *  of PCI-SCSI IO processors.
 *
 *  Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
 *
 *  This driver is derived from the Linux sym53c8xx driver.
 *  Copyright (C) 1998-2000  Gerard Roudier
 *
 *  The sym53c8xx driver is derived from the ncr53c8xx driver that had been
 *  a port of the FreeBSD ncr driver to Linux-1.2.13.
 *
 *  The original ncr driver has been written for 386bsd and FreeBSD by
 *          Wolfgang Stanglmeier        <wolf@cologne.de>
 *          Stefan Esser                <se@mi.Uni-Koeln.de>
 *  Copyright (C) 1994  Wolfgang Stanglmeier
 *
 *  Other major contributions:
 *
 *  NVRAM detection and reading.
 *  Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
 *
 *-----------------------------------------------------------------------------
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include "sym_glue.h"

/*
 *  Simple power of two buddy-like generic allocator.
 *  Provides naturally aligned memory chunks.
 *
 *  This simple code is not intended to be fast, but to
 *  provide power of 2 aligned memory allocations.
 *  Since the SCRIPTS processor only supplies 8 bit arithmetic,
 *  this allocator allows simple and fast address calculations
 *  from the SCRIPTS code. In addition, cache line alignment
 *  is guaranteed for power of 2 cache line size.
 *
 *  This allocator has been developed for the Linux sym53c8xx
 *  driver, since this O/S does not provide naturally aligned
 *  allocations.
 *  It has the advantage of allowing the driver to use private
 *  pages of memory that will be useful if we ever need to deal
 *  with IO MMUs for PCI.
 */

/*
 *  Allocate a naturally aligned chunk of at least @size bytes from
 *  pool @mp.  Returns NULL if @size exceeds a cluster or the pool
 *  cannot be refilled.
 *  NOTE: M_GET_MEM_CLUSTER() implicitly references the local `mp`,
 *  so that parameter name must not change.
 */
static void *___sym_malloc(m_pool_p mp, int size)
{
	int i = 0;
	int s = (1 << SYM_MEM_SHIFT);
	int j;
	void *a;
	m_link_p h = mp->h;

	if (size > SYM_MEM_CLUSTER_SIZE)
		return NULL;

	/* Find the order (free-list index) of the smallest chunk >= size. */
	while (size > s) {
		s <<= 1;
		++i;
	}

	/*
	 *  Walk up to the first non-empty free list; if everything up to
	 *  cluster size is empty, top up with a fresh memory cluster.
	 */
	j = i;
	while (!h[j].next) {
		if (s == SYM_MEM_CLUSTER_SIZE) {
			h[j].next = (m_link_p) M_GET_MEM_CLUSTER();
			if (h[j].next)
				h[j].next->next = NULL;
			break;
		}
		++j;
		s <<= 1;
	}

	/*
	 *  Pop a chunk from list j and split it down to order i,
	 *  donating the upper halves back to the smaller free lists.
	 */
	a = h[j].next;
	if (a) {
		h[j].next = h[j].next->next;
		while (j > i) {
			j -= 1;
			s >>= 1;
			h[j].next = (m_link_p) (a+s);
			h[j].next->next = NULL;
		}
	}
#ifdef DEBUG
	printf("___sym_malloc(%d) = %p\n", size, (void *) a);
#endif
	return a;
}

/*
 *  Counter-part of the generic allocator.
 *  Return a chunk to pool @mp, coalescing with its buddy (address ^ size)
 *  whenever the buddy is also free, up to full cluster size.
 */
static void ___sym_mfree(m_pool_p mp, void *ptr, int size)
{
	int i = 0;
	int s = (1 << SYM_MEM_SHIFT);
	m_link_p q;
	unsigned long a, b;
	m_link_p h = mp->h;

#ifdef DEBUG
	printf("___sym_mfree(%p, %d)\n", ptr, size);
#endif

	if (size > SYM_MEM_CLUSTER_SIZE)
		return;

	while (size > s) {
		s <<= 1;
		++i;
	}

	a = (unsigned long)ptr;

	while (1) {
		if (s == SYM_MEM_CLUSTER_SIZE) {
#ifdef SYM_MEM_FREE_UNUSED
			M_FREE_MEM_CLUSTER((void *)a);
#else
			((m_link_p) a)->next = h[i].next;
			h[i].next = (m_link_p) a;
#endif
			break;
		}
		/* b is the buddy's address; merge if it sits on free list i. */
		b = a ^ s;
		q = &h[i];
		while (q->next && q->next != (m_link_p) b) {
			q = q->next;
		}
		if (!q->next) {
			/* Buddy busy: just push the chunk on its free list. */
			((m_link_p) a)->next = h[i].next;
			h[i].next = (m_link_p) a;
			break;
		}
		/* Buddy free: unlink it and retry one order higher. */
		q->next = q->next->next;
		a = a & b;
		s <<= 1;
		++i;
	}
}

/*
 *  Verbose and zeroing allocator that wraps the generic allocator.
 *  @uflags & SYM_MEM_WARN requests a message on allocation failure.
 */
static void *__sym_calloc2(m_pool_p mp, int size, char *name, int uflags)
{
	void *p;

	p = ___sym_malloc(mp, size);

	if (DEBUG_FLAGS & DEBUG_ALLOC) {
		printf ("new %-10s[%4d] @%p.\n", name, size, p);
	}

	if (p)
		memset(p, 0, size);
	else if (uflags & SYM_MEM_WARN)
		printf ("__sym_calloc2: failed to allocate %s[%d]\n",
			name, size);
	return p;
}
#define __sym_calloc(mp, s, n)	__sym_calloc2(mp, s, n, SYM_MEM_WARN)

/*
 *  Its counter-part.
 */
static void __sym_mfree(m_pool_p mp, void *ptr, int size, char *name)
{
	if (DEBUG_FLAGS & DEBUG_ALLOC)
		printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr);

	___sym_mfree(mp, ptr, size);
}

/*
 *  Default memory pool we donnot need to involve in DMA.
 *
 *  With DMA abstraction, we use functions (methods), to
 *  distinguish between non DMAable memory and DMAable memory.
 */
static void *___mp0_get_mem_cluster(m_pool_p mp)
{
	void *m = sym_get_mem_cluster();
	if (m)
		++mp->nump;	/* account the cluster to the default pool */
	return m;
}

#ifdef	SYM_MEM_FREE_UNUSED
static void ___mp0_free_mem_cluster(m_pool_p mp, void *m)
{
	sym_free_mem_cluster(m);
	--mp->nump;
}
#else
#define ___mp0_free_mem_cluster NULL
#endif

/* Positional init: { next-chain head, get method, free method }. */
static struct sym_m_pool mp0 = {
	NULL,
	___mp0_get_mem_cluster,
	___mp0_free_mem_cluster
};

/*
 *  Methods that maintains DMAable pools according to user allocations.
 *  New pools are created on the fly when a new pool id is provided.
 *  They are deleted on the fly when they get emptied.
 */
/* Get a memory cluster that matches the DMA constraints of a given pool */
static void * ___get_dma_mem_cluster(m_pool_p mp)
{
	m_vtob_p vbp;
	void *vaddr;

	vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB");
	if (!vbp)
		goto out_err;

	vaddr = sym_m_get_dma_mem_cluster(mp, vbp);
	if (vaddr) {
		int hc = VTOB_HASH_CODE(vaddr);
		vbp->next = mp->vtob[hc];
		mp->vtob[hc] = vbp;
		++mp->nump;
	} else {
		/* FIX: the original leaked vbp when the DMA cluster
		 * allocation failed; release the bookkeeping entry. */
		__sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
	}
	return vaddr;
out_err:
	return NULL;
}

#ifdef	SYM_MEM_FREE_UNUSED
/* Free a memory cluster and associated resources for DMA */
static void ___free_dma_mem_cluster(m_pool_p mp, void *m)
{
	m_vtob_p *vbpp, vbp;
	int hc = VTOB_HASH_CODE(m);

	/* Unlink the vaddr->baddr entry from its hash chain. */
	vbpp = &mp->vtob[hc];
	while (*vbpp && (*vbpp)->vaddr != m)
		vbpp = &(*vbpp)->next;
	if (*vbpp) {
		vbp = *vbpp;
		*vbpp = (*vbpp)->next;
		sym_m_free_dma_mem_cluster(mp, vbp);
		__sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
		--mp->nump;
	}
}
#endif

/* Fetch the memory pool for a given pool id (i.e. DMA constraints) */
static inline m_pool_p ___get_dma_pool(m_pool_ident_t dev_dmat)
{
	m_pool_p mp;
	for (mp = mp0.next;
		mp && !sym_m_pool_match(mp->dev_dmat, dev_dmat);
			mp = mp->next);
	return mp;
}

/* Create a new memory DMAable pool (when fetch failed) */
static m_pool_p ___cre_dma_pool(m_pool_ident_t dev_dmat)
{
	m_pool_p mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
	if (mp) {
		mp->dev_dmat = dev_dmat;
		mp->get_mem_cluster = ___get_dma_mem_cluster;
#ifdef	SYM_MEM_FREE_UNUSED
		mp->free_mem_cluster = ___free_dma_mem_cluster;
#endif
		mp->next = mp0.next;
		mp0.next = mp;
		return mp;
	}
	return NULL;
}

#ifdef	SYM_MEM_FREE_UNUSED
/* Destroy a DMAable memory pool (when got emptied) */
static void ___del_dma_pool(m_pool_p p)
{
	m_pool_p *pp = &mp0.next;

	while (*pp && *pp != p)
		pp = &(*pp)->next;
	if (*pp) {
		*pp = (*pp)->next;
		__sym_mfree(&mp0, p, sizeof(*p), "MPOOL");
	}
}
#endif

/* This lock protects only the memory allocation/free.  */
static DEFINE_SPINLOCK(sym53c8xx_lock);

/*
 *  Actual allocator for DMAable memory.
 *  Looks up (or creates) the pool matching @dev_dmat, then allocates
 *  zeroed memory from it under the global allocator lock.
 */
void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name)
{
	unsigned long flags;
	m_pool_p mp;
	void *m = NULL;

	spin_lock_irqsave(&sym53c8xx_lock, flags);
	mp = ___get_dma_pool(dev_dmat);
	if (!mp)
		mp = ___cre_dma_pool(dev_dmat);
	if (!mp)
		goto out;
	m = __sym_calloc(mp, size, name);
#ifdef	SYM_MEM_FREE_UNUSED
	if (!mp->nump)
		___del_dma_pool(mp);
#endif

 out:
	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
	return m;
}

/*
 *  Free DMAable memory previously obtained from __sym_calloc_dma().
 *  The pool itself is torn down once its last cluster is returned
 *  (when SYM_MEM_FREE_UNUSED is enabled).
 */
void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name)
{
	unsigned long flags;
	m_pool_p mp;

	spin_lock_irqsave(&sym53c8xx_lock, flags);
	mp = ___get_dma_pool(dev_dmat);
	if (!mp)
		goto out;
	__sym_mfree(mp, m, size, name);
#ifdef	SYM_MEM_FREE_UNUSED
	if (!mp->nump)
		___del_dma_pool(mp);
#endif
 out:
	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
}

/*
 *  Actual virtual to bus physical address translator
 *  for 32 bit addressable DMAable memory.
 *  Panics if @m was not allocated from the pool for @dev_dmat.
 */
dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m)
{
	unsigned long flags;
	m_pool_p mp;
	int hc = VTOB_HASH_CODE(m);
	m_vtob_p vp = NULL;
	/* Round down to the owning cluster's base address. */
	void *a = (void *)((unsigned long)m & ~SYM_MEM_CLUSTER_MASK);
	dma_addr_t b;

	spin_lock_irqsave(&sym53c8xx_lock, flags);
	mp = ___get_dma_pool(dev_dmat);
	if (mp) {
		vp = mp->vtob[hc];
		while (vp && vp->vaddr != a)
			vp = vp->next;
	}
	if (!vp)
		panic("sym: VTOBUS FAILED!\n");
	b = vp->baddr + (m - a);
	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
	return b;
}
gpl-2.0
arasilinux/arasievm-kernel
arch/arm/mach-at91/board-afeb-9260v1.c
158
5182
/* * linux/arch/arm/mach-at91/board-afeb-9260v1.c * * Copyright (C) 2005 SAN People * Copyright (C) 2006 Atmel * Copyright (C) 2008 Sergey Lapin * * A custom board designed as open hardware; PCBs and various information * is available at http://groups.google.com/group/arm9fpga-evolution-board/ * Subversion repository: svn://194.85.238.22/home/users/george/svn/arm9eb * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/board.h> #include "generic.h" static void __init afeb9260_init_early(void) { /* Initialize processor: 18.432 MHz crystal */ at91_initialize(18432000); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD | ATMEL_UART_RI); /* USART1 on ttyS2. 
(Rx, Tx, RTS, CTS) */ at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } /* * USB Host port */ static struct at91_usbh_data __initdata afeb9260_usbh_data = { .ports = 1, }; /* * USB Device port */ static struct at91_udc_data __initdata afeb9260_udc_data = { .vbus_pin = AT91_PIN_PC5, .pullup_pin = 0, /* pull-up driven by UDC */ }; /* * SPI devices. */ static struct spi_board_info afeb9260_spi_devices[] = { { /* DataFlash chip */ .modalias = "mtd_dataflash", .chip_select = 1, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, }, }; /* * MACB Ethernet device */ static struct at91_eth_data __initdata afeb9260_macb_data = { .phy_irq_pin = AT91_PIN_PA9, .is_rmii = 0, }; /* * NAND flash */ static struct mtd_partition __initdata afeb9260_nand_partition[] = { { .name = "bootloader", .offset = 0, .size = (640 * SZ_1K), }, { .name = "kernel", .offset = MTDPART_OFS_NXTBLK, .size = SZ_2M, }, { .name = "rootfs", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, }, }; static struct atmel_nand_data __initdata afeb9260_nand_data = { .ale = 21, .cle = 22, .rdy_pin = AT91_PIN_PC13, .enable_pin = AT91_PIN_PC14, .bus_width_16 = 0, .parts = afeb9260_nand_partition, .num_parts = ARRAY_SIZE(afeb9260_nand_partition), }; /* * MCI (SD/MMC) */ static struct at91_mmc_data __initdata afeb9260_mmc_data = { .det_pin = AT91_PIN_PC9, .wp_pin = AT91_PIN_PC4, .slot_b = 1, .wire4 = 1, }; static struct i2c_board_info __initdata afeb9260_i2c_devices[] = { { I2C_BOARD_INFO("tlv320aic23", 0x1a), }, { I2C_BOARD_INFO("fm3130", 0x68), }, { I2C_BOARD_INFO("24c64", 0x50), }, }; /* * IDE (CF True IDE mode) */ static struct at91_cf_data afeb9260_cf_data = { .chipselect = 4, .irq_pin = AT91_PIN_PA6, .rst_pin = AT91_PIN_PA7, .flags = AT91_CF_TRUE_IDE, }; static void __init afeb9260_board_init(void) { /* Serial */ at91_add_device_serial(); /* USB Host */ at91_add_device_usbh(&afeb9260_usbh_data); /* USB Device */ 
at91_add_device_udc(&afeb9260_udc_data); /* SPI */ at91_add_device_spi(afeb9260_spi_devices, ARRAY_SIZE(afeb9260_spi_devices)); /* NAND */ at91_add_device_nand(&afeb9260_nand_data); /* Ethernet */ at91_add_device_eth(&afeb9260_macb_data); /* Standard function's pin assignments are not * appropriate for us and generic code provide * no API to configure these pins any other way */ at91_set_B_periph(AT91_PIN_PA10, 0); /* ETX2 */ at91_set_B_periph(AT91_PIN_PA11, 0); /* ETX3 */ /* MMC */ at91_add_device_mmc(0, &afeb9260_mmc_data); /* I2C */ at91_add_device_i2c(afeb9260_i2c_devices, ARRAY_SIZE(afeb9260_i2c_devices)); /* Audio */ at91_add_device_ssc(AT91SAM9260_ID_SSC, ATMEL_SSC_TX); /* IDE */ at91_add_device_cf(&afeb9260_cf_data); } MACHINE_START(AFEB9260, "Custom afeb9260 board") /* Maintainer: Sergey Lapin <slapin@ossfans.org> */ .timer = &at91sam926x_timer, .map_io = at91_map_io, .init_early = afeb9260_init_early, .init_irq = at91_init_irq_default, .init_machine = afeb9260_board_init, MACHINE_END
gpl-2.0
dwd31415/GameSharingSample
GameSharingSample/cocos2d/plugin/protocols/platform/android/PluginFactory.cpp
158
4368
/**************************************************************************** Copyright (c) 2012-2013 cocos2d-x.org http://www.cocos2d-x.org Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
****************************************************************************/ #include "PluginFactory.h" #include "PluginUtils.h" #include "PluginJniHelper.h" #include "ProtocolAds.h" #include "ProtocolAnalytics.h" #include "ProtocolIAP.h" #include "ProtocolShare.h" #include "ProtocolUser.h" #include "ProtocolSocial.h" namespace cocos2d { namespace plugin { enum { kPluginAds = 1, kPluginAnalytics, kPluginIAP, kPluginShare, kPluginUser, kPluginSocial, }; #define ANDROID_PLUGIN_PACKAGE_PREFIX "org/cocos2dx/plugin/" static PluginFactory* s_pFactory = NULL; PluginFactory::PluginFactory() { } PluginFactory::~PluginFactory() { } PluginFactory* PluginFactory::getInstance() { if (NULL == s_pFactory) { s_pFactory = new PluginFactory(); } return s_pFactory; } void PluginFactory::purgeFactory() { if (NULL != s_pFactory) { delete s_pFactory; s_pFactory = NULL; } } /** create the plugin by name */ PluginProtocol* PluginFactory::createPlugin(const char* name) { PluginProtocol* pRet = NULL; do { if (name == NULL || strlen(name) == 0) break; std::string jClassName = ANDROID_PLUGIN_PACKAGE_PREFIX; jClassName.append(name); PluginUtils::outputLog("PluginFactory", "Java class name of plugin %s is : %s", name, jClassName.c_str()); PluginJniMethodInfo t; if (! PluginJniHelper::getStaticMethodInfo(t , "org/cocos2dx/plugin/PluginWrapper" , "initPlugin" , "(Ljava/lang/String;)Ljava/lang/Object;")) { PluginUtils::outputLog("PluginFactory", "Can't find method initPlugin in class org.cocos2dx.plugin.PluginWrapper"); break; } jstring clsName = t.env->NewStringUTF(jClassName.c_str()); jobject jObj = t.env->CallStaticObjectMethod(t.classID, t.methodID, clsName); t.env->DeleteLocalRef(clsName); t.env->DeleteLocalRef(t.classID); if (jObj == NULL) { PluginUtils::outputLog("PluginFactory", "Can't find java class %s", jClassName.c_str()); break; } if (! 
PluginJniHelper::getStaticMethodInfo(t , "org/cocos2dx/plugin/PluginWrapper" , "getPluginType" , "(Ljava/lang/Object;)I")) { PluginUtils::outputLog("PluginFactory", "Can't find method getPluginType in class org.cocos2dx.plugin.PluginWrapper"); break; } int curType = t.env->CallStaticIntMethod(t.classID, t.methodID, jObj); t.env->DeleteLocalRef(t.classID); PluginUtils::outputLog("PluginFactory", "The type of plugin %s is : %d", name, curType); switch (curType) { case kPluginAds: pRet = new ProtocolAds(); break; case kPluginAnalytics: pRet = new ProtocolAnalytics(); break; case kPluginIAP: pRet = new ProtocolIAP(); break; case kPluginShare: pRet = new ProtocolShare(); break; case kPluginUser: pRet = new ProtocolUser(); break; case kPluginSocial: pRet = new ProtocolSocial(); break; default: break; } if (pRet != NULL) { pRet->setPluginName(name); PluginUtils::initJavaPlugin(pRet, jObj, jClassName.c_str()); } } while(0); return pRet; } }} //namespace cocos2d { namespace plugin {
gpl-2.0
mtitinger/linux-pm
fs/jfs/file.c
158
4545
/*
 *   Copyright (C) International Business Machines Corp., 2000-2002
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/posix_acl.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_dmap.h"
#include "jfs_txnmgr.h"
#include "jfs_xattr.h"
#include "jfs_acl.h"
#include "jfs_debug.h"

/*
 * Flush a file range to stable storage.  If the inode carries no
 * relevant dirty state, flushing the journal is sufficient; otherwise
 * the inode itself is committed.
 */
int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);
	if (!(inode->i_state & I_DIRTY_ALL) ||
	    (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) {
		/* Make sure committed changes hit the disk */
		jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1);
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	ret |= jfs_commit_inode(inode, 1);
	mutex_unlock(&inode->i_mutex);

	return ret ? -EIO : 0;
}

static int jfs_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = dquot_file_open(inode, file);
	if (ret)
		return ret;

	/*
	 * We attempt to allow only one "active" file open per aggregate
	 * group.  Otherwise, appending to files in parallel can cause
	 * fragmentation within the files.
	 *
	 * If the file is empty, it was probably just created and going
	 * to be written to.  If it has a size, we'll hold off until the
	 * file is actually grown.
	 */
	if (S_ISREG(inode->i_mode) && file->f_mode & FMODE_WRITE &&
	    (inode->i_size == 0)) {
		struct jfs_inode_info *ji = JFS_IP(inode);

		spin_lock_irq(&ji->ag_lock);
		if (ji->active_ag == -1) {
			struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb);

			/* Claim the allocation group holding this inode. */
			ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb);
			atomic_inc(&jfs_sb->bmap->db_active[ji->active_ag]);
		}
		spin_unlock_irq(&ji->ag_lock);
	}

	return 0;
}

/* Drop the inode's claim on its active allocation group, if any. */
static int jfs_release(struct inode *inode, struct file *file)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;

		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);

	return 0;
}

/*
 * Apply attribute changes: validates the request, handles quota
 * transfer on uid/gid change, truncation on size change, then copies
 * the remaining attributes and (if mode changed) rewrites the ACL.
 */
int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int ret;

	ret = inode_change_ok(inode, iattr);
	if (ret)
		return ret;

	if (is_quota_modification(inode, iattr))
		dquot_initialize(inode);

	/* Moving the inode to another owner may shift quota usage. */
	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
		ret = dquot_transfer(inode, iattr);
		if (ret)
			return ret;
	}

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);

		ret = inode_newsize_ok(inode, iattr->ia_size);
		if (ret)
			return ret;

		truncate_setsize(inode, iattr->ia_size);
		jfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE)
		ret = posix_acl_chmod(inode, inode->i_mode);
	return ret;
}

const struct inode_operations jfs_file_inode_operations = {
	.setxattr	= jfs_setxattr,
	.getxattr	= jfs_getxattr,
	.listxattr	= jfs_listxattr,
	.removexattr	= jfs_removexattr,
	.setattr	= jfs_setattr,
#ifdef CONFIG_JFS_POSIX_ACL
	.get_acl	= jfs_get_acl,
	.set_acl	= jfs_set_acl,
#endif
};

const struct file_operations jfs_file_operations = {
	.open		= jfs_open,
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fsync		= jfs_fsync,
	.release	= jfs_release,
	.unlocked_ioctl = jfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= jfs_compat_ioctl,
#endif
};
gpl-2.0
infectedmushi/kernel-sony-copyleft
drivers/tty/serial/pxa.c
414
23193
/* * Based on drivers/serial/8250.c by Russell King. * * Author: Nicolas Pitre * Created: Feb 20, 2003 * Copyright: (C) 2003 Monta Vista Software, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Note 1: This driver is made separate from the already too overloaded * 8250.c because it needs some kirks of its own and that'll make it * easier to add DMA support. * * Note 2: I'm too sick of device allocation policies for serial ports. * If someone else wants to request an "official" allocation of major/minor * for this driver please be my guest. And don't forget that new hardware * to come from Intel might have more than 3 or 4 of those UARTs. Let's * hope for a better port registration and dynamic device allocation scheme * with the serial core maintainer satisfaction to appear soon. 
*/ #if defined(CONFIG_SERIAL_PXA_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/serial_reg.h> #include <linux/circ_buf.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/slab.h> #define PXA_NAME_LEN 8 struct uart_pxa_port { struct uart_port port; unsigned char ier; unsigned char lcr; unsigned char mcr; unsigned int lsr_break_flag; struct clk *clk; char name[PXA_NAME_LEN]; }; static inline unsigned int serial_in(struct uart_pxa_port *up, int offset) { offset <<= 2; return readl(up->port.membase + offset); } static inline void serial_out(struct uart_pxa_port *up, int offset, int value) { offset <<= 2; writel(value, up->port.membase + offset); } static void serial_pxa_enable_ms(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; up->ier |= UART_IER_MSI; serial_out(up, UART_IER, up->ier); } static void serial_pxa_stop_tx(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; if (up->ier & UART_IER_THRI) { up->ier &= ~UART_IER_THRI; serial_out(up, UART_IER, up->ier); } } static void serial_pxa_stop_rx(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; up->ier &= ~UART_IER_RLSI; up->port.read_status_mask &= ~UART_LSR_DR; serial_out(up, UART_IER, up->ier); } static inline void receive_chars(struct uart_pxa_port *up, int *status) { unsigned int ch, flag; int max_count = 256; do { /* work around Errata #20 according to * Intel(R) PXA27x Processor Family * Specification Update (May 2005) * * Step 2 * Disable the Reciever Time Out Interrupt via IER[RTOEI] */ up->ier &= ~UART_IER_RTOIE; serial_out(up, UART_IER, up->ier); 
ch = serial_in(up, UART_RX); flag = TTY_NORMAL; up->port.icount.rx++; if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE | UART_LSR_FE | UART_LSR_OE))) { /* * For statistics only */ if (*status & UART_LSR_BI) { *status &= ~(UART_LSR_FE | UART_LSR_PE); up->port.icount.brk++; /* * We do the SysRQ and SAK checking * here because otherwise the break * may get masked by ignore_status_mask * or read_status_mask. */ if (uart_handle_break(&up->port)) goto ignore_char; } else if (*status & UART_LSR_PE) up->port.icount.parity++; else if (*status & UART_LSR_FE) up->port.icount.frame++; if (*status & UART_LSR_OE) up->port.icount.overrun++; /* * Mask off conditions which should be ignored. */ *status &= up->port.read_status_mask; #ifdef CONFIG_SERIAL_PXA_CONSOLE if (up->port.line == up->port.cons->index) { /* Recover the break flag from console xmit */ *status |= up->lsr_break_flag; up->lsr_break_flag = 0; } #endif if (*status & UART_LSR_BI) { flag = TTY_BREAK; } else if (*status & UART_LSR_PE) flag = TTY_PARITY; else if (*status & UART_LSR_FE) flag = TTY_FRAME; } if (uart_handle_sysrq_char(&up->port, ch)) goto ignore_char; uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag); ignore_char: *status = serial_in(up, UART_LSR); } while ((*status & UART_LSR_DR) && (max_count-- > 0)); tty_flip_buffer_push(&up->port.state->port); /* work around Errata #20 according to * Intel(R) PXA27x Processor Family * Specification Update (May 2005) * * Step 6: * No more data in FIFO: Re-enable RTO interrupt via IER[RTOIE] */ up->ier |= UART_IER_RTOIE; serial_out(up, UART_IER, up->ier); } static void transmit_chars(struct uart_pxa_port *up) { struct circ_buf *xmit = &up->port.state->xmit; int count; if (up->port.x_char) { serial_out(up, UART_TX, up->port.x_char); up->port.icount.tx++; up->port.x_char = 0; return; } if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { serial_pxa_stop_tx(&up->port); return; } count = up->port.fifosize / 2; do { serial_out(up, UART_TX, 
xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); up->port.icount.tx++; if (uart_circ_empty(xmit)) break; } while (--count > 0); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); if (uart_circ_empty(xmit)) serial_pxa_stop_tx(&up->port); } static void serial_pxa_start_tx(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; if (!(up->ier & UART_IER_THRI)) { up->ier |= UART_IER_THRI; serial_out(up, UART_IER, up->ier); } } static inline void check_modem_status(struct uart_pxa_port *up) { int status; status = serial_in(up, UART_MSR); if ((status & UART_MSR_ANY_DELTA) == 0) return; if (status & UART_MSR_TERI) up->port.icount.rng++; if (status & UART_MSR_DDSR) up->port.icount.dsr++; if (status & UART_MSR_DDCD) uart_handle_dcd_change(&up->port, status & UART_MSR_DCD); if (status & UART_MSR_DCTS) uart_handle_cts_change(&up->port, status & UART_MSR_CTS); wake_up_interruptible(&up->port.state->port.delta_msr_wait); } /* * This handles the interrupt from one port. */ static inline irqreturn_t serial_pxa_irq(int irq, void *dev_id) { struct uart_pxa_port *up = dev_id; unsigned int iir, lsr; iir = serial_in(up, UART_IIR); if (iir & UART_IIR_NO_INT) return IRQ_NONE; lsr = serial_in(up, UART_LSR); if (lsr & UART_LSR_DR) receive_chars(up, &lsr); check_modem_status(up); if (lsr & UART_LSR_THRE) transmit_chars(up); return IRQ_HANDLED; } static unsigned int serial_pxa_tx_empty(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned long flags; unsigned int ret; spin_lock_irqsave(&up->port.lock, flags); ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? 
TIOCSER_TEMT : 0; spin_unlock_irqrestore(&up->port.lock, flags); return ret; } static unsigned int serial_pxa_get_mctrl(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned char status; unsigned int ret; status = serial_in(up, UART_MSR); ret = 0; if (status & UART_MSR_DCD) ret |= TIOCM_CAR; if (status & UART_MSR_RI) ret |= TIOCM_RNG; if (status & UART_MSR_DSR) ret |= TIOCM_DSR; if (status & UART_MSR_CTS) ret |= TIOCM_CTS; return ret; } static void serial_pxa_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned char mcr = 0; if (mctrl & TIOCM_RTS) mcr |= UART_MCR_RTS; if (mctrl & TIOCM_DTR) mcr |= UART_MCR_DTR; if (mctrl & TIOCM_OUT1) mcr |= UART_MCR_OUT1; if (mctrl & TIOCM_OUT2) mcr |= UART_MCR_OUT2; if (mctrl & TIOCM_LOOP) mcr |= UART_MCR_LOOP; mcr |= up->mcr; serial_out(up, UART_MCR, mcr); } static void serial_pxa_break_ctl(struct uart_port *port, int break_state) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned long flags; spin_lock_irqsave(&up->port.lock, flags); if (break_state == -1) up->lcr |= UART_LCR_SBC; else up->lcr &= ~UART_LCR_SBC; serial_out(up, UART_LCR, up->lcr); spin_unlock_irqrestore(&up->port.lock, flags); } static int serial_pxa_startup(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned long flags; int retval; if (port->line == 3) /* HWUART */ up->mcr |= UART_MCR_AFE; else up->mcr = 0; up->port.uartclk = clk_get_rate(up->clk); /* * Allocate the IRQ */ retval = request_irq(up->port.irq, serial_pxa_irq, 0, up->name, up); if (retval) return retval; /* * Clear the FIFO buffers and disable them. * (they will be reenabled in set_termios()) */ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); serial_out(up, UART_FCR, 0); /* * Clear the interrupt registers. 
*/ (void) serial_in(up, UART_LSR); (void) serial_in(up, UART_RX); (void) serial_in(up, UART_IIR); (void) serial_in(up, UART_MSR); /* * Now, initialize the UART */ serial_out(up, UART_LCR, UART_LCR_WLEN8); spin_lock_irqsave(&up->port.lock, flags); up->port.mctrl |= TIOCM_OUT2; serial_pxa_set_mctrl(&up->port, up->port.mctrl); spin_unlock_irqrestore(&up->port.lock, flags); /* * Finally, enable interrupts. Note: Modem status interrupts * are set via set_termios(), which will be occurring imminently * anyway, so we don't enable them here. */ up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE | UART_IER_UUE; serial_out(up, UART_IER, up->ier); /* * And clear the interrupt registers again for luck. */ (void) serial_in(up, UART_LSR); (void) serial_in(up, UART_RX); (void) serial_in(up, UART_IIR); (void) serial_in(up, UART_MSR); return 0; } static void serial_pxa_shutdown(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned long flags; free_irq(up->port.irq, up); /* * Disable interrupts from this port */ up->ier = 0; serial_out(up, UART_IER, 0); spin_lock_irqsave(&up->port.lock, flags); up->port.mctrl &= ~TIOCM_OUT2; serial_pxa_set_mctrl(&up->port, up->port.mctrl); spin_unlock_irqrestore(&up->port.lock, flags); /* * Disable break condition and FIFOs */ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC); serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); serial_out(up, UART_FCR, 0); } static void serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned char cval, fcr = 0; unsigned long flags; unsigned int baud, quot; unsigned int dll; switch (termios->c_cflag & CSIZE) { case CS5: cval = UART_LCR_WLEN5; break; case CS6: cval = UART_LCR_WLEN6; break; case CS7: cval = UART_LCR_WLEN7; break; default: case CS8: cval = UART_LCR_WLEN8; break; } if (termios->c_cflag & CSTOPB) cval 
|= UART_LCR_STOP; if (termios->c_cflag & PARENB) cval |= UART_LCR_PARITY; if (!(termios->c_cflag & PARODD)) cval |= UART_LCR_EPAR; /* * Ask the core to calculate the divisor for us. */ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); quot = uart_get_divisor(port, baud); if ((up->port.uartclk / quot) < (2400 * 16)) fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR1; else if ((up->port.uartclk / quot) < (230400 * 16)) fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR8; else fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR32; /* * Ok, we're now changing the port state. Do it with * interrupts disabled. */ spin_lock_irqsave(&up->port.lock, flags); /* * Ensure the port will be enabled. * This is required especially for serial console. */ up->ier |= UART_IER_UUE; /* * Update the per-port timeout. */ uart_update_timeout(port, termios->c_cflag, baud); up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; if (termios->c_iflag & INPCK) up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) up->port.read_status_mask |= UART_LSR_BI; /* * Characters to ignore */ up->port.ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; if (termios->c_iflag & IGNBRK) { up->port.ignore_status_mask |= UART_LSR_BI; /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). 
*/ if (termios->c_iflag & IGNPAR) up->port.ignore_status_mask |= UART_LSR_OE; } /* * ignore all characters if CREAD is not set */ if ((termios->c_cflag & CREAD) == 0) up->port.ignore_status_mask |= UART_LSR_DR; /* * CTS flow control flag and modem status interrupts */ up->ier &= ~UART_IER_MSI; if (UART_ENABLE_MS(&up->port, termios->c_cflag)) up->ier |= UART_IER_MSI; serial_out(up, UART_IER, up->ier); if (termios->c_cflag & CRTSCTS) up->mcr |= UART_MCR_AFE; else up->mcr &= ~UART_MCR_AFE; serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */ serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */ /* * work around Errata #75 according to Intel(R) PXA27x Processor Family * Specification Update (Nov 2005) */ dll = serial_in(up, UART_DLL); WARN_ON(dll != (quot & 0xff)); serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */ serial_out(up, UART_LCR, cval); /* reset DLAB */ up->lcr = cval; /* Save LCR */ serial_pxa_set_mctrl(&up->port, up->port.mctrl); serial_out(up, UART_FCR, fcr); spin_unlock_irqrestore(&up->port.lock, flags); } static void serial_pxa_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; if (!state) clk_prepare_enable(up->clk); else clk_disable_unprepare(up->clk); } static void serial_pxa_release_port(struct uart_port *port) { } static int serial_pxa_request_port(struct uart_port *port) { return 0; } static void serial_pxa_config_port(struct uart_port *port, int flags) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; up->port.type = PORT_PXA; } static int serial_pxa_verify_port(struct uart_port *port, struct serial_struct *ser) { /* we don't want the core code to modify any port params */ return -EINVAL; } static const char * serial_pxa_type(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; return up->name; } static struct uart_pxa_port *serial_pxa_ports[4]; static struct uart_driver serial_pxa_reg; #ifdef 
CONFIG_SERIAL_PXA_CONSOLE #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) /* * Wait for transmitter & holding register to empty */ static inline void wait_for_xmitr(struct uart_pxa_port *up) { unsigned int status, tmout = 10000; /* Wait up to 10ms for the character(s) to be sent. */ do { status = serial_in(up, UART_LSR); if (status & UART_LSR_BI) up->lsr_break_flag = UART_LSR_BI; if (--tmout == 0) break; udelay(1); } while ((status & BOTH_EMPTY) != BOTH_EMPTY); /* Wait up to 1s for flow control if necessary */ if (up->port.flags & UPF_CONS_FLOW) { tmout = 1000000; while (--tmout && ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0)) udelay(1); } } static void serial_pxa_console_putchar(struct uart_port *port, int ch) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; wait_for_xmitr(up); serial_out(up, UART_TX, ch); } /* * Print a string to the serial port trying not to disturb * any possible real use of the port... * * The console_lock must be held when we get here. */ static void serial_pxa_console_write(struct console *co, const char *s, unsigned int count) { struct uart_pxa_port *up = serial_pxa_ports[co->index]; unsigned int ier; unsigned long flags; int locked = 1; clk_enable(up->clk); local_irq_save(flags); if (up->port.sysrq) locked = 0; else if (oops_in_progress) locked = spin_trylock(&up->port.lock); else spin_lock(&up->port.lock); /* * First save the IER then disable the interrupts */ ier = serial_in(up, UART_IER); serial_out(up, UART_IER, UART_IER_UUE); uart_console_write(&up->port, s, count, serial_pxa_console_putchar); /* * Finally, wait for transmitter to become empty * and restore the IER */ wait_for_xmitr(up); serial_out(up, UART_IER, ier); if (locked) spin_unlock(&up->port.lock); local_irq_restore(flags); clk_disable(up->clk); } #ifdef CONFIG_CONSOLE_POLL /* * Console polling routines for writing and reading from the uart while * in an interrupt or debug context. 
*/ static int serial_pxa_get_poll_char(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned char lsr = serial_in(up, UART_LSR); while (!(lsr & UART_LSR_DR)) lsr = serial_in(up, UART_LSR); return serial_in(up, UART_RX); } static void serial_pxa_put_poll_char(struct uart_port *port, unsigned char c) { unsigned int ier; struct uart_pxa_port *up = (struct uart_pxa_port *)port; /* * First save the IER then disable the interrupts */ ier = serial_in(up, UART_IER); serial_out(up, UART_IER, UART_IER_UUE); wait_for_xmitr(up); /* * Send the character out. */ serial_out(up, UART_TX, c); /* * Finally, wait for transmitter to become empty * and restore the IER */ wait_for_xmitr(up); serial_out(up, UART_IER, ier); } #endif /* CONFIG_CONSOLE_POLL */ static int __init serial_pxa_console_setup(struct console *co, char *options) { struct uart_pxa_port *up; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; if (co->index == -1 || co->index >= serial_pxa_reg.nr) co->index = 0; up = serial_pxa_ports[co->index]; if (!up) return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(&up->port, co, baud, parity, bits, flow); } static struct console serial_pxa_console = { .name = "ttyS", .write = serial_pxa_console_write, .device = uart_console_device, .setup = serial_pxa_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &serial_pxa_reg, }; #define PXA_CONSOLE &serial_pxa_console #else #define PXA_CONSOLE NULL #endif static struct uart_ops serial_pxa_pops = { .tx_empty = serial_pxa_tx_empty, .set_mctrl = serial_pxa_set_mctrl, .get_mctrl = serial_pxa_get_mctrl, .stop_tx = serial_pxa_stop_tx, .start_tx = serial_pxa_start_tx, .stop_rx = serial_pxa_stop_rx, .enable_ms = serial_pxa_enable_ms, .break_ctl = serial_pxa_break_ctl, .startup = serial_pxa_startup, .shutdown = serial_pxa_shutdown, .set_termios = serial_pxa_set_termios, .pm = serial_pxa_pm, .type = serial_pxa_type, 
.release_port = serial_pxa_release_port, .request_port = serial_pxa_request_port, .config_port = serial_pxa_config_port, .verify_port = serial_pxa_verify_port, #if defined(CONFIG_CONSOLE_POLL) && defined(CONFIG_SERIAL_PXA_CONSOLE) .poll_get_char = serial_pxa_get_poll_char, .poll_put_char = serial_pxa_put_poll_char, #endif }; static struct uart_driver serial_pxa_reg = { .owner = THIS_MODULE, .driver_name = "PXA serial", .dev_name = "ttyS", .major = TTY_MAJOR, .minor = 64, .nr = 4, .cons = PXA_CONSOLE, }; #ifdef CONFIG_PM static int serial_pxa_suspend(struct device *dev) { struct uart_pxa_port *sport = dev_get_drvdata(dev); if (sport) uart_suspend_port(&serial_pxa_reg, &sport->port); return 0; } static int serial_pxa_resume(struct device *dev) { struct uart_pxa_port *sport = dev_get_drvdata(dev); if (sport) uart_resume_port(&serial_pxa_reg, &sport->port); return 0; } static const struct dev_pm_ops serial_pxa_pm_ops = { .suspend = serial_pxa_suspend, .resume = serial_pxa_resume, }; #endif static struct of_device_id serial_pxa_dt_ids[] = { { .compatible = "mrvl,pxa-uart", }, { .compatible = "mrvl,mmp-uart", }, {} }; MODULE_DEVICE_TABLE(of, serial_pxa_dt_ids); static int serial_pxa_probe_dt(struct platform_device *pdev, struct uart_pxa_port *sport) { struct device_node *np = pdev->dev.of_node; int ret; if (!np) return 1; ret = of_alias_get_id(np, "serial"); if (ret < 0) { dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); return ret; } sport->port.line = ret; return 0; } static int serial_pxa_probe(struct platform_device *dev) { struct uart_pxa_port *sport; struct resource *mmres, *irqres; int ret; mmres = platform_get_resource(dev, IORESOURCE_MEM, 0); irqres = platform_get_resource(dev, IORESOURCE_IRQ, 0); if (!mmres || !irqres) return -ENODEV; sport = kzalloc(sizeof(struct uart_pxa_port), GFP_KERNEL); if (!sport) return -ENOMEM; sport->clk = clk_get(&dev->dev, NULL); if (IS_ERR(sport->clk)) { ret = PTR_ERR(sport->clk); goto err_free; } ret = 
clk_prepare(sport->clk); if (ret) { clk_put(sport->clk); goto err_free; } sport->port.type = PORT_PXA; sport->port.iotype = UPIO_MEM; sport->port.mapbase = mmres->start; sport->port.irq = irqres->start; sport->port.fifosize = 64; sport->port.ops = &serial_pxa_pops; sport->port.dev = &dev->dev; sport->port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; sport->port.uartclk = clk_get_rate(sport->clk); ret = serial_pxa_probe_dt(dev, sport); if (ret > 0) sport->port.line = dev->id; else if (ret < 0) goto err_clk; snprintf(sport->name, PXA_NAME_LEN - 1, "UART%d", sport->port.line + 1); sport->port.membase = ioremap(mmres->start, resource_size(mmres)); if (!sport->port.membase) { ret = -ENOMEM; goto err_clk; } serial_pxa_ports[sport->port.line] = sport; uart_add_one_port(&serial_pxa_reg, &sport->port); platform_set_drvdata(dev, sport); return 0; err_clk: clk_unprepare(sport->clk); clk_put(sport->clk); err_free: kfree(sport); return ret; } static int serial_pxa_remove(struct platform_device *dev) { struct uart_pxa_port *sport = platform_get_drvdata(dev); uart_remove_one_port(&serial_pxa_reg, &sport->port); clk_unprepare(sport->clk); clk_put(sport->clk); kfree(sport); return 0; } static struct platform_driver serial_pxa_driver = { .probe = serial_pxa_probe, .remove = serial_pxa_remove, .driver = { .name = "pxa2xx-uart", .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &serial_pxa_pm_ops, #endif .of_match_table = serial_pxa_dt_ids, }, }; static int __init serial_pxa_init(void) { int ret; ret = uart_register_driver(&serial_pxa_reg); if (ret != 0) return ret; ret = platform_driver_register(&serial_pxa_driver); if (ret != 0) uart_unregister_driver(&serial_pxa_reg); return ret; } static void __exit serial_pxa_exit(void) { platform_driver_unregister(&serial_pxa_driver); uart_unregister_driver(&serial_pxa_reg); } module_init(serial_pxa_init); module_exit(serial_pxa_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pxa2xx-uart");
gpl-2.0
Nold360/GC-Linux-Kernel-2.6.32
mm/rmap.c
414
35494
/* * mm/rmap.c - physical to virtual reverse mappings * * Copyright 2001, Rik van Riel <riel@conectiva.com.br> * Released under the General Public License (GPL). * * Simple, low overhead reverse mapping scheme. * Please try to keep this thing as modular as possible. * * Provides methods for unmapping each kind of mapped page: * the anon methods track anonymous pages, and * the file methods track pages belonging to an inode. * * Original design by Rik van Riel <riel@conectiva.com.br> 2001 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004 * Contributions by Hugh Dickins 2003, 2004 */ /* * Lock ordering in mm: * * inode->i_mutex (while writing or truncating, not reading or faulting) * inode->i_alloc_sem (vmtruncate_range) * mm->mmap_sem * page->flags PG_locked (lock_page) * mapping->i_mmap_lock * anon_vma->lock * mm->page_table_lock or pte_lock * zone->lru_lock (in mark_page_accessed, isolate_lru_page) * swap_lock (in swap_duplicate, swap_info_get) * mmlist_lock (in mmput, drain_mmlist and others) * mapping->private_lock (in __set_page_dirty_buffers) * inode_lock (in set_page_dirty's __mark_inode_dirty) * sb_lock (within inode_lock in fs/fs-writeback.c) * mapping->tree_lock (widely used, in set_page_dirty, * in arch-dependent flush_dcache_mmap_lock, * within inode_lock in __sync_single_inode) * * (code doesn't rely on that order so it could be switched around) * ->tasklist_lock * anon_vma->lock (memory_failure, collect_procs_anon) * pte map lock */ #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/rmap.h> #include <linux/rcupdate.h> #include <linux/module.h> #include <linux/memcontrol.h> #include <linux/mmu_notifier.h> #include <linux/migrate.h> #include <asm/tlbflush.h> #include "internal.h" static struct kmem_cache *anon_vma_cachep; static inline struct anon_vma 
*anon_vma_alloc(void) { return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); } static inline void anon_vma_free(struct anon_vma *anon_vma) { kmem_cache_free(anon_vma_cachep, anon_vma); } /** * anon_vma_prepare - attach an anon_vma to a memory region * @vma: the memory region in question * * This makes sure the memory mapping described by 'vma' has * an 'anon_vma' attached to it, so that we can associate the * anonymous pages mapped into it with that anon_vma. * * The common case will be that we already have one, but if * if not we either need to find an adjacent mapping that we * can re-use the anon_vma from (very common when the only * reason for splitting a vma has been mprotect()), or we * allocate a new one. * * Anon-vma allocations are very subtle, because we may have * optimistically looked up an anon_vma in page_lock_anon_vma() * and that may actually touch the spinlock even in the newly * allocated vma (it depends on RCU to make sure that the * anon_vma isn't actually destroyed). * * As a result, we need to do proper anon_vma locking even * for the new allocation. At the same time, we do not want * to do any locking for the common case of already having * an anon_vma. * * This must be called with the mmap_sem held for reading. 
*/ int anon_vma_prepare(struct vm_area_struct *vma) { struct anon_vma *anon_vma = vma->anon_vma; might_sleep(); if (unlikely(!anon_vma)) { struct mm_struct *mm = vma->vm_mm; struct anon_vma *allocated; anon_vma = find_mergeable_anon_vma(vma); allocated = NULL; if (!anon_vma) { anon_vma = anon_vma_alloc(); if (unlikely(!anon_vma)) return -ENOMEM; allocated = anon_vma; } spin_lock(&anon_vma->lock); /* page_table_lock to protect against threads */ spin_lock(&mm->page_table_lock); if (likely(!vma->anon_vma)) { vma->anon_vma = anon_vma; list_add_tail(&vma->anon_vma_node, &anon_vma->head); allocated = NULL; } spin_unlock(&mm->page_table_lock); spin_unlock(&anon_vma->lock); if (unlikely(allocated)) anon_vma_free(allocated); } return 0; } void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next) { BUG_ON(vma->anon_vma != next->anon_vma); list_del(&next->anon_vma_node); } void __anon_vma_link(struct vm_area_struct *vma) { struct anon_vma *anon_vma = vma->anon_vma; if (anon_vma) list_add_tail(&vma->anon_vma_node, &anon_vma->head); } void anon_vma_link(struct vm_area_struct *vma) { struct anon_vma *anon_vma = vma->anon_vma; if (anon_vma) { spin_lock(&anon_vma->lock); list_add_tail(&vma->anon_vma_node, &anon_vma->head); spin_unlock(&anon_vma->lock); } } void anon_vma_unlink(struct vm_area_struct *vma) { struct anon_vma *anon_vma = vma->anon_vma; int empty; if (!anon_vma) return; spin_lock(&anon_vma->lock); list_del(&vma->anon_vma_node); /* We must garbage collect the anon_vma if it's empty */ empty = list_empty(&anon_vma->head); spin_unlock(&anon_vma->lock); if (empty) anon_vma_free(anon_vma); } static void anon_vma_ctor(void *data) { struct anon_vma *anon_vma = data; spin_lock_init(&anon_vma->lock); INIT_LIST_HEAD(&anon_vma->head); } void __init anon_vma_init(void) { anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor); } /* * Getting a lock on a stable anon_vma from a page off the LRU 
is * tricky: page_lock_anon_vma rely on RCU to guard against the races. */ struct anon_vma *page_lock_anon_vma(struct page *page) { struct anon_vma *anon_vma; unsigned long anon_mapping; rcu_read_lock(); anon_mapping = (unsigned long) page->mapping; if (!(anon_mapping & PAGE_MAPPING_ANON)) goto out; if (!page_mapped(page)) goto out; anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); spin_lock(&anon_vma->lock); return anon_vma; out: rcu_read_unlock(); return NULL; } void page_unlock_anon_vma(struct anon_vma *anon_vma) { spin_unlock(&anon_vma->lock); rcu_read_unlock(); } /* * At what user virtual address is page expected in @vma? * Returns virtual address or -EFAULT if page's index/offset is not * within the range mapped the @vma. */ static inline unsigned long vma_address(struct page *page, struct vm_area_struct *vma) { pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); unsigned long address; address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); if (unlikely(address < vma->vm_start || address >= vma->vm_end)) { /* page should be within @vma mapping range */ return -EFAULT; } return address; } /* * At what user virtual address is page expected in vma? * checking that the page matches the vma. */ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) { if (PageAnon(page)) { if ((void *)vma->anon_vma != (void *)page->mapping - PAGE_MAPPING_ANON) return -EFAULT; } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) return -EFAULT; } else return -EFAULT; return vma_address(page, vma); } /* * Check that @page is mapped at @address into @mm. * * If @sync is false, page_check_address may perform a racy check to avoid * the page table lock when the pte is not present (helpful when reclaiming * highly shared pages). * * On success returns with pte mapped and locked. 
*/
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	/* Walk the page tables top-down; any non-present level means
	 * the page cannot be mapped at this address. */
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	/* Re-check under the pte lock that the pte still maps this page. */
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = vma_address(page, vma);
	if (address == -EFAULT)		/* out of vma range */
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)			/* the page is not in this mm */
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
*/
static int page_referenced_one(struct page *page,
			       struct vm_area_struct *vma,
			       unsigned int *mapcount,
			       unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * Don't want to elevate referenced for mlocked page that gets this far,
	 * in order that it progresses to try_to_unmap and is moved to the
	 * unevictable list.
	 */
	if (vma->vm_flags & VM_LOCKED) {
		*mapcount = 1;	/* break early from loop */
		*vm_flags |= VM_LOCKED;
		goto out_unmap;
	}

	if (ptep_clear_flush_young_notify(vma, address, pte)) {
		/*
		 * Don't treat a reference through a sequentially read
		 * mapping as such.  If the page has been used in
		 * another mapping, we will catch it; if this other
		 * mapping is already gone, the unmap path will have
		 * set PG_referenced or activated the page.
		 */
		if (likely(!VM_SequentialReadHint(vma)))
			referenced++;
	}

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault.
*/
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

out_unmap:
	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	if (referenced)
		*vm_flags |= vma->vm_flags;
	return referenced;
}

/*
 * page_referenced_anon - referenced check for object-based anonymous rmap.
 * Walks every vma on the page's anon_vma list and accumulates the
 * reference count reported by page_referenced_one().
 */
static int page_referenced_anon(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
*/
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
*/
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *mem_cont,
		    unsigned long *vm_flags)
{
	int referenced = 0;

	if (TestClearPageReferenced(page))
		referenced++;

	*vm_flags = 0;
	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont,
								vm_flags);
		else if (is_locked)
			referenced += page_referenced_file(page, mem_cont,
								vm_flags);
		else if (!trylock_page(page))
			/* Could not lock: conservatively count it referenced. */
			referenced++;
		else {
			/* Re-check mapping under the page lock we just took. */
			if (page->mapping)
				referenced += page_referenced_file(page,
							mem_cont, vm_flags);
			unlock_page(page);
		}
	}

	if (page_test_and_clear_young(page))
		referenced++;

	return referenced;
}

/*
 * page_mkclean_one - write-protect and clean the pte mapping @page in @vma.
 * Returns 1 if a dirty or writable pte was found and cleaned, 0 otherwise.
 */
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		/* Flush, clear and write-protect the pte atomically. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush_notify(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * page_mkclean_file - clean all shared mappings of a file page.
 * Walks the prio tree of vmas mapping this file offset; only VM_SHARED
 * vmas can carry writable/dirty ptes for a file page.
 */
static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/*
 * page_mkclean - clean all ptes mapping @page; page must be locked.
 * Returns nonzero if any pte (or, on s390, the storage key) was dirty.
 */
int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			/* s390 keeps the dirty state in the storage key. */
			if (page_test_dirty(page)) {
				page_clear_dirty(page);
				ret = 1;
			}
		}
	}

	return
ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	/* Tag the mapping pointer so PageAnon() can recognise it. */
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
*/
	struct anon_vma *anon_vma = vma->anon_vma;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	BUG_ON(page->mapping != (struct address_space *)anon_vma);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock and the page must be locked.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	/* First mapping sets up the anon rmap; later ones only sanity-check. */
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
*/
void page_add_file_rmap(struct page *page)
{
	/* Only the 0->1 mapcount transition updates the zone statistics. */
	if (atomic_inc_and_test(&page->_mapcount)) {
		__inc_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_update_mapped_file_stat(page, 1);
	}
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * Now that the last pte has gone, s390 must transfer dirty
	 * flag from storage key to struct page.  We can usually skip
	 * this if the page is anon, so about to be freed; but perhaps
	 * not if it's in swapcache - there might be another pte slot
	 * containing the swap entry, but page not yet written to swap.
	 */
	if ((!PageAnon(page) || PageSwapCache(page)) &&
	    page_test_dirty(page)) {
		page_clear_dirty(page);
		set_page_dirty(page);
	}
	if (PageAnon(page)) {
		mem_cgroup_uncharge_page(page);
		__dec_zone_page_state(page, NR_ANON_PAGES);
	} else {
		__dec_zone_page_state(page, NR_FILE_MAPPED);
	}
	mem_cgroup_update_mapped_file_stat(page, -1);
	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
*/ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, enum ttu_flags flags) { struct mm_struct *mm = vma->vm_mm; unsigned long address; pte_t *pte; pte_t pteval; spinlock_t *ptl; int ret = SWAP_AGAIN; address = vma_address(page, vma); if (address == -EFAULT) goto out; pte = page_check_address(page, mm, address, &ptl, 0); if (!pte) goto out; /* * If the page is mlock()d, we cannot swap it out. * If it's recently referenced (perhaps page_referenced * skipped over this mm) then we should reactivate it. */ if (!(flags & TTU_IGNORE_MLOCK)) { if (vma->vm_flags & VM_LOCKED) { ret = SWAP_MLOCK; goto out_unmap; } } if (!(flags & TTU_IGNORE_ACCESS)) { if (ptep_clear_flush_young_notify(vma, address, pte)) { ret = SWAP_FAIL; goto out_unmap; } } /* Nuke the page table entry. */ flush_cache_page(vma, address, page_to_pfn(page)); pteval = ptep_clear_flush_notify(vma, address, pte); /* Move the dirty bit to the physical page now the pte is gone. */ if (pte_dirty(pteval)) set_page_dirty(page); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { if (PageAnon(page)) dec_mm_counter(mm, anon_rss); else dec_mm_counter(mm, file_rss); set_pte_at(mm, address, pte, swp_entry_to_pte(make_hwpoison_entry(page))); } else if (PageAnon(page)) { swp_entry_t entry = { .val = page_private(page) }; if (PageSwapCache(page)) { /* * Store the swap location in the pte. * See handle_pte_fault() ... */ swap_duplicate(entry); if (list_empty(&mm->mmlist)) { spin_lock(&mmlist_lock); if (list_empty(&mm->mmlist)) list_add(&mm->mmlist, &init_mm.mmlist); spin_unlock(&mmlist_lock); } dec_mm_counter(mm, anon_rss); } else if (PAGE_MIGRATION) { /* * Store the pfn of the page in a special migration * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. 
*/ BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); entry = make_migration_entry(page, pte_write(pteval)); } set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); BUG_ON(pte_file(*pte)); } else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) { /* Establish migration entry for a file page */ swp_entry_t entry; entry = make_migration_entry(page, pte_write(pteval)); set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); } else dec_mm_counter(mm, file_rss); page_remove_rmap(page); page_cache_release(page); out_unmap: pte_unmap_unlock(pte, ptl); out: return ret; } /* * objrmap doesn't work for nonlinear VMAs because the assumption that * offset-into-file correlates with offset-into-virtual-addresses does not hold. * Consequently, given a particular page and its ->index, we cannot locate the * ptes which are mapping that page without an exhaustive linear search. * * So what this code does is a mini "virtual scan" of each nonlinear VMA which * maps the file to which the target page belongs. The ->vm_private_data field * holds the current cursor into that scan. Successive searches will circulate * around the vma's virtual address space. * * So as more replacement pressure is applied to the pages in a nonlinear VMA, * more scanning pressure is placed against them as well. Eventually pages * will become fully unmapped and are eligible for eviction. * * For very sparsely populated VMAs this is a little inefficient - chances are * there there won't be many ptes located within the scan cluster. In this case * maybe we could scan further - to the end of the pte page, perhaps. * * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can * acquire it without blocking. If vma locked, mlock the pages in the cluster, * rather than unmapping them. If we encounter the "check_page" that vmscan is * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN. 
*/ #define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE) #define CLUSTER_MASK (~(CLUSTER_SIZE - 1)) static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, struct vm_area_struct *vma, struct page *check_page) { struct mm_struct *mm = vma->vm_mm; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; pte_t pteval; spinlock_t *ptl; struct page *page; unsigned long address; unsigned long end; int ret = SWAP_AGAIN; int locked_vma = 0; address = (vma->vm_start + cursor) & CLUSTER_MASK; end = address + CLUSTER_SIZE; if (address < vma->vm_start) address = vma->vm_start; if (end > vma->vm_end) end = vma->vm_end; pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) return ret; pud = pud_offset(pgd, address); if (!pud_present(*pud)) return ret; pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return ret; /* * MLOCK_PAGES => feature is configured. * if we can acquire the mmap_sem for read, and vma is VM_LOCKED, * keep the sem while scanning the cluster for mlocking pages. */ if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) { locked_vma = (vma->vm_flags & VM_LOCKED); if (!locked_vma) up_read(&vma->vm_mm->mmap_sem); /* don't need it */ } pte = pte_offset_map_lock(mm, pmd, address, &ptl); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); for (; address < end; pte++, address += PAGE_SIZE) { if (!pte_present(*pte)) continue; page = vm_normal_page(vma, address, *pte); BUG_ON(!page || PageAnon(page)); if (locked_vma) { mlock_vma_page(page); /* no-op if already mlocked */ if (page == check_page) ret = SWAP_MLOCK; continue; /* don't unmap */ } if (ptep_clear_flush_young_notify(vma, address, pte)) continue; /* Nuke the page table entry. */ flush_cache_page(vma, address, pte_pfn(*pte)); pteval = ptep_clear_flush_notify(vma, address, pte); /* If nonlinear, store the file page offset in the pte. 
*/ if (page->index != linear_page_index(vma, address)) set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); /* Move the dirty bit to the physical page now the pte is gone. */ if (pte_dirty(pteval)) set_page_dirty(page); page_remove_rmap(page); page_cache_release(page); dec_mm_counter(mm, file_rss); (*mapcount)--; } pte_unmap_unlock(pte - 1, ptl); if (locked_vma) up_read(&vma->vm_mm->mmap_sem); return ret; } /* * common handling for pages mapped in VM_LOCKED vmas */ static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma) { int mlocked = 0; if (down_read_trylock(&vma->vm_mm->mmap_sem)) { if (vma->vm_flags & VM_LOCKED) { mlock_vma_page(page); mlocked++; /* really mlocked the page */ } up_read(&vma->vm_mm->mmap_sem); } return mlocked; } /** * try_to_unmap_anon - unmap or unlock anonymous page using the object-based * rmap method * @page: the page to unmap/unlock * @unlock: request for unlock rather than unmap [unlikely] * @migration: unmapping for migration - ignored if @unlock * * Find all the mappings of a page using the mapping pointer and the vma chains * contained in the anon_vma struct it points to. * * This function is only called from try_to_unmap/try_to_munlock for * anonymous pages. * When called from try_to_munlock(), the mmap_sem of the mm containing the vma * where the page was found will be held for write. So, we won't recheck * vm_flags for that VMA. That should be OK, because that vma shouldn't be * 'LOCKED. 
*/ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) { struct anon_vma *anon_vma; struct vm_area_struct *vma; unsigned int mlocked = 0; int ret = SWAP_AGAIN; int unlock = TTU_ACTION(flags) == TTU_MUNLOCK; if (MLOCK_PAGES && unlikely(unlock)) ret = SWAP_SUCCESS; /* default for try_to_munlock() */ anon_vma = page_lock_anon_vma(page); if (!anon_vma) return ret; list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { if (MLOCK_PAGES && unlikely(unlock)) { if (!((vma->vm_flags & VM_LOCKED) && page_mapped_in_vma(page, vma))) continue; /* must visit all unlocked vmas */ ret = SWAP_MLOCK; /* saw at least one mlocked vma */ } else { ret = try_to_unmap_one(page, vma, flags); if (ret == SWAP_FAIL || !page_mapped(page)) break; } if (ret == SWAP_MLOCK) { mlocked = try_to_mlock_page(page, vma); if (mlocked) break; /* stop if actually mlocked page */ } } page_unlock_anon_vma(anon_vma); if (mlocked) ret = SWAP_MLOCK; /* actually mlocked the page */ else if (ret == SWAP_MLOCK) ret = SWAP_AGAIN; /* saw VM_LOCKED vma */ return ret; } /** * try_to_unmap_file - unmap/unlock file page using the object-based rmap method * @page: the page to unmap/unlock * @flags: action and flags * * Find all the mappings of a page using the mapping pointer and the vma chains * contained in the address_space struct it points to. * * This function is only called from try_to_unmap/try_to_munlock for * object-based pages. * When called from try_to_munlock(), the mmap_sem of the mm containing the vma * where the page was found will be held for write. So, we won't recheck * vm_flags for that VMA. That should be OK, because that vma shouldn't be * 'LOCKED. 
*/ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) { struct address_space *mapping = page->mapping; pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); struct vm_area_struct *vma; struct prio_tree_iter iter; int ret = SWAP_AGAIN; unsigned long cursor; unsigned long max_nl_cursor = 0; unsigned long max_nl_size = 0; unsigned int mapcount; unsigned int mlocked = 0; int unlock = TTU_ACTION(flags) == TTU_MUNLOCK; if (MLOCK_PAGES && unlikely(unlock)) ret = SWAP_SUCCESS; /* default for try_to_munlock() */ spin_lock(&mapping->i_mmap_lock); vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { if (MLOCK_PAGES && unlikely(unlock)) { if (!((vma->vm_flags & VM_LOCKED) && page_mapped_in_vma(page, vma))) continue; /* must visit all vmas */ ret = SWAP_MLOCK; } else { ret = try_to_unmap_one(page, vma, flags); if (ret == SWAP_FAIL || !page_mapped(page)) goto out; } if (ret == SWAP_MLOCK) { mlocked = try_to_mlock_page(page, vma); if (mlocked) break; /* stop if actually mlocked page */ } } if (mlocked) goto out; if (list_empty(&mapping->i_mmap_nonlinear)) goto out; list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) { if (MLOCK_PAGES && unlikely(unlock)) { if (!(vma->vm_flags & VM_LOCKED)) continue; /* must visit all vmas */ ret = SWAP_MLOCK; /* leave mlocked == 0 */ goto out; /* no need to look further */ } if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) && (vma->vm_flags & VM_LOCKED)) continue; cursor = (unsigned long) vma->vm_private_data; if (cursor > max_nl_cursor) max_nl_cursor = cursor; cursor = vma->vm_end - vma->vm_start; if (cursor > max_nl_size) max_nl_size = cursor; } if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */ ret = SWAP_FAIL; goto out; } /* * We don't try to search for this page in the nonlinear vmas, * and page_referenced wouldn't have found it anyway. Instead * just walk the nonlinear vmas trying to age and unmap some. 
* The mapcount of the page we came in with is irrelevant, * but even so use it as a guide to how hard we should try? */ mapcount = page_mapcount(page); if (!mapcount) goto out; cond_resched_lock(&mapping->i_mmap_lock); max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK; if (max_nl_cursor == 0) max_nl_cursor = CLUSTER_SIZE; do { list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) { if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) && (vma->vm_flags & VM_LOCKED)) continue; cursor = (unsigned long) vma->vm_private_data; while ( cursor < max_nl_cursor && cursor < vma->vm_end - vma->vm_start) { ret = try_to_unmap_cluster(cursor, &mapcount, vma, page); if (ret == SWAP_MLOCK) mlocked = 2; /* to return below */ cursor += CLUSTER_SIZE; vma->vm_private_data = (void *) cursor; if ((int)mapcount <= 0) goto out; } vma->vm_private_data = (void *) max_nl_cursor; } cond_resched_lock(&mapping->i_mmap_lock); max_nl_cursor += CLUSTER_SIZE; } while (max_nl_cursor <= max_nl_size); /* * Don't loop forever (perhaps all the remaining pages are * in locked vmas). Reset cursor on all unreserved nonlinear * vmas, now forgetting on which ones it had fallen behind. */ list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) vma->vm_private_data = NULL; out: spin_unlock(&mapping->i_mmap_lock); if (mlocked) ret = SWAP_MLOCK; /* actually mlocked the page */ else if (ret == SWAP_MLOCK) ret = SWAP_AGAIN; /* saw VM_LOCKED vma */ return ret; } /** * try_to_unmap - try to remove all page table mappings to a page * @page: the page to get unmapped * @flags: action and flags * * Tries to remove all the page table entries which are mapping this * page, used in the pageout path. Caller must hold the page lock. * Return values are: * * SWAP_SUCCESS - we succeeded in removing all mappings * SWAP_AGAIN - we missed a mapping, try again later * SWAP_FAIL - the page is unswappable * SWAP_MLOCK - page is mlocked. 
*/ int try_to_unmap(struct page *page, enum ttu_flags flags) { int ret; BUG_ON(!PageLocked(page)); if (PageAnon(page)) ret = try_to_unmap_anon(page, flags); else ret = try_to_unmap_file(page, flags); if (ret != SWAP_MLOCK && !page_mapped(page)) ret = SWAP_SUCCESS; return ret; } /** * try_to_munlock - try to munlock a page * @page: the page to be munlocked * * Called from munlock code. Checks all of the VMAs mapping the page * to make sure nobody else has this page mlocked. The page will be * returned with PG_mlocked cleared if no other vmas have it mlocked. * * Return values are: * * SWAP_SUCCESS - no vma's holding page mlocked. * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem * SWAP_MLOCK - page is now mlocked. */ int try_to_munlock(struct page *page) { VM_BUG_ON(!PageLocked(page) || PageLRU(page)); if (PageAnon(page)) return try_to_unmap_anon(page, TTU_MUNLOCK); else return try_to_unmap_file(page, TTU_MUNLOCK); }
gpl-2.0
wan5xp/android_kernel_sony_u8500
fs/splice.c
414
47687
/* * "splice": joining two ropes together by interweaving their strands. * * This is the "extended pipe" functionality, where a pipe is used as * an arbitrary in-memory buffer. Think of a pipe as a small kernel * buffer that you can use to transfer data from one end to the other. * * The traditional unix read/write is extended with a "splice()" operation * that transfers data buffers to or from a pipe buffer. * * Named by Larry McVoy, original implementation from Linus, extended by * Jens to support splicing to files, network, direct splicing, etc and * fixing lots of bugs. * * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk> * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org> * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu> * */ #include <linux/fs.h> #include <linux/file.h> #include <linux/pagemap.h> #include <linux/splice.h> #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/swap.h> #include <linux/writeback.h> #include <linux/buffer_head.h> #include <linux/module.h> #include <linux/syscalls.h> #include <linux/uio.h> #include <linux/security.h> #include <linux/gfp.h> #include <linux/socket.h> /* * Attempt to steal a page from a pipe buffer. This should perhaps go into * a vm helper function, it's already simplified quite a bit by the * addition of remove_mapping(). If success is returned, the caller may * attempt to reuse this page for another destination. */ static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; struct address_space *mapping; lock_page(page); mapping = page_mapping(page); if (mapping) { WARN_ON(!PageUptodate(page)); /* * At least for ext2 with nobh option, we need to wait on * writeback completing on this page, since we'll remove it * from the pagecache. Otherwise truncate wont wait on the * page, allowing the disk blocks to be reused by someone else * before we actually wrote our data to them. fs corruption * ensues. 
*/ wait_on_page_writeback(page); if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) goto out_unlock; /* * If we succeeded in removing the mapping, set LRU flag * and return good. */ if (remove_mapping(mapping, page)) { buf->flags |= PIPE_BUF_FLAG_LRU; return 0; } } /* * Raced with truncate or failed to remove page from current * address space, unlock and return failure. */ out_unlock: unlock_page(page); return 1; } static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { page_cache_release(buf->page); buf->flags &= ~PIPE_BUF_FLAG_LRU; } /* * Check whether the contents of buf is OK to access. Since the content * is a page cache page, IO may be in flight. */ static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; int err; if (!PageUptodate(page)) { lock_page(page); /* * Page got truncated/unhashed. This will cause a 0-byte * splice, if this is the first page. */ if (!page->mapping) { err = -ENODATA; goto error; } /* * Uh oh, read-error from disk. */ if (!PageUptodate(page)) { err = -EIO; goto error; } /* * Page is ok afterall, we are done. 
*/ unlock_page(page); } return 0; error: unlock_page(page); return err; } static const struct pipe_buf_operations page_cache_pipe_buf_ops = { .can_merge = 0, .map = generic_pipe_buf_map, .unmap = generic_pipe_buf_unmap, .confirm = page_cache_pipe_buf_confirm, .release = page_cache_pipe_buf_release, .steal = page_cache_pipe_buf_steal, .get = generic_pipe_buf_get, }; static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { if (!(buf->flags & PIPE_BUF_FLAG_GIFT)) return 1; buf->flags |= PIPE_BUF_FLAG_LRU; return generic_pipe_buf_steal(pipe, buf); } static const struct pipe_buf_operations user_page_pipe_buf_ops = { .can_merge = 0, .map = generic_pipe_buf_map, .unmap = generic_pipe_buf_unmap, .confirm = generic_pipe_buf_confirm, .release = page_cache_pipe_buf_release, .steal = user_page_pipe_buf_steal, .get = generic_pipe_buf_get, }; static void wakeup_pipe_readers(struct pipe_inode_info *pipe) { smp_mb(); if (waitqueue_active(&pipe->wait)) wake_up_interruptible(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } /** * splice_to_pipe - fill passed data into a pipe * @pipe: pipe to fill * @spd: data to fill * * Description: * @spd contains a map of pages and len/offset tuples, along with * the struct pipe_buf_operations associated with these pages. This * function will link that data to the pipe. 
* */ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) { unsigned int spd_pages = spd->nr_pages; int ret, do_wakeup, page_nr; ret = 0; do_wakeup = 0; page_nr = 0; pipe_lock(pipe); for (;;) { if (!pipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } if (pipe->nrbufs < pipe->buffers) { int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); struct pipe_buffer *buf = pipe->bufs + newbuf; buf->page = spd->pages[page_nr]; buf->offset = spd->partial[page_nr].offset; buf->len = spd->partial[page_nr].len; buf->private = spd->partial[page_nr].private; buf->ops = spd->ops; if (spd->flags & SPLICE_F_GIFT) buf->flags |= PIPE_BUF_FLAG_GIFT; pipe->nrbufs++; page_nr++; ret += buf->len; if (pipe->inode) do_wakeup = 1; if (!--spd->nr_pages) break; if (pipe->nrbufs < pipe->buffers) continue; break; } if (spd->flags & SPLICE_F_NONBLOCK) { if (!ret) ret = -EAGAIN; break; } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { smp_mb(); if (waitqueue_active(&pipe->wait)) wake_up_interruptible_sync(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); do_wakeup = 0; } pipe->waiting_writers++; pipe_wait(pipe); pipe->waiting_writers--; } pipe_unlock(pipe); if (do_wakeup) wakeup_pipe_readers(pipe); while (page_nr < spd_pages) spd->spd_release(spd, page_nr++); return ret; } static void spd_release_page(struct splice_pipe_desc *spd, unsigned int i) { page_cache_release(spd->pages[i]); } /* * Check if we need to grow the arrays holding pages and partial page * descriptions. 
*/ int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) { unsigned int buffers = ACCESS_ONCE(pipe->buffers); spd->nr_pages_max = buffers; if (buffers <= PIPE_DEF_BUFFERS) return 0; spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL); spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL); if (spd->pages && spd->partial) return 0; kfree(spd->pages); kfree(spd->partial); return -ENOMEM; } void splice_shrink_spd(struct splice_pipe_desc *spd) { if (spd->nr_pages_max <= PIPE_DEF_BUFFERS) return; kfree(spd->pages); kfree(spd->partial); } static int __generic_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct address_space *mapping = in->f_mapping; unsigned int loff, nr_pages, req_pages; struct page *pages[PIPE_DEF_BUFFERS]; struct partial_page partial[PIPE_DEF_BUFFERS]; struct page *page; pgoff_t index, end_index; loff_t isize; int error, page_nr; struct splice_pipe_desc spd = { .pages = pages, .partial = partial, .nr_pages_max = PIPE_DEF_BUFFERS, .flags = flags, .ops = &page_cache_pipe_buf_ops, .spd_release = spd_release_page, }; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; index = *ppos >> PAGE_CACHE_SHIFT; loff = *ppos & ~PAGE_CACHE_MASK; req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; nr_pages = min(req_pages, spd.nr_pages_max); /* * Lookup the (hopefully) full range of pages we need. */ spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages); index += spd.nr_pages; /* * If find_get_pages_contig() returned fewer pages than we needed, * readahead/allocate the rest and fill in the holes. */ if (spd.nr_pages < nr_pages) page_cache_sync_readahead(mapping, &in->f_ra, in, index, req_pages - spd.nr_pages); error = 0; while (spd.nr_pages < nr_pages) { /* * Page could be there, find_get_pages_contig() breaks on * the first hole. 
*/ page = find_get_page(mapping, index); if (!page) { /* * page didn't exist, allocate one. */ page = page_cache_alloc_cold(mapping); if (!page) break; error = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); if (unlikely(error)) { page_cache_release(page); if (error == -EEXIST) continue; break; } /* * add_to_page_cache() locks the page, unlock it * to avoid convoluting the logic below even more. */ unlock_page(page); } spd.pages[spd.nr_pages++] = page; index++; } /* * Now loop over the map and see if we need to start IO on any * pages, fill in the partial map, etc. */ index = *ppos >> PAGE_CACHE_SHIFT; nr_pages = spd.nr_pages; spd.nr_pages = 0; for (page_nr = 0; page_nr < nr_pages; page_nr++) { unsigned int this_len; if (!len) break; /* * this_len is the max we'll use from this page */ this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); page = spd.pages[page_nr]; if (PageReadahead(page)) page_cache_async_readahead(mapping, &in->f_ra, in, page, index, req_pages - page_nr); /* * If the page isn't uptodate, we may need to start io on it */ if (!PageUptodate(page)) { lock_page(page); /* * Page was truncated, or invalidated by the * filesystem. Redo the find/create, but this time the * page is kept locked, so there's no chance of another * race with truncate/invalidate. */ if (!page->mapping) { unlock_page(page); page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); if (!page) { error = -ENOMEM; break; } page_cache_release(spd.pages[page_nr]); spd.pages[page_nr] = page; } /* * page was already under io and is now done, great */ if (PageUptodate(page)) { unlock_page(page); goto fill_it; } /* * need to read in the page */ error = mapping->a_ops->readpage(in, page); if (unlikely(error)) { /* * We really should re-lookup the page here, * but it complicates things a lot. Instead * lets just do what we already stored, and * we'll get it the next time we are called. 
*/ if (error == AOP_TRUNCATED_PAGE) error = 0; break; } } fill_it: /* * i_size must be checked after PageUptodate. */ isize = i_size_read(mapping->host); end_index = (isize - 1) >> PAGE_CACHE_SHIFT; if (unlikely(!isize || index > end_index)) break; /* * if this is the last page, see if we need to shrink * the length and stop */ if (end_index == index) { unsigned int plen; /* * max good bytes in this page */ plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; if (plen <= loff) break; /* * force quit after adding this page */ this_len = min(this_len, plen - loff); len = this_len; } spd.partial[page_nr].offset = loff; spd.partial[page_nr].len = this_len; len -= this_len; loff = 0; spd.nr_pages++; index++; } /* * Release any pages at the end, if we quit early. 'page_nr' is how far * we got, 'nr_pages' is how many pages are in the map. */ while (page_nr < nr_pages) page_cache_release(spd.pages[page_nr++]); in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; if (spd.nr_pages) error = splice_to_pipe(pipe, &spd); splice_shrink_spd(&spd); return error; } /** * generic_file_splice_read - splice data from file to a pipe * @in: file to splice from * @ppos: position in @in * @pipe: pipe to splice to * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * Will read pages from given file and fill them into a pipe. Can be * used as long as the address_space operations for the source implements * a readpage() hook. 
* */ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { loff_t isize, left; int ret; isize = i_size_read(in->f_mapping->host); if (unlikely(*ppos >= isize)) return 0; left = isize - *ppos; if (unlikely(left < len)) len = left; ret = __generic_file_splice_read(in, ppos, pipe, len, flags); if (ret > 0) { *ppos += ret; file_accessed(in); } return ret; } EXPORT_SYMBOL(generic_file_splice_read); static const struct pipe_buf_operations default_pipe_buf_ops = { .can_merge = 0, .map = generic_pipe_buf_map, .unmap = generic_pipe_buf_unmap, .confirm = generic_pipe_buf_confirm, .release = generic_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = generic_pipe_buf_get, }; static ssize_t kernel_readv(struct file *file, const struct iovec *vec, unsigned long vlen, loff_t offset) { mm_segment_t old_fs; loff_t pos = offset; ssize_t res; old_fs = get_fs(); set_fs(get_ds()); /* The cast to a user pointer is valid due to the set_fs() */ res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos); set_fs(old_fs); return res; } static ssize_t kernel_write(struct file *file, const char *buf, size_t count, loff_t pos) { mm_segment_t old_fs; ssize_t res; old_fs = get_fs(); set_fs(get_ds()); /* The cast to a user pointer is valid due to the set_fs() */ res = vfs_write(file, (const char __user *)buf, count, &pos); set_fs(old_fs); return res; } ssize_t default_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { unsigned int nr_pages; unsigned int nr_freed; size_t offset; struct page *pages[PIPE_DEF_BUFFERS]; struct partial_page partial[PIPE_DEF_BUFFERS]; struct iovec *vec, __vec[PIPE_DEF_BUFFERS]; ssize_t res; size_t this_len; int error; int i; struct splice_pipe_desc spd = { .pages = pages, .partial = partial, .nr_pages_max = PIPE_DEF_BUFFERS, .flags = flags, .ops = &default_pipe_buf_ops, .spd_release = spd_release_page, }; if 
(splice_grow_spd(pipe, &spd)) return -ENOMEM; res = -ENOMEM; vec = __vec; if (spd.nr_pages_max > PIPE_DEF_BUFFERS) { vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL); if (!vec) goto shrink_ret; } offset = *ppos & ~PAGE_CACHE_MASK; nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) { struct page *page; page = alloc_page(GFP_USER); error = -ENOMEM; if (!page) goto err; this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); vec[i].iov_base = (void __user *) page_address(page); vec[i].iov_len = this_len; spd.pages[i] = page; spd.nr_pages++; len -= this_len; offset = 0; } res = kernel_readv(in, vec, spd.nr_pages, *ppos); if (res < 0) { error = res; goto err; } error = 0; if (!res) goto err; nr_freed = 0; for (i = 0; i < spd.nr_pages; i++) { this_len = min_t(size_t, vec[i].iov_len, res); spd.partial[i].offset = 0; spd.partial[i].len = this_len; if (!this_len) { __free_page(spd.pages[i]); spd.pages[i] = NULL; nr_freed++; } res -= this_len; } spd.nr_pages -= nr_freed; res = splice_to_pipe(pipe, &spd); if (res > 0) *ppos += res; shrink_ret: if (vec != __vec) kfree(vec); splice_shrink_spd(&spd); return res; err: for (i = 0; i < spd.nr_pages; i++) __free_page(spd.pages[i]); res = error; goto shrink_ret; } EXPORT_SYMBOL(default_file_splice_read); /* * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos' * using sendpage(). Return the number of bytes sent. */ static int pipe_to_sendpage(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { struct file *file = sd->u.file; loff_t pos = sd->pos; int more; if (!likely(file->f_op && file->f_op->sendpage)) return -EINVAL; more = (sd->flags & SPLICE_F_MORE) ? 
MSG_MORE : 0; if (sd->len < sd->total_len && pipe->nrbufs > 1) more |= MSG_SENDPAGE_NOTLAST; return file->f_op->sendpage(file, buf->page, buf->offset, sd->len, &pos, more); } /* * This is a little more tricky than the file -> pipe splicing. There are * basically three cases: * * - Destination page already exists in the address space and there * are users of it. For that case we have no other option that * copying the data. Tough luck. * - Destination page already exists in the address space, but there * are no users of it. Make sure it's uptodate, then drop it. Fall * through to last case. * - Destination page does not exist, we can add the pipe page to * the page cache and avoid the copy. * * If asked to move pages to the output file (SPLICE_F_MOVE is set in * sd->flags), we attempt to migrate pages from the pipe to the output * file address space page cache. This is possible if no one else has * the pipe page referenced outside of the pipe and page cache. If * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create * a new page in the output file page cache and fill/dirty that. */ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { struct file *file = sd->u.file; struct address_space *mapping = file->f_mapping; unsigned int offset, this_len; struct page *page; void *fsdata; int ret; offset = sd->pos & ~PAGE_CACHE_MASK; this_len = sd->len; if (this_len + offset > PAGE_CACHE_SIZE) this_len = PAGE_CACHE_SIZE - offset; ret = pagecache_write_begin(file, mapping, sd->pos, this_len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata); if (unlikely(ret)) goto out; if (buf->page != page) { /* * Careful, ->map() uses KM_USER0! 
*/ char *src = buf->ops->map(pipe, buf, 1); char *dst = kmap_atomic(page, KM_USER1); memcpy(dst + offset, src + buf->offset, this_len); flush_dcache_page(page); kunmap_atomic(dst, KM_USER1); buf->ops->unmap(pipe, buf, src); } ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len, page, fsdata); out: return ret; } EXPORT_SYMBOL(pipe_to_file); static void wakeup_pipe_writers(struct pipe_inode_info *pipe) { smp_mb(); if (waitqueue_active(&pipe->wait)) wake_up_interruptible(&pipe->wait); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } /** * splice_from_pipe_feed - feed available data from a pipe to a file * @pipe: pipe to splice from * @sd: information to @actor * @actor: handler that splices the data * * Description: * This function loops over the pipe and calls @actor to do the * actual moving of a single struct pipe_buffer to the desired * destination. It returns when there's no more buffers left in * the pipe or if the requested number of bytes (@sd->total_len) * have been copied. It returns a positive number (one) if the * pipe needs to be filled with more data, zero if the required * number of bytes have been copied and -errno on error. * * This, together with splice_from_pipe_{begin,end,next}, may be * used to implement the functionality of __splice_from_pipe() when * locking is required around copying the pipe buffers to the * destination. 
*/ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd, splice_actor *actor) { int ret; while (pipe->nrbufs) { struct pipe_buffer *buf = pipe->bufs + pipe->curbuf; const struct pipe_buf_operations *ops = buf->ops; sd->len = buf->len; if (sd->len > sd->total_len) sd->len = sd->total_len; ret = buf->ops->confirm(pipe, buf); if (unlikely(ret)) { if (ret == -ENODATA) ret = 0; return ret; } ret = actor(pipe, buf, sd); if (ret <= 0) return ret; buf->offset += ret; buf->len -= ret; sd->num_spliced += ret; sd->len -= ret; sd->pos += ret; sd->total_len -= ret; if (!buf->len) { buf->ops = NULL; ops->release(pipe, buf); pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1); pipe->nrbufs--; if (pipe->inode) sd->need_wakeup = true; } if (!sd->total_len) return 0; } return 1; } EXPORT_SYMBOL(splice_from_pipe_feed); /** * splice_from_pipe_next - wait for some data to splice from * @pipe: pipe to splice from * @sd: information about the splice operation * * Description: * This function will wait for some data and return a positive * value (one) if pipe buffers are available. It will return zero * or -errno if no more data needs to be spliced. */ int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) { while (!pipe->nrbufs) { if (!pipe->writers) return 0; if (!pipe->waiting_writers && sd->num_spliced) return 0; if (sd->flags & SPLICE_F_NONBLOCK) return -EAGAIN; if (signal_pending(current)) return -ERESTARTSYS; if (sd->need_wakeup) { wakeup_pipe_writers(pipe); sd->need_wakeup = false; } pipe_wait(pipe); } return 1; } EXPORT_SYMBOL(splice_from_pipe_next); /** * splice_from_pipe_begin - start splicing from pipe * @sd: information about the splice operation * * Description: * This function should be called before a loop containing * splice_from_pipe_next() and splice_from_pipe_feed() to * initialize the necessary fields of @sd. 
*/ void splice_from_pipe_begin(struct splice_desc *sd) { sd->num_spliced = 0; sd->need_wakeup = false; } EXPORT_SYMBOL(splice_from_pipe_begin); /** * splice_from_pipe_end - finish splicing from pipe * @pipe: pipe to splice from * @sd: information about the splice operation * * Description: * This function will wake up pipe writers if necessary. It should * be called after a loop containing splice_from_pipe_next() and * splice_from_pipe_feed(). */ void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd) { if (sd->need_wakeup) wakeup_pipe_writers(pipe); } EXPORT_SYMBOL(splice_from_pipe_end); /** * __splice_from_pipe - splice data from a pipe to given actor * @pipe: pipe to splice from * @sd: information to @actor * @actor: handler that splices the data * * Description: * This function does little more than loop over the pipe and call * @actor to do the actual moving of a single struct pipe_buffer to * the desired destination. See pipe_to_file, pipe_to_sendpage, or * pipe_to_user. * */ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, splice_actor *actor) { int ret; splice_from_pipe_begin(sd); do { ret = splice_from_pipe_next(pipe, sd); if (ret > 0) ret = splice_from_pipe_feed(pipe, sd, actor); } while (ret > 0); splice_from_pipe_end(pipe, sd); return sd->num_spliced ? sd->num_spliced : ret; } EXPORT_SYMBOL(__splice_from_pipe); /** * splice_from_pipe - splice data from a pipe to a file * @pipe: pipe to splice from * @out: file to splice to * @ppos: position in @out * @len: how many bytes to splice * @flags: splice modifier flags * @actor: handler that splices the data * * Description: * See __splice_from_pipe. This function locks the pipe inode, * otherwise it's identical to __splice_from_pipe(). 
* */ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags, splice_actor *actor) { ssize_t ret; struct splice_desc sd = { .total_len = len, .flags = flags, .pos = *ppos, .u.file = out, }; pipe_lock(pipe); ret = __splice_from_pipe(pipe, &sd, actor); pipe_unlock(pipe); return ret; } /** * generic_file_splice_write - splice data from a pipe to a file * @pipe: pipe info * @out: file to write to * @ppos: position in @out * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * Will either move or copy pages (determined by @flags options) from * the given pipe inode to the given file. * */ ssize_t generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { struct address_space *mapping = out->f_mapping; struct inode *inode = mapping->host; struct splice_desc sd = { .total_len = len, .flags = flags, .pos = *ppos, .u.file = out, }; ssize_t ret; pipe_lock(pipe); splice_from_pipe_begin(&sd); do { ret = splice_from_pipe_next(pipe, &sd); if (ret <= 0) break; mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD); ret = file_remove_suid(out); if (!ret) { file_update_time(out); ret = splice_from_pipe_feed(pipe, &sd, pipe_to_file); } mutex_unlock(&inode->i_mutex); } while (ret > 0); splice_from_pipe_end(pipe, &sd); pipe_unlock(pipe); if (sd.num_spliced) ret = sd.num_spliced; if (ret > 0) { unsigned long nr_pages; int err; nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; err = generic_write_sync(out, *ppos, ret); if (err) ret = err; else *ppos += ret; balance_dirty_pages_ratelimited_nr(mapping, nr_pages); } return ret; } EXPORT_SYMBOL(generic_file_splice_write); static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { int ret; void *data; data = buf->ops->map(pipe, buf, 0); ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos); buf->ops->unmap(pipe, buf, 
data); return ret; } static ssize_t default_file_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { ssize_t ret; ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf); if (ret > 0) *ppos += ret; return ret; } /** * generic_splice_sendpage - splice data from a pipe to a socket * @pipe: pipe to splice from * @out: socket to write to * @ppos: position in @out * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * Will send @len bytes from the pipe to a network socket. No data copying * is involved. * */ ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage); } EXPORT_SYMBOL(generic_splice_sendpage); /* * Attempt to initiate a splice from pipe to file. */ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); int ret; if (unlikely(!(out->f_mode & FMODE_WRITE))) return -EBADF; if (unlikely(out->f_flags & O_APPEND)) return -EINVAL; ret = rw_verify_area(WRITE, out, ppos, len); if (unlikely(ret < 0)) return ret; if (out->f_op && out->f_op->splice_write) splice_write = out->f_op->splice_write; else splice_write = default_file_splice_write; return splice_write(pipe, out, ppos, len, flags); } /* * Attempt to initiate a splice from a file to a pipe. 
*/ static long do_splice_to(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); int ret; if (unlikely(!(in->f_mode & FMODE_READ))) return -EBADF; ret = rw_verify_area(READ, in, ppos, len); if (unlikely(ret < 0)) return ret; if (in->f_op && in->f_op->splice_read) splice_read = in->f_op->splice_read; else splice_read = default_file_splice_read; return splice_read(in, ppos, pipe, len, flags); } /** * splice_direct_to_actor - splices data directly between two non-pipes * @in: file to splice from * @sd: actor information on where to splice to * @actor: handles the data splicing * * Description: * This is a special case helper to splice directly between two * points, without requiring an explicit pipe. Internally an allocated * pipe is cached in the process, and reused during the lifetime of * that process. * */ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, splice_direct_actor *actor) { struct pipe_inode_info *pipe; long ret, bytes; umode_t i_mode; size_t len; int i, flags; /* * We require the input being a regular file, as we don't want to * randomly drop data for eg socket -> socket splicing. Use the * piped splicing for that! */ i_mode = in->f_path.dentry->d_inode->i_mode; if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode))) return -EINVAL; /* * neither in nor out is a pipe, setup an internal pipe attached to * 'out' and transfer the wanted data from 'in' to 'out' through that */ pipe = current->splice_pipe; if (unlikely(!pipe)) { pipe = alloc_pipe_info(NULL); if (!pipe) return -ENOMEM; /* * We don't have an immediate reader, but we'll read the stuff * out of the pipe right after the splice_to_pipe(). So set * PIPE_READERS appropriately. */ pipe->readers = 1; current->splice_pipe = pipe; } /* * Do the splice. 
*/ ret = 0; bytes = 0; len = sd->total_len; flags = sd->flags; /* * Don't block on output, we have to drain the direct pipe. */ sd->flags &= ~SPLICE_F_NONBLOCK; while (len) { size_t read_len; loff_t pos = sd->pos, prev_pos = pos; ret = do_splice_to(in, &pos, pipe, len, flags); if (unlikely(ret <= 0)) goto out_release; read_len = ret; sd->total_len = read_len; /* * NOTE: nonblocking mode only applies to the input. We * must not do the output in nonblocking mode as then we * could get stuck data in the internal pipe: */ ret = actor(pipe, sd); if (unlikely(ret <= 0)) { sd->pos = prev_pos; goto out_release; } bytes += ret; len -= ret; sd->pos = pos; if (ret < read_len) { sd->pos = prev_pos + ret; goto out_release; } } done: pipe->nrbufs = pipe->curbuf = 0; file_accessed(in); return bytes; out_release: /* * If we did an incomplete transfer we must release * the pipe buffers in question: */ for (i = 0; i < pipe->buffers; i++) { struct pipe_buffer *buf = pipe->bufs + i; if (buf->ops) { buf->ops->release(pipe, buf); buf->ops = NULL; } } if (!bytes) bytes = ret; goto done; } EXPORT_SYMBOL(splice_direct_to_actor); static int direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd) { struct file *file = sd->u.file; return do_splice_from(pipe, file, &file->f_pos, sd->total_len, sd->flags); } /** * do_splice_direct - splices data directly between two files * @in: file to splice from * @ppos: input file offset * @out: file to splice to * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * For use by do_sendfile(). splice can easily emulate sendfile, but * doing it in the application would incur an extra system call * (splice in + splice out, as compared to just sendfile()). So this helper * can splice directly through a process-private pipe. 
* */ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, size_t len, unsigned int flags) { struct splice_desc sd = { .len = len, .total_len = len, .flags = flags, .pos = *ppos, .u.file = out, }; long ret; ret = splice_direct_to_actor(in, &sd, direct_splice_actor); if (ret > 0) *ppos = sd.pos; return ret; } static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags); /* * Determine where to splice to/from. */ static long do_splice(struct file *in, loff_t __user *off_in, struct file *out, loff_t __user *off_out, size_t len, unsigned int flags) { struct pipe_inode_info *ipipe; struct pipe_inode_info *opipe; loff_t offset, *off; long ret; ipipe = get_pipe_info(in); opipe = get_pipe_info(out); if (ipipe && opipe) { if (off_in || off_out) return -ESPIPE; if (!(in->f_mode & FMODE_READ)) return -EBADF; if (!(out->f_mode & FMODE_WRITE)) return -EBADF; /* Splicing to self would be fun, but... */ if (ipipe == opipe) return -EINVAL; return splice_pipe_to_pipe(ipipe, opipe, len, flags); } if (ipipe) { if (off_in) return -ESPIPE; if (off_out) { if (!(out->f_mode & FMODE_PWRITE)) return -EINVAL; if (copy_from_user(&offset, off_out, sizeof(loff_t))) return -EFAULT; off = &offset; } else off = &out->f_pos; ret = do_splice_from(ipipe, out, off, len, flags); if (off_out && copy_to_user(off_out, off, sizeof(loff_t))) ret = -EFAULT; return ret; } if (opipe) { if (off_out) return -ESPIPE; if (off_in) { if (!(in->f_mode & FMODE_PREAD)) return -EINVAL; if (copy_from_user(&offset, off_in, sizeof(loff_t))) return -EFAULT; off = &offset; } else off = &in->f_pos; ret = do_splice_to(in, off, opipe, len, flags); if (off_in && copy_to_user(off_in, off, sizeof(loff_t))) ret = -EFAULT; return ret; } return -EINVAL; } /* * Map an iov into an array of pages and offset/length tupples. 
With the * partial_page structure, we can map several non-contiguous ranges into * our ones pages[] map instead of splitting that operation into pieces. * Could easily be exported as a generic helper for other users, in which * case one would probably want to add a 'max_nr_pages' parameter as well. */ static int get_iovec_page_array(const struct iovec __user *iov, unsigned int nr_vecs, struct page **pages, struct partial_page *partial, int aligned, unsigned int pipe_buffers) { int buffers = 0, error = 0; while (nr_vecs) { unsigned long off, npages; struct iovec entry; void __user *base; size_t len; int i; error = -EFAULT; if (copy_from_user(&entry, iov, sizeof(entry))) break; base = entry.iov_base; len = entry.iov_len; /* * Sanity check this iovec. 0 read succeeds. */ error = 0; if (unlikely(!len)) break; error = -EFAULT; if (!access_ok(VERIFY_READ, base, len)) break; /* * Get this base offset and number of pages, then map * in the user pages. */ off = (unsigned long) base & ~PAGE_MASK; /* * If asked for alignment, the offset must be zero and the * length a multiple of the PAGE_SIZE. */ error = -EINVAL; if (aligned && (off || len & ~PAGE_MASK)) break; npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; if (npages > pipe_buffers - buffers) npages = pipe_buffers - buffers; error = get_user_pages_fast((unsigned long)base, npages, 0, &pages[buffers]); if (unlikely(error <= 0)) break; /* * Fill this contiguous range into the partial page map. */ for (i = 0; i < error; i++) { const int plen = min_t(size_t, len, PAGE_SIZE - off); partial[buffers].offset = off; partial[buffers].len = plen; off = 0; len -= plen; buffers++; } /* * We didn't complete this iov, stop here since it probably * means we have to move some of this into a pipe to * be able to continue. */ if (len) break; /* * Don't continue if we mapped fewer pages than we asked for, * or if we mapped the max number of pages that we have * room for. 
*/ if (error < npages || buffers == pipe_buffers) break; nr_vecs--; iov++; } if (buffers) return buffers; return error; } static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { char *src; int ret; /* * See if we can use the atomic maps, by prefaulting in the * pages and doing an atomic copy */ if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) { src = buf->ops->map(pipe, buf, 1); ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset, sd->len); buf->ops->unmap(pipe, buf, src); if (!ret) { ret = sd->len; goto out; } } /* * No dice, use slow non-atomic map and copy */ src = buf->ops->map(pipe, buf, 0); ret = sd->len; if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len)) ret = -EFAULT; buf->ops->unmap(pipe, buf, src); out: if (ret > 0) sd->u.userptr += ret; return ret; } /* * For lack of a better implementation, implement vmsplice() to userspace * as a simple copy of the pipes pages to the user iov. */ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov, unsigned long nr_segs, unsigned int flags) { struct pipe_inode_info *pipe; struct splice_desc sd; ssize_t size; int error; long ret; pipe = get_pipe_info(file); if (!pipe) return -EBADF; pipe_lock(pipe); error = ret = 0; while (nr_segs) { void __user *base; size_t len; /* * Get user address base and length for this iovec. */ error = get_user(base, &iov->iov_base); if (unlikely(error)) break; error = get_user(len, &iov->iov_len); if (unlikely(error)) break; /* * Sanity check this iovec. 0 read succeeds. 
*/ if (unlikely(!len)) break; if (unlikely(!base)) { error = -EFAULT; break; } if (unlikely(!access_ok(VERIFY_WRITE, base, len))) { error = -EFAULT; break; } sd.len = 0; sd.total_len = len; sd.flags = flags; sd.u.userptr = base; sd.pos = 0; size = __splice_from_pipe(pipe, &sd, pipe_to_user); if (size < 0) { if (!ret) ret = size; break; } ret += size; if (size < len) break; nr_segs--; iov++; } pipe_unlock(pipe); if (!ret) ret = error; return ret; } /* * vmsplice splices a user address range into a pipe. It can be thought of * as splice-from-memory, where the regular splice is splice-from-file (or * to file). In both cases the output is a pipe, naturally. */ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov, unsigned long nr_segs, unsigned int flags) { struct pipe_inode_info *pipe; struct page *pages[PIPE_DEF_BUFFERS]; struct partial_page partial[PIPE_DEF_BUFFERS]; struct splice_pipe_desc spd = { .pages = pages, .partial = partial, .nr_pages_max = PIPE_DEF_BUFFERS, .flags = flags, .ops = &user_page_pipe_buf_ops, .spd_release = spd_release_page, }; long ret; pipe = get_pipe_info(file); if (!pipe) return -EBADF; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages, spd.partial, flags & SPLICE_F_GIFT, spd.nr_pages_max); if (spd.nr_pages <= 0) ret = spd.nr_pages; else ret = splice_to_pipe(pipe, &spd); splice_shrink_spd(&spd); return ret; } /* * Note that vmsplice only really supports true splicing _from_ user memory * to a pipe, not the other way around. Splicing from user memory is a simple * operation that can be supported without any funky alignment restrictions * or nasty vm tricks. We simply map in the user memory and fill them into * a pipe. The reverse isn't quite as easy, though. There are two possible * solutions for that: * * - memcpy() the data internally, at which point we might as well just * do a regular read() on the buffer anyway. 
* - Lots of nasty vm tricks, that are neither fast nor flexible (it * has restriction limitations on both ends of the pipe). * * Currently we punt and implement it as a normal copy, see pipe_to_user(). * */ SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov, unsigned long, nr_segs, unsigned int, flags) { struct file *file; long error; int fput; if (unlikely(nr_segs > UIO_MAXIOV)) return -EINVAL; else if (unlikely(!nr_segs)) return 0; error = -EBADF; file = fget_light(fd, &fput); if (file) { if (file->f_mode & FMODE_WRITE) error = vmsplice_to_pipe(file, iov, nr_segs, flags); else if (file->f_mode & FMODE_READ) error = vmsplice_to_user(file, iov, nr_segs, flags); fput_light(file, fput); } return error; } SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags) { long error; struct file *in, *out; int fput_in, fput_out; if (unlikely(!len)) return 0; error = -EBADF; in = fget_light(fd_in, &fput_in); if (in) { if (in->f_mode & FMODE_READ) { out = fget_light(fd_out, &fput_out); if (out) { if (out->f_mode & FMODE_WRITE) error = do_splice(in, off_in, out, off_out, len, flags); fput_light(out, fput_out); } } fput_light(in, fput_in); } return error; } /* * Make sure there's data to read. Wait for input if we can, otherwise * return an appropriate error. */ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) { int ret; /* * Check ->nrbufs without the inode lock first. This function * is speculative anyways, so missing one is ok. */ if (pipe->nrbufs) return 0; ret = 0; pipe_lock(pipe); while (!pipe->nrbufs) { if (signal_pending(current)) { ret = -ERESTARTSYS; break; } if (!pipe->writers) break; if (!pipe->waiting_writers) { if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } } pipe_wait(pipe); } pipe_unlock(pipe); return ret; } /* * Make sure there's writeable room. Wait for room if we can, otherwise * return an appropriate error. 
*/ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) { int ret; /* * Check ->nrbufs without the inode lock first. This function * is speculative anyways, so missing one is ok. */ if (pipe->nrbufs < pipe->buffers) return 0; ret = 0; pipe_lock(pipe); while (pipe->nrbufs >= pipe->buffers) { if (!pipe->readers) { send_sig(SIGPIPE, current, 0); ret = -EPIPE; break; } if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } if (signal_pending(current)) { ret = -ERESTARTSYS; break; } pipe->waiting_writers++; pipe_wait(pipe); pipe->waiting_writers--; } pipe_unlock(pipe); return ret; } /* * Splice contents of ipipe to opipe. */ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags) { struct pipe_buffer *ibuf, *obuf; int ret = 0, nbuf; bool input_wakeup = false; retry: ret = ipipe_prep(ipipe, flags); if (ret) return ret; ret = opipe_prep(opipe, flags); if (ret) return ret; /* * Potential ABBA deadlock, work around it by ordering lock * grabbing by pipe info address. Otherwise two different processes * could deadlock (one doing tee from A -> B, the other from B -> A). */ pipe_double_lock(ipipe, opipe); do { if (!opipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } if (!ipipe->nrbufs && !ipipe->writers) break; /* * Cannot make any progress, because either the input * pipe is empty or the output pipe is full. */ if (!ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) { /* Already processed some buffers, break */ if (ret) break; if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } /* * We raced with another reader/writer and haven't * managed to process any buffers. A zero return * value means EOF, so retry instead. 
*/ pipe_unlock(ipipe); pipe_unlock(opipe); goto retry; } ibuf = ipipe->bufs + ipipe->curbuf; nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1); obuf = opipe->bufs + nbuf; if (len >= ibuf->len) { /* * Simply move the whole buffer from ipipe to opipe */ *obuf = *ibuf; ibuf->ops = NULL; opipe->nrbufs++; ipipe->curbuf = (ipipe->curbuf + 1) & (ipipe->buffers - 1); ipipe->nrbufs--; input_wakeup = true; } else { /* * Get a reference to this pipe buffer, * so we can copy the contents over. */ ibuf->ops->get(ipipe, ibuf); *obuf = *ibuf; /* * Don't inherit the gift flag, we need to * prevent multiple steals of this page. */ obuf->flags &= ~PIPE_BUF_FLAG_GIFT; obuf->len = len; opipe->nrbufs++; ibuf->offset += obuf->len; ibuf->len -= obuf->len; } ret += obuf->len; len -= obuf->len; } while (len); pipe_unlock(ipipe); pipe_unlock(opipe); /* * If we put data in the output pipe, wakeup any potential readers. */ if (ret > 0) wakeup_pipe_readers(opipe); if (input_wakeup) wakeup_pipe_writers(ipipe); return ret; } /* * Link contents of ipipe to opipe. */ static int link_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags) { struct pipe_buffer *ibuf, *obuf; int ret = 0, i = 0, nbuf; /* * Potential ABBA deadlock, work around it by ordering lock * grabbing by pipe info address. Otherwise two different processes * could deadlock (one doing tee from A -> B, the other from B -> A). */ pipe_double_lock(ipipe, opipe); do { if (!opipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } /* * If we have iterated all input buffers or ran out of * output room, break. */ if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) break; ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1)); nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1); /* * Get a reference to this pipe buffer, * so we can copy the contents over. 
*/ ibuf->ops->get(ipipe, ibuf); obuf = opipe->bufs + nbuf; *obuf = *ibuf; /* * Don't inherit the gift flag, we need to * prevent multiple steals of this page. */ obuf->flags &= ~PIPE_BUF_FLAG_GIFT; if (obuf->len > len) obuf->len = len; opipe->nrbufs++; ret += obuf->len; len -= obuf->len; i++; } while (len); /* * return EAGAIN if we have the potential of some data in the * future, otherwise just return 0 */ if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) ret = -EAGAIN; pipe_unlock(ipipe); pipe_unlock(opipe); /* * If we put data in the output pipe, wakeup any potential readers. */ if (ret > 0) wakeup_pipe_readers(opipe); return ret; } /* * This is a tee(1) implementation that works on pipes. It doesn't copy * any data, it simply references the 'in' pages on the 'out' pipe. * The 'flags' used are the SPLICE_F_* variants, currently the only * applicable one is SPLICE_F_NONBLOCK. */ static long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags) { struct pipe_inode_info *ipipe = get_pipe_info(in); struct pipe_inode_info *opipe = get_pipe_info(out); int ret = -EINVAL; /* * Duplicate the contents of ipipe to opipe without actually * copying the data. */ if (ipipe && opipe && ipipe != opipe) { /* * Keep going, unless we encounter an error. The ipipe/opipe * ordering doesn't really matter. */ ret = ipipe_prep(ipipe, flags); if (!ret) { ret = opipe_prep(opipe, flags); if (!ret) ret = link_pipe(ipipe, opipe, len, flags); } } return ret; } SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags) { struct file *in; int error, fput_in; if (unlikely(!len)) return 0; error = -EBADF; in = fget_light(fdin, &fput_in); if (in) { if (in->f_mode & FMODE_READ) { int fput_out; struct file *out = fget_light(fdout, &fput_out); if (out) { if (out->f_mode & FMODE_WRITE) error = do_tee(in, out, len, flags); fput_light(out, fput_out); } } fput_light(in, fput_in); } return error; }
gpl-2.0
ZeroInfinityXDA/HelixKernel_Nougat
drivers/mtd/nand/bf5xx_nand.c
414
19905
/* linux/drivers/mtd/nand/bf5xx_nand.c * * Copyright 2006-2008 Analog Devices Inc. * http://blackfin.uclinux.org/ * Bryan Wu <bryan.wu@analog.com> * * Blackfin BF5xx on-chip NAND flash controller driver * * Derived from drivers/mtd/nand/s3c2410.c * Copyright (c) 2007 Ben Dooks <ben@simtec.co.uk> * * Derived from drivers/mtd/nand/cafe.c * Copyright © 2006 Red Hat, Inc. * Copyright © 2006 David Woodhouse <dwmw2@infradead.org> * * Changelog: * 12-Jun-2007 Bryan Wu: Initial version * 18-Jul-2007 Bryan Wu: * - ECC_HW and ECC_SW supported * - DMA supported in ECC_HW * - YAFFS tested as rootfs in both ECC_HW and ECC_SW * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/bitops.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <asm/blackfin.h> #include <asm/dma.h> #include <asm/cacheflush.h> #include <asm/nand.h> #include <asm/portmux.h> #define DRV_NAME "bf5xx-nand" #define DRV_VERSION "1.2" #define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>" #define DRV_DESC "BF5xx on-chip NAND FLash Controller Driver" /* NFC_STAT Masks */ #define NBUSY 0x01 /* Not Busy */ #define WB_FULL 0x02 /* Write Buffer Full */ #define PG_WR_STAT 0x04 /* Page Write Pending */ #define PG_RD_STAT 0x08 /* Page Read Pending */ #define WB_EMPTY 0x10 /* Write Buffer Empty */ /* NFC_IRQSTAT Masks */ #define NBUSYIRQ 0x01 /* Not Busy IRQ */ #define WB_OVF 0x02 /* Write Buffer Overflow */ #define WB_EDGE 0x04 /* Write Buffer Edge Detect */ #define RD_RDY 0x08 /* Read Data Ready */ #define WR_DONE 0x10 /* Page Write Done */ /* NFC_RST Masks */ #define ECC_RST 0x01 /* ECC (and NFC counters) Reset */ /* NFC_PGCTL Masks */ #define PG_RD_START 0x01 /* Page Read Start */ #define PG_WR_START 0x02 /* Page Write Start */ #ifdef CONFIG_MTD_NAND_BF5XX_HWECC static int hardware_ecc = 1; #else static int hardware_ecc; #endif static const unsigned short bfin_nfc_pin_req[] = {P_NAND_CE, P_NAND_RB, P_NAND_D0, P_NAND_D1, P_NAND_D2, P_NAND_D3, P_NAND_D4, P_NAND_D5, P_NAND_D6, P_NAND_D7, P_NAND_WE, P_NAND_RE, P_NAND_CLE, P_NAND_ALE, 0}; #ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC static struct 
nand_ecclayout bootrom_ecclayout = { .eccbytes = 24, .eccpos = { 0x8 * 0, 0x8 * 0 + 1, 0x8 * 0 + 2, 0x8 * 1, 0x8 * 1 + 1, 0x8 * 1 + 2, 0x8 * 2, 0x8 * 2 + 1, 0x8 * 2 + 2, 0x8 * 3, 0x8 * 3 + 1, 0x8 * 3 + 2, 0x8 * 4, 0x8 * 4 + 1, 0x8 * 4 + 2, 0x8 * 5, 0x8 * 5 + 1, 0x8 * 5 + 2, 0x8 * 6, 0x8 * 6 + 1, 0x8 * 6 + 2, 0x8 * 7, 0x8 * 7 + 1, 0x8 * 7 + 2 }, .oobfree = { { 0x8 * 0 + 3, 5 }, { 0x8 * 1 + 3, 5 }, { 0x8 * 2 + 3, 5 }, { 0x8 * 3 + 3, 5 }, { 0x8 * 4 + 3, 5 }, { 0x8 * 5 + 3, 5 }, { 0x8 * 6 + 3, 5 }, { 0x8 * 7 + 3, 5 }, } }; #endif /* * Data structures for bf5xx nand flash controller driver */ /* bf5xx nand info */ struct bf5xx_nand_info { /* mtd info */ struct nand_hw_control controller; struct mtd_info mtd; struct nand_chip chip; /* platform info */ struct bf5xx_nand_platform *platform; /* device info */ struct device *device; /* DMA stuff */ struct completion dma_completion; }; /* * Conversion functions */ static struct bf5xx_nand_info *mtd_to_nand_info(struct mtd_info *mtd) { return container_of(mtd, struct bf5xx_nand_info, mtd); } static struct bf5xx_nand_info *to_nand_info(struct platform_device *pdev) { return platform_get_drvdata(pdev); } static struct bf5xx_nand_platform *to_nand_plat(struct platform_device *pdev) { return dev_get_platdata(&pdev->dev); } /* * struct nand_chip interface function pointers */ /* * bf5xx_nand_hwcontrol * * Issue command and address cycles to the chip */ static void bf5xx_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) { if (cmd == NAND_CMD_NONE) return; while (bfin_read_NFC_STAT() & WB_FULL) cpu_relax(); if (ctrl & NAND_CLE) bfin_write_NFC_CMD(cmd); else if (ctrl & NAND_ALE) bfin_write_NFC_ADDR(cmd); SSYNC(); } /* * bf5xx_nand_devready() * * returns 0 if the nand is busy, 1 if it is ready */ static int bf5xx_nand_devready(struct mtd_info *mtd) { unsigned short val = bfin_read_NFC_STAT(); if ((val & NBUSY) == NBUSY) return 1; else return 0; } /* * ECC functions * These allow the bf5xx to use the controller's ECC * 
generator block to ECC the data as it passes through */ /* * ECC error correction function */ static int bf5xx_nand_correct_data_256(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) { struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); u32 syndrome[5]; u32 calced, stored; int i; unsigned short failing_bit, failing_byte; u_char data; calced = calc_ecc[0] | (calc_ecc[1] << 8) | (calc_ecc[2] << 16); stored = read_ecc[0] | (read_ecc[1] << 8) | (read_ecc[2] << 16); syndrome[0] = (calced ^ stored); /* * syndrome 0: all zero * No error in data * No action */ if (!syndrome[0] || !calced || !stored) return 0; /* * sysdrome 0: only one bit is one * ECC data was incorrect * No action */ if (hweight32(syndrome[0]) == 1) { dev_err(info->device, "ECC data was incorrect!\n"); return 1; } syndrome[1] = (calced & 0x7FF) ^ (stored & 0x7FF); syndrome[2] = (calced & 0x7FF) ^ ((calced >> 11) & 0x7FF); syndrome[3] = (stored & 0x7FF) ^ ((stored >> 11) & 0x7FF); syndrome[4] = syndrome[2] ^ syndrome[3]; for (i = 0; i < 5; i++) dev_info(info->device, "syndrome[%d] 0x%08x\n", i, syndrome[i]); dev_info(info->device, "calced[0x%08x], stored[0x%08x]\n", calced, stored); /* * sysdrome 0: exactly 11 bits are one, each parity * and parity' pair is 1 & 0 or 0 & 1. 
* 1-bit correctable error * Correct the error */ if (hweight32(syndrome[0]) == 11 && syndrome[4] == 0x7FF) { dev_info(info->device, "1-bit correctable error, correct it.\n"); dev_info(info->device, "syndrome[1] 0x%08x\n", syndrome[1]); failing_bit = syndrome[1] & 0x7; failing_byte = syndrome[1] >> 0x3; data = *(dat + failing_byte); data = data ^ (0x1 << failing_bit); *(dat + failing_byte) = data; return 0; } /* * sysdrome 0: random data * More than 1-bit error, non-correctable error * Discard data, mark bad block */ dev_err(info->device, "More than 1-bit error, non-correctable error.\n"); dev_err(info->device, "Please discard data, mark bad block\n"); return 1; } static int bf5xx_nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) { struct nand_chip *chip = mtd->priv; int ret; ret = bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc); /* If ecc size is 512, correct second 256 bytes */ if (chip->ecc.size == 512) { dat += 256; read_ecc += 3; calc_ecc += 3; ret |= bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc); } return ret; } static void bf5xx_nand_enable_hwecc(struct mtd_info *mtd, int mode) { return; } static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) { struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); struct nand_chip *chip = mtd->priv; u16 ecc0, ecc1; u32 code[2]; u8 *p; /* first 3 bytes ECC code for 256 page size */ ecc0 = bfin_read_NFC_ECC0(); ecc1 = bfin_read_NFC_ECC1(); code[0] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11); dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]); p = (u8 *) code; memcpy(ecc_code, p, 3); /* second 3 bytes ECC code for 512 ecc size */ if (chip->ecc.size == 512) { ecc0 = bfin_read_NFC_ECC2(); ecc1 = bfin_read_NFC_ECC3(); code[1] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11); /* second 3 bytes in ecc_code for second 256 * bytes of 512 page size */ p = (u8 *) (code + 1); memcpy((ecc_code + 3), p, 3); dev_dbg(info->device, "returning ecc 
0x%08x\n", code[1]); } return 0; } /* * PIO mode for buffer writing and reading */ static void bf5xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { int i; unsigned short val; /* * Data reads are requested by first writing to NFC_DATA_RD * and then reading back from NFC_READ. */ for (i = 0; i < len; i++) { while (bfin_read_NFC_STAT() & WB_FULL) cpu_relax(); /* Contents do not matter */ bfin_write_NFC_DATA_RD(0x0000); SSYNC(); while ((bfin_read_NFC_IRQSTAT() & RD_RDY) != RD_RDY) cpu_relax(); buf[i] = bfin_read_NFC_READ(); val = bfin_read_NFC_IRQSTAT(); val |= RD_RDY; bfin_write_NFC_IRQSTAT(val); SSYNC(); } } static uint8_t bf5xx_nand_read_byte(struct mtd_info *mtd) { uint8_t val; bf5xx_nand_read_buf(mtd, &val, 1); return val; } static void bf5xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { int i; for (i = 0; i < len; i++) { while (bfin_read_NFC_STAT() & WB_FULL) cpu_relax(); bfin_write_NFC_DATA_WR(buf[i]); SSYNC(); } } static void bf5xx_nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) { int i; u16 *p = (u16 *) buf; len >>= 1; /* * Data reads are requested by first writing to NFC_DATA_RD * and then reading back from NFC_READ. 
*/ bfin_write_NFC_DATA_RD(0x5555); SSYNC(); for (i = 0; i < len; i++) p[i] = bfin_read_NFC_READ(); } static void bf5xx_nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) { int i; u16 *p = (u16 *) buf; len >>= 1; for (i = 0; i < len; i++) bfin_write_NFC_DATA_WR(p[i]); SSYNC(); } /* * DMA functions for buffer writing and reading */ static irqreturn_t bf5xx_nand_dma_irq(int irq, void *dev_id) { struct bf5xx_nand_info *info = dev_id; clear_dma_irqstat(CH_NFC); disable_dma(CH_NFC); complete(&info->dma_completion); return IRQ_HANDLED; } static void bf5xx_nand_dma_rw(struct mtd_info *mtd, uint8_t *buf, int is_read) { struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); struct nand_chip *chip = mtd->priv; unsigned short val; dev_dbg(info->device, " mtd->%p, buf->%p, is_read %d\n", mtd, buf, is_read); /* * Before starting a dma transfer, be sure to invalidate/flush * the cache over the address range of your DMA buffer to * prevent cache coherency problems. Otherwise very subtle bugs * can be introduced to your driver. */ if (is_read) invalidate_dcache_range((unsigned int)buf, (unsigned int)(buf + chip->ecc.size)); else flush_dcache_range((unsigned int)buf, (unsigned int)(buf + chip->ecc.size)); /* * This register must be written before each page is * transferred to generate the correct ECC register * values. 
*/ bfin_write_NFC_RST(ECC_RST); SSYNC(); while (bfin_read_NFC_RST() & ECC_RST) cpu_relax(); disable_dma(CH_NFC); clear_dma_irqstat(CH_NFC); /* setup DMA register with Blackfin DMA API */ set_dma_config(CH_NFC, 0x0); set_dma_start_addr(CH_NFC, (unsigned long) buf); /* The DMAs have different size on BF52x and BF54x */ #ifdef CONFIG_BF52x set_dma_x_count(CH_NFC, (chip->ecc.size >> 1)); set_dma_x_modify(CH_NFC, 2); val = DI_EN | WDSIZE_16; #endif #ifdef CONFIG_BF54x set_dma_x_count(CH_NFC, (chip->ecc.size >> 2)); set_dma_x_modify(CH_NFC, 4); val = DI_EN | WDSIZE_32; #endif /* setup write or read operation */ if (is_read) val |= WNR; set_dma_config(CH_NFC, val); enable_dma(CH_NFC); /* Start PAGE read/write operation */ if (is_read) bfin_write_NFC_PGCTL(PG_RD_START); else bfin_write_NFC_PGCTL(PG_WR_START); wait_for_completion(&info->dma_completion); } static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); struct nand_chip *chip = mtd->priv; dev_dbg(info->device, "mtd->%p, buf->%p, int %d\n", mtd, buf, len); if (len == chip->ecc.size) bf5xx_nand_dma_rw(mtd, buf, 1); else bf5xx_nand_read_buf(mtd, buf, len); } static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); struct nand_chip *chip = mtd->priv; dev_dbg(info->device, "mtd->%p, buf->%p, len %d\n", mtd, buf, len); if (len == chip->ecc.size) bf5xx_nand_dma_rw(mtd, (uint8_t *)buf, 0); else bf5xx_nand_write_buf(mtd, buf, len); } static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { bf5xx_nand_read_buf(mtd, buf, mtd->writesize); bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize); return 0; } static int bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int oob_required) { bf5xx_nand_write_buf(mtd, buf, mtd->writesize); 
bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); return 0; } /* * System initialization functions */ static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info) { int ret; /* Do not use dma */ if (!hardware_ecc) return 0; init_completion(&info->dma_completion); /* Request NFC DMA channel */ ret = request_dma(CH_NFC, "BF5XX NFC driver"); if (ret < 0) { dev_err(info->device, " unable to get DMA channel\n"); return ret; } #ifdef CONFIG_BF54x /* Setup DMAC1 channel mux for NFC which shared with SDH */ bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() & ~1); SSYNC(); #endif set_dma_callback(CH_NFC, bf5xx_nand_dma_irq, info); /* Turn off the DMA channel first */ disable_dma(CH_NFC); return 0; } static void bf5xx_nand_dma_remove(struct bf5xx_nand_info *info) { /* Free NFC DMA channel */ if (hardware_ecc) free_dma(CH_NFC); } /* * BF5XX NFC hardware initialization * - pin mux setup * - clear interrupt status */ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info) { int err = 0; unsigned short val; struct bf5xx_nand_platform *plat = info->platform; /* setup NFC_CTL register */ dev_info(info->device, "data_width=%d, wr_dly=%d, rd_dly=%d\n", (plat->data_width ? 
16 : 8), plat->wr_dly, plat->rd_dly); val = (1 << NFC_PG_SIZE_OFFSET) | (plat->data_width << NFC_NWIDTH_OFFSET) | (plat->rd_dly << NFC_RDDLY_OFFSET) | (plat->wr_dly << NFC_WRDLY_OFFSET); dev_dbg(info->device, "NFC_CTL is 0x%04x\n", val); bfin_write_NFC_CTL(val); SSYNC(); /* clear interrupt status */ bfin_write_NFC_IRQMASK(0x0); SSYNC(); val = bfin_read_NFC_IRQSTAT(); bfin_write_NFC_IRQSTAT(val); SSYNC(); /* DMA initialization */ if (bf5xx_nand_dma_init(info)) err = -ENXIO; return err; } /* * Device management interface */ static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info) { struct mtd_info *mtd = &info->mtd; struct mtd_partition *parts = info->platform->partitions; int nr = info->platform->nr_partitions; return mtd_device_register(mtd, parts, nr); } static int bf5xx_nand_remove(struct platform_device *pdev) { struct bf5xx_nand_info *info = to_nand_info(pdev); /* first thing we need to do is release all our mtds * and their partitions, then go through freeing the * resources used */ nand_release(&info->mtd); peripheral_free_list(bfin_nfc_pin_req); bf5xx_nand_dma_remove(info); return 0; } static int bf5xx_nand_scan(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; int ret; ret = nand_scan_ident(mtd, 1, NULL); if (ret) return ret; if (hardware_ecc) { /* * for nand with page size > 512B, think it as several sections with 512B */ if (likely(mtd->writesize >= 512)) { chip->ecc.size = 512; chip->ecc.bytes = 6; chip->ecc.strength = 2; } else { chip->ecc.size = 256; chip->ecc.bytes = 3; chip->ecc.strength = 1; bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET)); SSYNC(); } } return nand_scan_tail(mtd); } /* * bf5xx_nand_probe * * called by device layer when it finds a device matching * one our driver can handled. 
This code checks to see if * it can allocate all necessary resources then calls the * nand layer to look for devices */ static int bf5xx_nand_probe(struct platform_device *pdev) { struct bf5xx_nand_platform *plat = to_nand_plat(pdev); struct bf5xx_nand_info *info = NULL; struct nand_chip *chip = NULL; struct mtd_info *mtd = NULL; int err = 0; dev_dbg(&pdev->dev, "(%p)\n", pdev); if (!plat) { dev_err(&pdev->dev, "no platform specific information\n"); return -EINVAL; } if (peripheral_request_list(bfin_nfc_pin_req, DRV_NAME)) { dev_err(&pdev->dev, "requesting Peripherals failed\n"); return -EFAULT; } info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); if (info == NULL) { err = -ENOMEM; goto out_err; } platform_set_drvdata(pdev, info); spin_lock_init(&info->controller.lock); init_waitqueue_head(&info->controller.wq); info->device = &pdev->dev; info->platform = plat; /* initialise chip data struct */ chip = &info->chip; if (plat->data_width) chip->options |= NAND_BUSWIDTH_16; chip->options |= NAND_CACHEPRG | NAND_SKIP_BBTSCAN; chip->read_buf = (plat->data_width) ? bf5xx_nand_read_buf16 : bf5xx_nand_read_buf; chip->write_buf = (plat->data_width) ? 
bf5xx_nand_write_buf16 : bf5xx_nand_write_buf; chip->read_byte = bf5xx_nand_read_byte; chip->cmd_ctrl = bf5xx_nand_hwcontrol; chip->dev_ready = bf5xx_nand_devready; chip->priv = &info->mtd; chip->controller = &info->controller; chip->IO_ADDR_R = (void __iomem *) NFC_READ; chip->IO_ADDR_W = (void __iomem *) NFC_DATA_WR; chip->chip_delay = 0; /* initialise mtd info data struct */ mtd = &info->mtd; mtd->priv = chip; mtd->owner = THIS_MODULE; /* initialise the hardware */ err = bf5xx_nand_hw_init(info); if (err) goto out_err; /* setup hardware ECC data struct */ if (hardware_ecc) { #ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC chip->ecc.layout = &bootrom_ecclayout; #endif chip->read_buf = bf5xx_nand_dma_read_buf; chip->write_buf = bf5xx_nand_dma_write_buf; chip->ecc.calculate = bf5xx_nand_calculate_ecc; chip->ecc.correct = bf5xx_nand_correct_data; chip->ecc.mode = NAND_ECC_HW; chip->ecc.hwctl = bf5xx_nand_enable_hwecc; chip->ecc.read_page_raw = bf5xx_nand_read_page_raw; chip->ecc.write_page_raw = bf5xx_nand_write_page_raw; } else { chip->ecc.mode = NAND_ECC_SOFT; } /* scan hardware nand chip and setup mtd info data struct */ if (bf5xx_nand_scan(mtd)) { err = -ENXIO; goto out_err_nand_scan; } #ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC chip->badblockpos = 63; #endif /* add NAND partition */ bf5xx_nand_add_partition(info); dev_dbg(&pdev->dev, "initialised ok\n"); return 0; out_err_nand_scan: bf5xx_nand_dma_remove(info); out_err: peripheral_free_list(bfin_nfc_pin_req); return err; } /* driver device registration */ static struct platform_driver bf5xx_nand_driver = { .probe = bf5xx_nand_probe, .remove = bf5xx_nand_remove, .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, }; module_platform_driver(bf5xx_nand_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION(DRV_DESC); MODULE_ALIAS("platform:" DRV_NAME);
gpl-2.0
xtrymind/android_kernel_msm
drivers/net/caif/caif_shm_u5500.c
5790
3127
/* * Copyright (C) ST-Ericsson AB 2010 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com * License terms: GNU General Public License (GPL) version 2 */ #define pr_fmt(fmt) KBUILD_MODNAME ":" fmt #include <linux/init.h> #include <linux/module.h> #include <linux/netdevice.h> #include <mach/mbox-db5500.h> #include <net/caif/caif_shm.h> MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CAIF Shared Memory protocol driver"); #define MAX_SHM_INSTANCES 1 enum { MBX_ACC0, MBX_ACC1, MBX_DSP }; static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES]; static unsigned int shm_start; static unsigned int shm_size; module_param(shm_size, uint , 0440); MODULE_PARM_DESC(shm_total_size, "Start of SHM shared memory"); module_param(shm_start, uint , 0440); MODULE_PARM_DESC(shm_total_start, "Total Size of SHM shared memory"); static int shmdev_send_msg(u32 dev_id, u32 mbx_msg) { /* Always block until msg is written successfully */ mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true); return 0; } static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev, void *pshm_drv) { /* * For UX5500, we have only 1 SHM instance which uses MBX0 * for communication with the peer modem */ pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv); if (!pshm_dev->hmbx) return -ENODEV; else return 0; } static int __init caif_shmdev_init(void) { int i, result; /* Loop is currently overkill, there is only one instance */ for (i = 0; i < MAX_SHM_INSTANCES; i++) { shmdev_lyr[i].shm_base_addr = shm_start; shmdev_lyr[i].shm_total_sz = shm_size; if (((char *)shmdev_lyr[i].shm_base_addr == NULL) || (shmdev_lyr[i].shm_total_sz <= 0)) { pr_warn("ERROR," "Shared memory Address and/or Size incorrect" ", Bailing out ...\n"); result = -EINVAL; goto clean; } pr_info("SHM AREA (instance %d) STARTS" " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr); shmdev_lyr[i].shm_id = i; shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg; 
shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup; /* * Finally, CAIF core module is called with details in place: * 1. SHM base address * 2. SHM size * 3. MBX handle */ result = caif_shmcore_probe(&shmdev_lyr[i]); if (result) { pr_warn("ERROR[%d]," "Could not probe SHM core (instance %d)" " Bailing out ...\n", result, i); goto clean; } } return 0; clean: /* * For now, we assume that even if one instance of SHM fails, we bail * out of the driver support completely. For this, we need to release * any memory allocated and unregister any instance of SHM net device. */ for (i = 0; i < MAX_SHM_INSTANCES; i++) { if (shmdev_lyr[i].pshm_netdev) unregister_netdev(shmdev_lyr[i].pshm_netdev); } return result; } static void __exit caif_shmdev_exit(void) { int i; for (i = 0; i < MAX_SHM_INSTANCES; i++) { caif_shmcore_remove(shmdev_lyr[i].pshm_netdev); kfree((void *)shmdev_lyr[i].shm_base_addr); } } module_init(caif_shmdev_init); module_exit(caif_shmdev_exit);
gpl-2.0
ravendra275/sony_kernel_msm8960
drivers/staging/speakup/speakup_ltlk.c
7582
6036
/* * originally written by: Kirk Reiser <kirk@braille.uwo.ca> * this version considerably modified by David Borowski, david575@rogers.com * * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * specificly written as a driver for the speakup screenreview * s not a general device driver. */ #include "speakup.h" #include "spk_priv.h" #include "serialio.h" #include "speakup_dtlk.h" /* local header file for LiteTalk values */ #define DRV_VERSION "2.11" #define PROCSPEECH 0x0d static int synth_probe(struct spk_synth *synth); static struct var_t vars[] = { { CAPS_START, .u.s = {"\x01+35p" } }, { CAPS_STOP, .u.s = {"\x01-35p" } }, { RATE, .u.n = {"\x01%ds", 8, 0, 9, 0, 0, NULL } }, { PITCH, .u.n = {"\x01%dp", 50, 0, 99, 0, 0, NULL } }, { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, { PUNCT, .u.n = {"\x01%db", 7, 0, 15, 0, 0, NULL } }, { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/ltlk. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute freq_attribute = __ATTR(freq, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = __ATTR(punct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute tone_attribute = __ATTR(tone, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = __ATTR(voice, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *synth_attrs[] = { &caps_start_attribute.attr, &caps_stop_attribute.attr, &freq_attribute.attr, &pitch_attribute.attr, &punct_attribute.attr, &rate_attribute.attr, &tone_attribute.attr, &voice_attribute.attr, &vol_attribute.attr, &delay_time_attribute.attr, &direct_attribute.attr, &full_time_attribute.attr, &jiffy_delta_attribute.attr, &trigger_time_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static struct spk_synth synth_ltlk = { .name = "ltlk", .version = DRV_VERSION, .long_name = "LiteTalk", .init = "\01@\x01\x31y\n\0", .procspeech = PROCSPEECH, .clear = SYNTH_CLEAR, .delay = 500, .trigger = 50, .jiffies = 50, .full = 40000, .startup = SYNTH_START, .checkval = SYNTH_CHECK, .vars = vars, .probe = synth_probe, .release = spk_serial_release, .synth_immediate = spk_synth_immediate, .catch_up = spk_do_catch_up, .flush = spk_synth_flush, .is_alive = spk_synth_is_alive_restart, .synth_adjust = NULL, .read_buff_add = NULL, .get_index = spk_serial_in_nowait, .indexing = { .command = "\x01%di", .lowindex = 1, .highindex = 5, .currindex = 1, }, .attributes = { .attrs = synth_attrs, .name = "ltlk", }, }; /* interrogate the LiteTalk and print its settings */ static void synth_interrogate(struct spk_synth *synth) { unsigned char *t, i; unsigned char buf[50], rom_v[20]; spk_synth_immediate(synth, "\x18\x01?"); for (i = 0; i < 50; i++) { buf[i] = spk_serial_in(); if (i > 2 && buf[i] == 0x7f) break; } t = buf+2; for (i = 0; *t != '\r'; t++) { rom_v[i] = *t; if (++i >= 19) break; } rom_v[i] = 0; pr_info("%s: ROM version: %s\n", synth->long_name, rom_v); } static int synth_probe(struct spk_synth *synth) { int failed = 0; failed = serial_synth_probe(synth); if (failed == 0) synth_interrogate(synth); synth->alive = !failed; return failed; } module_param_named(ser, synth_ltlk.ser, int, S_IRUGO); module_param_named(start, synth_ltlk.startup, short, S_IRUGO); MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer 
(0-based)."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); static int __init ltlk_init(void) { return synth_add(&synth_ltlk); } static void __exit ltlk_exit(void) { synth_remove(&synth_ltlk); } module_init(ltlk_init); module_exit(ltlk_exit); MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>"); MODULE_AUTHOR("David Borowski"); MODULE_DESCRIPTION("Speakup support for DoubleTalk LT/LiteTalk synthesizers"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
IllusionRom/android_kernel_lge_hammerhead
drivers/staging/speakup/speakup_ltlk.c
7582
6036
/* * originally written by: Kirk Reiser <kirk@braille.uwo.ca> * this version considerably modified by David Borowski, david575@rogers.com * * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * specificly written as a driver for the speakup screenreview * s not a general device driver. */ #include "speakup.h" #include "spk_priv.h" #include "serialio.h" #include "speakup_dtlk.h" /* local header file for LiteTalk values */ #define DRV_VERSION "2.11" #define PROCSPEECH 0x0d static int synth_probe(struct spk_synth *synth); static struct var_t vars[] = { { CAPS_START, .u.s = {"\x01+35p" } }, { CAPS_STOP, .u.s = {"\x01-35p" } }, { RATE, .u.n = {"\x01%ds", 8, 0, 9, 0, 0, NULL } }, { PITCH, .u.n = {"\x01%dp", 50, 0, 99, 0, 0, NULL } }, { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, { PUNCT, .u.n = {"\x01%db", 7, 0, 15, 0, 0, NULL } }, { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/ltlk. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute freq_attribute = __ATTR(freq, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = __ATTR(punct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute tone_attribute = __ATTR(tone, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = __ATTR(voice, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *synth_attrs[] = { &caps_start_attribute.attr, &caps_stop_attribute.attr, &freq_attribute.attr, &pitch_attribute.attr, &punct_attribute.attr, &rate_attribute.attr, &tone_attribute.attr, &voice_attribute.attr, &vol_attribute.attr, &delay_time_attribute.attr, &direct_attribute.attr, &full_time_attribute.attr, &jiffy_delta_attribute.attr, &trigger_time_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static struct spk_synth synth_ltlk = { .name = "ltlk", .version = DRV_VERSION, .long_name = "LiteTalk", .init = "\01@\x01\x31y\n\0", .procspeech = PROCSPEECH, .clear = SYNTH_CLEAR, .delay = 500, .trigger = 50, .jiffies = 50, .full = 40000, .startup = SYNTH_START, .checkval = SYNTH_CHECK, .vars = vars, .probe = synth_probe, .release = spk_serial_release, .synth_immediate = spk_synth_immediate, .catch_up = spk_do_catch_up, .flush = spk_synth_flush, .is_alive = spk_synth_is_alive_restart, .synth_adjust = NULL, .read_buff_add = NULL, .get_index = spk_serial_in_nowait, .indexing = { .command = "\x01%di", .lowindex = 1, .highindex = 5, .currindex = 1, }, .attributes = { .attrs = synth_attrs, .name = "ltlk", }, }; /* interrogate the LiteTalk and print its settings */ static void synth_interrogate(struct spk_synth *synth) { unsigned char *t, i; unsigned char buf[50], rom_v[20]; spk_synth_immediate(synth, "\x18\x01?"); for (i = 0; i < 50; i++) { buf[i] = spk_serial_in(); if (i > 2 && buf[i] == 0x7f) break; } t = buf+2; for (i = 0; *t != '\r'; t++) { rom_v[i] = *t; if (++i >= 19) break; } rom_v[i] = 0; pr_info("%s: ROM version: %s\n", synth->long_name, rom_v); } static int synth_probe(struct spk_synth *synth) { int failed = 0; failed = serial_synth_probe(synth); if (failed == 0) synth_interrogate(synth); synth->alive = !failed; return failed; } module_param_named(ser, synth_ltlk.ser, int, S_IRUGO); module_param_named(start, synth_ltlk.startup, short, S_IRUGO); MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer 
(0-based)."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); static int __init ltlk_init(void) { return synth_add(&synth_ltlk); } static void __exit ltlk_exit(void) { synth_remove(&synth_ltlk); } module_init(ltlk_init); module_exit(ltlk_exit); MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>"); MODULE_AUTHOR("David Borowski"); MODULE_DESCRIPTION("Speakup support for DoubleTalk LT/LiteTalk synthesizers"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
Kalashnikitty/Aurora_D802
arch/mips/math-emu/dp_add.c
7838
4674
/* IEEE754 floating point arithmetic * double precision: common utilities */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## * */ #include "ieee754dp.h" ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y) { COMPXDP; COMPYDP; EXPLODEXDP; EXPLODEYDP; CLEARCX; FLUSHXDP; FLUSHYDP; switch (CLPAIR(xc, yc)) { case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): SETCX(IEEE754_INVALID_OPERATION); return ieee754dp_nanxcpt(ieee754dp_indef(), "add", x, y); case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, 
IEEE754_CLASS_QNAN): return y; case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): return x; /* Infinity handling */ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): if (xs == ys) return x; SETCX(IEEE754_INVALID_OPERATION); return ieee754dp_xcpt(ieee754dp_indef(), "add", x, y); case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): return y; case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): return x; /* Zero handling */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): if (xs == ys) return x; else return ieee754dp_zero(ieee754_csr.rm == IEEE754_RD); case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): return x; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): return y; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; /* FALL THROUGH */ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): DPDNORMY; break; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): DPDNORMX; break; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): break; } assert(xm & DP_HIDDEN_BIT); assert(ym & DP_HIDDEN_BIT); /* provide guard,round and stick bit space */ xm <<= 3; ym <<= 3; if (xe > ye) { /* have to shift y fraction right to align */ int s = xe - ye; ym = XDPSRS(ym, s); ye += s; } else if (ye > xe) { /* have to shift x fraction right to align */ int s = ye - xe; xm = XDPSRS(xm, s); xe += s; } assert(xe == ye); assert(xe <= DP_EMAX); if (xs == ys) { /* generate 28 bit result of adding two 27 bit numbers * leaving result in xm,xs,xe */ xm = 
xm + ym; xe = xe; xs = xs; if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */ xm = XDPSRS1(xm); xe++; } } else { if (xm >= ym) { xm = xm - ym; xe = xe; xs = xs; } else { xm = ym - xm; xe = xe; xs = ys; } if (xm == 0) return ieee754dp_zero(ieee754_csr.rm == IEEE754_RD); /* normalize to rounding precision */ while ((xm >> (DP_MBITS + 3)) == 0) { xm <<= 1; xe--; } } DPNORMRET2(xs, xe, xm, "add", x, y); }
gpl-2.0
william-wfei/linux
drivers/net/wan/wanxl.c
159
21247
/* * wanXL serial card driver for Linux * host part * * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * Status: * - Only DTE (external clock) support with NRZ and NRZI encodings * - wanXL100 will require minor driver modifications, no access to hw */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/netdevice.h> #include <linux/hdlc.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <asm/io.h> #include "wanxl.h" static const char* version = "wanXL serial card driver version: 0.48"; #define PLX_CTL_RESET 0x40000000 /* adapter reset */ #undef DEBUG_PKT #undef DEBUG_PCI /* MAILBOX #1 - PUTS COMMANDS */ #define MBX1_CMD_ABORTJ 0x85000000 /* Abort and Jump */ #ifdef __LITTLE_ENDIAN #define MBX1_CMD_BSWAP 0x8C000001 /* little-endian Byte Swap Mode */ #else #define MBX1_CMD_BSWAP 0x8C000000 /* big-endian Byte Swap Mode */ #endif /* MAILBOX #2 - DRAM SIZE */ #define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */ struct port { struct net_device *dev; struct card *card; spinlock_t lock; /* for wanxl_xmit */ int node; /* physical port #0 - 3 */ unsigned int clock_type; int tx_in, tx_out; struct sk_buff *tx_skbs[TX_BUFFERS]; }; struct card_status { desc_t rx_descs[RX_QUEUE_LENGTH]; port_status_t port_status[4]; }; struct card { int n_ports; /* 1, 2 or 4 ports */ u8 irq; u8 __iomem *plx; /* PLX PCI9060 virtual base address */ struct pci_dev *pdev; /* for pci_name(pdev) */ int rx_in; struct sk_buff *rx_skbs[RX_QUEUE_LENGTH]; struct card_status 
*status; /* shared between host and card */ dma_addr_t status_address; struct port ports[0]; /* 1 - 4 port structures follow */ }; static inline struct port *dev_to_port(struct net_device *dev) { return (struct port *)dev_to_hdlc(dev)->priv; } static inline port_status_t *get_status(struct port *port) { return &port->card->status->port_status[port->node]; } #ifdef DEBUG_PCI static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr, size_t size, int direction) { dma_addr_t addr = pci_map_single(pdev, ptr, size, direction); if (addr + size > 0x100000000LL) pr_crit("%s: pci_map_single() returned memory at 0x%llx!\n", pci_name(pdev), (unsigned long long)addr); return addr; } #undef pci_map_single #define pci_map_single pci_map_single_debug #endif /* Cable and/or personality module change interrupt service */ static inline void wanxl_cable_intr(struct port *port) { u32 value = get_status(port)->cable; int valid = 1; const char *cable, *pm, *dte = "", *dsr = "", *dcd = ""; switch(value & 0x7) { case STATUS_CABLE_V35: cable = "V.35"; break; case STATUS_CABLE_X21: cable = "X.21"; break; case STATUS_CABLE_V24: cable = "V.24"; break; case STATUS_CABLE_EIA530: cable = "EIA530"; break; case STATUS_CABLE_NONE: cable = "no"; break; default: cable = "invalid"; } switch((value >> STATUS_CABLE_PM_SHIFT) & 0x7) { case STATUS_CABLE_V35: pm = "V.35"; break; case STATUS_CABLE_X21: pm = "X.21"; break; case STATUS_CABLE_V24: pm = "V.24"; break; case STATUS_CABLE_EIA530: pm = "EIA530"; break; case STATUS_CABLE_NONE: pm = "no personality"; valid = 0; break; default: pm = "invalid personality"; valid = 0; } if (valid) { if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) { dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" : ", DSR off"; dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" : ", carrier off"; } dte = (value & STATUS_CABLE_DCE) ? 
" DCE" : " DTE"; } netdev_info(port->dev, "%s%s module, %s cable%s%s\n", pm, dte, cable, dsr, dcd); if (value & STATUS_CABLE_DCD) netif_carrier_on(port->dev); else netif_carrier_off(port->dev); } /* Transmit complete interrupt service */ static inline void wanxl_tx_intr(struct port *port) { struct net_device *dev = port->dev; while (1) { desc_t *desc = &get_status(port)->tx_descs[port->tx_in]; struct sk_buff *skb = port->tx_skbs[port->tx_in]; switch (desc->stat) { case PACKET_FULL: case PACKET_EMPTY: netif_wake_queue(dev); return; case PACKET_UNDERRUN: dev->stats.tx_errors++; dev->stats.tx_fifo_errors++; break; default: dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; } desc->stat = PACKET_EMPTY; /* Free descriptor */ pci_unmap_single(port->card->pdev, desc->address, skb->len, PCI_DMA_TODEVICE); dev_kfree_skb_irq(skb); port->tx_in = (port->tx_in + 1) % TX_BUFFERS; } } /* Receive complete interrupt service */ static inline void wanxl_rx_intr(struct card *card) { desc_t *desc; while (desc = &card->status->rx_descs[card->rx_in], desc->stat != PACKET_EMPTY) { if ((desc->stat & PACKET_PORT_MASK) > card->n_ports) pr_crit("%s: received packet for nonexistent port\n", pci_name(card->pdev)); else { struct sk_buff *skb = card->rx_skbs[card->rx_in]; struct port *port = &card->ports[desc->stat & PACKET_PORT_MASK]; struct net_device *dev = port->dev; if (!skb) dev->stats.rx_dropped++; else { pci_unmap_single(card->pdev, desc->address, BUFFER_LENGTH, PCI_DMA_FROMDEVICE); skb_put(skb, desc->length); #ifdef DEBUG_PKT printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len); debug_frame(skb); #endif dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; skb->protocol = hdlc_type_trans(skb, dev); netif_rx(skb); skb = NULL; } if (!skb) { skb = dev_alloc_skb(BUFFER_LENGTH); desc->address = skb ? 
pci_map_single(card->pdev, skb->data, BUFFER_LENGTH, PCI_DMA_FROMDEVICE) : 0; card->rx_skbs[card->rx_in] = skb; } } desc->stat = PACKET_EMPTY; /* Free descriptor */ card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH; } } static irqreturn_t wanxl_intr(int irq, void* dev_id) { struct card *card = dev_id; int i; u32 stat; int handled = 0; while((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) { handled = 1; writel(stat, card->plx + PLX_DOORBELL_FROM_CARD); for (i = 0; i < card->n_ports; i++) { if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i))) wanxl_tx_intr(&card->ports[i]); if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i))) wanxl_cable_intr(&card->ports[i]); } if (stat & (1 << DOORBELL_FROM_CARD_RX)) wanxl_rx_intr(card); } return IRQ_RETVAL(handled); } static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev) { struct port *port = dev_to_port(dev); desc_t *desc; spin_lock(&port->lock); desc = &get_status(port)->tx_descs[port->tx_out]; if (desc->stat != PACKET_EMPTY) { /* should never happen - previous xmit should stop queue */ #ifdef DEBUG_PKT printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name); #endif netif_stop_queue(dev); spin_unlock(&port->lock); return NETDEV_TX_BUSY; /* request packet to be queued */ } #ifdef DEBUG_PKT printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len); debug_frame(skb); #endif port->tx_skbs[port->tx_out] = skb; desc->address = pci_map_single(port->card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); desc->length = skb->len; desc->stat = PACKET_FULL; writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node), port->card->plx + PLX_DOORBELL_TO_CARD); port->tx_out = (port->tx_out + 1) % TX_BUFFERS; if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) { netif_stop_queue(dev); #ifdef DEBUG_PKT printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name); #endif } spin_unlock(&port->lock); return NETDEV_TX_OK; } static int wanxl_attach(struct net_device *dev, unsigned short encoding, unsigned short parity) { 
struct port *port = dev_to_port(dev); if (encoding != ENCODING_NRZ && encoding != ENCODING_NRZI) return -EINVAL; if (parity != PARITY_NONE && parity != PARITY_CRC32_PR1_CCITT && parity != PARITY_CRC16_PR1_CCITT && parity != PARITY_CRC32_PR0_CCITT && parity != PARITY_CRC16_PR0_CCITT) return -EINVAL; get_status(port)->encoding = encoding; get_status(port)->parity = parity; return 0; } static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { const size_t size = sizeof(sync_serial_settings); sync_serial_settings line; struct port *port = dev_to_port(dev); if (cmd != SIOCWANDEV) return hdlc_ioctl(dev, ifr, cmd); switch (ifr->ifr_settings.type) { case IF_GET_IFACE: ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; if (ifr->ifr_settings.size < size) { ifr->ifr_settings.size = size; /* data size wanted */ return -ENOBUFS; } memset(&line, 0, sizeof(line)); line.clock_type = get_status(port)->clocking; line.clock_rate = 0; line.loopback = 0; if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size)) return -EFAULT; return 0; case IF_IFACE_SYNC_SERIAL: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (dev->flags & IFF_UP) return -EBUSY; if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync, size)) return -EFAULT; if (line.clock_type != CLOCK_EXT && line.clock_type != CLOCK_TXFROMRX) return -EINVAL; /* No such clock setting */ if (line.loopback != 0) return -EINVAL; get_status(port)->clocking = line.clock_type; return 0; default: return hdlc_ioctl(dev, ifr, cmd); } } static int wanxl_open(struct net_device *dev) { struct port *port = dev_to_port(dev); u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD; unsigned long timeout; int i; if (get_status(port)->open) { netdev_err(dev, "port already open\n"); return -EIO; } if ((i = hdlc_open(dev)) != 0) return i; port->tx_in = port->tx_out = 0; for (i = 0; i < TX_BUFFERS; i++) get_status(port)->tx_descs[i].stat = PACKET_EMPTY; /* signal the card */ writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr); 
timeout = jiffies + HZ; do { if (get_status(port)->open) { netif_start_queue(dev); return 0; } } while (time_after(timeout, jiffies)); netdev_err(dev, "unable to open port\n"); /* ask the card to close the port, should it be still alive */ writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr); return -EFAULT; } static int wanxl_close(struct net_device *dev) { struct port *port = dev_to_port(dev); unsigned long timeout; int i; hdlc_close(dev); /* signal the card */ writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), port->card->plx + PLX_DOORBELL_TO_CARD); timeout = jiffies + HZ; do { if (!get_status(port)->open) break; } while (time_after(timeout, jiffies)); if (get_status(port)->open) netdev_err(dev, "unable to close port\n"); netif_stop_queue(dev); for (i = 0; i < TX_BUFFERS; i++) { desc_t *desc = &get_status(port)->tx_descs[i]; if (desc->stat != PACKET_EMPTY) { desc->stat = PACKET_EMPTY; pci_unmap_single(port->card->pdev, desc->address, port->tx_skbs[i]->len, PCI_DMA_TODEVICE); dev_kfree_skb(port->tx_skbs[i]); } } return 0; } static struct net_device_stats *wanxl_get_stats(struct net_device *dev) { struct port *port = dev_to_port(dev); dev->stats.rx_over_errors = get_status(port)->rx_overruns; dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors; dev->stats.rx_errors = dev->stats.rx_over_errors + dev->stats.rx_frame_errors; return &dev->stats; } static int wanxl_puts_command(struct card *card, u32 cmd) { unsigned long timeout = jiffies + 5 * HZ; writel(cmd, card->plx + PLX_MAILBOX_1); do { if (readl(card->plx + PLX_MAILBOX_1) == 0) return 0; schedule(); }while (time_after(timeout, jiffies)); return -1; } static void wanxl_reset(struct card *card) { u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET; writel(0x80, card->plx + PLX_MAILBOX_0); writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL); readl(card->plx + PLX_CONTROL); /* wait for posted write */ udelay(1); writel(old_value, card->plx + PLX_CONTROL); readl(card->plx + 
PLX_CONTROL); /* wait for posted write */ } static void wanxl_pci_remove_one(struct pci_dev *pdev) { struct card *card = pci_get_drvdata(pdev); int i; for (i = 0; i < card->n_ports; i++) { unregister_hdlc_device(card->ports[i].dev); free_netdev(card->ports[i].dev); } /* unregister and free all host resources */ if (card->irq) free_irq(card->irq, card); wanxl_reset(card); for (i = 0; i < RX_QUEUE_LENGTH; i++) if (card->rx_skbs[i]) { pci_unmap_single(card->pdev, card->status->rx_descs[i].address, BUFFER_LENGTH, PCI_DMA_FROMDEVICE); dev_kfree_skb(card->rx_skbs[i]); } if (card->plx) iounmap(card->plx); if (card->status) pci_free_consistent(pdev, sizeof(struct card_status), card->status, card->status_address); pci_release_regions(pdev); pci_disable_device(pdev); kfree(card); } #include "wanxlfw.inc" static const struct net_device_ops wanxl_ops = { .ndo_open = wanxl_open, .ndo_stop = wanxl_close, .ndo_start_xmit = hdlc_start_xmit, .ndo_do_ioctl = wanxl_ioctl, .ndo_get_stats = wanxl_get_stats, }; static int wanxl_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct card *card; u32 ramsize, stat; unsigned long timeout; u32 plx_phy; /* PLX PCI base address */ u32 mem_phy; /* memory PCI base addr */ u8 __iomem *mem; /* memory virtual base addr */ int i, ports, alloc_size; #ifndef MODULE pr_info_once("%s\n", version); #endif i = pci_enable_device(pdev); if (i) return i; /* QUICC can only access first 256 MB of host RAM directly, but PLX9060 DMA does 32-bits for actual packet data transfers */ /* FIXME when PCI/DMA subsystems are fixed. We set both dma_mask and consistent_dma_mask to 28 bits and pray pci_alloc_consistent() will use this info. 
It should work on most platforms */ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(28)) || pci_set_dma_mask(pdev, DMA_BIT_MASK(28))) { pr_err("No usable DMA configuration\n"); pci_disable_device(pdev); return -EIO; } i = pci_request_regions(pdev, "wanXL"); if (i) { pci_disable_device(pdev); return i; } switch (pdev->device) { case PCI_DEVICE_ID_SBE_WANXL100: ports = 1; break; case PCI_DEVICE_ID_SBE_WANXL200: ports = 2; break; default: ports = 4; } alloc_size = sizeof(struct card) + ports * sizeof(struct port); card = kzalloc(alloc_size, GFP_KERNEL); if (card == NULL) { pci_release_regions(pdev); pci_disable_device(pdev); return -ENOBUFS; } pci_set_drvdata(pdev, card); card->pdev = pdev; card->status = pci_alloc_consistent(pdev, sizeof(struct card_status), &card->status_address); if (card->status == NULL) { wanxl_pci_remove_one(pdev); return -ENOBUFS; } #ifdef DEBUG_PCI printk(KERN_DEBUG "wanXL %s: pci_alloc_consistent() returned memory" " at 0x%LX\n", pci_name(pdev), (unsigned long long)card->status_address); #endif /* FIXME when PCI/DMA subsystems are fixed. We set both dma_mask and consistent_dma_mask back to 32 bits to indicate the card can do 32-bit DMA addressing */ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) || pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { pr_err("No usable DMA configuration\n"); wanxl_pci_remove_one(pdev); return -EIO; } /* set up PLX mapping */ plx_phy = pci_resource_start(pdev, 0); card->plx = ioremap_nocache(plx_phy, 0x70); if (!card->plx) { pr_err("ioremap() failed\n"); wanxl_pci_remove_one(pdev); return -EFAULT; } #if RESET_WHILE_LOADING wanxl_reset(card); #endif timeout = jiffies + 20 * HZ; while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) { if (time_before(timeout, jiffies)) { pr_warn("%s: timeout waiting for PUTS to complete\n", pci_name(pdev)); wanxl_pci_remove_one(pdev); return -ENODEV; } switch(stat & 0xC0) { case 0x00: /* hmm - PUTS completed with non-zero code? 
*/ case 0x80: /* PUTS still testing the hardware */ break; default: pr_warn("%s: PUTS test 0x%X failed\n", pci_name(pdev), stat & 0x30); wanxl_pci_remove_one(pdev); return -ENODEV; } schedule(); } /* get on-board memory size (PUTS detects no more than 4 MB) */ ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK; /* set up on-board RAM mapping */ mem_phy = pci_resource_start(pdev, 2); /* sanity check the board's reported memory size */ if (ramsize < BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) { pr_warn("%s: no enough on-board RAM (%u bytes detected, %u bytes required)\n", pci_name(pdev), ramsize, BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports); wanxl_pci_remove_one(pdev); return -ENODEV; } if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) { pr_warn("%s: unable to Set Byte Swap Mode\n", pci_name(pdev)); wanxl_pci_remove_one(pdev); return -ENODEV; } for (i = 0; i < RX_QUEUE_LENGTH; i++) { struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH); card->rx_skbs[i] = skb; if (skb) card->status->rx_descs[i].address = pci_map_single(card->pdev, skb->data, BUFFER_LENGTH, PCI_DMA_FROMDEVICE); } mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware)); if (!mem) { pr_err("ioremap() failed\n"); wanxl_pci_remove_one(pdev); return -EFAULT; } for (i = 0; i < sizeof(firmware); i += 4) writel(ntohl(*(__be32*)(firmware + i)), mem + PDM_OFFSET + i); for (i = 0; i < ports; i++) writel(card->status_address + (void *)&card->status->port_status[i] - (void *)card->status, mem + PDM_OFFSET + 4 + i * 4); writel(card->status_address, mem + PDM_OFFSET + 20); writel(PDM_OFFSET, mem); iounmap(mem); writel(0, card->plx + PLX_MAILBOX_5); if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) { pr_warn("%s: unable to Abort and Jump\n", pci_name(pdev)); wanxl_pci_remove_one(pdev); return -ENODEV; } stat = 0; timeout = jiffies + 5 * HZ; do { if ((stat = readl(card->plx + PLX_MAILBOX_5)) != 0) break; schedule(); }while (time_after(timeout, jiffies)); if (!stat) { 
pr_warn("%s: timeout while initializing card firmware\n", pci_name(pdev)); wanxl_pci_remove_one(pdev); return -ENODEV; } #if DETECT_RAM ramsize = stat; #endif pr_info("%s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n", pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq); /* Allocate IRQ */ if (request_irq(pdev->irq, wanxl_intr, IRQF_SHARED, "wanXL", card)) { pr_warn("%s: could not allocate IRQ%i\n", pci_name(pdev), pdev->irq); wanxl_pci_remove_one(pdev); return -EBUSY; } card->irq = pdev->irq; for (i = 0; i < ports; i++) { hdlc_device *hdlc; struct port *port = &card->ports[i]; struct net_device *dev = alloc_hdlcdev(port); if (!dev) { pr_err("%s: unable to allocate memory\n", pci_name(pdev)); wanxl_pci_remove_one(pdev); return -ENOMEM; } port->dev = dev; hdlc = dev_to_hdlc(dev); spin_lock_init(&port->lock); dev->tx_queue_len = 50; dev->netdev_ops = &wanxl_ops; hdlc->attach = wanxl_attach; hdlc->xmit = wanxl_xmit; port->card = card; port->node = i; get_status(port)->clocking = CLOCK_EXT; if (register_hdlc_device(dev)) { pr_err("%s: unable to register hdlc device\n", pci_name(pdev)); free_netdev(dev); wanxl_pci_remove_one(pdev); return -ENOBUFS; } card->n_ports++; } pr_info("%s: port", pci_name(pdev)); for (i = 0; i < ports; i++) pr_cont("%s #%i: %s", i ? 
"," : "", i, card->ports[i].dev->name); pr_cont("\n"); for (i = 0; i < ports; i++) wanxl_cable_intr(&card->ports[i]); /* get carrier status etc.*/ return 0; } static const struct pci_device_id wanxl_pci_tbl[] = { { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } }; static struct pci_driver wanxl_pci_driver = { .name = "wanXL", .id_table = wanxl_pci_tbl, .probe = wanxl_pci_init_one, .remove = wanxl_pci_remove_one, }; static int __init wanxl_init_module(void) { #ifdef MODULE pr_info("%s\n", version); #endif return pci_register_driver(&wanxl_pci_driver); } static void __exit wanxl_cleanup_module(void) { pci_unregister_driver(&wanxl_pci_driver); } MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl); module_init(wanxl_init_module); module_exit(wanxl_cleanup_module);
gpl-2.0
TSCLKS/linux
drivers/tty/serial/samsung.c
159
46774
/* * Driver core for Samsung SoC onboard UARTs. * * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Hote on 2410 error handling * * The s3c2410 manual has a love/hate affair with the contents of the * UERSTAT register in the UART blocks, and keeps marking some of the * error bits as reserved. Having checked with the s3c2410x01, * it copes with BREAKs properly, so I am happy to ignore the RESERVED * feature from the latter versions of the manual. * * If it becomes aparrent that latter versions of the 2410 remove these * bits, then action will have to be taken to differentiate the versions * and change the policy on BREAK * * BJD, 04-Nov-2004 */ #if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/sysrq.h> #include <linux/console.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/serial_s3c.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/of.h> #include <asm/irq.h> #include "samsung.h" #if defined(CONFIG_SERIAL_SAMSUNG_DEBUG) && \ defined(CONFIG_DEBUG_LL) && \ !defined(MODULE) extern void printascii(const char *); __printf(1, 2) static void dbg(const char *fmt, ...) { va_list va; char buff[256]; va_start(va, fmt); vscnprintf(buff, sizeof(buff), fmt, va); va_end(va); printascii(buff); } #else #define dbg(fmt, ...) 
do { if (0) no_printk(fmt, ##__VA_ARGS__); } while (0) #endif /* UART name and device definitions */ #define S3C24XX_SERIAL_NAME "ttySAC" #define S3C24XX_SERIAL_MAJOR 204 #define S3C24XX_SERIAL_MINOR 64 /* macros to change one thing to another */ #define tx_enabled(port) ((port)->unused[0]) #define rx_enabled(port) ((port)->unused[1]) /* flag to ignore all characters coming in */ #define RXSTAT_DUMMY_READ (0x10000000) static inline struct s3c24xx_uart_port *to_ourport(struct uart_port *port) { return container_of(port, struct s3c24xx_uart_port, port); } /* translate a port to the device name */ static inline const char *s3c24xx_serial_portname(struct uart_port *port) { return to_platform_device(port->dev)->name; } static int s3c24xx_serial_txempty_nofifo(struct uart_port *port) { return rd_regl(port, S3C2410_UTRSTAT) & S3C2410_UTRSTAT_TXE; } /* * s3c64xx and later SoC's include the interrupt mask and status registers in * the controller itself, unlike the s3c24xx SoC's which have these registers * in the interrupt controller. Check if the port type is s3c64xx or higher. 
*/ static int s3c24xx_serial_has_interrupt_mask(struct uart_port *port) { return to_ourport(port)->info->type == PORT_S3C6400; } static void s3c24xx_serial_rx_enable(struct uart_port *port) { unsigned long flags; unsigned int ucon, ufcon; int count = 10000; spin_lock_irqsave(&port->lock, flags); while (--count && !s3c24xx_serial_txempty_nofifo(port)) udelay(100); ufcon = rd_regl(port, S3C2410_UFCON); ufcon |= S3C2410_UFCON_RESETRX; wr_regl(port, S3C2410_UFCON, ufcon); ucon = rd_regl(port, S3C2410_UCON); ucon |= S3C2410_UCON_RXIRQMODE; wr_regl(port, S3C2410_UCON, ucon); rx_enabled(port) = 1; spin_unlock_irqrestore(&port->lock, flags); } static void s3c24xx_serial_rx_disable(struct uart_port *port) { unsigned long flags; unsigned int ucon; spin_lock_irqsave(&port->lock, flags); ucon = rd_regl(port, S3C2410_UCON); ucon &= ~S3C2410_UCON_RXIRQMODE; wr_regl(port, S3C2410_UCON, ucon); rx_enabled(port) = 0; spin_unlock_irqrestore(&port->lock, flags); } static void s3c24xx_serial_stop_tx(struct uart_port *port) { struct s3c24xx_uart_port *ourport = to_ourport(port); if (tx_enabled(port)) { if (s3c24xx_serial_has_interrupt_mask(port)) __set_bit(S3C64XX_UINTM_TXD, portaddrl(port, S3C64XX_UINTM)); else disable_irq_nosync(ourport->tx_irq); tx_enabled(port) = 0; if (port->flags & UPF_CONS_FLOW) s3c24xx_serial_rx_enable(port); } } static void s3c24xx_serial_start_tx(struct uart_port *port) { struct s3c24xx_uart_port *ourport = to_ourport(port); if (!tx_enabled(port)) { if (port->flags & UPF_CONS_FLOW) s3c24xx_serial_rx_disable(port); if (s3c24xx_serial_has_interrupt_mask(port)) __clear_bit(S3C64XX_UINTM_TXD, portaddrl(port, S3C64XX_UINTM)); else enable_irq(ourport->tx_irq); tx_enabled(port) = 1; } } static void s3c24xx_serial_stop_rx(struct uart_port *port) { struct s3c24xx_uart_port *ourport = to_ourport(port); if (rx_enabled(port)) { dbg("s3c24xx_serial_stop_rx: port=%p\n", port); if (s3c24xx_serial_has_interrupt_mask(port)) __set_bit(S3C64XX_UINTM_RXD, portaddrl(port, 
S3C64XX_UINTM)); else disable_irq_nosync(ourport->rx_irq); rx_enabled(port) = 0; } } static inline struct s3c24xx_uart_info *s3c24xx_port_to_info(struct uart_port *port) { return to_ourport(port)->info; } static inline struct s3c2410_uartcfg *s3c24xx_port_to_cfg(struct uart_port *port) { struct s3c24xx_uart_port *ourport; if (port->dev == NULL) return NULL; ourport = container_of(port, struct s3c24xx_uart_port, port); return ourport->cfg; } static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport, unsigned long ufstat) { struct s3c24xx_uart_info *info = ourport->info; if (ufstat & info->rx_fifofull) return ourport->port.fifosize; return (ufstat & info->rx_fifomask) >> info->rx_fifoshift; } /* ? - where has parity gone?? */ #define S3C2410_UERSTAT_PARITY (0x1000) static irqreturn_t s3c24xx_serial_rx_chars(int irq, void *dev_id) { struct s3c24xx_uart_port *ourport = dev_id; struct uart_port *port = &ourport->port; unsigned int ufcon, ch, flag, ufstat, uerstat; unsigned long flags; int max_count = 64; spin_lock_irqsave(&port->lock, flags); while (max_count-- > 0) { ufcon = rd_regl(port, S3C2410_UFCON); ufstat = rd_regl(port, S3C2410_UFSTAT); if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0) break; uerstat = rd_regl(port, S3C2410_UERSTAT); ch = rd_regb(port, S3C2410_URXH); if (port->flags & UPF_CONS_FLOW) { int txe = s3c24xx_serial_txempty_nofifo(port); if (rx_enabled(port)) { if (!txe) { rx_enabled(port) = 0; continue; } } else { if (txe) { ufcon |= S3C2410_UFCON_RESETRX; wr_regl(port, S3C2410_UFCON, ufcon); rx_enabled(port) = 1; spin_unlock_irqrestore(&port->lock, flags); goto out; } continue; } } /* insert the character into the buffer */ flag = TTY_NORMAL; port->icount.rx++; if (unlikely(uerstat & S3C2410_UERSTAT_ANY)) { dbg("rxerr: port ch=0x%02x, rxs=0x%08x\n", ch, uerstat); /* check for break */ if (uerstat & S3C2410_UERSTAT_BREAK) { dbg("break!\n"); port->icount.brk++; if (uart_handle_break(port)) goto ignore_char; } if (uerstat & 
S3C2410_UERSTAT_FRAME) port->icount.frame++; if (uerstat & S3C2410_UERSTAT_OVERRUN) port->icount.overrun++; uerstat &= port->read_status_mask; if (uerstat & S3C2410_UERSTAT_BREAK) flag = TTY_BREAK; else if (uerstat & S3C2410_UERSTAT_PARITY) flag = TTY_PARITY; else if (uerstat & (S3C2410_UERSTAT_FRAME | S3C2410_UERSTAT_OVERRUN)) flag = TTY_FRAME; } if (uart_handle_sysrq_char(port, ch)) goto ignore_char; uart_insert_char(port, uerstat, S3C2410_UERSTAT_OVERRUN, ch, flag); ignore_char: continue; } spin_unlock_irqrestore(&port->lock, flags); tty_flip_buffer_push(&port->state->port); out: return IRQ_HANDLED; } static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id) { struct s3c24xx_uart_port *ourport = id; struct uart_port *port = &ourport->port; struct circ_buf *xmit = &port->state->xmit; unsigned long flags; int count = 256; spin_lock_irqsave(&port->lock, flags); if (port->x_char) { wr_regb(port, S3C2410_UTXH, port->x_char); port->icount.tx++; port->x_char = 0; goto out; } /* if there isn't anything more to transmit, or the uart is now * stopped, disable the uart and exit */ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { s3c24xx_serial_stop_tx(port); goto out; } /* try and drain the buffer... 
*/ while (!uart_circ_empty(xmit) && count-- > 0) { if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull) break; wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) { spin_unlock(&port->lock); uart_write_wakeup(port); spin_lock(&port->lock); } if (uart_circ_empty(xmit)) s3c24xx_serial_stop_tx(port); out: spin_unlock_irqrestore(&port->lock, flags); return IRQ_HANDLED; } /* interrupt handler for s3c64xx and later SoC's.*/ static irqreturn_t s3c64xx_serial_handle_irq(int irq, void *id) { struct s3c24xx_uart_port *ourport = id; struct uart_port *port = &ourport->port; unsigned int pend = rd_regl(port, S3C64XX_UINTP); irqreturn_t ret = IRQ_HANDLED; if (pend & S3C64XX_UINTM_RXD_MSK) { ret = s3c24xx_serial_rx_chars(irq, id); wr_regl(port, S3C64XX_UINTP, S3C64XX_UINTM_RXD_MSK); } if (pend & S3C64XX_UINTM_TXD_MSK) { ret = s3c24xx_serial_tx_chars(irq, id); wr_regl(port, S3C64XX_UINTP, S3C64XX_UINTM_TXD_MSK); } return ret; } static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port) { struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port); unsigned long ufstat = rd_regl(port, S3C2410_UFSTAT); unsigned long ufcon = rd_regl(port, S3C2410_UFCON); if (ufcon & S3C2410_UFCON_FIFOMODE) { if ((ufstat & info->tx_fifomask) != 0 || (ufstat & info->tx_fifofull)) return 0; return 1; } return s3c24xx_serial_txempty_nofifo(port); } /* no modem control lines */ static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port) { unsigned int umstat = rd_regb(port, S3C2410_UMSTAT); if (umstat & S3C2410_UMSTAT_CTS) return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; else return TIOCM_CAR | TIOCM_DSR; } static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl) { unsigned int umcon = rd_regl(port, S3C2410_UMCON); if (mctrl & TIOCM_RTS) umcon |= S3C2410_UMCOM_RTS_LOW; else umcon &= ~S3C2410_UMCOM_RTS_LOW; wr_regl(port, S3C2410_UMCON, 
umcon); } static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state) { unsigned long flags; unsigned int ucon; spin_lock_irqsave(&port->lock, flags); ucon = rd_regl(port, S3C2410_UCON); if (break_state) ucon |= S3C2410_UCON_SBREAK; else ucon &= ~S3C2410_UCON_SBREAK; wr_regl(port, S3C2410_UCON, ucon); spin_unlock_irqrestore(&port->lock, flags); } static void s3c24xx_serial_shutdown(struct uart_port *port) { struct s3c24xx_uart_port *ourport = to_ourport(port); if (ourport->tx_claimed) { if (!s3c24xx_serial_has_interrupt_mask(port)) free_irq(ourport->tx_irq, ourport); tx_enabled(port) = 0; ourport->tx_claimed = 0; } if (ourport->rx_claimed) { if (!s3c24xx_serial_has_interrupt_mask(port)) free_irq(ourport->rx_irq, ourport); ourport->rx_claimed = 0; rx_enabled(port) = 0; } /* Clear pending interrupts and mask all interrupts */ if (s3c24xx_serial_has_interrupt_mask(port)) { free_irq(port->irq, ourport); wr_regl(port, S3C64XX_UINTP, 0xf); wr_regl(port, S3C64XX_UINTM, 0xf); } } static int s3c24xx_serial_startup(struct uart_port *port) { struct s3c24xx_uart_port *ourport = to_ourport(port); int ret; dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n", port, (unsigned long long)port->mapbase, port->membase); rx_enabled(port) = 1; ret = request_irq(ourport->rx_irq, s3c24xx_serial_rx_chars, 0, s3c24xx_serial_portname(port), ourport); if (ret != 0) { dev_err(port->dev, "cannot get irq %d\n", ourport->rx_irq); return ret; } ourport->rx_claimed = 1; dbg("requesting tx irq...\n"); tx_enabled(port) = 1; ret = request_irq(ourport->tx_irq, s3c24xx_serial_tx_chars, 0, s3c24xx_serial_portname(port), ourport); if (ret) { dev_err(port->dev, "cannot get irq %d\n", ourport->tx_irq); goto err; } ourport->tx_claimed = 1; dbg("s3c24xx_serial_startup ok\n"); /* the port reset code should have done the correct * register setup for the port controls */ return ret; err: s3c24xx_serial_shutdown(port); return ret; } static int s3c64xx_serial_startup(struct uart_port *port) { 
struct s3c24xx_uart_port *ourport = to_ourport(port); int ret; dbg("s3c64xx_serial_startup: port=%p (%08llx,%p)\n", port, (unsigned long long)port->mapbase, port->membase); wr_regl(port, S3C64XX_UINTM, 0xf); ret = request_irq(port->irq, s3c64xx_serial_handle_irq, IRQF_SHARED, s3c24xx_serial_portname(port), ourport); if (ret) { dev_err(port->dev, "cannot get irq %d\n", port->irq); return ret; } /* For compatibility with s3c24xx Soc's */ rx_enabled(port) = 1; ourport->rx_claimed = 1; tx_enabled(port) = 0; ourport->tx_claimed = 1; /* Enable Rx Interrupt */ __clear_bit(S3C64XX_UINTM_RXD, portaddrl(port, S3C64XX_UINTM)); dbg("s3c64xx_serial_startup ok\n"); return ret; } /* power power management control */ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level, unsigned int old) { struct s3c24xx_uart_port *ourport = to_ourport(port); int timeout = 10000; ourport->pm_level = level; switch (level) { case 3: while (--timeout && !s3c24xx_serial_txempty_nofifo(port)) udelay(100); if (!IS_ERR(ourport->baudclk)) clk_disable_unprepare(ourport->baudclk); clk_disable_unprepare(ourport->clk); break; case 0: clk_prepare_enable(ourport->clk); if (!IS_ERR(ourport->baudclk)) clk_prepare_enable(ourport->baudclk); break; default: dev_err(port->dev, "s3c24xx_serial: unknown pm %d\n", level); } } /* baud rate calculation * * The UARTs on the S3C2410/S3C2440 can take their clocks from a number * of different sources, including the peripheral clock ("pclk") and an * external clock ("uclk"). The S3C2440 also adds the core clock ("fclk") * with a programmable extra divisor. * * The following code goes through the clock sources, and calculates the * baud clocks (and the resultant actual baud rates) and then tries to * pick the closest one and select that. 
* */ #define MAX_CLK_NAME_LENGTH 15 static inline int s3c24xx_serial_getsource(struct uart_port *port) { struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port); unsigned int ucon; if (info->num_clks == 1) return 0; ucon = rd_regl(port, S3C2410_UCON); ucon &= info->clksel_mask; return ucon >> info->clksel_shift; } static void s3c24xx_serial_setsource(struct uart_port *port, unsigned int clk_sel) { struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port); unsigned int ucon; if (info->num_clks == 1) return; ucon = rd_regl(port, S3C2410_UCON); if ((ucon & info->clksel_mask) >> info->clksel_shift == clk_sel) return; ucon &= ~info->clksel_mask; ucon |= clk_sel << info->clksel_shift; wr_regl(port, S3C2410_UCON, ucon); } static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport, unsigned int req_baud, struct clk **best_clk, unsigned int *clk_num) { struct s3c24xx_uart_info *info = ourport->info; struct clk *clk; unsigned long rate; unsigned int cnt, baud, quot, clk_sel, best_quot = 0; char clkname[MAX_CLK_NAME_LENGTH]; int calc_deviation, deviation = (1 << 30) - 1; clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel : ourport->info->def_clk_sel; for (cnt = 0; cnt < info->num_clks; cnt++) { if (!(clk_sel & (1 << cnt))) continue; sprintf(clkname, "clk_uart_baud%d", cnt); clk = clk_get(ourport->port.dev, clkname); if (IS_ERR(clk)) continue; rate = clk_get_rate(clk); if (!rate) continue; if (ourport->info->has_divslot) { unsigned long div = rate / req_baud; /* The UDIVSLOT register on the newer UARTs allows us to * get a divisor adjustment of 1/16th on the baud clock. * * We don't keep the UDIVSLOT value (the 16ths we * calculated by not multiplying the baud by 16) as it * is easy enough to recalculate. 
*/ quot = div / 16; baud = rate / div; } else { quot = (rate + (8 * req_baud)) / (16 * req_baud); baud = rate / (quot * 16); } quot--; calc_deviation = req_baud - baud; if (calc_deviation < 0) calc_deviation = -calc_deviation; if (calc_deviation < deviation) { *best_clk = clk; best_quot = quot; *clk_num = cnt; deviation = calc_deviation; } } return best_quot; } /* udivslot_table[] * * This table takes the fractional value of the baud divisor and gives * the recommended setting for the UDIVSLOT register. */ static u16 udivslot_table[16] = { [0] = 0x0000, [1] = 0x0080, [2] = 0x0808, [3] = 0x0888, [4] = 0x2222, [5] = 0x4924, [6] = 0x4A52, [7] = 0x54AA, [8] = 0x5555, [9] = 0xD555, [10] = 0xD5D5, [11] = 0xDDD5, [12] = 0xDDDD, [13] = 0xDFDD, [14] = 0xDFDF, [15] = 0xFFDF, }; static void s3c24xx_serial_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port); struct s3c24xx_uart_port *ourport = to_ourport(port); struct clk *clk = ERR_PTR(-EINVAL); unsigned long flags; unsigned int baud, quot, clk_sel = 0; unsigned int ulcon; unsigned int umcon; unsigned int udivslot = 0; /* * We don't support modem control lines. */ termios->c_cflag &= ~(HUPCL | CMSPAR); termios->c_cflag |= CLOCAL; /* * Ask the core to calculate the divisor for us. */ baud = uart_get_baud_rate(port, termios, old, 0, 115200*8); quot = s3c24xx_serial_getclk(ourport, baud, &clk, &clk_sel); if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) quot = port->custom_divisor; if (IS_ERR(clk)) return; /* check to see if we need to change clock source */ if (ourport->baudclk != clk) { s3c24xx_serial_setsource(port, clk_sel); if (!IS_ERR(ourport->baudclk)) { clk_disable_unprepare(ourport->baudclk); ourport->baudclk = ERR_PTR(-EINVAL); } clk_prepare_enable(clk); ourport->baudclk = clk; ourport->baudclk_rate = clk ? 
clk_get_rate(clk) : 0; } if (ourport->info->has_divslot) { unsigned int div = ourport->baudclk_rate / baud; if (cfg->has_fracval) { udivslot = (div & 15); dbg("fracval = %04x\n", udivslot); } else { udivslot = udivslot_table[div & 15]; dbg("udivslot = %04x (div %d)\n", udivslot, div & 15); } } switch (termios->c_cflag & CSIZE) { case CS5: dbg("config: 5bits/char\n"); ulcon = S3C2410_LCON_CS5; break; case CS6: dbg("config: 6bits/char\n"); ulcon = S3C2410_LCON_CS6; break; case CS7: dbg("config: 7bits/char\n"); ulcon = S3C2410_LCON_CS7; break; case CS8: default: dbg("config: 8bits/char\n"); ulcon = S3C2410_LCON_CS8; break; } /* preserve original lcon IR settings */ ulcon |= (cfg->ulcon & S3C2410_LCON_IRM); if (termios->c_cflag & CSTOPB) ulcon |= S3C2410_LCON_STOPB; if (termios->c_cflag & PARENB) { if (termios->c_cflag & PARODD) ulcon |= S3C2410_LCON_PODD; else ulcon |= S3C2410_LCON_PEVEN; } else { ulcon |= S3C2410_LCON_PNONE; } spin_lock_irqsave(&port->lock, flags); dbg("setting ulcon to %08x, brddiv to %d, udivslot %08x\n", ulcon, quot, udivslot); wr_regl(port, S3C2410_ULCON, ulcon); wr_regl(port, S3C2410_UBRDIV, quot); umcon = rd_regl(port, S3C2410_UMCON); if (termios->c_cflag & CRTSCTS) { umcon |= S3C2410_UMCOM_AFC; /* Disable RTS when RX FIFO contains 63 bytes */ umcon &= ~S3C2412_UMCON_AFC_8; } else { umcon &= ~S3C2410_UMCOM_AFC; } wr_regl(port, S3C2410_UMCON, umcon); if (ourport->info->has_divslot) wr_regl(port, S3C2443_DIVSLOT, udivslot); dbg("uart: ulcon = 0x%08x, ucon = 0x%08x, ufcon = 0x%08x\n", rd_regl(port, S3C2410_ULCON), rd_regl(port, S3C2410_UCON), rd_regl(port, S3C2410_UFCON)); /* * Update the per-port timeout. */ uart_update_timeout(port, termios->c_cflag, baud); /* * Which character status flags are we interested in? */ port->read_status_mask = S3C2410_UERSTAT_OVERRUN; if (termios->c_iflag & INPCK) port->read_status_mask |= S3C2410_UERSTAT_FRAME | S3C2410_UERSTAT_PARITY; /* * Which character status flags should we ignore? 
*/ port->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= S3C2410_UERSTAT_OVERRUN; if (termios->c_iflag & IGNBRK && termios->c_iflag & IGNPAR) port->ignore_status_mask |= S3C2410_UERSTAT_FRAME; /* * Ignore all characters if CREAD is not set. */ if ((termios->c_cflag & CREAD) == 0) port->ignore_status_mask |= RXSTAT_DUMMY_READ; spin_unlock_irqrestore(&port->lock, flags); } static const char *s3c24xx_serial_type(struct uart_port *port) { switch (port->type) { case PORT_S3C2410: return "S3C2410"; case PORT_S3C2440: return "S3C2440"; case PORT_S3C2412: return "S3C2412"; case PORT_S3C6400: return "S3C6400/10"; default: return NULL; } } #define MAP_SIZE (0x100) static void s3c24xx_serial_release_port(struct uart_port *port) { release_mem_region(port->mapbase, MAP_SIZE); } static int s3c24xx_serial_request_port(struct uart_port *port) { const char *name = s3c24xx_serial_portname(port); return request_mem_region(port->mapbase, MAP_SIZE, name) ? 0 : -EBUSY; } static void s3c24xx_serial_config_port(struct uart_port *port, int flags) { struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port); if (flags & UART_CONFIG_TYPE && s3c24xx_serial_request_port(port) == 0) port->type = info->type; } /* * verify the new serial_struct (for TIOCSSERIAL). 
*/ static int s3c24xx_serial_verify_port(struct uart_port *port, struct serial_struct *ser) { struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port); if (ser->type != PORT_UNKNOWN && ser->type != info->type) return -EINVAL; return 0; } #ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE static struct console s3c24xx_serial_console; static int __init s3c24xx_serial_console_init(void) { register_console(&s3c24xx_serial_console); return 0; } console_initcall(s3c24xx_serial_console_init); #define S3C24XX_SERIAL_CONSOLE &s3c24xx_serial_console #else #define S3C24XX_SERIAL_CONSOLE NULL #endif #if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_CONSOLE_POLL) static int s3c24xx_serial_get_poll_char(struct uart_port *port); static void s3c24xx_serial_put_poll_char(struct uart_port *port, unsigned char c); #endif static struct uart_ops s3c24xx_serial_ops = { .pm = s3c24xx_serial_pm, .tx_empty = s3c24xx_serial_tx_empty, .get_mctrl = s3c24xx_serial_get_mctrl, .set_mctrl = s3c24xx_serial_set_mctrl, .stop_tx = s3c24xx_serial_stop_tx, .start_tx = s3c24xx_serial_start_tx, .stop_rx = s3c24xx_serial_stop_rx, .break_ctl = s3c24xx_serial_break_ctl, .startup = s3c24xx_serial_startup, .shutdown = s3c24xx_serial_shutdown, .set_termios = s3c24xx_serial_set_termios, .type = s3c24xx_serial_type, .release_port = s3c24xx_serial_release_port, .request_port = s3c24xx_serial_request_port, .config_port = s3c24xx_serial_config_port, .verify_port = s3c24xx_serial_verify_port, #if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_CONSOLE_POLL) .poll_get_char = s3c24xx_serial_get_poll_char, .poll_put_char = s3c24xx_serial_put_poll_char, #endif }; static struct uart_driver s3c24xx_uart_drv = { .owner = THIS_MODULE, .driver_name = "s3c2410_serial", .nr = CONFIG_SERIAL_SAMSUNG_UARTS, .cons = S3C24XX_SERIAL_CONSOLE, .dev_name = S3C24XX_SERIAL_NAME, .major = S3C24XX_SERIAL_MAJOR, .minor = S3C24XX_SERIAL_MINOR, }; static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS] = { 
[0] = { .port = { .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[0].port.lock), .iotype = UPIO_MEM, .uartclk = 0, .fifosize = 16, .ops = &s3c24xx_serial_ops, .flags = UPF_BOOT_AUTOCONF, .line = 0, } }, [1] = { .port = { .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[1].port.lock), .iotype = UPIO_MEM, .uartclk = 0, .fifosize = 16, .ops = &s3c24xx_serial_ops, .flags = UPF_BOOT_AUTOCONF, .line = 1, } }, #if CONFIG_SERIAL_SAMSUNG_UARTS > 2 [2] = { .port = { .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[2].port.lock), .iotype = UPIO_MEM, .uartclk = 0, .fifosize = 16, .ops = &s3c24xx_serial_ops, .flags = UPF_BOOT_AUTOCONF, .line = 2, } }, #endif #if CONFIG_SERIAL_SAMSUNG_UARTS > 3 [3] = { .port = { .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[3].port.lock), .iotype = UPIO_MEM, .uartclk = 0, .fifosize = 16, .ops = &s3c24xx_serial_ops, .flags = UPF_BOOT_AUTOCONF, .line = 3, } } #endif }; /* s3c24xx_serial_resetport * * reset the fifos and other the settings. */ static void s3c24xx_serial_resetport(struct uart_port *port, struct s3c2410_uartcfg *cfg) { struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port); unsigned long ucon = rd_regl(port, S3C2410_UCON); unsigned int ucon_mask; ucon_mask = info->clksel_mask; if (info->type == PORT_S3C2440) ucon_mask |= S3C2440_UCON0_DIVMASK; ucon &= ucon_mask; wr_regl(port, S3C2410_UCON, ucon | cfg->ucon); /* reset both fifos */ wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH); wr_regl(port, S3C2410_UFCON, cfg->ufcon); /* some delay is required after fifo reset */ udelay(1); } #ifdef CONFIG_CPU_FREQ static int s3c24xx_serial_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data) { struct s3c24xx_uart_port *port; struct uart_port *uport; port = container_of(nb, struct s3c24xx_uart_port, freq_transition); uport = &port->port; /* check to see if port is enabled */ if (port->pm_level != 0) return 0; /* try and work out if the baudrate is changing, we can detect * a change in rate, 
but we do not have support for detecting * a disturbance in the clock-rate over the change. */ if (IS_ERR(port->baudclk)) goto exit; if (port->baudclk_rate == clk_get_rate(port->baudclk)) goto exit; if (val == CPUFREQ_PRECHANGE) { /* we should really shut the port down whilst the * frequency change is in progress. */ } else if (val == CPUFREQ_POSTCHANGE) { struct ktermios *termios; struct tty_struct *tty; if (uport->state == NULL) goto exit; tty = uport->state->port.tty; if (tty == NULL) goto exit; termios = &tty->termios; if (termios == NULL) { dev_warn(uport->dev, "%s: no termios?\n", __func__); goto exit; } s3c24xx_serial_set_termios(uport, termios, NULL); } exit: return 0; } static inline int s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port) { port->freq_transition.notifier_call = s3c24xx_serial_cpufreq_transition; return cpufreq_register_notifier(&port->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } static inline void s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port) { cpufreq_unregister_notifier(&port->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } #else static inline int s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port) { return 0; } static inline void s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port) { } #endif /* s3c24xx_serial_init_port * * initialise a single serial port from the platform device given */ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, struct platform_device *platdev) { struct uart_port *port = &ourport->port; struct s3c2410_uartcfg *cfg = ourport->cfg; struct resource *res; int ret; dbg("s3c24xx_serial_init_port: port=%p, platdev=%p\n", port, platdev); if (platdev == NULL) return -ENODEV; if (port->mapbase != 0) return 0; /* setup info for port */ port->dev = &platdev->dev; /* Startup sequence is different for s3c64xx and higher SoC's */ if (s3c24xx_serial_has_interrupt_mask(port)) s3c24xx_serial_ops.startup = s3c64xx_serial_startup; port->uartclk = 1; 
if (cfg->uart_flags & UPF_CONS_FLOW) { dbg("s3c24xx_serial_init_port: enabling flow control\n"); port->flags |= UPF_CONS_FLOW; } /* sort our the physical and virtual addresses for each UART */ res = platform_get_resource(platdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(port->dev, "failed to find memory resource for uart\n"); return -EINVAL; } dbg("resource %pR)\n", res); port->membase = devm_ioremap(port->dev, res->start, resource_size(res)); if (!port->membase) { dev_err(port->dev, "failed to remap controller address\n"); return -EBUSY; } port->mapbase = res->start; ret = platform_get_irq(platdev, 0); if (ret < 0) port->irq = 0; else { port->irq = ret; ourport->rx_irq = ret; ourport->tx_irq = ret + 1; } ret = platform_get_irq(platdev, 1); if (ret > 0) ourport->tx_irq = ret; ourport->clk = clk_get(&platdev->dev, "uart"); if (IS_ERR(ourport->clk)) { pr_err("%s: Controller clock not found\n", dev_name(&platdev->dev)); return PTR_ERR(ourport->clk); } ret = clk_prepare_enable(ourport->clk); if (ret) { pr_err("uart: clock failed to prepare+enable: %d\n", ret); clk_put(ourport->clk); return ret; } /* Keep all interrupts masked and cleared */ if (s3c24xx_serial_has_interrupt_mask(port)) { wr_regl(port, S3C64XX_UINTM, 0xf); wr_regl(port, S3C64XX_UINTP, 0xf); wr_regl(port, S3C64XX_UINTSP, 0xf); } dbg("port: map=%pa, mem=%p, irq=%d (%d,%d), clock=%u\n", &port->mapbase, port->membase, port->irq, ourport->rx_irq, ourport->tx_irq, port->uartclk); /* reset the fifos (and setup the uart) */ s3c24xx_serial_resetport(port, cfg); return 0; } #ifdef CONFIG_SAMSUNG_CLOCK static ssize_t s3c24xx_serial_show_clksrc(struct device *dev, struct device_attribute *attr, char *buf) { struct uart_port *port = s3c24xx_dev_to_port(dev); struct s3c24xx_uart_port *ourport = to_ourport(port); if (IS_ERR(ourport->baudclk)) return -EINVAL; return snprintf(buf, PAGE_SIZE, "* %s\n", ourport->baudclk->name ?: "(null)"); } static DEVICE_ATTR(clock_source, S_IRUGO, s3c24xx_serial_show_clksrc, NULL); 
#endif /* Device driver serial port probe */ static const struct of_device_id s3c24xx_uart_dt_match[]; static int probe_index; static inline struct s3c24xx_serial_drv_data *s3c24xx_get_driver_data( struct platform_device *pdev) { #ifdef CONFIG_OF if (pdev->dev.of_node) { const struct of_device_id *match; match = of_match_node(s3c24xx_uart_dt_match, pdev->dev.of_node); return (struct s3c24xx_serial_drv_data *)match->data; } #endif return (struct s3c24xx_serial_drv_data *) platform_get_device_id(pdev)->driver_data; } static int s3c24xx_serial_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct s3c24xx_uart_port *ourport; int index = probe_index; int ret; if (np) { ret = of_alias_get_id(np, "serial"); if (ret >= 0) index = ret; } dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index); ourport = &s3c24xx_serial_ports[index]; ourport->drv_data = s3c24xx_get_driver_data(pdev); if (!ourport->drv_data) { dev_err(&pdev->dev, "could not find driver data\n"); return -ENODEV; } ourport->baudclk = ERR_PTR(-EINVAL); ourport->info = ourport->drv_data->info; ourport->cfg = (dev_get_platdata(&pdev->dev)) ? dev_get_platdata(&pdev->dev) : ourport->drv_data->def_cfg; if (np) of_property_read_u32(np, "samsung,uart-fifosize", &ourport->port.fifosize); if (!ourport->port.fifosize) { ourport->port.fifosize = (ourport->info->fifosize) ? 
ourport->info->fifosize : ourport->drv_data->fifosize[index]; } probe_index++; dbg("%s: initialising port %p...\n", __func__, ourport); ret = s3c24xx_serial_init_port(ourport, pdev); if (ret < 0) return ret; if (!s3c24xx_uart_drv.state) { ret = uart_register_driver(&s3c24xx_uart_drv); if (ret < 0) { pr_err("Failed to register Samsung UART driver\n"); return ret; } } dbg("%s: adding port\n", __func__); uart_add_one_port(&s3c24xx_uart_drv, &ourport->port); platform_set_drvdata(pdev, &ourport->port); /* * Deactivate the clock enabled in s3c24xx_serial_init_port here, * so that a potential re-enablement through the pm-callback overlaps * and keeps the clock enabled in this case. */ clk_disable_unprepare(ourport->clk); #ifdef CONFIG_SAMSUNG_CLOCK ret = device_create_file(&pdev->dev, &dev_attr_clock_source); if (ret < 0) dev_err(&pdev->dev, "failed to add clock source attr.\n"); #endif ret = s3c24xx_serial_cpufreq_register(ourport); if (ret < 0) dev_err(&pdev->dev, "failed to add cpufreq notifier\n"); return 0; } static int s3c24xx_serial_remove(struct platform_device *dev) { struct uart_port *port = s3c24xx_dev_to_port(&dev->dev); if (port) { s3c24xx_serial_cpufreq_deregister(to_ourport(port)); #ifdef CONFIG_SAMSUNG_CLOCK device_remove_file(&dev->dev, &dev_attr_clock_source); #endif uart_remove_one_port(&s3c24xx_uart_drv, port); } uart_unregister_driver(&s3c24xx_uart_drv); return 0; } /* UART power management code */ #ifdef CONFIG_PM_SLEEP static int s3c24xx_serial_suspend(struct device *dev) { struct uart_port *port = s3c24xx_dev_to_port(dev); if (port) uart_suspend_port(&s3c24xx_uart_drv, port); return 0; } static int s3c24xx_serial_resume(struct device *dev) { struct uart_port *port = s3c24xx_dev_to_port(dev); struct s3c24xx_uart_port *ourport = to_ourport(port); if (port) { clk_prepare_enable(ourport->clk); s3c24xx_serial_resetport(port, s3c24xx_port_to_cfg(port)); clk_disable_unprepare(ourport->clk); uart_resume_port(&s3c24xx_uart_drv, port); } return 0; } static 
int s3c24xx_serial_resume_noirq(struct device *dev) { struct uart_port *port = s3c24xx_dev_to_port(dev); if (port) { /* restore IRQ mask */ if (s3c24xx_serial_has_interrupt_mask(port)) { unsigned int uintm = 0xf; if (tx_enabled(port)) uintm &= ~S3C64XX_UINTM_TXD_MSK; if (rx_enabled(port)) uintm &= ~S3C64XX_UINTM_RXD_MSK; wr_regl(port, S3C64XX_UINTM, uintm); } } return 0; } static const struct dev_pm_ops s3c24xx_serial_pm_ops = { .suspend = s3c24xx_serial_suspend, .resume = s3c24xx_serial_resume, .resume_noirq = s3c24xx_serial_resume_noirq, }; #define SERIAL_SAMSUNG_PM_OPS (&s3c24xx_serial_pm_ops) #else /* !CONFIG_PM_SLEEP */ #define SERIAL_SAMSUNG_PM_OPS NULL #endif /* CONFIG_PM_SLEEP */ /* Console code */ #ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE static struct uart_port *cons_uart; static int s3c24xx_serial_console_txrdy(struct uart_port *port, unsigned int ufcon) { struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port); unsigned long ufstat, utrstat; if (ufcon & S3C2410_UFCON_FIFOMODE) { /* fifo mode - check amount of data in fifo registers... */ ufstat = rd_regl(port, S3C2410_UFSTAT); return (ufstat & info->tx_fifofull) ? 0 : 1; } /* in non-fifo mode, we go and use the tx buffer empty */ utrstat = rd_regl(port, S3C2410_UTRSTAT); return (utrstat & S3C2410_UTRSTAT_TXE) ? 1 : 0; } static bool s3c24xx_port_configured(unsigned int ucon) { /* consider the serial port configured if the tx/rx mode set */ return (ucon & 0xf) != 0; } #ifdef CONFIG_CONSOLE_POLL /* * Console polling routines for writing and reading from the uart while * in an interrupt or debug context. 
*/ static int s3c24xx_serial_get_poll_char(struct uart_port *port) { struct s3c24xx_uart_port *ourport = to_ourport(port); unsigned int ufstat; ufstat = rd_regl(port, S3C2410_UFSTAT); if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0) return NO_POLL_CHAR; return rd_regb(port, S3C2410_URXH); } static void s3c24xx_serial_put_poll_char(struct uart_port *port, unsigned char c) { unsigned int ufcon = rd_regl(port, S3C2410_UFCON); unsigned int ucon = rd_regl(port, S3C2410_UCON); /* not possible to xmit on unconfigured port */ if (!s3c24xx_port_configured(ucon)) return; while (!s3c24xx_serial_console_txrdy(port, ufcon)) cpu_relax(); wr_regb(port, S3C2410_UTXH, c); } #endif /* CONFIG_CONSOLE_POLL */ static void s3c24xx_serial_console_putchar(struct uart_port *port, int ch) { unsigned int ufcon = rd_regl(port, S3C2410_UFCON); while (!s3c24xx_serial_console_txrdy(port, ufcon)) cpu_relax(); wr_regb(port, S3C2410_UTXH, ch); } static void s3c24xx_serial_console_write(struct console *co, const char *s, unsigned int count) { unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON); /* not possible to xmit on unconfigured port */ if (!s3c24xx_port_configured(ucon)) return; uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar); } static void __init s3c24xx_serial_get_options(struct uart_port *port, int *baud, int *parity, int *bits) { struct clk *clk; unsigned int ulcon; unsigned int ucon; unsigned int ubrdiv; unsigned long rate; unsigned int clk_sel; char clk_name[MAX_CLK_NAME_LENGTH]; ulcon = rd_regl(port, S3C2410_ULCON); ucon = rd_regl(port, S3C2410_UCON); ubrdiv = rd_regl(port, S3C2410_UBRDIV); dbg("s3c24xx_serial_get_options: port=%p\n" "registers: ulcon=%08x, ucon=%08x, ubdriv=%08x\n", port, ulcon, ucon, ubrdiv); if (s3c24xx_port_configured(ucon)) { switch (ulcon & S3C2410_LCON_CSMASK) { case S3C2410_LCON_CS5: *bits = 5; break; case S3C2410_LCON_CS6: *bits = 6; break; case S3C2410_LCON_CS7: *bits = 7; break; case S3C2410_LCON_CS8: default: *bits = 8; break; } 
switch (ulcon & S3C2410_LCON_PMASK) { case S3C2410_LCON_PEVEN: *parity = 'e'; break; case S3C2410_LCON_PODD: *parity = 'o'; break; case S3C2410_LCON_PNONE: default: *parity = 'n'; } /* now calculate the baud rate */ clk_sel = s3c24xx_serial_getsource(port); sprintf(clk_name, "clk_uart_baud%d", clk_sel); clk = clk_get(port->dev, clk_name); if (!IS_ERR(clk)) rate = clk_get_rate(clk); else rate = 1; *baud = rate / (16 * (ubrdiv + 1)); dbg("calculated baud %d\n", *baud); } } static int __init s3c24xx_serial_console_setup(struct console *co, char *options) { struct uart_port *port; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; dbg("s3c24xx_serial_console_setup: co=%p (%d), %s\n", co, co->index, options); /* is this a valid port */ if (co->index == -1 || co->index >= CONFIG_SERIAL_SAMSUNG_UARTS) co->index = 0; port = &s3c24xx_serial_ports[co->index].port; /* is the port configured? */ if (port->mapbase == 0x0) return -ENODEV; cons_uart = port; dbg("s3c24xx_serial_console_setup: port=%p (%d)\n", port, co->index); /* * Check whether an invalid uart number has been specified, and * if so, search for the first available port that does have * console support. 
*/ if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else s3c24xx_serial_get_options(port, &baud, &parity, &bits); dbg("s3c24xx_serial_console_setup: baud %d\n", baud); return uart_set_options(port, co, baud, parity, bits, flow); } static struct console s3c24xx_serial_console = { .name = S3C24XX_SERIAL_NAME, .device = uart_console_device, .flags = CON_PRINTBUFFER, .index = -1, .write = s3c24xx_serial_console_write, .setup = s3c24xx_serial_console_setup, .data = &s3c24xx_uart_drv, }; #endif /* CONFIG_SERIAL_SAMSUNG_CONSOLE */ #ifdef CONFIG_CPU_S3C2410 static struct s3c24xx_serial_drv_data s3c2410_serial_drv_data = { .info = &(struct s3c24xx_uart_info) { .name = "Samsung S3C2410 UART", .type = PORT_S3C2410, .fifosize = 16, .rx_fifomask = S3C2410_UFSTAT_RXMASK, .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT, .rx_fifofull = S3C2410_UFSTAT_RXFULL, .tx_fifofull = S3C2410_UFSTAT_TXFULL, .tx_fifomask = S3C2410_UFSTAT_TXMASK, .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT, .def_clk_sel = S3C2410_UCON_CLKSEL0, .num_clks = 2, .clksel_mask = S3C2410_UCON_CLKMASK, .clksel_shift = S3C2410_UCON_CLKSHIFT, }, .def_cfg = &(struct s3c2410_uartcfg) { .ucon = S3C2410_UCON_DEFAULT, .ufcon = S3C2410_UFCON_DEFAULT, }, }; #define S3C2410_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c2410_serial_drv_data) #else #define S3C2410_SERIAL_DRV_DATA (kernel_ulong_t)NULL #endif #ifdef CONFIG_CPU_S3C2412 static struct s3c24xx_serial_drv_data s3c2412_serial_drv_data = { .info = &(struct s3c24xx_uart_info) { .name = "Samsung S3C2412 UART", .type = PORT_S3C2412, .fifosize = 64, .has_divslot = 1, .rx_fifomask = S3C2440_UFSTAT_RXMASK, .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT, .rx_fifofull = S3C2440_UFSTAT_RXFULL, .tx_fifofull = S3C2440_UFSTAT_TXFULL, .tx_fifomask = S3C2440_UFSTAT_TXMASK, .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT, .def_clk_sel = S3C2410_UCON_CLKSEL2, .num_clks = 4, .clksel_mask = S3C2412_UCON_CLKMASK, .clksel_shift = S3C2412_UCON_CLKSHIFT, }, .def_cfg = &(struct s3c2410_uartcfg) { .ucon = 
S3C2410_UCON_DEFAULT, .ufcon = S3C2410_UFCON_DEFAULT, }, }; #define S3C2412_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c2412_serial_drv_data) #else #define S3C2412_SERIAL_DRV_DATA (kernel_ulong_t)NULL #endif #if defined(CONFIG_CPU_S3C2440) || defined(CONFIG_CPU_S3C2416) || \ defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2442) static struct s3c24xx_serial_drv_data s3c2440_serial_drv_data = { .info = &(struct s3c24xx_uart_info) { .name = "Samsung S3C2440 UART", .type = PORT_S3C2440, .fifosize = 64, .has_divslot = 1, .rx_fifomask = S3C2440_UFSTAT_RXMASK, .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT, .rx_fifofull = S3C2440_UFSTAT_RXFULL, .tx_fifofull = S3C2440_UFSTAT_TXFULL, .tx_fifomask = S3C2440_UFSTAT_TXMASK, .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT, .def_clk_sel = S3C2410_UCON_CLKSEL2, .num_clks = 4, .clksel_mask = S3C2412_UCON_CLKMASK, .clksel_shift = S3C2412_UCON_CLKSHIFT, }, .def_cfg = &(struct s3c2410_uartcfg) { .ucon = S3C2410_UCON_DEFAULT, .ufcon = S3C2410_UFCON_DEFAULT, }, }; #define S3C2440_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c2440_serial_drv_data) #else #define S3C2440_SERIAL_DRV_DATA (kernel_ulong_t)NULL #endif #if defined(CONFIG_CPU_S3C6400) || defined(CONFIG_CPU_S3C6410) static struct s3c24xx_serial_drv_data s3c6400_serial_drv_data = { .info = &(struct s3c24xx_uart_info) { .name = "Samsung S3C6400 UART", .type = PORT_S3C6400, .fifosize = 64, .has_divslot = 1, .rx_fifomask = S3C2440_UFSTAT_RXMASK, .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT, .rx_fifofull = S3C2440_UFSTAT_RXFULL, .tx_fifofull = S3C2440_UFSTAT_TXFULL, .tx_fifomask = S3C2440_UFSTAT_TXMASK, .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT, .def_clk_sel = S3C2410_UCON_CLKSEL2, .num_clks = 4, .clksel_mask = S3C6400_UCON_CLKMASK, .clksel_shift = S3C6400_UCON_CLKSHIFT, }, .def_cfg = &(struct s3c2410_uartcfg) { .ucon = S3C2410_UCON_DEFAULT, .ufcon = S3C2410_UFCON_DEFAULT, }, }; #define S3C6400_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c6400_serial_drv_data) #else #define S3C6400_SERIAL_DRV_DATA (kernel_ulong_t)NULL #endif 
#ifdef CONFIG_CPU_S5PV210 static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = { .info = &(struct s3c24xx_uart_info) { .name = "Samsung S5PV210 UART", .type = PORT_S3C6400, .has_divslot = 1, .rx_fifomask = S5PV210_UFSTAT_RXMASK, .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, .rx_fifofull = S5PV210_UFSTAT_RXFULL, .tx_fifofull = S5PV210_UFSTAT_TXFULL, .tx_fifomask = S5PV210_UFSTAT_TXMASK, .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, .def_clk_sel = S3C2410_UCON_CLKSEL0, .num_clks = 2, .clksel_mask = S5PV210_UCON_CLKMASK, .clksel_shift = S5PV210_UCON_CLKSHIFT, }, .def_cfg = &(struct s3c2410_uartcfg) { .ucon = S5PV210_UCON_DEFAULT, .ufcon = S5PV210_UFCON_DEFAULT, }, .fifosize = { 256, 64, 16, 16 }, }; #define S5PV210_SERIAL_DRV_DATA ((kernel_ulong_t)&s5pv210_serial_drv_data) #else #define S5PV210_SERIAL_DRV_DATA (kernel_ulong_t)NULL #endif #if defined(CONFIG_ARCH_EXYNOS) static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = { .info = &(struct s3c24xx_uart_info) { .name = "Samsung Exynos4 UART", .type = PORT_S3C6400, .has_divslot = 1, .rx_fifomask = S5PV210_UFSTAT_RXMASK, .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, .rx_fifofull = S5PV210_UFSTAT_RXFULL, .tx_fifofull = S5PV210_UFSTAT_TXFULL, .tx_fifomask = S5PV210_UFSTAT_TXMASK, .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, .def_clk_sel = S3C2410_UCON_CLKSEL0, .num_clks = 1, .clksel_mask = 0, .clksel_shift = 0, }, .def_cfg = &(struct s3c2410_uartcfg) { .ucon = S5PV210_UCON_DEFAULT, .ufcon = S5PV210_UFCON_DEFAULT, .has_fracval = 1, }, .fifosize = { 256, 64, 16, 16 }, }; #define EXYNOS4210_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos4210_serial_drv_data) #else #define EXYNOS4210_SERIAL_DRV_DATA (kernel_ulong_t)NULL #endif static struct platform_device_id s3c24xx_serial_driver_ids[] = { { .name = "s3c2410-uart", .driver_data = S3C2410_SERIAL_DRV_DATA, }, { .name = "s3c2412-uart", .driver_data = S3C2412_SERIAL_DRV_DATA, }, { .name = "s3c2440-uart", .driver_data = S3C2440_SERIAL_DRV_DATA, }, { .name = "s3c6400-uart", .driver_data 
= S3C6400_SERIAL_DRV_DATA, }, { .name = "s5pv210-uart", .driver_data = S5PV210_SERIAL_DRV_DATA, }, { .name = "exynos4210-uart", .driver_data = EXYNOS4210_SERIAL_DRV_DATA, }, { }, }; MODULE_DEVICE_TABLE(platform, s3c24xx_serial_driver_ids); #ifdef CONFIG_OF static const struct of_device_id s3c24xx_uart_dt_match[] = { { .compatible = "samsung,s3c2410-uart", .data = (void *)S3C2410_SERIAL_DRV_DATA }, { .compatible = "samsung,s3c2412-uart", .data = (void *)S3C2412_SERIAL_DRV_DATA }, { .compatible = "samsung,s3c2440-uart", .data = (void *)S3C2440_SERIAL_DRV_DATA }, { .compatible = "samsung,s3c6400-uart", .data = (void *)S3C6400_SERIAL_DRV_DATA }, { .compatible = "samsung,s5pv210-uart", .data = (void *)S5PV210_SERIAL_DRV_DATA }, { .compatible = "samsung,exynos4210-uart", .data = (void *)EXYNOS4210_SERIAL_DRV_DATA }, {}, }; MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match); #endif static struct platform_driver samsung_serial_driver = { .probe = s3c24xx_serial_probe, .remove = s3c24xx_serial_remove, .id_table = s3c24xx_serial_driver_ids, .driver = { .name = "samsung-uart", .owner = THIS_MODULE, .pm = SERIAL_SAMSUNG_PM_OPS, .of_match_table = of_match_ptr(s3c24xx_uart_dt_match), }, }; module_platform_driver(samsung_serial_driver); MODULE_ALIAS("platform:samsung-uart"); MODULE_DESCRIPTION("Samsung SoC Serial port driver"); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_LICENSE("GPL v2");
gpl-2.0
Huawei-Kiwi/android_kernel_huawei_msm8939
drivers/net/wireless/ath/wil6210/cfg80211.c
415
23377
/* * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "wil6210.h" #include "wmi.h" #define CHAN60G(_channel, _flags) { \ .band = IEEE80211_BAND_60GHZ, \ .center_freq = 56160 + (2160 * (_channel)), \ .hw_value = (_channel), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 40, \ } static struct ieee80211_channel wil_60ghz_channels[] = { CHAN60G(1, 0), CHAN60G(2, 0), CHAN60G(3, 0), /* channel 4 not supported yet */ }; static struct ieee80211_supported_band wil_band_60ghz = { .channels = wil_60ghz_channels, .n_channels = ARRAY_SIZE(wil_60ghz_channels), .ht_cap = { .ht_supported = true, .cap = 0, /* TODO */ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, /* TODO */ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, /* TODO */ .mcs = { /* MCS 1..12 - SC PHY */ .rx_mask = {0xfe, 0x1f}, /* 1..12 */ .tx_params = IEEE80211_HT_MCS_TX_DEFINED, /* TODO */ }, }, }; static const struct ieee80211_txrx_stypes wil_mgmt_stypes[NUM_NL80211_IFTYPES] = { [NL80211_IFTYPE_STATION] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_RESP >> 4), .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) }, [NL80211_IFTYPE_AP] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_RESP >> 4), .rx = 
BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) }, [NL80211_IFTYPE_P2P_CLIENT] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_RESP >> 4), .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) }, [NL80211_IFTYPE_P2P_GO] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_RESP >> 4), .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) }, }; static const u32 wil_cipher_suites[] = { WLAN_CIPHER_SUITE_GCMP, }; int wil_iftype_nl2wmi(enum nl80211_iftype type) { static const struct { enum nl80211_iftype nl; enum wmi_network_type wmi; } __nl2wmi[] = { {NL80211_IFTYPE_ADHOC, WMI_NETTYPE_ADHOC}, {NL80211_IFTYPE_STATION, WMI_NETTYPE_INFRA}, {NL80211_IFTYPE_AP, WMI_NETTYPE_AP}, {NL80211_IFTYPE_P2P_CLIENT, WMI_NETTYPE_P2P}, {NL80211_IFTYPE_P2P_GO, WMI_NETTYPE_P2P}, {NL80211_IFTYPE_MONITOR, WMI_NETTYPE_ADHOC}, /* FIXME */ }; uint i; for (i = 0; i < ARRAY_SIZE(__nl2wmi); i++) { if (__nl2wmi[i].nl == type) return __nl2wmi[i].wmi; } return -EOPNOTSUPP; } int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, struct station_info *sinfo) { struct wmi_notify_req_cmd cmd = { .cid = cid, .interval_usec = 0, }; struct { struct wil6210_mbox_hdr_wmi wmi; struct wmi_notify_req_done_event evt; } __packed reply; struct wil_net_stats *stats = &wil->sta[cid].stats; int rc; rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd), WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20); if (rc) return rc; wil_dbg_wmi(wil, "Link status for CID %d: {\n" " MCS %d TSF 0x%016llx\n" " BF status 0x%08x SNR 0x%08x SQI %d%%\n" " Tx Tpt %d goodput %d Rx goodput %d\n" " Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n", cid, le16_to_cpu(reply.evt.bf_mcs), le64_to_cpu(reply.evt.tsf), reply.evt.status, le32_to_cpu(reply.evt.snr_val), reply.evt.sqi, le32_to_cpu(reply.evt.tx_tpt), le32_to_cpu(reply.evt.tx_goodput), le32_to_cpu(reply.evt.rx_goodput), le16_to_cpu(reply.evt.my_rx_sector), 
le16_to_cpu(reply.evt.my_tx_sector), le16_to_cpu(reply.evt.other_rx_sector), le16_to_cpu(reply.evt.other_tx_sector)); sinfo->generation = wil->sinfo_gen; sinfo->filled = STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES | STATION_INFO_RX_PACKETS | STATION_INFO_TX_PACKETS | STATION_INFO_RX_BITRATE | STATION_INFO_TX_BITRATE | STATION_INFO_RX_DROP_MISC | STATION_INFO_TX_FAILED; sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G; sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs); sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G; sinfo->rxrate.mcs = stats->last_mcs_rx; sinfo->rx_bytes = stats->rx_bytes; sinfo->rx_packets = stats->rx_packets; sinfo->rx_dropped_misc = stats->rx_dropped; sinfo->tx_bytes = stats->tx_bytes; sinfo->tx_packets = stats->tx_packets; sinfo->tx_failed = stats->tx_errors; if (test_bit(wil_status_fwconnected, &wil->status)) { sinfo->filled |= STATION_INFO_SIGNAL; sinfo->signal = reply.evt.sqi; } return rc; } static int wil_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, u8 *mac, struct station_info *sinfo) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; int cid = wil_find_cid(wil, mac); wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid); if (cid < 0) return cid; rc = wil_cid_fill_sinfo(wil, cid, sinfo); return rc; } /* * Find @idx-th active STA for station dump. 
*/ static int wil_find_cid_by_idx(struct wil6210_priv *wil, int idx) { int i; for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { if (wil->sta[i].status == wil_sta_unused) continue; if (idx == 0) return i; idx--; } return -ENOENT; } static int wil_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; int cid = wil_find_cid_by_idx(wil, idx); if (cid < 0) return -ENOENT; memcpy(mac, wil->sta[cid].addr, ETH_ALEN); wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid); rc = wil_cid_fill_sinfo(wil, cid, sinfo); return rc; } static int wil_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wireless_dev *wdev = wil->wdev; switch (type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: break; case NL80211_IFTYPE_MONITOR: if (flags) wil->monitor_flags = *flags; else wil->monitor_flags = 0; break; default: return -EOPNOTSUPP; } wdev->iftype = type; return 0; } static int wil_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wireless_dev *wdev = wil->wdev; struct { struct wmi_start_scan_cmd cmd; u16 chnl[4]; } __packed cmd; uint i, n; int rc; if (wil->scan_request) { wil_err(wil, "Already scanning\n"); return -EAGAIN; } /* check we are client side */ switch (wdev->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: break; default: return -EOPNOTSUPP; } /* FW don't support scan after connection attempt */ if (test_bit(wil_status_dontscan, &wil->status)) { wil_err(wil, "Can't scan now\n"); return -EBUSY; } wil_dbg_misc(wil, "Start scan_request 0x%p\n", request); wil->scan_request = request; mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO); memset(&cmd, 0, 
sizeof(cmd)); cmd.cmd.num_channels = 0; n = min(request->n_channels, 4U); for (i = 0; i < n; i++) { int ch = request->channels[i]->hw_value; if (ch == 0) { wil_err(wil, "Scan requested for unknown frequency %dMhz\n", request->channels[i]->center_freq); continue; } /* 0-based channel indexes */ cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1; wil_dbg_misc(wil, "Scan for ch %d : %d MHz\n", ch, request->channels[i]->center_freq); } if (request->ie_len) print_hex_dump_bytes("Scan IE ", DUMP_PREFIX_OFFSET, request->ie, request->ie_len); else wil_dbg_misc(wil, "Scan has no IE's\n"); rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie); if (rc) { wil_err(wil, "Aborting scan, set_ie failed: %d\n", rc); goto out; } rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) + cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])); out: if (rc) { del_timer_sync(&wil->scan_timer); wil->scan_request = NULL; } return rc; } static void wil_print_connect_params(struct wil6210_priv *wil, struct cfg80211_connect_params *sme) { wil_info(wil, "Connecting to:\n"); if (sme->channel) { wil_info(wil, " Channel: %d freq %d\n", sme->channel->hw_value, sme->channel->center_freq); } if (sme->bssid) wil_info(wil, " BSSID: %pM\n", sme->bssid); if (sme->ssid) print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET, 16, 1, sme->ssid, sme->ssid_len, true); wil_info(wil, " Privacy: %s\n", sme->privacy ? 
"secure" : "open"); } static int wil_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_connect_params *sme) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct cfg80211_bss *bss; struct wmi_connect_cmd conn; const u8 *ssid_eid; const u8 *rsn_eid; int ch; int rc = 0; if (test_bit(wil_status_fwconnecting, &wil->status) || test_bit(wil_status_fwconnected, &wil->status)) return -EALREADY; wil_print_connect_params(wil, sme); bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, sme->ssid, sme->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); if (!bss) { wil_err(wil, "Unable to find BSS\n"); return -ENOENT; } ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID); if (!ssid_eid) { wil_err(wil, "No SSID\n"); rc = -ENOENT; goto out; } rsn_eid = sme->ie ? cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) : NULL; if (rsn_eid) { if (sme->ie_len > WMI_MAX_IE_LEN) { rc = -ERANGE; wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len); goto out; } /* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */ rc = wmi_del_cipher_key(wil, 0, bss->bssid); if (rc) { wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n"); goto out; } } /* WMI_SET_APPIE_CMD. ie may contain rsn info as well as other info * elements. Send it also in case it's empty, to erase previously set * ies in FW. 
*/ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie); if (rc) { wil_err(wil, "WMI_SET_APPIE_CMD failed\n"); goto out; } /* WMI_CONNECT_CMD */ memset(&conn, 0, sizeof(conn)); switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) { case WLAN_CAPABILITY_DMG_TYPE_AP: conn.network_type = WMI_NETTYPE_INFRA; break; case WLAN_CAPABILITY_DMG_TYPE_PBSS: conn.network_type = WMI_NETTYPE_P2P; break; default: wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n", bss->capability); goto out; } if (rsn_eid) { conn.dot11_auth_mode = WMI_AUTH11_SHARED; conn.auth_mode = WMI_AUTH_WPA2_PSK; conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP; conn.pairwise_crypto_len = 16; } else { conn.dot11_auth_mode = WMI_AUTH11_OPEN; conn.auth_mode = WMI_AUTH_NONE; } conn.ssid_len = min_t(u8, ssid_eid[1], 32); memcpy(conn.ssid, ssid_eid+2, conn.ssid_len); ch = bss->channel->hw_value; if (ch == 0) { wil_err(wil, "BSS at unknown frequency %dMhz\n", bss->channel->center_freq); rc = -EOPNOTSUPP; goto out; } conn.channel = ch - 1; memcpy(conn.bssid, bss->bssid, ETH_ALEN); memcpy(conn.dst_mac, bss->bssid, ETH_ALEN); set_bit(wil_status_fwconnecting, &wil->status); rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn)); if (rc == 0) { /* Connect can take lots of time */ mod_timer(&wil->connect_timer, jiffies + msecs_to_jiffies(2000)); } else { clear_bit(wil_status_fwconnecting, &wil->status); } out: cfg80211_put_bss(wiphy, bss); return rc; } static int wil_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev, u16 reason_code) { int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0); return rc; } int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct ieee80211_channel *chan, bool offchan, unsigned int wait, const u8 *buf, size_t len, bool no_cck, bool dont_wait_for_ack, u64 *cookie) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; bool tx_status = false; struct ieee80211_mgmt *mgmt_frame = 
(void *)buf; struct wmi_sw_tx_req_cmd *cmd; struct { struct wil6210_mbox_hdr_wmi wmi; struct wmi_sw_tx_complete_event evt; } __packed evt; cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL); if (!cmd) { rc = -ENOMEM; goto out; } memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN); cmd->len = cpu_to_le16(len); memcpy(cmd->payload, buf, len); rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len, WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); if (rc == 0) tx_status = !evt.evt.status; kfree(cmd); out: cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len, tx_status, GFP_KERNEL); return rc; } static int wil_cfg80211_set_channel(struct wiphy *wiphy, struct cfg80211_chan_def *chandef) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wireless_dev *wdev = wil->wdev; wdev->preset_chandef = *chandef; return 0; } static int wil_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); /* group key is not used */ if (!pairwise) return 0; return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len, params->key); } static int wil_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, const u8 *mac_addr) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); /* group key is not used */ if (!pairwise) return 0; return wmi_del_cipher_key(wil, key_index, mac_addr); } /* Need to be present or wiphy_new() will WARN */ static int wil_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool unicast, bool multicast) { return 0; } static int wil_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, struct ieee80211_channel *chan, unsigned int duration, u64 *cookie) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; /* TODO: handle duration */ wil_info(wil, "%s(%d, %d ms)\n", __func__, chan->center_freq, duration); rc = wmi_set_channel(wil, 
chan->hw_value); if (rc) return rc; rc = wmi_rxon(wil, true); return rc; } static int wil_cancel_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; wil_info(wil, "%s()\n", __func__); rc = wmi_rxon(wil, false); return rc; } static void wil_print_bcon_data(struct cfg80211_beacon_data *b) { print_hex_dump_bytes("head ", DUMP_PREFIX_OFFSET, b->head, b->head_len); print_hex_dump_bytes("tail ", DUMP_PREFIX_OFFSET, b->tail, b->tail_len); print_hex_dump_bytes("BCON IE ", DUMP_PREFIX_OFFSET, b->beacon_ies, b->beacon_ies_len); print_hex_dump_bytes("PROBE ", DUMP_PREFIX_OFFSET, b->probe_resp, b->probe_resp_len); print_hex_dump_bytes("PROBE IE ", DUMP_PREFIX_OFFSET, b->proberesp_ies, b->proberesp_ies_len); print_hex_dump_bytes("ASSOC IE ", DUMP_PREFIX_OFFSET, b->assocresp_ies, b->assocresp_ies_len); } static void wil_print_crypto(struct wil6210_priv *wil, struct cfg80211_crypto_settings *c) { wil_dbg_misc(wil, "WPA versions: 0x%08x cipher group 0x%08x\n", c->wpa_versions, c->cipher_group); wil_dbg_misc(wil, "Pairwise ciphers [%d]\n", c->n_ciphers_pairwise); wil_dbg_misc(wil, "AKM suites [%d]\n", c->n_akm_suites); wil_dbg_misc(wil, "Control port : %d, eth_type 0x%04x no_encrypt %d\n", c->control_port, be16_to_cpu(c->control_port_ethertype), c->control_port_no_encrypt); } static int wil_fix_bcon(struct wil6210_priv *wil, struct cfg80211_beacon_data *bcon) { struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); int rc = 0; if (bcon->probe_resp_len <= hlen) return 0; if (!bcon->proberesp_ies) { bcon->proberesp_ies = f->u.probe_resp.variable; bcon->proberesp_ies_len = bcon->probe_resp_len - hlen; rc = 1; } if (!bcon->assocresp_ies) { bcon->assocresp_ies = f->u.probe_resp.variable; bcon->assocresp_ies_len = bcon->probe_resp_len - hlen; rc = 1; } return rc; } static int wil_cfg80211_change_beacon(struct wiphy 
*wiphy, struct net_device *ndev, struct cfg80211_beacon_data *bcon) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; wil_dbg_misc(wil, "%s()\n", __func__); if (wil_fix_bcon(wil, bcon)) { wil_dbg_misc(wil, "Fixed bcon\n"); wil_print_bcon_data(bcon); } /* FW do not form regular beacon, so bcon IE's are not set * For the DMG bcon, when it will be supported, bcon IE's will * be reused; add something like: * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, * bcon->beacon_ies); */ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len, bcon->proberesp_ies); if (rc) { wil_err(wil, "set_ie(PROBE_RESP) failed\n"); return rc; } rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, bcon->assocresp_ies); if (rc) { wil_err(wil, "set_ie(ASSOC_RESP) failed\n"); return rc; } return 0; } static int wil_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_ap_settings *info) { int rc = 0; struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wireless_dev *wdev = ndev->ieee80211_ptr; struct ieee80211_channel *channel = info->chandef.chan; struct cfg80211_beacon_data *bcon = &info->beacon; struct cfg80211_crypto_settings *crypto = &info->crypto; u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); wil_dbg_misc(wil, "%s()\n", __func__); if (!channel) { wil_err(wil, "AP: No channel???\n"); return -EINVAL; } wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value, channel->center_freq, info->privacy ? 
"secure" : "open"); wil_dbg_misc(wil, "Privacy: %d auth_type %d\n", info->privacy, info->auth_type); wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval, info->dtim_period); print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, info->ssid, info->ssid_len); wil_print_bcon_data(bcon); wil_print_crypto(wil, crypto); if (wil_fix_bcon(wil, bcon)) { wil_dbg_misc(wil, "Fixed bcon\n"); wil_print_bcon_data(bcon); } mutex_lock(&wil->mutex); __wil_down(wil); rc = __wil_up(wil); if (rc) goto out; rc = wmi_set_ssid(wil, info->ssid_len, info->ssid); if (rc) goto out; /* IE's */ /* bcon 'head IE's are not relevant for 60g band */ /* * FW do not form regular beacon, so bcon IE's are not set * For the DMG bcon, when it will be supported, bcon IE's will * be reused; add something like: * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, * bcon->beacon_ies); */ wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len, bcon->proberesp_ies); wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, bcon->assocresp_ies); wil->secure_pcp = info->privacy; rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype, channel->hw_value); if (rc) goto out; netif_carrier_on(ndev); out: mutex_unlock(&wil->mutex); return rc; } static int wil_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) { int rc, rc1; struct wil6210_priv *wil = wiphy_to_wil(wiphy); wil_dbg_misc(wil, "%s()\n", __func__); mutex_lock(&wil->mutex); rc = wmi_pcp_stop(wil); __wil_down(wil); rc1 = __wil_up(wil); mutex_unlock(&wil->mutex); return min(rc, rc1); } static int wil_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); mutex_lock(&wil->mutex); wil6210_disconnect(wil, mac); mutex_unlock(&wil->mutex); return 0; } static struct cfg80211_ops wil_cfg80211_ops = { .scan = wil_cfg80211_scan, .connect = wil_cfg80211_connect, .disconnect = wil_cfg80211_disconnect, .change_virtual_intf = wil_cfg80211_change_iface, .get_station = 
wil_cfg80211_get_station, .dump_station = wil_cfg80211_dump_station, .remain_on_channel = wil_remain_on_channel, .cancel_remain_on_channel = wil_cancel_remain_on_channel, .mgmt_tx = wil_cfg80211_mgmt_tx, .set_monitor_channel = wil_cfg80211_set_channel, .add_key = wil_cfg80211_add_key, .del_key = wil_cfg80211_del_key, .set_default_key = wil_cfg80211_set_default_key, /* AP mode */ .change_beacon = wil_cfg80211_change_beacon, .start_ap = wil_cfg80211_start_ap, .stop_ap = wil_cfg80211_stop_ap, .del_station = wil_cfg80211_del_station, }; static void wil_wiphy_init(struct wiphy *wiphy) { /* TODO: set real value */ wiphy->max_scan_ssids = 10; wiphy->max_scan_ie_len = WMI_MAX_IE_LEN; wiphy->max_num_pmkids = 0 /* TODO: */; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MONITOR); /* TODO: enable P2P when integrated with supplicant: * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) */ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n", __func__, wiphy->flags); wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz; /* TODO: figure this out */ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; wiphy->cipher_suites = wil_cipher_suites; wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites); wiphy->mgmt_stypes = wil_mgmt_stypes; } struct wireless_dev *wil_cfg80211_init(struct device *dev) { int rc = 0; struct wireless_dev *wdev; dev_dbg(dev, "%s()\n", __func__); wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); if (!wdev) return ERR_PTR(-ENOMEM); wdev->wiphy = wiphy_new(&wil_cfg80211_ops, sizeof(struct wil6210_priv)); if (!wdev->wiphy) { rc = -ENOMEM; goto out; } set_wiphy_dev(wdev->wiphy, dev); wil_wiphy_init(wdev->wiphy); rc = wiphy_register(wdev->wiphy); if (rc < 0) goto out_failed_reg; return wdev; 
out_failed_reg: wiphy_free(wdev->wiphy); out: kfree(wdev); return ERR_PTR(rc); } void wil_wdev_free(struct wil6210_priv *wil) { struct wireless_dev *wdev = wil_to_wdev(wil); dev_dbg(wil_to_dev(wil), "%s()\n", __func__); if (!wdev) return; wiphy_unregister(wdev->wiphy); wiphy_free(wdev->wiphy); kfree(wdev); }
gpl-2.0
ghosteyezz/my-linux-kernel-study
fs/ecryptfs/messaging.c
671
14221
/** * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 2004-2008 International Business Machines Corp. * Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com> * Tyler Hicks <tyhicks@ou.edu> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/user_namespace.h> #include <linux/nsproxy.h> #include "ecryptfs_kernel.h" static LIST_HEAD(ecryptfs_msg_ctx_free_list); static LIST_HEAD(ecryptfs_msg_ctx_alloc_list); static struct mutex ecryptfs_msg_ctx_lists_mux; static struct hlist_head *ecryptfs_daemon_hash; struct mutex ecryptfs_daemon_hash_mux; static int ecryptfs_hash_bits; #define ecryptfs_current_euid_hash(uid) \ hash_long((unsigned long)from_kuid(&init_user_ns, current_euid()), ecryptfs_hash_bits) static u32 ecryptfs_msg_counter; static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr; /** * ecryptfs_acquire_free_msg_ctx * @msg_ctx: The context that was acquired from the free list * * Acquires a context element from the free list and locks the mutex * on the context. Sets the msg_ctx task to current. Returns zero on * success; non-zero on error or upon failure to acquire a free * context element. Must be called with ecryptfs_msg_ctx_lists_mux * held. 
*/ static int ecryptfs_acquire_free_msg_ctx(struct ecryptfs_msg_ctx **msg_ctx) { struct list_head *p; int rc; if (list_empty(&ecryptfs_msg_ctx_free_list)) { printk(KERN_WARNING "%s: The eCryptfs free " "context list is empty. It may be helpful to " "specify the ecryptfs_message_buf_len " "parameter to be greater than the current " "value of [%d]\n", __func__, ecryptfs_message_buf_len); rc = -ENOMEM; goto out; } list_for_each(p, &ecryptfs_msg_ctx_free_list) { *msg_ctx = list_entry(p, struct ecryptfs_msg_ctx, node); if (mutex_trylock(&(*msg_ctx)->mux)) { (*msg_ctx)->task = current; rc = 0; goto out; } } rc = -ENOMEM; out: return rc; } /** * ecryptfs_msg_ctx_free_to_alloc * @msg_ctx: The context to move from the free list to the alloc list * * Must be called with ecryptfs_msg_ctx_lists_mux held. */ static void ecryptfs_msg_ctx_free_to_alloc(struct ecryptfs_msg_ctx *msg_ctx) { list_move(&msg_ctx->node, &ecryptfs_msg_ctx_alloc_list); msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_PENDING; msg_ctx->counter = ++ecryptfs_msg_counter; } /** * ecryptfs_msg_ctx_alloc_to_free * @msg_ctx: The context to move from the alloc list to the free list * * Must be called with ecryptfs_msg_ctx_lists_mux held. */ void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx) { list_move(&(msg_ctx->node), &ecryptfs_msg_ctx_free_list); kfree(msg_ctx->msg); msg_ctx->msg = NULL; msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_FREE; } /** * ecryptfs_find_daemon_by_euid * @daemon: If return value is zero, points to the desired daemon pointer * * Must be called with ecryptfs_daemon_hash_mux held. * * Search the hash list for the current effective user id. * * Returns zero if the user id exists in the list; non-zero otherwise. 
*/ int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon) { int rc; hlist_for_each_entry(*daemon, &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()], euid_chain) { if (uid_eq((*daemon)->file->f_cred->euid, current_euid())) { rc = 0; goto out; } } rc = -EINVAL; out: return rc; } /** * ecryptfs_spawn_daemon - Create and initialize a new daemon struct * @daemon: Pointer to set to newly allocated daemon struct * @file: File used when opening /dev/ecryptfs * * Must be called ceremoniously while in possession of * ecryptfs_sacred_daemon_hash_mux * * Returns zero on success; non-zero otherwise */ int ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, struct file *file) { int rc = 0; (*daemon) = kzalloc(sizeof(**daemon), GFP_KERNEL); if (!(*daemon)) { rc = -ENOMEM; printk(KERN_ERR "%s: Failed to allocate [%zd] bytes of " "GFP_KERNEL memory\n", __func__, sizeof(**daemon)); goto out; } (*daemon)->file = file; mutex_init(&(*daemon)->mux); INIT_LIST_HEAD(&(*daemon)->msg_ctx_out_queue); init_waitqueue_head(&(*daemon)->wait); (*daemon)->num_queued_msg_ctx = 0; hlist_add_head(&(*daemon)->euid_chain, &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()]); out: return rc; } /** * ecryptfs_exorcise_daemon - Destroy the daemon struct * * Must be called ceremoniously while in possession of * ecryptfs_daemon_hash_mux and the daemon's own mux. 
*/ int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon) { struct ecryptfs_msg_ctx *msg_ctx, *msg_ctx_tmp; int rc = 0; mutex_lock(&daemon->mux); if ((daemon->flags & ECRYPTFS_DAEMON_IN_READ) || (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)) { rc = -EBUSY; mutex_unlock(&daemon->mux); goto out; } list_for_each_entry_safe(msg_ctx, msg_ctx_tmp, &daemon->msg_ctx_out_queue, daemon_out_list) { list_del(&msg_ctx->daemon_out_list); daemon->num_queued_msg_ctx--; printk(KERN_WARNING "%s: Warning: dropping message that is in " "the out queue of a dying daemon\n", __func__); ecryptfs_msg_ctx_alloc_to_free(msg_ctx); } hlist_del(&daemon->euid_chain); mutex_unlock(&daemon->mux); kzfree(daemon); out: return rc; } /** * ecryptfs_process_reponse * @msg: The ecryptfs message received; the caller should sanity check * msg->data_len and free the memory * @seq: The sequence number of the message; must match the sequence * number for the existing message context waiting for this * response * * Processes a response message after sending an operation request to * userspace. Some other process is awaiting this response. Before * sending out its first communications, the other process allocated a * msg_ctx from the ecryptfs_msg_ctx_arr at a particular index. The * response message contains this index so that we can copy over the * response message into the msg_ctx that the process holds a * reference to. The other process is going to wake up, check to see * that msg_ctx->state == ECRYPTFS_MSG_CTX_STATE_DONE, and then * proceed to read off and process the response message. Returns zero * upon delivery to desired context element; non-zero upon delivery * failure or error. 
* * Returns zero on success; non-zero otherwise */ int ecryptfs_process_response(struct ecryptfs_daemon *daemon, struct ecryptfs_message *msg, u32 seq) { struct ecryptfs_msg_ctx *msg_ctx; size_t msg_size; int rc; if (msg->index >= ecryptfs_message_buf_len) { rc = -EINVAL; printk(KERN_ERR "%s: Attempt to reference " "context buffer at index [%d]; maximum " "allowable is [%d]\n", __func__, msg->index, (ecryptfs_message_buf_len - 1)); goto out; } msg_ctx = &ecryptfs_msg_ctx_arr[msg->index]; mutex_lock(&msg_ctx->mux); if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_PENDING) { rc = -EINVAL; printk(KERN_WARNING "%s: Desired context element is not " "pending a response\n", __func__); goto unlock; } else if (msg_ctx->counter != seq) { rc = -EINVAL; printk(KERN_WARNING "%s: Invalid message sequence; " "expected [%d]; received [%d]\n", __func__, msg_ctx->counter, seq); goto unlock; } msg_size = (sizeof(*msg) + msg->data_len); msg_ctx->msg = kmemdup(msg, msg_size, GFP_KERNEL); if (!msg_ctx->msg) { rc = -ENOMEM; printk(KERN_ERR "%s: Failed to allocate [%zd] bytes of " "GFP_KERNEL memory\n", __func__, msg_size); goto unlock; } msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_DONE; wake_up_process(msg_ctx->task); rc = 0; unlock: mutex_unlock(&msg_ctx->mux); out: return rc; } /** * ecryptfs_send_message_locked * @data: The data to send * @data_len: The length of data * @msg_ctx: The message context allocated for the send * * Must be called with ecryptfs_daemon_hash_mux held. 
* * Returns zero on success; non-zero otherwise */ static int ecryptfs_send_message_locked(char *data, int data_len, u8 msg_type, struct ecryptfs_msg_ctx **msg_ctx) { struct ecryptfs_daemon *daemon; int rc; rc = ecryptfs_find_daemon_by_euid(&daemon); if (rc) { rc = -ENOTCONN; goto out; } mutex_lock(&ecryptfs_msg_ctx_lists_mux); rc = ecryptfs_acquire_free_msg_ctx(msg_ctx); if (rc) { mutex_unlock(&ecryptfs_msg_ctx_lists_mux); printk(KERN_WARNING "%s: Could not claim a free " "context element\n", __func__); goto out; } ecryptfs_msg_ctx_free_to_alloc(*msg_ctx); mutex_unlock(&(*msg_ctx)->mux); mutex_unlock(&ecryptfs_msg_ctx_lists_mux); rc = ecryptfs_send_miscdev(data, data_len, *msg_ctx, msg_type, 0, daemon); if (rc) printk(KERN_ERR "%s: Error attempting to send message to " "userspace daemon; rc = [%d]\n", __func__, rc); out: return rc; } /** * ecryptfs_send_message * @data: The data to send * @data_len: The length of data * @msg_ctx: The message context allocated for the send * * Grabs ecryptfs_daemon_hash_mux. * * Returns zero on success; non-zero otherwise */ int ecryptfs_send_message(char *data, int data_len, struct ecryptfs_msg_ctx **msg_ctx) { int rc; mutex_lock(&ecryptfs_daemon_hash_mux); rc = ecryptfs_send_message_locked(data, data_len, ECRYPTFS_MSG_REQUEST, msg_ctx); mutex_unlock(&ecryptfs_daemon_hash_mux); return rc; } /** * ecryptfs_wait_for_response * @msg_ctx: The context that was assigned when sending a message * @msg: The incoming message from userspace; not set if rc != 0 * * Sleeps until awaken by ecryptfs_receive_message or until the amount * of time exceeds ecryptfs_message_wait_timeout. If zero is * returned, msg will point to a valid message from userspace; a * non-zero value is returned upon failure to receive a message or an * error occurs. Callee must free @msg on success. 
*/ int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx, struct ecryptfs_message **msg) { signed long timeout = ecryptfs_message_wait_timeout * HZ; int rc = 0; sleep: timeout = schedule_timeout_interruptible(timeout); mutex_lock(&ecryptfs_msg_ctx_lists_mux); mutex_lock(&msg_ctx->mux); if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_DONE) { if (timeout) { mutex_unlock(&msg_ctx->mux); mutex_unlock(&ecryptfs_msg_ctx_lists_mux); goto sleep; } rc = -ENOMSG; } else { *msg = msg_ctx->msg; msg_ctx->msg = NULL; } ecryptfs_msg_ctx_alloc_to_free(msg_ctx); mutex_unlock(&msg_ctx->mux); mutex_unlock(&ecryptfs_msg_ctx_lists_mux); return rc; } int __init ecryptfs_init_messaging(void) { int i; int rc = 0; if (ecryptfs_number_of_users > ECRYPTFS_MAX_NUM_USERS) { ecryptfs_number_of_users = ECRYPTFS_MAX_NUM_USERS; printk(KERN_WARNING "%s: Specified number of users is " "too large, defaulting to [%d] users\n", __func__, ecryptfs_number_of_users); } mutex_init(&ecryptfs_daemon_hash_mux); mutex_lock(&ecryptfs_daemon_hash_mux); ecryptfs_hash_bits = 1; while (ecryptfs_number_of_users >> ecryptfs_hash_bits) ecryptfs_hash_bits++; ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head) * (1 << ecryptfs_hash_bits)), GFP_KERNEL); if (!ecryptfs_daemon_hash) { rc = -ENOMEM; printk(KERN_ERR "%s: Failed to allocate memory\n", __func__); mutex_unlock(&ecryptfs_daemon_hash_mux); goto out; } for (i = 0; i < (1 << ecryptfs_hash_bits); i++) INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]); mutex_unlock(&ecryptfs_daemon_hash_mux); ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx) * ecryptfs_message_buf_len), GFP_KERNEL); if (!ecryptfs_msg_ctx_arr) { rc = -ENOMEM; printk(KERN_ERR "%s: Failed to allocate memory\n", __func__); goto out; } mutex_init(&ecryptfs_msg_ctx_lists_mux); mutex_lock(&ecryptfs_msg_ctx_lists_mux); ecryptfs_msg_counter = 0; for (i = 0; i < ecryptfs_message_buf_len; i++) { INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].node); 
INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].daemon_out_list); mutex_init(&ecryptfs_msg_ctx_arr[i].mux); mutex_lock(&ecryptfs_msg_ctx_arr[i].mux); ecryptfs_msg_ctx_arr[i].index = i; ecryptfs_msg_ctx_arr[i].state = ECRYPTFS_MSG_CTX_STATE_FREE; ecryptfs_msg_ctx_arr[i].counter = 0; ecryptfs_msg_ctx_arr[i].task = NULL; ecryptfs_msg_ctx_arr[i].msg = NULL; list_add_tail(&ecryptfs_msg_ctx_arr[i].node, &ecryptfs_msg_ctx_free_list); mutex_unlock(&ecryptfs_msg_ctx_arr[i].mux); } mutex_unlock(&ecryptfs_msg_ctx_lists_mux); rc = ecryptfs_init_ecryptfs_miscdev(); if (rc) ecryptfs_release_messaging(); out: return rc; } void ecryptfs_release_messaging(void) { if (ecryptfs_msg_ctx_arr) { int i; mutex_lock(&ecryptfs_msg_ctx_lists_mux); for (i = 0; i < ecryptfs_message_buf_len; i++) { mutex_lock(&ecryptfs_msg_ctx_arr[i].mux); if (ecryptfs_msg_ctx_arr[i].msg) kfree(ecryptfs_msg_ctx_arr[i].msg); mutex_unlock(&ecryptfs_msg_ctx_arr[i].mux); } kfree(ecryptfs_msg_ctx_arr); mutex_unlock(&ecryptfs_msg_ctx_lists_mux); } if (ecryptfs_daemon_hash) { struct ecryptfs_daemon *daemon; int i; mutex_lock(&ecryptfs_daemon_hash_mux); for (i = 0; i < (1 << ecryptfs_hash_bits); i++) { int rc; hlist_for_each_entry(daemon, &ecryptfs_daemon_hash[i], euid_chain) { rc = ecryptfs_exorcise_daemon(daemon); if (rc) printk(KERN_ERR "%s: Error whilst " "attempting to destroy daemon; " "rc = [%d]. Dazed and confused, " "but trying to continue.\n", __func__, rc); } } kfree(ecryptfs_daemon_hash); mutex_unlock(&ecryptfs_daemon_hash_mux); } ecryptfs_destroy_ecryptfs_miscdev(); return; }
gpl-2.0
MaxiCM/android_kernel_samsung_degaswifi
drivers/usb/chipidea/ci13xxx_imx.c
1951
6469
/* * Copyright 2012 Freescale Semiconductor, Inc. * Copyright (C) 2012 Marek Vasut <marex@denx.de> * on behalf of DENX Software Engineering GmbH * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/module.h> #include <linux/of_platform.h> #include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/dma-mapping.h> #include <linux/usb/chipidea.h> #include <linux/clk.h> #include <linux/regulator/consumer.h> #include <linux/pinctrl/consumer.h> #include "ci.h" #include "ci13xxx_imx.h" #define pdev_to_phy(pdev) \ ((struct usb_phy *)platform_get_drvdata(pdev)) struct ci13xxx_imx_data { struct device_node *phy_np; struct usb_phy *phy; struct platform_device *ci_pdev; struct clk *clk; struct regulator *reg_vbus; }; static const struct usbmisc_ops *usbmisc_ops; /* Common functions shared by usbmisc drivers */ int usbmisc_set_ops(const struct usbmisc_ops *ops) { if (usbmisc_ops) return -EBUSY; usbmisc_ops = ops; return 0; } EXPORT_SYMBOL_GPL(usbmisc_set_ops); void usbmisc_unset_ops(const struct usbmisc_ops *ops) { usbmisc_ops = NULL; } EXPORT_SYMBOL_GPL(usbmisc_unset_ops); int usbmisc_get_init_data(struct device *dev, struct usbmisc_usb_device *usbdev) { struct device_node *np = dev->of_node; struct of_phandle_args args; int ret; usbdev->dev = dev; ret = of_parse_phandle_with_args(np, "fsl,usbmisc", "#index-cells", 0, &args); if (ret) { dev_err(dev, "Failed to parse property fsl,usbmisc, errno %d\n", ret); memset(usbdev, 0, sizeof(*usbdev)); return ret; } usbdev->index = args.args[0]; of_node_put(args.np); if (of_find_property(np, "disable-over-current", NULL)) usbdev->disable_oc = 1; if (of_find_property(np, "external-vbus-divider", NULL)) usbdev->evdo = 1; return 0; } 
EXPORT_SYMBOL_GPL(usbmisc_get_init_data); /* End of common functions shared by usbmisc drivers*/ static struct ci13xxx_platform_data ci13xxx_imx_platdata = { .name = "ci13xxx_imx", .flags = CI13XXX_REQUIRE_TRANSCEIVER | CI13XXX_PULLUP_ON_VBUS | CI13XXX_DISABLE_STREAMING, .capoffset = DEF_CAPOFFSET, }; static int ci13xxx_imx_probe(struct platform_device *pdev) { struct ci13xxx_imx_data *data; struct platform_device *plat_ci, *phy_pdev; struct device_node *phy_np; struct resource *res; struct regulator *reg_vbus; struct pinctrl *pinctrl; int ret; if (of_find_property(pdev->dev.of_node, "fsl,usbmisc", NULL) && !usbmisc_ops) return -EPROBE_DEFER; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) { dev_err(&pdev->dev, "Failed to allocate CI13xxx-IMX data!\n"); return -ENOMEM; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Can't get device resources!\n"); return -ENOENT; } pinctrl = devm_pinctrl_get_select_default(&pdev->dev); if (IS_ERR(pinctrl)) dev_warn(&pdev->dev, "pinctrl get/select failed, err=%ld\n", PTR_ERR(pinctrl)); data->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(data->clk)) { dev_err(&pdev->dev, "Failed to get clock, err=%ld\n", PTR_ERR(data->clk)); return PTR_ERR(data->clk); } ret = clk_prepare_enable(data->clk); if (ret) { dev_err(&pdev->dev, "Failed to prepare or enable clock, err=%d\n", ret); return ret; } phy_np = of_parse_phandle(pdev->dev.of_node, "fsl,usbphy", 0); if (phy_np) { data->phy_np = phy_np; phy_pdev = of_find_device_by_node(phy_np); if (phy_pdev) { struct usb_phy *phy; phy = pdev_to_phy(phy_pdev); if (phy && try_module_get(phy_pdev->dev.driver->owner)) { usb_phy_init(phy); data->phy = phy; } } } /* we only support host now, so enable vbus here */ reg_vbus = devm_regulator_get(&pdev->dev, "vbus"); if (!IS_ERR(reg_vbus)) { ret = regulator_enable(reg_vbus); if (ret) { dev_err(&pdev->dev, "Failed to enable vbus regulator, err=%d\n", ret); goto put_np; } data->reg_vbus = 
reg_vbus; } else { reg_vbus = NULL; } ci13xxx_imx_platdata.phy = data->phy; if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); if (usbmisc_ops && usbmisc_ops->init) { ret = usbmisc_ops->init(&pdev->dev); if (ret) { dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n", ret); goto err; } } plat_ci = ci13xxx_add_device(&pdev->dev, pdev->resource, pdev->num_resources, &ci13xxx_imx_platdata); if (IS_ERR(plat_ci)) { ret = PTR_ERR(plat_ci); dev_err(&pdev->dev, "Can't register ci_hdrc platform device, err=%d\n", ret); goto err; } if (usbmisc_ops && usbmisc_ops->post) { ret = usbmisc_ops->post(&pdev->dev); if (ret) { dev_err(&pdev->dev, "usbmisc post failed, ret=%d\n", ret); goto put_np; } } data->ci_pdev = plat_ci; platform_set_drvdata(pdev, data); pm_runtime_no_callbacks(&pdev->dev); pm_runtime_enable(&pdev->dev); return 0; err: if (reg_vbus) regulator_disable(reg_vbus); put_np: if (phy_np) of_node_put(phy_np); clk_disable_unprepare(data->clk); return ret; } static int ci13xxx_imx_remove(struct platform_device *pdev) { struct ci13xxx_imx_data *data = platform_get_drvdata(pdev); pm_runtime_disable(&pdev->dev); ci13xxx_remove_device(data->ci_pdev); if (data->reg_vbus) regulator_disable(data->reg_vbus); if (data->phy) { usb_phy_shutdown(data->phy); module_put(data->phy->dev->driver->owner); } of_node_put(data->phy_np); clk_disable_unprepare(data->clk); platform_set_drvdata(pdev, NULL); return 0; } static const struct of_device_id ci13xxx_imx_dt_ids[] = { { .compatible = "fsl,imx27-usb", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ci13xxx_imx_dt_ids); static struct platform_driver ci13xxx_imx_driver = { .probe = ci13xxx_imx_probe, .remove = ci13xxx_imx_remove, .driver = { .name = "imx_usb", .owner = THIS_MODULE, .of_match_table = ci13xxx_imx_dt_ids, }, }; module_platform_driver(ci13xxx_imx_driver); MODULE_ALIAS("platform:imx-usb"); MODULE_LICENSE("GPL v2"); 
MODULE_DESCRIPTION("CI13xxx i.MX USB binding"); MODULE_AUTHOR("Marek Vasut <marex@denx.de>"); MODULE_AUTHOR("Richard Zhao <richard.zhao@freescale.com>");
gpl-2.0
olegfusion/IM-A830S_kernel
arch/arm/plat-mxc/devices/platform-imx-uart.c
2463
5395
/*
 * Copyright (C) 2009-2010 Pengutronix
 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 */
#include <mach/hardware.h>
#include <mach/devices-common.h>

/*
 * Table entry for a UART that raises three separate interrupts
 * (RX, TX, RTS).  Only the i.MX1 uses this wiring.
 */
#define imx_imx_uart_3irq_data_entry(soc, _id, _hwid, _size)		\
	[_id] = {							\
		.id = _id,						\
		.iobase = soc ## _UART ## _hwid ## _BASE_ADDR,		\
		.iosize = _size,					\
		.irqrx = soc ## _INT_UART ## _hwid ## RX,		\
		.irqtx = soc ## _INT_UART ## _hwid ## TX,		\
		.irqrts = soc ## _INT_UART ## _hwid ## RTS,		\
	}

/* Table entry for a UART with a single muxed interrupt (all later SoCs). */
#define imx_imx_uart_1irq_data_entry(soc, _id, _hwid, _size)		\
	[_id] = {							\
		.id = _id,						\
		.iobase = soc ## _UART ## _hwid ## _BASE_ADDR,		\
		.iosize = _size,					\
		.irq = soc ## _INT_UART ## _hwid,			\
	}

#ifdef CONFIG_SOC_IMX1
const struct imx_imx_uart_3irq_data imx1_imx_uart_data[] __initconst = {
#define imx1_imx_uart_data_entry(_id, _hwid)				\
	imx_imx_uart_3irq_data_entry(MX1, _id, _hwid, 0xd0)
	imx1_imx_uart_data_entry(0, 1),
	imx1_imx_uart_data_entry(1, 2),
};
#endif /* ifdef CONFIG_SOC_IMX1 */

#ifdef CONFIG_SOC_IMX21
const struct imx_imx_uart_1irq_data imx21_imx_uart_data[] __initconst = {
#define imx21_imx_uart_data_entry(_id, _hwid)				\
	imx_imx_uart_1irq_data_entry(MX21, _id, _hwid, SZ_4K)
	imx21_imx_uart_data_entry(0, 1),
	imx21_imx_uart_data_entry(1, 2),
	imx21_imx_uart_data_entry(2, 3),
	imx21_imx_uart_data_entry(3, 4),
};
#endif

#ifdef CONFIG_SOC_IMX25
const struct imx_imx_uart_1irq_data imx25_imx_uart_data[] __initconst = {
#define imx25_imx_uart_data_entry(_id, _hwid)				\
	imx_imx_uart_1irq_data_entry(MX25, _id, _hwid, SZ_16K)
	imx25_imx_uart_data_entry(0, 1),
	imx25_imx_uart_data_entry(1, 2),
	imx25_imx_uart_data_entry(2, 3),
	imx25_imx_uart_data_entry(3, 4),
	imx25_imx_uart_data_entry(4, 5),
};
#endif /* ifdef CONFIG_SOC_IMX25 */

#ifdef CONFIG_SOC_IMX27
const struct imx_imx_uart_1irq_data imx27_imx_uart_data[] __initconst = {
#define imx27_imx_uart_data_entry(_id, _hwid)				\
	imx_imx_uart_1irq_data_entry(MX27, _id, _hwid, SZ_4K)
	imx27_imx_uart_data_entry(0, 1),
	imx27_imx_uart_data_entry(1, 2),
	imx27_imx_uart_data_entry(2, 3),
	imx27_imx_uart_data_entry(3, 4),
	imx27_imx_uart_data_entry(4, 5),
	imx27_imx_uart_data_entry(5, 6),
};
#endif /* ifdef CONFIG_SOC_IMX27 */

#ifdef CONFIG_SOC_IMX31
const struct imx_imx_uart_1irq_data imx31_imx_uart_data[] __initconst = {
#define imx31_imx_uart_data_entry(_id, _hwid)				\
	imx_imx_uart_1irq_data_entry(MX31, _id, _hwid, SZ_4K)
	imx31_imx_uart_data_entry(0, 1),
	imx31_imx_uart_data_entry(1, 2),
	imx31_imx_uart_data_entry(2, 3),
	imx31_imx_uart_data_entry(3, 4),
	imx31_imx_uart_data_entry(4, 5),
};
#endif /* ifdef CONFIG_SOC_IMX31 */

#ifdef CONFIG_SOC_IMX35
const struct imx_imx_uart_1irq_data imx35_imx_uart_data[] __initconst = {
/*
 * Fix: this macro previously expanded with the MX31 prefix, so the
 * i.MX35 table was filled with i.MX31 base addresses and interrupts.
 */
#define imx35_imx_uart_data_entry(_id, _hwid)				\
	imx_imx_uart_1irq_data_entry(MX35, _id, _hwid, SZ_16K)
	imx35_imx_uart_data_entry(0, 1),
	imx35_imx_uart_data_entry(1, 2),
	imx35_imx_uart_data_entry(2, 3),
};
#endif /* ifdef CONFIG_SOC_IMX35 */

#ifdef CONFIG_SOC_IMX50
const struct imx_imx_uart_1irq_data imx50_imx_uart_data[] __initconst = {
#define imx50_imx_uart_data_entry(_id, _hwid)				\
	imx_imx_uart_1irq_data_entry(MX50, _id, _hwid, SZ_4K)
	imx50_imx_uart_data_entry(0, 1),
	imx50_imx_uart_data_entry(1, 2),
	imx50_imx_uart_data_entry(2, 3),
	imx50_imx_uart_data_entry(3, 4),
	imx50_imx_uart_data_entry(4, 5),
};
#endif /* ifdef CONFIG_SOC_IMX50 */

#ifdef CONFIG_SOC_IMX51
const struct imx_imx_uart_1irq_data imx51_imx_uart_data[] __initconst = {
#define imx51_imx_uart_data_entry(_id, _hwid)				\
	imx_imx_uart_1irq_data_entry(MX51, _id, _hwid, SZ_4K)
	imx51_imx_uart_data_entry(0, 1),
	imx51_imx_uart_data_entry(1, 2),
	imx51_imx_uart_data_entry(2, 3),
};
#endif /* ifdef CONFIG_SOC_IMX51 */

#ifdef CONFIG_SOC_IMX53
const struct imx_imx_uart_1irq_data imx53_imx_uart_data[] __initconst = {
#define imx53_imx_uart_data_entry(_id, _hwid)				\
	imx_imx_uart_1irq_data_entry(MX53, _id, _hwid, SZ_4K)
	imx53_imx_uart_data_entry(0, 1),
	imx53_imx_uart_data_entry(1, 2),
	imx53_imx_uart_data_entry(2, 3),
};
#endif /* ifdef CONFIG_SOC_IMX53 */

/*
 * imx_add_imx_uart_3irq - register an "imx-uart" platform device for a
 * three-interrupt UART (i.MX1).  Resources: one MEM region and the RX,
 * TX and RTS interrupts, in that order.
 */
struct platform_device *__init imx_add_imx_uart_3irq(
		const struct imx_imx_uart_3irq_data *data,
		const struct imxuart_platform_data *pdata)
{
	struct resource res[] = {
		{
			.start = data->iobase,
			.end = data->iobase + data->iosize - 1,
			.flags = IORESOURCE_MEM,
		}, {
			.start = data->irqrx,
			.end = data->irqrx,
			.flags = IORESOURCE_IRQ,
		}, {
			.start = data->irqtx,
			.end = data->irqtx,
			.flags = IORESOURCE_IRQ,
		}, {
			.start = data->irqrts,
			/* Fix: was data->irqrx; the RTS irq resource must
			 * cover exactly the RTS interrupt. */
			.end = data->irqrts,
			.flags = IORESOURCE_IRQ,
		},
	};

	return imx_add_platform_device("imx-uart", data->id, res,
			ARRAY_SIZE(res), pdata, sizeof(*pdata));
}

/*
 * imx_add_imx_uart_1irq - register an "imx-uart" platform device for a
 * single-interrupt UART (i.MX21 and later).
 */
struct platform_device *__init imx_add_imx_uart_1irq(
		const struct imx_imx_uart_1irq_data *data,
		const struct imxuart_platform_data *pdata)
{
	struct resource res[] = {
		{
			.start = data->iobase,
			.end = data->iobase + data->iosize - 1,
			.flags = IORESOURCE_MEM,
		}, {
			.start = data->irq,
			.end = data->irq,
			.flags = IORESOURCE_IRQ,
		},
	};

	return imx_add_platform_device("imx-uart", data->id, res,
			ARRAY_SIZE(res), pdata, sizeof(*pdata));
}
gpl-2.0
sub77-bkp/android_kernel_samsung_golden
arch/mips/mm/fault.c
2463
7658
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>

#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/highmem.h>		/* For VMALLOC_END */
#include <linux/kdebug.h>

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write,
	unsigned long address)
{
	struct vm_area_struct * vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	/* Digits needed to print a pointer: 2 hex chars per byte. */
	const int field = sizeof(unsigned long) * 2;
	siginfo_t info;
	int fault;

#if 0
	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
	       current->comm, current->pid, field, address, write,
	       field, regs->cp0_epc);
#endif

#ifdef CONFIG_KPROBES
	/*
	 * This is to notify the fault handler of the kprobes.  The
	 * exception code is redundant as it is also carried in REGS,
	 * but we pass it anyhow.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
		       (regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
		return;
#endif

	/* Default: fault at an unmapped address; may be refined below. */
	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
#ifdef CONFIG_64BIT
	/* 64-bit kernels never walk the software page table here;
	 * such faults go straight to no_context. */
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif

	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto VMALLOC_FAULT_TARGET;
#ifdef MODULE_START
	if (unlikely(address >= MODULE_START && address < MODULE_END))
		goto VMALLOC_FAULT_TARGET;
#endif

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	/* Address is below the vma: only valid if the vma is a
	 * downward-growing stack that can be expanded to cover it. */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	/* From here on, failures are permission errors, not bad mappings. */
	info.si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (kernel_uses_smartmips_rixi) {
			/* SmartMIPS RIXI: execute (XI) and read (RI)
			 * permissions are checked separately. */
			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
			if (!(vma->vm_flags & VM_READ)) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
		} else {
			if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	/* Account the fault as major or minor for perf and task stats. */
	if (fault & VM_FAULT_MAJOR) {
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
			      regs, address);
		tsk->maj_flt++;
	} else {
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
			      regs, address);
		tsk->min_flt++;
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
#if 0
		printk("do_page_fault() #2: sending SIGSEGV to %s for "
		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       field, address,
		       field, (unsigned long) regs->cp0_epc,
		       field, (unsigned long) regs->regs[31]);
#endif
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
	       "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
	       raw_smp_processor_id(), field, address, field, regs->cp0_epc,
	       field,  regs->regs[31]);
	die("Oops", regs);

out_of_memory:
	/*
	 * We ran out of memory, call the OOM killer, and return the userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
	up_read(&mm->mmap_sem);
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	else
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
#if 0
		printk("do_page_fault() #3: sending SIGBUS to %s for "
		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       field, address,
		       field, (unsigned long) regs->cp0_epc,
		       field, (unsigned long) regs->regs[31]);
#endif
	tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, tsk);

	return;
#ifndef CONFIG_64BIT
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		/* Walk both the per-CPU pgd and init_mm's reference pgd
		 * in lockstep, copying missing top-level entries over. */
		pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif
}
gpl-2.0
MSM8939-Samsung/android_kernel_samsung_a7lte
fs/ext4/symlink.c
3231
1320
/* * linux/fs/ext4/symlink.c * * Only fast symlinks left here - the rest is done by generic code. AV, 1999 * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/symlink.c * * Copyright (C) 1991, 1992 Linus Torvalds * * ext4 symlink handling code */ #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/namei.h> #include "ext4.h" #include "xattr.h" static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd) { struct ext4_inode_info *ei = EXT4_I(dentry->d_inode); nd_set_link(nd, (char *) ei->i_data); return NULL; } const struct inode_operations ext4_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, .setattr = ext4_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext4_listxattr, .removexattr = generic_removexattr, }; const struct inode_operations ext4_fast_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = ext4_follow_link, .setattr = ext4_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext4_listxattr, .removexattr = generic_removexattr, };
gpl-2.0
chinghanyu/Cognet-RPi-linux
arch/ia64/sn/kernel/io_common.c
4511
15267
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved. */ #include <linux/bootmem.h> #include <linux/export.h> #include <linux/slab.h> #include <asm/sn/types.h> #include <asm/sn/addrs.h> #include <asm/sn/sn_feature_sets.h> #include <asm/sn/geo.h> #include <asm/sn/io.h> #include <asm/sn/l1.h> #include <asm/sn/module.h> #include <asm/sn/pcibr_provider.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> #include <asm/sn/simulator.h> #include <asm/sn/sn_sal.h> #include <asm/sn/tioca_provider.h> #include <asm/sn/tioce_provider.h> #include "xtalk/hubdev.h" #include "xtalk/xwidgetdev.h" #include <linux/acpi.h> #include <asm/sn/sn2/sn_hwperf.h> #include <asm/sn/acpi.h> extern void sn_init_cpei_timer(void); extern void register_sn_procfs(void); extern void sn_io_acpi_init(void); extern void sn_io_init(void); static struct list_head sn_sysdata_list; /* sysdata list struct */ struct sysdata_el { struct list_head entry; void *sysdata; }; int sn_ioif_inited; /* SN I/O infrastructure initialized? 
*/ int sn_acpi_rev; /* SN ACPI revision */ EXPORT_SYMBOL_GPL(sn_acpi_rev); struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */ /* * Hooks and struct for unsupported pci providers */ static dma_addr_t sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type) { return 0; } static void sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction) { return; } static void * sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller) { return NULL; } static struct sn_pcibus_provider sn_pci_default_provider = { .dma_map = sn_default_pci_map, .dma_map_consistent = sn_default_pci_map, .dma_unmap = sn_default_pci_unmap, .bus_fixup = sn_default_pci_bus_fixup, }; /* * Retrieve the DMA Flush List given nasid, widget, and device. * This list is needed to implement the WAR - Flush DMA data on PIO Reads. */ static inline u64 sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num, u64 address) { struct ia64_sal_retval ret_stuff; ret_stuff.status = 0; ret_stuff.v0 = 0; SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST, (u64) nasid, (u64) widget_num, (u64) device_num, (u64) address, 0, 0, 0); return ret_stuff.status; } /* * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified * device. */ inline struct pcidev_info * sn_pcidev_info_get(struct pci_dev *dev) { struct pcidev_info *pcidev; list_for_each_entry(pcidev, &(SN_PLATFORM_DATA(dev)->pcidev_info), pdi_list) { if (pcidev->pdi_linux_pcidev == dev) return pcidev; } return NULL; } /* Older PROM flush WAR * * 01/16/06 -- This war will be in place until a new official PROM is released. 
* Additionally note that the struct sn_flush_device_war also has to be * removed from arch/ia64/sn/include/xtalk/hubdev.h */ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, struct sn_flush_device_common *common) { struct sn_flush_device_war *war_list; struct sn_flush_device_war *dev_entry; struct ia64_sal_retval isrv = {0,0,0,0}; printk_once(KERN_WARNING "PROM version < 4.50 -- implementing old PROM flush WAR\n"); war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); BUG_ON(!war_list); SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST, nasid, widget, __pa(war_list), 0, 0, 0 ,0); if (isrv.status) panic("sn_device_fixup_war failed: %s\n", ia64_sal_strerror(isrv.status)); dev_entry = war_list + device; memcpy(common,dev_entry, sizeof(*common)); kfree(war_list); return isrv.status; } /* * sn_common_hubdev_init() - This routine is called to initialize the HUB data * structure for each node in the system. */ void __init sn_common_hubdev_init(struct hubdev_info *hubdev) { struct sn_flush_device_kernel *sn_flush_device_kernel; struct sn_flush_device_kernel *dev_entry; s64 status; int widget, device, size; /* Attach the error interrupt handlers */ if (hubdev->hdi_nasid & 1) /* If TIO */ ice_error_init(hubdev); else hub_error_init(hubdev); for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev; if (!hubdev->hdi_flush_nasid_list.widget_p) return; size = (HUB_WIDGET_ID_MAX + 1) * sizeof(struct sn_flush_device_kernel *); hubdev->hdi_flush_nasid_list.widget_p = kzalloc(size, GFP_KERNEL); BUG_ON(!hubdev->hdi_flush_nasid_list.widget_p); for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { size = DEV_PER_WIDGET * sizeof(struct sn_flush_device_kernel); sn_flush_device_kernel = kzalloc(size, GFP_KERNEL); BUG_ON(!sn_flush_device_kernel); dev_entry = sn_flush_device_kernel; for (device = 0; device < DEV_PER_WIDGET; device++, dev_entry++) { size = sizeof(struct sn_flush_device_common); 
dev_entry->common = kzalloc(size, GFP_KERNEL); BUG_ON(!dev_entry->common); if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST)) status = sal_get_device_dmaflush_list( hubdev->hdi_nasid, widget, device, (u64)(dev_entry->common)); else status = sn_device_fixup_war(hubdev->hdi_nasid, widget, device, dev_entry->common); if (status != SALRET_OK) panic("SAL call failed: %s\n", ia64_sal_strerror(status)); spin_lock_init(&dev_entry->sfdl_flush_lock); } if (sn_flush_device_kernel) hubdev->hdi_flush_nasid_list.widget_p[widget] = sn_flush_device_kernel; } } void sn_pci_unfixup_slot(struct pci_dev *dev) { struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev; sn_irq_unfixup(dev); pci_dev_put(host_pci_dev); pci_dev_put(dev); } /* * sn_pci_fixup_slot() */ void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *pcidev_info, struct sn_irq_info *sn_irq_info) { int segment = pci_domain_nr(dev->bus); struct pcibus_bussoft *bs; struct pci_dev *host_pci_dev; unsigned int bus_no, devfn; pci_dev_get(dev); /* for the sysdata pointer */ /* Add pcidev_info to list in pci_controller.platform_data */ list_add_tail(&pcidev_info->pdi_list, &(SN_PLATFORM_DATA(dev->bus)->pcidev_info)); /* * Using the PROMs values for the PCI host bus, get the Linux * PCI host_pci_dev struct and set up host bus linkages */ bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff; devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff; host_pci_dev = pci_get_domain_bus_and_slot(segment, bus_no, devfn); pcidev_info->host_pci_dev = host_pci_dev; pcidev_info->pdi_linux_pcidev = dev; pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev); bs = SN_PCIBUS_BUSSOFT(dev->bus); pcidev_info->pdi_pcibus_info = bs; if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) { SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type]; } else { SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider; } /* Only set up IRQ stuff if this device has a host bus context */ if (bs && sn_irq_info->irq_irq) { 
pcidev_info->pdi_sn_irq_info = sn_irq_info; dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq; sn_irq_fixup(dev, sn_irq_info); } else { pcidev_info->pdi_sn_irq_info = NULL; kfree(sn_irq_info); } } /* * sn_common_bus_fixup - Perform platform specific bus fixup. * Execute the ASIC specific fixup routine * for this bus. */ void sn_common_bus_fixup(struct pci_bus *bus, struct pcibus_bussoft *prom_bussoft_ptr) { int cnode; struct pci_controller *controller; struct hubdev_info *hubdev_info; int nasid; void *provider_soft; struct sn_pcibus_provider *provider; struct sn_platform_data *sn_platform_data; controller = PCI_CONTROLLER(bus); /* * Per-provider fixup. Copies the bus soft structure from prom * to local area and links SN_PCIBUS_BUSSOFT(). */ if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) { printk(KERN_WARNING "sn_common_bus_fixup: Unsupported asic type, %d", prom_bussoft_ptr->bs_asic_type); return; } if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) return; /* no further fixup necessary */ provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type]; if (provider == NULL) panic("sn_common_bus_fixup: No provider registered for this asic type, %d", prom_bussoft_ptr->bs_asic_type); if (provider->bus_fixup) provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller); else provider_soft = NULL; /* * Generic bus fixup goes here. Don't reference prom_bussoft_ptr * after this point. 
*/ controller->platform_data = kzalloc(sizeof(struct sn_platform_data), GFP_KERNEL); BUG_ON(controller->platform_data == NULL); sn_platform_data = (struct sn_platform_data *) controller->platform_data; sn_platform_data->provider_soft = provider_soft; INIT_LIST_HEAD(&((struct sn_platform_data *) controller->platform_data)->pcidev_info); nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base); cnode = nasid_to_cnodeid(nasid); hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info = &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]); /* * If the node information we obtained during the fixup phase is * invalid then set controller->node to -1 (undetermined) */ if (controller->node >= num_online_nodes()) { struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus); printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u " "L_IO=%llx L_MEM=%llx BASE=%llx\n", b->bs_asic_type, b->bs_xid, b->bs_persist_busnum, b->bs_legacy_io, b->bs_legacy_mem, b->bs_base); printk(KERN_WARNING "on node %d but only %d nodes online." "Association set to undetermined.\n", controller->node, num_online_nodes()); controller->node = -1; } } void sn_bus_store_sysdata(struct pci_dev *dev) { struct sysdata_el *element; element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL); if (!element) { dev_dbg(&dev->dev, "%s: out of memory!\n", __func__); return; } element->sysdata = SN_PCIDEV_INFO(dev); list_add(&element->entry, &sn_sysdata_list); } void sn_bus_free_sysdata(void) { struct sysdata_el *element; struct list_head *list, *safe; list_for_each_safe(list, safe, &sn_sysdata_list) { element = list_entry(list, struct sysdata_el, entry); list_del(&element->entry); list_del(&(((struct pcidev_info *) (element->sysdata))->pdi_list)); kfree(element->sysdata); kfree(element); } return; } /* * hubdev_init_node() - Creates the HUB data structure and link them to it's * own NODE specific data area. 
*/ void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node) { struct hubdev_info *hubdev_info; int size; pg_data_t *pg; size = sizeof(struct hubdev_info); if (node >= num_online_nodes()) /* Headless/memless IO nodes */ pg = NODE_DATA(0); else pg = NODE_DATA(node); hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size); npda->pdinfo = (void *)hubdev_info; } geoid_t cnodeid_get_geoid(cnodeid_t cnode) { struct hubdev_info *hubdev; hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); return hubdev->hdi_geoid; } void sn_generate_path(struct pci_bus *pci_bus, char *address) { nasid_t nasid; cnodeid_t cnode; geoid_t geoid; moduleid_t moduleid; u16 bricktype; nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base); cnode = nasid_to_cnodeid(nasid); geoid = cnodeid_get_geoid(cnode); moduleid = geo_module(geoid); sprintf(address, "module_%c%c%c%c%.2d", '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)), '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)), '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)), MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid)); /* Tollhouse requires slot id to be displayed */ bricktype = MODULE_GET_BTYPE(moduleid); if ((bricktype == L1_BRICKTYPE_191010) || (bricktype == L1_BRICKTYPE_1932)) sprintf(address + strlen(address), "^%d", geo_slot(geoid)); } void sn_pci_fixup_bus(struct pci_bus *bus) { if (SN_ACPI_BASE_SUPPORT()) sn_acpi_bus_fixup(bus); else sn_bus_fixup(bus); } /* * sn_io_early_init - Perform early IO (and some non-IO) initialization. * In particular, setup the sn_pci_provider[] array. * This needs to be done prior to any bus scanning * (acpi_scan_init()) in the ACPI case, as the SN * bus fixup code will reference the array. */ static int __init sn_io_early_init(void) { int i; if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM()) return 0; /* we set the acpi revision to that of the DSDT table OEM rev. 
*/ { struct acpi_table_header *header = NULL; acpi_get_table(ACPI_SIG_DSDT, 1, &header); BUG_ON(header == NULL); sn_acpi_rev = header->oem_revision; } /* * prime sn_pci_provider[]. Individual provider init routines will * override their respective default entries. */ for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++) sn_pci_provider[i] = &sn_pci_default_provider; pcibr_init_provider(); tioca_init_provider(); tioce_init_provider(); /* * This is needed to avoid bounce limit checks in the blk layer */ ia64_max_iommu_merge_mask = ~PAGE_MASK; sn_irq_lh_init(); INIT_LIST_HEAD(&sn_sysdata_list); sn_init_cpei_timer(); #ifdef CONFIG_PROC_FS register_sn_procfs(); #endif { struct acpi_table_header *header; (void)acpi_get_table(ACPI_SIG_DSDT, 1, &header); printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n", header->oem_revision); } if (SN_ACPI_BASE_SUPPORT()) sn_io_acpi_init(); else sn_io_init(); return 0; } arch_initcall(sn_io_early_init); /* * sn_io_late_init() - Perform any final platform specific IO initialization. */ int __init sn_io_late_init(void) { struct pci_bus *bus; struct pcibus_bussoft *bussoft; cnodeid_t cnode; nasid_t nasid; cnodeid_t near_cnode; if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM()) return 0; /* * Setup closest node in pci_controller->node for * PIC, TIOCP, TIOCE (TIOCA does it during bus fixup using * info from the PROM). 
*/ bus = NULL; while ((bus = pci_find_next_bus(bus)) != NULL) { bussoft = SN_PCIBUS_BUSSOFT(bus); nasid = NASID_GET(bussoft->bs_base); cnode = nasid_to_cnodeid(nasid); if ((bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) || (bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCE) || (bussoft->bs_asic_type == PCIIO_ASIC_TYPE_PIC)) { /* PCI Bridge: find nearest node with CPUs */ int e = sn_hwperf_get_nearest_node(cnode, NULL, &near_cnode); if (e < 0) { near_cnode = (cnodeid_t)-1; /* use any node */ printk(KERN_WARNING "sn_io_late_init: failed " "to find near node with CPUs for " "node %d, err=%d\n", cnode, e); } PCI_CONTROLLER(bus)->node = near_cnode; } } sn_ioif_inited = 1; /* SN I/O infrastructure now initialized */ return 0; } fs_initcall(sn_io_late_init); EXPORT_SYMBOL(sn_pci_unfixup_slot); EXPORT_SYMBOL(sn_bus_store_sysdata); EXPORT_SYMBOL(sn_bus_free_sysdata); EXPORT_SYMBOL(sn_generate_path);
gpl-2.0
magetron/linux
drivers/connector/cn_queue.c
4767
3931
/* * cn_queue.c * * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/list.h> #include <linux/workqueue.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/suspend.h> #include <linux/connector.h> #include <linux/delay.h> static struct cn_callback_entry * cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *)) { struct cn_callback_entry *cbq; cbq = kzalloc(sizeof(*cbq), GFP_KERNEL); if (!cbq) { pr_err("Failed to create new callback queue.\n"); return NULL; } atomic_set(&cbq->refcnt, 1); atomic_inc(&dev->refcnt); cbq->pdev = dev; snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name); memcpy(&cbq->id.id, id, sizeof(struct cb_id)); cbq->callback = callback; return cbq; } void cn_queue_release_callback(struct cn_callback_entry *cbq) { if (!atomic_dec_and_test(&cbq->refcnt)) return; atomic_dec(&cbq->pdev->refcnt); kfree(cbq); } int cn_cb_equal(struct cb_id *i1, struct cb_id *i2) { return ((i1->idx == i2->idx) && (i1->val == i2->val)); } int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name, struct cb_id *id, void 
(*callback)(struct cn_msg *, struct netlink_skb_parms *)) { struct cn_callback_entry *cbq, *__cbq; int found = 0; cbq = cn_queue_alloc_callback_entry(dev, name, id, callback); if (!cbq) return -ENOMEM; spin_lock_bh(&dev->queue_lock); list_for_each_entry(__cbq, &dev->queue_list, callback_entry) { if (cn_cb_equal(&__cbq->id.id, id)) { found = 1; break; } } if (!found) list_add_tail(&cbq->callback_entry, &dev->queue_list); spin_unlock_bh(&dev->queue_lock); if (found) { cn_queue_release_callback(cbq); return -EINVAL; } cbq->seq = 0; cbq->group = cbq->id.id.idx; return 0; } void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id) { struct cn_callback_entry *cbq, *n; int found = 0; spin_lock_bh(&dev->queue_lock); list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) { if (cn_cb_equal(&cbq->id.id, id)) { list_del(&cbq->callback_entry); found = 1; break; } } spin_unlock_bh(&dev->queue_lock); if (found) cn_queue_release_callback(cbq); } struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls) { struct cn_queue_dev *dev; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return NULL; snprintf(dev->name, sizeof(dev->name), "%s", name); atomic_set(&dev->refcnt, 0); INIT_LIST_HEAD(&dev->queue_list); spin_lock_init(&dev->queue_lock); dev->nls = nls; return dev; } void cn_queue_free_dev(struct cn_queue_dev *dev) { struct cn_callback_entry *cbq, *n; spin_lock_bh(&dev->queue_lock); list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) list_del(&cbq->callback_entry); spin_unlock_bh(&dev->queue_lock); while (atomic_read(&dev->refcnt)) { pr_info("Waiting for %s to become free: refcnt=%d.\n", dev->name, atomic_read(&dev->refcnt)); msleep(1000); } kfree(dev); dev = NULL; }
gpl-2.0
shanzin/M7_Lollipop_Kernel
arch/arm/mach-msm/board-mahimahi-smb329.c
5023
4070
/* drivers/i2c/chips/smb329.c * * SMB329B Switch Charger (SUMMIT Microelectronics) * * Copyright (C) 2009 HTC Corporation * Author: Justin Lin <Justin_Lin@htc.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/mutex.h> #include <asm/atomic.h> #include "board-mahimahi-smb329.h" static struct smb329_data { struct i2c_client *client; uint8_t version; struct work_struct work; struct mutex state_lock; int chg_state; } smb329; static int smb329_i2c_write(uint8_t *value, uint8_t reg, uint8_t num_bytes) { int ret; struct i2c_msg msg; /* write the first byte of buffer as the register address */ value[0] = reg; msg.addr = smb329.client->addr; msg.len = num_bytes + 1; msg.flags = 0; msg.buf = value; ret = i2c_transfer(smb329.client->adapter, &msg, 1); return (ret >= 0) ? 0 : ret; } static int smb329_i2c_read(uint8_t *value, uint8_t reg, uint8_t num_bytes) { int ret; struct i2c_msg msg[2]; /* setup the address to read */ msg[0].addr = smb329.client->addr; msg[0].len = 1; msg[0].flags = 0; msg[0].buf = &reg; /* setup the read buffer */ msg[1].addr = smb329.client->addr; msg[1].flags = I2C_M_RD; msg[1].len = num_bytes; msg[1].buf = value; ret = i2c_transfer(smb329.client->adapter, msg, 2); return (ret >= 0) ? 
0 : ret; } static int smb329_i2c_write_byte(uint8_t value, uint8_t reg) { int ret; uint8_t buf[2] = { 0 }; buf[1] = value; ret = smb329_i2c_write(buf, reg, 1); if (ret) pr_err("smb329: write byte error (%d)\n", ret); return ret; } static int smb329_i2c_read_byte(uint8_t *value, uint8_t reg) { int ret = smb329_i2c_read(value, reg, 1); if (ret) pr_err("smb329: read byte error (%d)\n", ret); return ret; } int smb329_set_charger_ctrl(uint32_t ctl) { mutex_lock(&smb329.state_lock); smb329.chg_state = ctl; schedule_work(&smb329.work); mutex_unlock(&smb329.state_lock); return 0; } static void smb329_work_func(struct work_struct *work) { mutex_lock(&smb329.state_lock); switch (smb329.chg_state) { case SMB329_ENABLE_FAST_CHG: pr_info("smb329: charger on (fast)\n"); smb329_i2c_write_byte(0x84, 0x31); smb329_i2c_write_byte(0x08, 0x05); if ((smb329.version & 0x18) == 0x0) smb329_i2c_write_byte(0xA9, 0x00); break; case SMB329_DISABLE_CHG: case SMB329_ENABLE_SLOW_CHG: pr_info("smb329: charger off/slow\n"); smb329_i2c_write_byte(0x88, 0x31); smb329_i2c_write_byte(0x08, 0x05); break; default: pr_err("smb329: unknown charger state %d\n", smb329.chg_state); } mutex_unlock(&smb329.state_lock); } static int smb329_probe(struct i2c_client *client, const struct i2c_device_id *id) { if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) { dev_dbg(&client->dev, "[SMB329]:I2C fail\n"); return -EIO; } smb329.client = client; mutex_init(&smb329.state_lock); INIT_WORK(&smb329.work, smb329_work_func); smb329_i2c_read_byte(&smb329.version, 0x3B); pr_info("smb329 version: 0x%02x\n", smb329.version); return 0; } static const struct i2c_device_id smb329_id[] = { { "smb329", 0 }, { }, }; static struct i2c_driver smb329_driver = { .driver.name = "smb329", .id_table = smb329_id, .probe = smb329_probe, }; static int __init smb329_init(void) { int ret = i2c_add_driver(&smb329_driver); if (ret) pr_err("smb329_init: failed\n"); return ret; } module_init(smb329_init); MODULE_AUTHOR("Justin Lin 
<Justin_Lin@htc.com>"); MODULE_DESCRIPTION("SUMMIT Microelectronics SMB329B switch charger"); MODULE_LICENSE("GPL");
gpl-2.0