repo_name
string
path
string
copies
string
size
string
content
string
license
string
playfulgod/kernel_lge_msm-3.0
drivers/media/dvb/frontends/dibx000_common.c
590
12138
#include <linux/i2c.h>
#include "dibx000_common.h"

/* Module-wide debug switch; non-zero enables dprintk() output. */
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");

#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiBX000: "); printk(args); printk("\n"); } } while (0)

/*
 * Write one 16-bit register of the demod over the host I2C bus.
 * Returns 0 on success, -EREMOTEIO when the transfer did not complete.
 */
static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
{
	mst->i2c_write_buffer[0] = (reg >> 8) & 0xff;
	mst->i2c_write_buffer[1] = reg & 0xff;
	mst->i2c_write_buffer[2] = (val >> 8) & 0xff;
	mst->i2c_write_buffer[3] = val & 0xff;

	memset(mst->msg, 0, sizeof(struct i2c_msg));
	mst->msg[0].addr = mst->i2c_addr;
	mst->msg[0].flags = 0;
	mst->msg[0].buf = mst->i2c_write_buffer;
	mst->msg[0].len = 4;

	return i2c_transfer(mst->i2c_adap, mst->msg, 1) != 1 ? -EREMOTEIO : 0;
}

/*
 * Read one 16-bit register: a 2-byte register-address write followed by a
 * 2-byte read.  On I2C failure only a debug message is emitted and whatever
 * is left in the read buffer is returned.
 */
static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
{
	mst->i2c_write_buffer[0] = reg >> 8;
	mst->i2c_write_buffer[1] = reg & 0xff;

	memset(mst->msg, 0, 2 * sizeof(struct i2c_msg));
	mst->msg[0].addr = mst->i2c_addr;
	mst->msg[0].flags = 0;
	mst->msg[0].buf = mst->i2c_write_buffer;
	mst->msg[0].len = 2;
	mst->msg[1].addr = mst->i2c_addr;
	mst->msg[1].flags = I2C_M_RD;
	mst->msg[1].buf = mst->i2c_read_buffer;
	mst->msg[1].len = 2;

	if (i2c_transfer(mst->i2c_adap, mst->msg, 2) != 2)
		dprintk("i2c read error on %d", reg);

	return (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1];
}

/*
 * Busy-poll the gated-master status register (base_reg + 2) until the
 * "done" bit (0x0100) is set or up to 100 polls have elapsed.
 * Returns 0 on success, -EREMOTEIO on timeout or missing slave ACK
 * (ACK reported in bit 0x0080).
 */
static int dibx000_is_i2c_done(struct dibx000_i2c_master *mst)
{
	int i = 100;
	u16 status;

	while (((status = dibx000_read_word(mst, mst->base_reg + 2)) & 0x0100) == 0 && --i > 0)
		;

	/* i2c timed out */
	if (i == 0)
		return -EREMOTEIO;

	/* no acknowledge */
	if ((status & 0x0080) == 0)
		return -EREMOTEIO;

	return 0;
}

/*
 * Issue a write transaction through the on-chip I2C master, in chunks of at
 * most 8 data bytes (the controller's FIFO depth).  Data bytes are loaded
 * 16 bits at a time into base_reg, then a command word at base_reg + 1
 * starts the chunk.  'stop' controls whether a STOP condition terminates
 * the final chunk.  Returns 0 on success, -EREMOTEIO on failure.
 */
static int dibx000_master_i2c_write(struct dibx000_i2c_master *mst, struct i2c_msg *msg, u8 stop)
{
	u16 data;
	u16 da;
	u16 i;
	u16 txlen = msg->len, len;
	const u8 *b = msg->buf;

	while (txlen) {
		dibx000_read_word(mst, mst->base_reg + 2);

		len = txlen > 8 ? 8 : txlen;
		for (i = 0; i < len; i += 2) {
			data = *b++ << 8;
			if (i+1 < len)
				data |= *b++;
			dibx000_write_word(mst, mst->base_reg, data);
		}

		/*
		 * Command word: slave address, enable/master bits, chunk
		 * length and direction.
		 * NOTE(review): len can be 8 here and (8 & 0x7) == 0 —
		 * presumably the controller encodes a full 8-byte chunk as
		 * length 0; confirm against the DiBcom datasheet.
		 */
		da = (((u8) (msg->addr)) << 9) |
			(1 << 8) |
			(1 << 7) |
			(0 << 6) |
			(0 << 5) |
			((len & 0x7) << 2) |
			(0 << 1) |
			(0 << 0);

		if (txlen == msg->len)
			da |= 1 << 5; /* start */

		if (txlen-len == 0 && stop)
			da |= 1 << 6; /* stop */

		dibx000_write_word(mst, mst->base_reg+1, da);

		if (dibx000_is_i2c_done(mst) != 0)
			return -EREMOTEIO;
		txlen -= len;
	}

	return 0;
}

/*
 * Issue a read transaction through the on-chip I2C master, in chunks of at
 * most 8 bytes.  After each chunk completes, the received bytes are drained
 * 16 bits at a time from base_reg.  Returns 0 on success, -EREMOTEIO on
 * failure.
 */
static int dibx000_master_i2c_read(struct dibx000_i2c_master *mst, struct i2c_msg *msg)
{
	u16 da;
	u8 *b = msg->buf;
	u16 rxlen = msg->len, len;

	while (rxlen) {
		len = rxlen > 8 ? 8 : rxlen;
		da = (((u8) (msg->addr)) << 9) |
			(1 << 8) |
			(1 << 7) |
			(0 << 6) |
			(0 << 5) |
			((len & 0x7) << 2) |
			(1 << 1) |
			(0 << 0);

		if (rxlen == msg->len)
			da |= 1 << 5; /* start */

		if (rxlen-len == 0)
			da |= 1 << 6; /* stop */

		dibx000_write_word(mst, mst->base_reg+1, da);

		if (dibx000_is_i2c_done(mst) != 0)
			return -EREMOTEIO;

		rxlen -= len;

		/* drain the chunk: each word holds up to two received bytes */
		while (len) {
			da = dibx000_read_word(mst, mst->base_reg);
			*b++ = (da >> 8) & 0xff;
			len--;
			if (len >= 1) {
				*b++ = da & 0xff;
				len--;
			}
		}
	}

	return 0;
}

/*
 * Program the on-chip I2C master clock divider for the requested bus speed.
 * Pre-DIB7000MC parts are clamped to a minimum of 235 kHz.
 */
int dibx000_i2c_set_speed(struct i2c_adapter *i2c_adap, u16 speed)
{
	struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);

	if (mst->device_rev < DIB7000MC && speed < 235)
		speed = 235;
	return dibx000_write_word(mst, mst->base_reg + 3, (u16)(60000 / speed));
}
EXPORT_SYMBOL(dibx000_i2c_set_speed);

static u32 dibx000_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C;
}

/*
 * Route the on-chip master to one of the chip's physical I2C interfaces.
 * Only devices newer than DIB3000MC have the selection register; the write
 * is skipped when the interface is already selected.
 */
static int dibx000_i2c_select_interface(struct dibx000_i2c_master *mst,
					enum dibx000_i2c_interface intf)
{
	if (mst->device_rev > DIB3000MC && mst->selected_interface != intf) {
		dprintk("selecting interface: %d", intf);
		mst->selected_interface = intf;
		return dibx000_write_word(mst, mst->base_reg + 4, intf);
	}
	return 0;
}

/*
 * i2c_algorithm master_xfer for the GPIO 1/2 interface.
 * NOTE(review): on failure this returns 0 instead of a negative errno,
 * which is unusual for a master_xfer implementation — confirm intent.
 */
static int dibx000_i2c_master_xfer_gpio12(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
{
	struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
	int msg_index;
	int ret = 0;

	dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_1_2);
	for (msg_index = 0; msg_index < num; msg_index++) {
		if (msg[msg_index].flags & I2C_M_RD) {
			ret = dibx000_master_i2c_read(mst, &msg[msg_index]);
			if (ret != 0)
				return 0;
		} else {
			ret = dibx000_master_i2c_write(mst, &msg[msg_index], 1);
			if (ret != 0)
				return 0;
		}
	}

	return num;
}

/* Same as above, but for the GPIO 3/4 interface. */
static int dibx000_i2c_master_xfer_gpio34(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
{
	struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
	int msg_index;
	int ret = 0;

	dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_3_4);
	for (msg_index = 0; msg_index < num; msg_index++) {
		if (msg[msg_index].flags & I2C_M_RD) {
			ret = dibx000_master_i2c_read(mst, &msg[msg_index]);
			if (ret != 0)
				return 0;
		} else {
			ret = dibx000_master_i2c_write(mst, &msg[msg_index], 1);
			if (ret != 0)
				return 0;
		}
	}

	return num;
}

static struct i2c_algorithm dibx000_i2c_master_gpio12_xfer_algo = {
	.master_xfer = dibx000_i2c_master_xfer_gpio12,
	.functionality = dibx000_i2c_func,
};

static struct i2c_algorithm dibx000_i2c_master_gpio34_xfer_algo = {
	.master_xfer = dibx000_i2c_master_xfer_gpio34,
	.functionality = dibx000_i2c_func,
};

/*
 * Build the 4-byte register write that opens (onoff != 0, gate routed to
 * slave address 'addr') or closes the I2C gate.  The payload is written to
 * tx[]; the caller performs the actual transfer.
 */
static int dibx000_i2c_gate_ctrl(struct dibx000_i2c_master *mst, u8 tx[4], u8 addr, int onoff)
{
	u16 val;

	if (onoff)
		val = addr << 8; // bit 7 = use master or not, if 0, the gate is open
	else
		val = 1 << 7;

	if (mst->device_rev > DIB7000)
		val <<= 1;

	tx[0] = (((mst->base_reg + 1) >> 8) & 0xff);
	tx[1] = ((mst->base_reg + 1) & 0xff);
	tx[2] = val >> 8;
	tx[3] = val & 0xff;

	return 0;
}

/*
 * Gated transfer on the GPIO 6/7 interface: the caller's messages are
 * wrapped between an "open gate" and a "close gate" write and submitted as
 * one i2c_transfer.  mst->msg has room for 32 messages plus the two gate
 * writes, hence the hard limit.
 */
static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
					struct i2c_msg msg[], int num)
{
	struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);

	if (num > 32) {
		dprintk("%s: too much I2C message to be transmitted (%i).\
 Maximum is 32", __func__, num);
		return -ENOMEM;
	}

	memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));

	dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_6_7);

	/* open the gate */
	dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1);
	mst->msg[0].addr = mst->i2c_addr;
	mst->msg[0].buf = &mst->i2c_write_buffer[0];
	mst->msg[0].len = 4;

	memcpy(&mst->msg[1], msg, sizeof(struct i2c_msg) * num);

	/* close the gate */
	dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[4], 0, 0);
	mst->msg[num + 1].addr = mst->i2c_addr;
	mst->msg[num + 1].buf = &mst->i2c_write_buffer[4];
	mst->msg[num + 1].len = 4;

	return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
}

static struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = {
	.master_xfer = dibx000_i2c_gated_gpio67_xfer,
	.functionality = dibx000_i2c_func,
};

/* Gated transfer on the tuner interface; same wrapping scheme as GPIO 6/7. */
static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
					struct i2c_msg msg[], int num)
{
	struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);

	if (num > 32) {
		dprintk("%s: too much I2C message to be transmitted (%i).\
 Maximum is 32", __func__, num);
		return -ENOMEM;
	}

	memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));

	dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);

	/* open the gate */
	dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1);
	mst->msg[0].addr = mst->i2c_addr;
	mst->msg[0].buf = &mst->i2c_write_buffer[0];
	mst->msg[0].len = 4;

	memcpy(&mst->msg[1], msg, sizeof(struct i2c_msg) * num);

	/* close the gate */
	dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[4], 0, 0);
	mst->msg[num + 1].addr = mst->i2c_addr;
	mst->msg[num + 1].buf = &mst->i2c_write_buffer[4];
	mst->msg[num + 1].len = 4;

	return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
}

static struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
	.master_xfer = dibx000_i2c_gated_tuner_xfer,
	.functionality = dibx000_i2c_func,
};

/*
 * Return the i2c_adapter matching the requested interface and gating mode,
 * or NULL when the combination does not exist (tuner and GPIO 6/7 are only
 * available gated; GPIO 1/2 and 3/4 only ungated).
 */
struct i2c_adapter *dibx000_get_i2c_adapter(struct dibx000_i2c_master *mst,
						enum dibx000_i2c_interface intf,
						int gating)
{
	struct i2c_adapter *i2c = NULL;

	switch (intf) {
	case DIBX000_I2C_INTERFACE_TUNER:
		if (gating)
			i2c = &mst->gated_tuner_i2c_adap;
		break;
	case DIBX000_I2C_INTERFACE_GPIO_1_2:
		if (!gating)
			i2c = &mst->master_i2c_adap_gpio12;
		break;
	case DIBX000_I2C_INTERFACE_GPIO_3_4:
		if (!gating)
			i2c = &mst->master_i2c_adap_gpio34;
		break;
	case DIBX000_I2C_INTERFACE_GPIO_6_7:
		if (gating)
			i2c = &mst->master_i2c_adap_gpio67;
		break;
	default:
		printk(KERN_ERR "DiBX000: incorrect I2C interface selected\n");
		break;
	}

	return i2c;
}
EXPORT_SYMBOL(dibx000_get_i2c_adapter);

void dibx000_reset_i2c_master(struct dibx000_i2c_master *mst)
{
	/* initialize the i2c-master by closing the gate */
	u8 tx[4];
	struct i2c_msg m = {.addr = mst->i2c_addr,.buf = tx,.len = 4 };

	dibx000_i2c_gate_ctrl(mst, tx, 0, 0);
	i2c_transfer(mst->i2c_adap, &m, 1);
	mst->selected_interface = 0xff;	// the first time force a select of the I2C
	dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);
}
EXPORT_SYMBOL(dibx000_reset_i2c_master);

/*
 * Register one virtual i2c_adapter backed by this master.
 * Returns 0 on success, -ENODEV when i2c_add_adapter() fails.
 */
static int i2c_adapter_init(struct i2c_adapter *i2c_adap,
				struct i2c_algorithm *algo, const char *name,
				struct dibx000_i2c_master *mst)
{
	strncpy(i2c_adap->name, name, sizeof(i2c_adap->name));
	i2c_adap->algo = algo;
	i2c_adap->algo_data = NULL;
	i2c_set_adapdata(i2c_adap, mst);
	if (i2c_add_adapter(i2c_adap) < 0)
		return -ENODEV;
	return 0;
}

/*
 * Initialize the I2C master state and register the four virtual adapters
 * (gated tuner, GPIO 1/2, GPIO 3/4, gated GPIO 6/7), then close the gate.
 * Note: i2c_addr is an 8-bit address and is shifted down to 7 bits here.
 * Returns non-zero (true) when the final gate-closing transfer succeeded.
 */
int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
				struct i2c_adapter *i2c_adap, u8 i2c_addr)
{
	u8 tx[4];
	struct i2c_msg m = {.addr = i2c_addr >> 1,.buf = tx,.len = 4 };

	mst->device_rev = device_rev;
	mst->i2c_adap = i2c_adap;
	mst->i2c_addr = i2c_addr >> 1;

	if (device_rev == DIB7000P || device_rev == DIB8000)
		mst->base_reg = 1024;
	else
		mst->base_reg = 768;

	mst->gated_tuner_i2c_adap.dev.parent = mst->i2c_adap->dev.parent;
	if (i2c_adapter_init
			(&mst->gated_tuner_i2c_adap, &dibx000_i2c_gated_tuner_algo,
			 "DiBX000 tuner I2C bus", mst) != 0)
		printk(KERN_ERR "DiBX000: could not initialize the tuner i2c_adapter\n");

	mst->master_i2c_adap_gpio12.dev.parent = mst->i2c_adap->dev.parent;
	if (i2c_adapter_init(&mst->master_i2c_adap_gpio12,
				&dibx000_i2c_master_gpio12_xfer_algo,
				"DiBX000 master GPIO12 I2C bus", mst) != 0)
		printk(KERN_ERR "DiBX000: could not initialize the master i2c_adapter\n");

	mst->master_i2c_adap_gpio34.dev.parent = mst->i2c_adap->dev.parent;
	if (i2c_adapter_init(&mst->master_i2c_adap_gpio34,
				&dibx000_i2c_master_gpio34_xfer_algo,
				"DiBX000 master GPIO34 I2C bus", mst) != 0)
		printk(KERN_ERR "DiBX000: could not initialize the master i2c_adapter\n");

	mst->master_i2c_adap_gpio67.dev.parent = mst->i2c_adap->dev.parent;
	if (i2c_adapter_init(&mst->master_i2c_adap_gpio67,
				&dibx000_i2c_gated_gpio67_algo,
				"DiBX000 master GPIO67 I2C bus", mst) != 0)
		printk(KERN_ERR "DiBX000: could not initialize the master i2c_adapter\n");

	/* initialize the i2c-master by closing the gate */
	dibx000_i2c_gate_ctrl(mst, tx, 0, 0);

	return i2c_transfer(i2c_adap, &m, 1) == 1;
}
EXPORT_SYMBOL(dibx000_init_i2c_master);

/* Unregister the four virtual adapters created by dibx000_init_i2c_master(). */
void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst)
{
	i2c_del_adapter(&mst->gated_tuner_i2c_adap);
	i2c_del_adapter(&mst->master_i2c_adap_gpio12);
	i2c_del_adapter(&mst->master_i2c_adap_gpio34);
	i2c_del_adapter(&mst->master_i2c_adap_gpio67);
}
EXPORT_SYMBOL(dibx000_exit_i2c_master);

/*
 * Kernel time scaled to tenths of a millisecond
 * (tv_sec * 10000 + tv_nsec / 100000).
 */
u32 systime(void)
{
	struct timespec t;

	t = current_kernel_time();
	return (t.tv_sec * 10000) + (t.tv_nsec / 100000);
}
EXPORT_SYMBOL(systime);

MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
MODULE_DESCRIPTION("Common function the DiBcom demodulator family");
MODULE_LICENSE("GPL");
gpl-2.0
96boards/linux
arch/sh/mm/hugetlbpage.c
590
1198
/*
 * arch/sh/mm/hugetlbpage.c
 *
 * SuperH HugeTLB page support.
 *
 * Cloned from sparc64 by Paul Mundt.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Walk the page-table hierarchy for 'addr', allocating intermediate
 * levels as needed, and return a mapped PTE for the huge page.
 * Returns NULL when any level of the walk cannot be allocated.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd)
		return NULL;

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	return pte_alloc_map(mm, NULL, pmd, addr);
}

/*
 * Look up the PTE for 'addr' without allocating anything.
 * Returns NULL if any intermediate level is absent.
 */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd)
		return NULL;

	pud = pud_offset(pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (!pmd)
		return NULL;

	return pte_offset_map(pmd, addr);
}

/* SuperH does not map huge pages at the PMD level. */
int pmd_huge(pmd_t pmd)
{
	return 0;
}

/* SuperH does not map huge pages at the PUD level. */
int pud_huge(pud_t pud)
{
	return 0;
}
gpl-2.0
greguu/linux-4.4-cxx00
arch/ia64/kernel/setup.c
590
29309
/* * Architecture-specific setup. * * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com> * Copyright (C) 2000, 2004 Intel Corp * Rohit Seth <rohit.seth@intel.com> * Suresh Siddha <suresh.b.siddha@intel.com> * Gordon Jin <gordon.jin@intel.com> * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * * 12/26/04 S.Siddha, G.Jin, R.Seth * Add multi-threading and multi-core detection * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo(). * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map * 03/31/00 R.Seth cpu_initialized and current->processor fixes * 02/04/00 D.Mosberger some more get_cpuinfo fixes... * 02/01/00 R.Seth fixed get_cpuinfo for SMP * 01/07/99 S.Eranian added the support for command line argument * 06/24/99 W.Drummond added boot_cpu_data. * 05/28/05 Z. Menyhart Dynamic stride size for "flush_icache_range()" */ #include <linux/module.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/bootmem.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/reboot.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/threads.h> #include <linux/screen_info.h> #include <linux/dmi.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/efi.h> #include <linux/initrd.h> #include <linux/pm.h> #include <linux/cpufreq.h> #include <linux/kexec.h> #include <linux/crash_dump.h> #include <asm/machvec.h> #include <asm/mca.h> #include <asm/meminit.h> #include <asm/page.h> #include <asm/patch.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/sal.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/smp.h> #include <asm/tlbflush.h> #include <asm/unistd.h> #include <asm/hpsim.h> #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE) # error "struct cpuinfo_ia64 too big!" 
#endif #ifdef CONFIG_SMP unsigned long __per_cpu_offset[NR_CPUS]; EXPORT_SYMBOL(__per_cpu_offset); #endif DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info); DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); unsigned long ia64_cycles_per_usec; struct ia64_boot_param *ia64_boot_param; struct screen_info screen_info; unsigned long vga_console_iobase; unsigned long vga_console_membase; static struct resource data_resource = { .name = "Kernel data", .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; static struct resource code_resource = { .name = "Kernel code", .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; static struct resource bss_resource = { .name = "Kernel bss", .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; unsigned long ia64_max_cacheline_size; unsigned long ia64_iobase; /* virtual address for I/O accesses */ EXPORT_SYMBOL(ia64_iobase); struct io_space io_space[MAX_IO_SPACES]; EXPORT_SYMBOL(io_space); unsigned int num_io_spaces; /* * "flush_icache_range()" needs to know what processor dependent stride size to use * when it makes i-cache(s) coherent with d-caches. */ #define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */ unsigned long ia64_i_cache_stride_shift = ~0; /* * "clflush_cache_range()" needs to know what processor dependent stride size to * use when it flushes cache lines including both d-cache and i-cache. */ /* Safest way to go: 32 bytes by 32 bytes */ #define CACHE_STRIDE_SHIFT 5 unsigned long ia64_cache_stride_shift = ~0; /* * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This * mask specifies a mask of address bits that must be 0 in order for two buffers to be * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start * address of the second buffer must be aligned to (merge_mask+1) in order to be * mergeable). 
By default, we assume there is no I/O MMU which can merge physically * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to a iommu * page-size of 2^64. */ unsigned long ia64_max_iommu_merge_mask = ~0UL; EXPORT_SYMBOL(ia64_max_iommu_merge_mask); /* * We use a special marker for the end of memory and it uses the extra (+1) slot */ struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata; int num_rsvd_regions __initdata; /* * Filter incoming memory segments based on the primitive map created from the boot * parameters. Segments contained in the map are removed from the memory ranges. A * caller-specified function is called with the memory ranges that remain after filtering. * This routine does not assume the incoming segments are sorted. */ int __init filter_rsvd_memory (u64 start, u64 end, void *arg) { u64 range_start, range_end, prev_start; void (*func)(unsigned long, unsigned long, int); int i; #if IGNORE_PFN0 if (start == PAGE_OFFSET) { printk(KERN_WARNING "warning: skipping physical page 0\n"); start += PAGE_SIZE; if (start >= end) return 0; } #endif /* * lowest possible address(walker uses virtual) */ prev_start = PAGE_OFFSET; func = arg; for (i = 0; i < num_rsvd_regions; ++i) { range_start = max(start, prev_start); range_end = min(end, rsvd_region[i].start); if (range_start < range_end) call_pernode_memory(__pa(range_start), range_end - range_start, func); /* nothing more available in this segment */ if (range_end == end) return 0; prev_start = rsvd_region[i].end; } /* end of memory marker allows full processing inside loop body */ return 0; } /* * Similar to "filter_rsvd_memory()", but the reserved memory ranges * are not filtered out. 
*/ int __init filter_memory(u64 start, u64 end, void *arg) { void (*func)(unsigned long, unsigned long, int); #if IGNORE_PFN0 if (start == PAGE_OFFSET) { printk(KERN_WARNING "warning: skipping physical page 0\n"); start += PAGE_SIZE; if (start >= end) return 0; } #endif func = arg; if (start < end) call_pernode_memory(__pa(start), end - start, func); return 0; } static void __init sort_regions (struct rsvd_region *rsvd_region, int max) { int j; /* simple bubble sorting */ while (max--) { for (j = 0; j < max; ++j) { if (rsvd_region[j].start > rsvd_region[j+1].start) { struct rsvd_region tmp; tmp = rsvd_region[j]; rsvd_region[j] = rsvd_region[j + 1]; rsvd_region[j + 1] = tmp; } } } } /* merge overlaps */ static int __init merge_regions (struct rsvd_region *rsvd_region, int max) { int i; for (i = 1; i < max; ++i) { if (rsvd_region[i].start >= rsvd_region[i-1].end) continue; if (rsvd_region[i].end > rsvd_region[i-1].end) rsvd_region[i-1].end = rsvd_region[i].end; --max; memmove(&rsvd_region[i], &rsvd_region[i+1], (max - i) * sizeof(struct rsvd_region)); } return max; } /* * Request address space for all standard resources */ static int __init register_memory(void) { code_resource.start = ia64_tpa(_text); code_resource.end = ia64_tpa(_etext) - 1; data_resource.start = ia64_tpa(_etext); data_resource.end = ia64_tpa(_edata) - 1; bss_resource.start = ia64_tpa(__bss_start); bss_resource.end = ia64_tpa(_end) - 1; efi_initialize_iomem_resources(&code_resource, &data_resource, &bss_resource); return 0; } __initcall(register_memory); #ifdef CONFIG_KEXEC /* * This function checks if the reserved crashkernel is allowed on the specific * IA64 machine flavour. Machines without an IO TLB use swiotlb and require * some memory below 4 GB (i.e. in 32 bit area), see the implementation of * lib/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that * in kdump case. See the comment in sba_init() in sba_iommu.c. 
* * So, the only machvec that really supports loading the kdump kernel * over 4 GB is "sn2". */ static int __init check_crashkernel_memory(unsigned long pbase, size_t size) { if (ia64_platform_is("sn2") || ia64_platform_is("uv")) return 1; else return pbase < (1UL << 32); } static void __init setup_crashkernel(unsigned long total, int *n) { unsigned long long base = 0, size = 0; int ret; ret = parse_crashkernel(boot_command_line, total, &size, &base); if (ret == 0 && size > 0) { if (!base) { sort_regions(rsvd_region, *n); *n = merge_regions(rsvd_region, *n); base = kdump_find_rsvd_region(size, rsvd_region, *n); } if (!check_crashkernel_memory(base, size)) { pr_warning("crashkernel: There would be kdump memory " "at %ld GB but this is unusable because it " "must\nbe below 4 GB. Change the memory " "configuration of the machine.\n", (unsigned long)(base >> 30)); return; } if (base != ~0UL) { printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " "for crashkernel (System RAM: %ldMB)\n", (unsigned long)(size >> 20), (unsigned long)(base >> 20), (unsigned long)(total >> 20)); rsvd_region[*n].start = (unsigned long)__va(base); rsvd_region[*n].end = (unsigned long)__va(base + size); (*n)++; crashk_res.start = base; crashk_res.end = base + size - 1; } } efi_memmap_res.start = ia64_boot_param->efi_memmap; efi_memmap_res.end = efi_memmap_res.start + ia64_boot_param->efi_memmap_size; boot_param_res.start = __pa(ia64_boot_param); boot_param_res.end = boot_param_res.start + sizeof(*ia64_boot_param); } #else static inline void __init setup_crashkernel(unsigned long total, int *n) {} #endif /** * reserve_memory - setup reserved memory areas * * Setup the reserved memory areas set aside for the boot parameters, * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined, * see arch/ia64/include/asm/meminit.h if you need to define more. 
*/ void __init reserve_memory (void) { int n = 0; unsigned long total_memory; /* * none of the entries in this table overlap */ rsvd_region[n].start = (unsigned long) ia64_boot_param; rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param); n++; rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap); rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size; n++; rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line); rsvd_region[n].end = (rsvd_region[n].start + strlen(__va(ia64_boot_param->command_line)) + 1); n++; rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START); rsvd_region[n].end = (unsigned long) ia64_imva(_end); n++; #ifdef CONFIG_BLK_DEV_INITRD if (ia64_boot_param->initrd_start) { rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start); rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size; n++; } #endif #ifdef CONFIG_CRASH_DUMP if (reserve_elfcorehdr(&rsvd_region[n].start, &rsvd_region[n].end) == 0) n++; #endif total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); n++; setup_crashkernel(total_memory, &n); /* end of memory marker */ rsvd_region[n].start = ~0UL; rsvd_region[n].end = ~0UL; n++; num_rsvd_regions = n; BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n); sort_regions(rsvd_region, num_rsvd_regions); num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions); } /** * find_initrd - get initrd parameters from the boot parameter structure * * Grab the initrd start and end from the boot parameter struct given us by * the boot loader. 
*/ void __init find_initrd (void) { #ifdef CONFIG_BLK_DEV_INITRD if (ia64_boot_param->initrd_start) { initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start); initrd_end = initrd_start+ia64_boot_param->initrd_size; printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n", initrd_start, ia64_boot_param->initrd_size); } #endif } static void __init io_port_init (void) { unsigned long phys_iobase; /* * Set `iobase' based on the EFI memory map or, failing that, the * value firmware left in ar.k0. * * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute * the port's virtual address, so ia32_load_state() loads it with a * user virtual address. But in ia64 mode, glibc uses the * *physical* address in ar.k0 to mmap the appropriate area from * /dev/mem, and the inX()/outX() interfaces use MMIO. In both * cases, user-mode can only use the legacy 0-64K I/O port space. * * ar.k0 is not involved in kernel I/O port accesses, which can use * any of the I/O port spaces and are done via MMIO using the * virtual mmio_base from the appropriate io_space[]. */ phys_iobase = efi_get_iobase(); if (!phys_iobase) { phys_iobase = ia64_get_kr(IA64_KR_IO_BASE); printk(KERN_INFO "No I/O port range found in EFI memory map, " "falling back to AR.KR0 (0x%lx)\n", phys_iobase); } ia64_iobase = (unsigned long) ioremap(phys_iobase, 0); ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase)); /* setup legacy IO port space */ io_space[0].mmio_base = ia64_iobase; io_space[0].sparse = 1; num_io_spaces = 1; } /** * early_console_setup - setup debugging console * * Consoles started here require little enough setup that we can start using * them very early in the boot process, either right after the machine * vector initialization, or even before if the drivers can detect their hw. * * Returns non-zero if a console couldn't be setup. 
*/ static inline int __init early_console_setup (char *cmdline) { int earlycons = 0; #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE { extern int sn_serial_console_early_setup(void); if (!sn_serial_console_early_setup()) earlycons++; } #endif #ifdef CONFIG_EFI_PCDP if (!efi_setup_pcdp_console(cmdline)) earlycons++; #endif if (!simcons_register()) earlycons++; return (earlycons) ? 0 : -1; } static inline void mark_bsp_online (void) { #ifdef CONFIG_SMP /* If we register an early console, allow CPU 0 to printk */ set_cpu_online(smp_processor_id(), true); #endif } static __initdata int nomca; static __init int setup_nomca(char *s) { nomca = 1; return 0; } early_param("nomca", setup_nomca); #ifdef CONFIG_CRASH_DUMP int __init reserve_elfcorehdr(u64 *start, u64 *end) { u64 length; /* We get the address using the kernel command line, * but the size is extracted from the EFI tables. * Both address and size are required for reservation * to work properly. */ if (!is_vmcore_usable()) return -EINVAL; if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) { vmcore_unusable(); return -EINVAL; } *start = (unsigned long)__va(elfcorehdr_addr); *end = *start + length; return 0; } #endif /* CONFIG_PROC_VMCORE */ void __init setup_arch (char **cmdline_p) { unw_init(); ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); *cmdline_p = __va(ia64_boot_param->command_line); strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); efi_init(); io_port_init(); #ifdef CONFIG_IA64_GENERIC /* machvec needs to be parsed from the command line * before parse_early_param() is called to ensure * that ia64_mv is initialised before any command line * settings may cause console setup to occur */ machvec_init_from_cmdline(*cmdline_p); #endif parse_early_param(); if (early_console_setup(*cmdline_p) == 0) mark_bsp_online(); #ifdef CONFIG_ACPI /* Initialize the ACPI boot-time table parser */ acpi_table_init(); early_acpi_boot_init(); # ifdef CONFIG_ACPI_NUMA acpi_numa_init(); # 
ifdef CONFIG_ACPI_HOTPLUG_CPU prefill_possible_map(); # endif per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ? 32 : cpumask_weight(&early_cpu_possible_map)), additional_cpus > 0 ? additional_cpus : 0); # endif #endif /* CONFIG_APCI_BOOT */ #ifdef CONFIG_SMP smp_build_cpu_map(); #endif find_memory(); /* process SAL system table: */ ia64_sal_init(__va(efi.sal_systab)); #ifdef CONFIG_ITANIUM ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist); #else { unsigned long num_phys_stacked; if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96) ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist); } #endif #ifdef CONFIG_SMP cpu_physical_id(0) = hard_smp_processor_id(); #endif cpu_init(); /* initialize the bootstrap CPU */ mmu_context_init(); /* initialize context_id bitmap */ #ifdef CONFIG_VT if (!conswitchp) { # if defined(CONFIG_DUMMY_CONSOLE) conswitchp = &dummy_con; # endif # if defined(CONFIG_VGA_CONSOLE) /* * Non-legacy systems may route legacy VGA MMIO range to system * memory. vga_con probes the MMIO hole, so memory looks like * a VGA device to it. The EFI memory map can tell us if it's * memory so we can avoid this problem. */ if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY) conswitchp = &vga_con; # endif } #endif /* enable IA-64 Machine Check Abort Handling unless disabled */ if (!nomca) ia64_mca_init(); platform_setup(cmdline_p); #ifndef CONFIG_IA64_HP_SIM check_sal_cache_flush(); #endif paging_init(); } /* * Display cpu info for all CPUs. 
*/ static int show_cpuinfo (struct seq_file *m, void *v) { #ifdef CONFIG_SMP # define lpj c->loops_per_jiffy # define cpunum c->cpu #else # define lpj loops_per_jiffy # define cpunum 0 #endif static struct { unsigned long mask; const char *feature_name; } feature_bits[] = { { 1UL << 0, "branchlong" }, { 1UL << 1, "spontaneous deferral"}, { 1UL << 2, "16-byte atomic ops" } }; char features[128], *cp, *sep; struct cpuinfo_ia64 *c = v; unsigned long mask; unsigned long proc_freq; int i, size; mask = c->features; /* build the feature string: */ memcpy(features, "standard", 9); cp = features; size = sizeof(features); sep = ""; for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) { if (mask & feature_bits[i].mask) { cp += snprintf(cp, size, "%s%s", sep, feature_bits[i].feature_name), sep = ", "; mask &= ~feature_bits[i].mask; size = sizeof(features) - (cp - features); } } if (mask && size > 1) { /* print unknown features as a hex value */ snprintf(cp, size, "%s0x%lx", sep, mask); } proc_freq = cpufreq_quick_get(cpunum); if (!proc_freq) proc_freq = c->proc_freq / 1000; seq_printf(m, "processor : %d\n" "vendor : %s\n" "arch : IA-64\n" "family : %u\n" "model : %u\n" "model name : %s\n" "revision : %u\n" "archrev : %u\n" "features : %s\n" "cpu number : %lu\n" "cpu regs : %u\n" "cpu MHz : %lu.%03lu\n" "itc MHz : %lu.%06lu\n" "BogoMIPS : %lu.%02lu\n", cpunum, c->vendor, c->family, c->model, c->model_name, c->revision, c->archrev, features, c->ppn, c->number, proc_freq / 1000, proc_freq % 1000, c->itc_freq / 1000000, c->itc_freq % 1000000, lpj*HZ/500000, (lpj*HZ/5000) % 100); #ifdef CONFIG_SMP seq_printf(m, "siblings : %u\n", cpumask_weight(&cpu_core_map[cpunum])); if (c->socket_id != -1) seq_printf(m, "physical id: %u\n", c->socket_id); if (c->threads_per_core > 1 || c->cores_per_socket > 1) seq_printf(m, "core id : %u\n" "thread id : %u\n", c->core_id, c->thread_id); #endif seq_printf(m,"\n"); return 0; } static void * c_start (struct seq_file *m, loff_t *pos) { #ifdef 
CONFIG_SMP while (*pos < nr_cpu_ids && !cpu_online(*pos)) ++*pos; #endif return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL; } static void * c_next (struct seq_file *m, void *v, loff_t *pos) { ++*pos; return c_start(m, pos); } static void c_stop (struct seq_file *m, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = show_cpuinfo }; #define MAX_BRANDS 8 static char brandname[MAX_BRANDS][128]; static char * get_model_name(__u8 family, __u8 model) { static int overflow; char brand[128]; int i; memcpy(brand, "Unknown", 8); if (ia64_pal_get_brand_info(brand)) { if (family == 0x7) memcpy(brand, "Merced", 7); else if (family == 0x1f) switch (model) { case 0: memcpy(brand, "McKinley", 9); break; case 1: memcpy(brand, "Madison", 8); break; case 2: memcpy(brand, "Madison up to 9M cache", 23); break; } } for (i = 0; i < MAX_BRANDS; i++) if (strcmp(brandname[i], brand) == 0) return brandname[i]; for (i = 0; i < MAX_BRANDS; i++) if (brandname[i][0] == '\0') return strcpy(brandname[i], brand); if (overflow++ == 0) printk(KERN_ERR "%s: Table overflow. 
Some processor model information will be missing\n", __func__);
	return "Unknown";
}

/*
 * identify_cpu - fill in a struct cpuinfo_ia64 from CPUID and PAL.
 *
 * Reads the five architected CPUID registers and unpacks them into the
 * per-CPU info structure (vendor string, serial number, number/revision/
 * model/family/archrev fields and the feature bits).  Also queries
 * PAL_VM_SUMMARY to compute the unimplemented virtual/physical address
 * masks; if the PAL call fails, architected Itanium defaults
 * (impl_va_msb=50, phys_addr_size=44) are used instead.
 */
static void
identify_cpu (struct cpuinfo_ia64 *c)
{
	/* overlay giving named access to the raw CPUID register block */
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* below default values will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);

	if (c->threads_per_core > smp_num_siblings)
		smp_num_siblings = c->threads_per_core;
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	/* bits 61-62 plus everything above the implemented VA range */
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}

/*
 * Do the following calculations:
 *
 * 1. the max. cache line size.
 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
 * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
*/
/*
 * get_cache_info - derive global cache-flush parameters from PAL.
 *
 * Walks every cache level reported by PAL_CACHE_SUMMARY and records:
 *  - ia64_max_cacheline_size: largest line size seen across all levels,
 *  - ia64_i_cache_stride_shift: minimum i-cache stride, used by
 *    flush_icache_range(),
 *  - ia64_cache_stride_shift: minimum data/unified stride, used by
 *    clflush_cache_range().
 * On any PAL failure the safest architected defaults are substituted.
 */
static void
get_cache_info(void)
{
	unsigned long line_size, max = 1;
	unsigned long l, levels, unique_caches;
	pal_cache_config_info_t cci;
	long status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		/* Safest setup for "clflush_cache_range()" */
		ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		/* cache_type (data_or_unified)=2 */
		status = ia64_pal_cache_config_info(l, 2, &cci);
		if (status != 0) {
			printk(KERN_ERR "%s: ia64_pal_cache_config_info"
			       "(l=%lu, 2) failed (status=%ld)\n",
			       __func__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			/* The safest setup for "clflush_cache_range()" */
			ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
			/* setting pcci_unified skips the i-cache query below,
			 * so the default stride feeds the final min-update */
			cci.pcci_unified = 1;
		} else {
			if (cci.pcci_stride < ia64_cache_stride_shift)
				ia64_cache_stride_shift = cci.pcci_stride;
			line_size = 1 << cci.pcci_line_size;
			if (line_size > max)
				max = line_size;
		}

		if (!cci.pcci_unified) {
			/* cache_type (instruction)=1*/
			status = ia64_pal_cache_config_info(l, 1, &cci);
			if (status != 0) {
				printk(KERN_ERR "%s: ia64_pal_cache_config_info"
				       "(l=%lu, 1) failed (status=%ld)\n",
				       __func__, l, status);
				/* The safest setup for flush_icache_range() */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		/* i-cache stride: min over unified and instruction caches */
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}

/*
 * cpu_init() initializes state that is per-CPU. This function acts
 * as a 'CPU state barrier', nothing should get across.
*/ void cpu_init (void) { extern void ia64_mmu_init(void *); static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG; unsigned long num_phys_stacked; pal_vm_info_2_u_t vmi; unsigned int max_ctx; struct cpuinfo_ia64 *cpu_info; void *cpu_data; cpu_data = per_cpu_init(); #ifdef CONFIG_SMP /* * insert boot cpu into sibling and core mapes * (must be done after per_cpu area is setup) */ if (smp_processor_id() == 0) { cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0)); cpumask_set_cpu(0, &cpu_core_map[0]); } else { /* * Set ar.k3 so that assembly code in MCA handler can compute * physical addresses of per cpu variables with a simple: * phys = ar.k3 + &per_cpu_var * and the alt-dtlb-miss handler can set per-cpu mapping into * the TLB when needed. head.S already did this for cpu0. */ ia64_set_kr(IA64_KR_PER_CPU_DATA, ia64_tpa(cpu_data) - (long) __per_cpu_start); } #endif get_cache_info(); /* * We can't pass "local_cpu_data" to identify_cpu() because we haven't called * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it * depends on the data returned by identify_cpu(). We break the dependency by * accessing cpu_data() through the canonical per-CPU address. */ cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start); identify_cpu(cpu_info); #ifdef CONFIG_MCKINLEY { # define FEATURE_SET 16 struct ia64_pal_retval iprv; if (cpu_info->family == 0x1f) { PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0); if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80)) PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, (iprv.v1 | 0x80), FEATURE_SET, 0); } } #endif /* Clear the stack memory reserved for pt_regs: */ memset(task_pt_regs(current), 0, sizeof(struct pt_regs)); ia64_set_kr(IA64_KR_FPU_OWNER, 0); /* * Initialize the page-table base register to a global * directory with all zeroes. This ensure that we can handle * TLB-misses to user address-space even before we created the * first user address-space. 
This may happen, e.g., due to * aggressive use of lfetch.fault. */ ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page))); /* * Initialize default control register to defer speculative faults except * for those arising from TLB misses, which are not deferred. The * kernel MUST NOT depend on a particular setting of these bits (in other words, * the kernel must have recovery code for all speculative accesses). Turn on * dcr.lc as per recommendation by the architecture team. Most IA-32 apps * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll * be fine). */ ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; BUG_ON(current->mm); ia64_mmu_init(ia64_imva(cpu_data)); ia64_mca_cpu_init(ia64_imva(cpu_data)); /* Clear ITC to eliminate sched_clock() overflows in human time. */ ia64_set_itc(0); /* disable all local interrupt sources: */ ia64_set_itv(1 << 16); ia64_set_lrr0(1 << 16); ia64_set_lrr1(1 << 16); ia64_setreg(_IA64_REG_CR_PMV, 1 << 16); ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16); /* clear TPR & XTP to enable all interrupt classes: */ ia64_setreg(_IA64_REG_CR_TPR, 0); /* Clear any pending interrupts left by SAL/EFI */ while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR) ia64_eoi(); #ifdef CONFIG_SMP normal_xtp(); #endif /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */ if (ia64_pal_vm_summary(NULL, &vmi) == 0) { max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1; setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL); } else { printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n"); max_ctx = (1U << 15) - 1; /* use architected minimum */ } while (max_ctx < ia64_ctx.max_ctx) { unsigned int old = ia64_ctx.max_ctx; if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old) break; } if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) { 
printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical " "stacked regs\n"); num_phys_stacked = 96; } /* size of physical stacked register partition plus 8 bytes: */ if (num_phys_stacked > max_num_phys_stacked) { ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8); max_num_phys_stacked = num_phys_stacked; } platform_cpu_init(); } void __init check_bugs (void) { ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, (unsigned long) __end___mckinley_e9_bundles); } static int __init run_dmi_scan(void) { dmi_scan_machine(); dmi_memdev_walk(); dmi_set_dump_stack_arch_desc(); return 0; } core_initcall(run_dmi_scan);
gpl-2.0
dtsinc/DTS-Sound-Integration_CAF-Android-kernel
drivers/gud/MobiCoreDriver/api.c
590
2849
/* * Copyright (c) 2013 TRUSTONIC LIMITED * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include "main.h" #include "mem.h" #include "debug.h" int mobicore_map_vmem(struct mc_instance *instance, void *addr, uint32_t len, uint32_t *handle) { phys_addr_t phys; return mc_register_wsm_mmu(instance, addr, len, handle, &phys); } EXPORT_SYMBOL(mobicore_map_vmem); /* * Unmap a virtual memory buffer from mobicore * @param instance * @param handle * * @return 0 if no error * */ int mobicore_unmap_vmem(struct mc_instance *instance, uint32_t handle) { return mc_unregister_wsm_mmu(instance, handle); } EXPORT_SYMBOL(mobicore_unmap_vmem); /* * Free a WSM buffer allocated with mobicore_allocate_wsm * @param instance * @param handle handle of the buffer * * @return 0 if no error * */ int mobicore_free_wsm(struct mc_instance *instance, uint32_t handle) { return mc_free_buffer(instance, handle); } EXPORT_SYMBOL(mobicore_free_wsm); /* * Allocate WSM for given instance * * @param instance instance * @param requested_size size of the WSM * @param handle pointer where the handle will be saved * @param virt_kernel_addr pointer for the kernel virtual address * * @return error code or 0 for success */ int mobicore_allocate_wsm(struct mc_instance *instance, unsigned long requested_size, uint32_t *handle, void **virt_kernel_addr) { struct mc_buffer *buffer = NULL; /* Setup the WSM buffer structure! 
*/ if (mc_get_buffer(instance, &buffer, requested_size)) return -EFAULT; *handle = buffer->handle; *virt_kernel_addr = buffer->addr; return 0; } EXPORT_SYMBOL(mobicore_allocate_wsm); /* * Initialize a new mobicore API instance object * * @return Instance or NULL if no allocation was possible. */ struct mc_instance *mobicore_open(void) { struct mc_instance *instance = mc_alloc_instance(); if(instance) { instance->admin = true; } return instance; } EXPORT_SYMBOL(mobicore_open); /* * Release a mobicore instance object and all objects related to it * @param instance instance * @return 0 if Ok or -E ERROR */ int mobicore_release(struct mc_instance *instance) { return mc_release_instance(instance); } EXPORT_SYMBOL(mobicore_release); /* * Test if mobicore can sleep * * @return true if mobicore can sleep, false if it can't sleep */ bool mobicore_sleep_ready(void) { return mc_sleep_ready(); } EXPORT_SYMBOL(mobicore_sleep_ready);
gpl-2.0
akuster/linux-stable
drivers/tty/synclinkmp.c
846
150498
/* * $Id: synclinkmp.c,v 4.38 2005/07/15 13:29:44 paulkf Exp $ * * Device driver for Microgate SyncLink Multiport * high speed multiprotocol serial adapter. * * written by Paul Fulghum for Microgate Corporation * paulkf@microgate.com * * Microgate and SyncLink are trademarks of Microgate Corporation * * Derived from serial.c written by Theodore Ts'o and Linus Torvalds * This code is released under the GNU General Public License (GPL) * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #define VERSION(ver,rel,seq) (((ver)<<16) | ((rel)<<8) | (seq)) #if defined(__i386__) # define BREAKPOINT() asm(" int $3"); #else # define BREAKPOINT() { } #endif #define MAX_DEVICES 12 #include <linux/module.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial.h> #include <linux/major.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/ioctl.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/dma.h> #include <linux/bitops.h> #include <asm/types.h> #include <linux/termios.h> #include <linux/workqueue.h> #include <linux/hdlc.h> #include <linux/synclink.h> #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINKMP_MODULE)) #define SYNCLINK_GENERIC_HDLC 1 #else #define SYNCLINK_GENERIC_HDLC 0 #endif #define GET_USER(error,value,addr) error = get_user(value,addr) #define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0 #define PUT_USER(error,value,addr) error = put_user(value,addr) #define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? 
-EFAULT : 0 #include <asm/uaccess.h> static MGSL_PARAMS default_params = { MGSL_MODE_HDLC, /* unsigned long mode */ 0, /* unsigned char loopback; */ HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */ HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */ 0, /* unsigned long clock_speed; */ 0xff, /* unsigned char addr_filter; */ HDLC_CRC_16_CCITT, /* unsigned short crc_type; */ HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */ HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */ 9600, /* unsigned long data_rate; */ 8, /* unsigned char data_bits; */ 1, /* unsigned char stop_bits; */ ASYNC_PARITY_NONE /* unsigned char parity; */ }; /* size in bytes of DMA data buffers */ #define SCABUFSIZE 1024 #define SCA_MEM_SIZE 0x40000 #define SCA_BASE_SIZE 512 #define SCA_REG_SIZE 16 #define SCA_MAX_PORTS 4 #define SCAMAXDESC 128 #define BUFFERLISTSIZE 4096 /* SCA-I style DMA buffer descriptor */ typedef struct _SCADESC { u16 next; /* lower l6 bits of next descriptor addr */ u16 buf_ptr; /* lower 16 bits of buffer addr */ u8 buf_base; /* upper 8 bits of buffer addr */ u8 pad1; u16 length; /* length of buffer */ u8 status; /* status of buffer */ u8 pad2; } SCADESC, *PSCADESC; typedef struct _SCADESC_EX { /* device driver bookkeeping section */ char *virt_addr; /* virtual address of data buffer */ u16 phys_entry; /* lower 16-bits of physical address of this descriptor */ } SCADESC_EX, *PSCADESC_EX; /* The queue of BH actions to be performed */ #define BH_RECEIVE 1 #define BH_TRANSMIT 2 #define BH_STATUS 4 #define IO_PIN_SHUTDOWN_LIMIT 100 struct _input_signal_events { int ri_up; int ri_down; int dsr_up; int dsr_down; int dcd_up; int dcd_down; int cts_up; int cts_down; }; /* * Device instance data structure */ typedef struct _synclinkmp_info { void *if_ptr; /* General purpose pointer (used by SPPP) */ int magic; struct tty_port port; int line; unsigned short close_delay; unsigned short closing_wait; /* time to wait before closing */ struct mgsl_icount 
icount; int timeout; int x_char; /* xon/xoff character */ u16 read_status_mask1; /* break detection (SR1 indications) */ u16 read_status_mask2; /* parity/framing/overun (SR2 indications) */ unsigned char ignore_status_mask1; /* break detection (SR1 indications) */ unsigned char ignore_status_mask2; /* parity/framing/overun (SR2 indications) */ unsigned char *tx_buf; int tx_put; int tx_get; int tx_count; wait_queue_head_t status_event_wait_q; wait_queue_head_t event_wait_q; struct timer_list tx_timer; /* HDLC transmit timeout timer */ struct _synclinkmp_info *next_device; /* device list link */ struct timer_list status_timer; /* input signal status check timer */ spinlock_t lock; /* spinlock for synchronizing with ISR */ struct work_struct task; /* task structure for scheduling bh */ u32 max_frame_size; /* as set by device config */ u32 pending_bh; bool bh_running; /* Protection from multiple */ int isr_overflow; bool bh_requested; int dcd_chkcount; /* check counts to prevent */ int cts_chkcount; /* too many IRQs if a signal */ int dsr_chkcount; /* is floating */ int ri_chkcount; char *buffer_list; /* virtual address of Rx & Tx buffer lists */ unsigned long buffer_list_phys; unsigned int rx_buf_count; /* count of total allocated Rx buffers */ SCADESC *rx_buf_list; /* list of receive buffer entries */ SCADESC_EX rx_buf_list_ex[SCAMAXDESC]; /* list of receive buffer entries */ unsigned int current_rx_buf; unsigned int tx_buf_count; /* count of total allocated Tx buffers */ SCADESC *tx_buf_list; /* list of transmit buffer entries */ SCADESC_EX tx_buf_list_ex[SCAMAXDESC]; /* list of transmit buffer entries */ unsigned int last_tx_buf; unsigned char *tmp_rx_buf; unsigned int tmp_rx_buf_count; bool rx_enabled; bool rx_overflow; bool tx_enabled; bool tx_active; u32 idle_mode; unsigned char ie0_value; unsigned char ie1_value; unsigned char ie2_value; unsigned char ctrlreg_value; unsigned char old_signals; char device_name[25]; /* device instance name */ int port_count; int 
adapter_num; int port_num; struct _synclinkmp_info *port_array[SCA_MAX_PORTS]; unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */ unsigned int irq_level; /* interrupt level */ unsigned long irq_flags; bool irq_requested; /* true if IRQ requested */ MGSL_PARAMS params; /* communications parameters */ unsigned char serial_signals; /* current serial signal states */ bool irq_occurred; /* for diagnostics use */ unsigned int init_error; /* Initialization startup error */ u32 last_mem_alloc; unsigned char* memory_base; /* shared memory address (PCI only) */ u32 phys_memory_base; int shared_mem_requested; unsigned char* sca_base; /* HD64570 SCA Memory address */ u32 phys_sca_base; u32 sca_offset; bool sca_base_requested; unsigned char* lcr_base; /* local config registers (PCI only) */ u32 phys_lcr_base; u32 lcr_offset; int lcr_mem_requested; unsigned char* statctrl_base; /* status/control register memory */ u32 phys_statctrl_base; u32 statctrl_offset; bool sca_statctrl_requested; u32 misc_ctrl_value; char *flag_buf; bool drop_rts_on_tx_done; struct _input_signal_events input_signal_events; /* SPPP/Cisco HDLC device parts */ int netcount; spinlock_t netlock; #if SYNCLINK_GENERIC_HDLC struct net_device *netdev; #endif } SLMP_INFO; #define MGSL_MAGIC 0x5401 /* * define serial signal status change macros */ #define MISCSTATUS_DCD_LATCHED (SerialSignal_DCD<<8) /* indicates change in DCD */ #define MISCSTATUS_RI_LATCHED (SerialSignal_RI<<8) /* indicates change in RI */ #define MISCSTATUS_CTS_LATCHED (SerialSignal_CTS<<8) /* indicates change in CTS */ #define MISCSTATUS_DSR_LATCHED (SerialSignal_DSR<<8) /* change in DSR */ /* Common Register macros */ #define LPR 0x00 #define PABR0 0x02 #define PABR1 0x03 #define WCRL 0x04 #define WCRM 0x05 #define WCRH 0x06 #define DPCR 0x08 #define DMER 0x09 #define ISR0 0x10 #define ISR1 0x11 #define ISR2 0x12 #define IER0 0x14 #define IER1 0x15 #define IER2 0x16 #define ITCR 0x18 #define INTVR 0x1a #define IMVR 0x1c /* MSCI 
Register macros */ #define TRB 0x20 #define TRBL 0x20 #define TRBH 0x21 #define SR0 0x22 #define SR1 0x23 #define SR2 0x24 #define SR3 0x25 #define FST 0x26 #define IE0 0x28 #define IE1 0x29 #define IE2 0x2a #define FIE 0x2b #define CMD 0x2c #define MD0 0x2e #define MD1 0x2f #define MD2 0x30 #define CTL 0x31 #define SA0 0x32 #define SA1 0x33 #define IDL 0x34 #define TMC 0x35 #define RXS 0x36 #define TXS 0x37 #define TRC0 0x38 #define TRC1 0x39 #define RRC 0x3a #define CST0 0x3c #define CST1 0x3d /* Timer Register Macros */ #define TCNT 0x60 #define TCNTL 0x60 #define TCNTH 0x61 #define TCONR 0x62 #define TCONRL 0x62 #define TCONRH 0x63 #define TMCS 0x64 #define TEPR 0x65 /* DMA Controller Register macros */ #define DARL 0x80 #define DARH 0x81 #define DARB 0x82 #define BAR 0x80 #define BARL 0x80 #define BARH 0x81 #define BARB 0x82 #define SAR 0x84 #define SARL 0x84 #define SARH 0x85 #define SARB 0x86 #define CPB 0x86 #define CDA 0x88 #define CDAL 0x88 #define CDAH 0x89 #define EDA 0x8a #define EDAL 0x8a #define EDAH 0x8b #define BFL 0x8c #define BFLL 0x8c #define BFLH 0x8d #define BCR 0x8e #define BCRL 0x8e #define BCRH 0x8f #define DSR 0x90 #define DMR 0x91 #define FCT 0x93 #define DIR 0x94 #define DCMD 0x95 /* combine with timer or DMA register address */ #define TIMER0 0x00 #define TIMER1 0x08 #define TIMER2 0x10 #define TIMER3 0x18 #define RXDMA 0x00 #define TXDMA 0x20 /* SCA Command Codes */ #define NOOP 0x00 #define TXRESET 0x01 #define TXENABLE 0x02 #define TXDISABLE 0x03 #define TXCRCINIT 0x04 #define TXCRCEXCL 0x05 #define TXEOM 0x06 #define TXABORT 0x07 #define MPON 0x08 #define TXBUFCLR 0x09 #define RXRESET 0x11 #define RXENABLE 0x12 #define RXDISABLE 0x13 #define RXCRCINIT 0x14 #define RXREJECT 0x15 #define SEARCHMP 0x16 #define RXCRCEXCL 0x17 #define RXCRCCALC 0x18 #define CHRESET 0x21 #define HUNT 0x31 /* DMA command codes */ #define SWABORT 0x01 #define FEICLEAR 0x02 /* IE0 */ #define TXINTE BIT7 #define RXINTE BIT6 #define TXRDYE BIT1 #define RXRDYE 
BIT0 /* IE1 & SR1 */ #define UDRN BIT7 #define IDLE BIT6 #define SYNCD BIT4 #define FLGD BIT4 #define CCTS BIT3 #define CDCD BIT2 #define BRKD BIT1 #define ABTD BIT1 #define GAPD BIT1 #define BRKE BIT0 #define IDLD BIT0 /* IE2 & SR2 */ #define EOM BIT7 #define PMP BIT6 #define SHRT BIT6 #define PE BIT5 #define ABT BIT5 #define FRME BIT4 #define RBIT BIT4 #define OVRN BIT3 #define CRCE BIT2 /* * Global linked list of SyncLink devices */ static SLMP_INFO *synclinkmp_device_list = NULL; static int synclinkmp_adapter_count = -1; static int synclinkmp_device_count = 0; /* * Set this param to non-zero to load eax with the * .text section address and breakpoint on module load. * This is useful for use with gdb and add-symbol-file command. */ static bool break_on_load = 0; /* * Driver major number, defaults to zero to get auto * assigned major number. May be forced as module parameter. */ static int ttymajor = 0; /* * Array of user specified options for ISA adapters. */ static int debug_level = 0; static int maxframe[MAX_DEVICES] = {0,}; module_param(break_on_load, bool, 0); module_param(ttymajor, int, 0); module_param(debug_level, int, 0); module_param_array(maxframe, int, NULL, 0); static char *driver_name = "SyncLink MultiPort driver"; static char *driver_version = "$Revision: 4.38 $"; static int synclinkmp_init_one(struct pci_dev *dev,const struct pci_device_id *ent); static void synclinkmp_remove_one(struct pci_dev *dev); static struct pci_device_id synclinkmp_pci_tbl[] = { { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_SCA, PCI_ANY_ID, PCI_ANY_ID, }, { 0, }, /* terminate list */ }; MODULE_DEVICE_TABLE(pci, synclinkmp_pci_tbl); MODULE_LICENSE("GPL"); static struct pci_driver synclinkmp_pci_driver = { .name = "synclinkmp", .id_table = synclinkmp_pci_tbl, .probe = synclinkmp_init_one, .remove = synclinkmp_remove_one, }; static struct tty_driver *serial_driver; /* number of characters left in xmit buffer before we ask for more */ #define WAKEUP_CHARS 256 /* tty 
callbacks */ static int open(struct tty_struct *tty, struct file * filp); static void close(struct tty_struct *tty, struct file * filp); static void hangup(struct tty_struct *tty); static void set_termios(struct tty_struct *tty, struct ktermios *old_termios); static int write(struct tty_struct *tty, const unsigned char *buf, int count); static int put_char(struct tty_struct *tty, unsigned char ch); static void send_xchar(struct tty_struct *tty, char ch); static void wait_until_sent(struct tty_struct *tty, int timeout); static int write_room(struct tty_struct *tty); static void flush_chars(struct tty_struct *tty); static void flush_buffer(struct tty_struct *tty); static void tx_hold(struct tty_struct *tty); static void tx_release(struct tty_struct *tty); static int ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); static int chars_in_buffer(struct tty_struct *tty); static void throttle(struct tty_struct * tty); static void unthrottle(struct tty_struct * tty); static int set_break(struct tty_struct *tty, int break_state); #if SYNCLINK_GENERIC_HDLC #define dev_to_port(D) (dev_to_hdlc(D)->priv) static void hdlcdev_tx_done(SLMP_INFO *info); static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size); static int hdlcdev_init(SLMP_INFO *info); static void hdlcdev_exit(SLMP_INFO *info); #endif /* ioctl handlers */ static int get_stats(SLMP_INFO *info, struct mgsl_icount __user *user_icount); static int get_params(SLMP_INFO *info, MGSL_PARAMS __user *params); static int set_params(SLMP_INFO *info, MGSL_PARAMS __user *params); static int get_txidle(SLMP_INFO *info, int __user *idle_mode); static int set_txidle(SLMP_INFO *info, int idle_mode); static int tx_enable(SLMP_INFO *info, int enable); static int tx_abort(SLMP_INFO *info); static int rx_enable(SLMP_INFO *info, int enable); static int modem_input_wait(SLMP_INFO *info,int arg); static int wait_mgsl_event(SLMP_INFO *info, int __user *mask_ptr); static int tiocmget(struct tty_struct *tty); static int 
tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static int set_break(struct tty_struct *tty, int break_state); static void add_device(SLMP_INFO *info); static void device_init(int adapter_num, struct pci_dev *pdev); static int claim_resources(SLMP_INFO *info); static void release_resources(SLMP_INFO *info); static int startup(SLMP_INFO *info); static int block_til_ready(struct tty_struct *tty, struct file * filp,SLMP_INFO *info); static int carrier_raised(struct tty_port *port); static void shutdown(SLMP_INFO *info); static void program_hw(SLMP_INFO *info); static void change_params(SLMP_INFO *info); static bool init_adapter(SLMP_INFO *info); static bool register_test(SLMP_INFO *info); static bool irq_test(SLMP_INFO *info); static bool loopback_test(SLMP_INFO *info); static int adapter_test(SLMP_INFO *info); static bool memory_test(SLMP_INFO *info); static void reset_adapter(SLMP_INFO *info); static void reset_port(SLMP_INFO *info); static void async_mode(SLMP_INFO *info); static void hdlc_mode(SLMP_INFO *info); static void rx_stop(SLMP_INFO *info); static void rx_start(SLMP_INFO *info); static void rx_reset_buffers(SLMP_INFO *info); static void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last); static bool rx_get_frame(SLMP_INFO *info); static void tx_start(SLMP_INFO *info); static void tx_stop(SLMP_INFO *info); static void tx_load_fifo(SLMP_INFO *info); static void tx_set_idle(SLMP_INFO *info); static void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count); static void get_signals(SLMP_INFO *info); static void set_signals(SLMP_INFO *info); static void enable_loopback(SLMP_INFO *info, int enable); static void set_rate(SLMP_INFO *info, u32 data_rate); static int bh_action(SLMP_INFO *info); static void bh_handler(struct work_struct *work); static void bh_receive(SLMP_INFO *info); static void bh_transmit(SLMP_INFO *info); static void bh_status(SLMP_INFO *info); static void isr_timer(SLMP_INFO 
*info); static void isr_rxint(SLMP_INFO *info); static void isr_rxrdy(SLMP_INFO *info); static void isr_txint(SLMP_INFO *info); static void isr_txrdy(SLMP_INFO *info); static void isr_rxdmaok(SLMP_INFO *info); static void isr_rxdmaerror(SLMP_INFO *info); static void isr_txdmaok(SLMP_INFO *info); static void isr_txdmaerror(SLMP_INFO *info); static void isr_io_pin(SLMP_INFO *info, u16 status); static int alloc_dma_bufs(SLMP_INFO *info); static void free_dma_bufs(SLMP_INFO *info); static int alloc_buf_list(SLMP_INFO *info); static int alloc_frame_bufs(SLMP_INFO *info, SCADESC *list, SCADESC_EX *list_ex,int count); static int alloc_tmp_rx_buf(SLMP_INFO *info); static void free_tmp_rx_buf(SLMP_INFO *info); static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count); static void trace_block(SLMP_INFO *info, const char* data, int count, int xmit); static void tx_timeout(unsigned long context); static void status_timeout(unsigned long context); static unsigned char read_reg(SLMP_INFO *info, unsigned char addr); static void write_reg(SLMP_INFO *info, unsigned char addr, unsigned char val); static u16 read_reg16(SLMP_INFO *info, unsigned char addr); static void write_reg16(SLMP_INFO *info, unsigned char addr, u16 val); static unsigned char read_status_reg(SLMP_INFO * info); static void write_control_reg(SLMP_INFO * info); static unsigned char rx_active_fifo_level = 16; // rx request FIFO activation level in bytes static unsigned char tx_active_fifo_level = 16; // tx request FIFO activation level in bytes static unsigned char tx_negate_fifo_level = 32; // tx request FIFO negation level in bytes static u32 misc_ctrl_value = 0x007e4040; static u32 lcr1_brdr_value = 0x00800028; static u32 read_ahead_count = 8; /* DPCR, DMA Priority Control * * 07..05 Not used, must be 0 * 04 BRC, bus release condition: 0=all transfers complete * 1=release after 1 xfer on all channels * 03 CCC, channel change condition: 0=every cycle * 1=after each channel 
completes all xfers * 02..00 PR<2..0>, priority 100=round robin * * 00000100 = 0x00 */ static unsigned char dma_priority = 0x04; // Number of bytes that can be written to shared RAM // in a single write operation static u32 sca_pci_load_interval = 64; /* * 1st function defined in .text section. Calling this function in * init_module() followed by a breakpoint allows a remote debugger * (gdb) to get the .text address for the add-symbol-file command. * This allows remote debugging of dynamically loadable modules. */ static void* synclinkmp_get_text_ptr(void); static void* synclinkmp_get_text_ptr(void) {return synclinkmp_get_text_ptr;} static inline int sanity_check(SLMP_INFO *info, char *name, const char *routine) { #ifdef SANITY_CHECK static const char *badmagic = "Warning: bad magic number for synclinkmp_struct (%s) in %s\n"; static const char *badinfo = "Warning: null synclinkmp_struct for (%s) in %s\n"; if (!info) { printk(badinfo, name, routine); return 1; } if (info->magic != MGSL_MAGIC) { printk(badmagic, name, routine); return 1; } #else if (!info) return 1; #endif return 0; } /** * line discipline callback wrappers * * The wrappers maintain line discipline references * while calling into the line discipline. 
* * ldisc_receive_buf - pass receive data to line discipline */

/*
 * Hand a block of received characters to the tty's line discipline.
 * Holds a line-discipline reference for the duration of the call;
 * tolerates a NULL tty and a discipline lacking a receive_buf op.
 */
static void ldisc_receive_buf(struct tty_struct *tty,
			      const __u8 *data, char *flags, int count)
{
	struct tty_ldisc *ld;
	if (!tty)
		return;
	ld = tty_ldisc_ref(tty);
	if (ld) {
		if (ld->ops->receive_buf)
			ld->ops->receive_buf(tty, data, flags, count);
		tty_ldisc_deref(ld);
	}
}

/* tty callbacks */

/*
 * install - tty_operations install hook: bind a tty to its device data.
 *
 * Looks up the SLMP_INFO for tty->index in the driver's global device
 * list, validates it (sanity_check magic test, init_error), stores it in
 * tty->driver_data and completes the tty_port install.  Returns -ENODEV
 * for an out-of-range line number or a bad/unallocated device.
 */
static int install(struct tty_driver *driver, struct tty_struct *tty)
{
	SLMP_INFO *info;
	int line = tty->index;

	if (line >= synclinkmp_device_count) {
		printk("%s(%d): open with invalid line #%d.\n",
			__FILE__,__LINE__,line);
		return -ENODEV;
	}

	/* linear search of the singly linked device list */
	info = synclinkmp_device_list;
	while (info && info->line != line)
		info = info->next_device;
	if (sanity_check(info, tty->name, "open"))
		return -ENODEV;
	if (info->init_error) {
		printk("%s(%d):%s device is not allocated, init error=%d\n",
			__FILE__, __LINE__, info->device_name,
			info->init_error);
		return -ENODEV;
	}

	tty->driver_data = info;

	return tty_port_install(&info->port, driver, tty);
}

/* Called when a port is opened.  Init and enable port.
 */
static int open(struct tty_struct *tty, struct file *filp)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;
	int retval;

	info->port.tty = tty;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s open(), old ref count = %d\n",
			 __FILE__,__LINE__,tty->driver->name, info->port.count);

	/* If port is closing, signal caller to try again */
	if (info->port.flags & ASYNC_CLOSING){
		wait_event_interruptible_tty(tty, info->port.close_wait,
				     !(info->port.flags & ASYNC_CLOSING));
		retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
			-EAGAIN : -ERESTARTSYS);
		goto cleanup;
	}

	info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ?
1 : 0; spin_lock_irqsave(&info->netlock, flags); if (info->netcount) { retval = -EBUSY; spin_unlock_irqrestore(&info->netlock, flags); goto cleanup; } info->port.count++; spin_unlock_irqrestore(&info->netlock, flags); if (info->port.count == 1) { /* 1st open on this device, init hardware */ retval = startup(info); if (retval < 0) goto cleanup; } retval = block_til_ready(tty, filp, info); if (retval) { if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s block_til_ready() returned %d\n", __FILE__,__LINE__, info->device_name, retval); goto cleanup; } if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s open() success\n", __FILE__,__LINE__, info->device_name); retval = 0; cleanup: if (retval) { if (tty->count == 1) info->port.tty = NULL; /* tty layer will release tty struct */ if(info->port.count) info->port.count--; } return retval; } /* Called when port is closed. Wait for remaining data to be * sent. Disable port and free resources. */ static void close(struct tty_struct *tty, struct file *filp) { SLMP_INFO * info = tty->driver_data; if (sanity_check(info, tty->name, "close")) return; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s close() entry, count=%d\n", __FILE__,__LINE__, info->device_name, info->port.count); if (tty_port_close_start(&info->port, tty, filp) == 0) goto cleanup; mutex_lock(&info->port.mutex); if (info->port.flags & ASYNC_INITIALIZED) wait_until_sent(tty, info->timeout); flush_buffer(tty); tty_ldisc_flush(tty); shutdown(info); mutex_unlock(&info->port.mutex); tty_port_close_end(&info->port, tty); info->port.tty = NULL; cleanup: if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__, tty->driver->name, info->port.count); } /* Called by tty_hangup() when a hangup is signaled. * This is the same as closing all open descriptors for the port. 
 */
static void hangup(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s hangup()\n",
			 __FILE__,__LINE__, info->device_name );

	if (sanity_check(info, tty->name, "hangup"))
		return;

	mutex_lock(&info->port.mutex);
	flush_buffer(tty);
	shutdown(info);

	/* drop every reference and wake blocked openers */
	spin_lock_irqsave(&info->port.lock, flags);
	info->port.count = 0;
	info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
	info->port.tty = NULL;
	spin_unlock_irqrestore(&info->port.lock, flags);
	mutex_unlock(&info->port.mutex);

	wake_up_interruptible(&info->port.open_wait);
}

/* Set new termios settings
 */
static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s set_termios()\n", __FILE__,__LINE__,
			tty->driver->name );

	change_params(info);

	/* Handle transition to B0 status: drop DTR and RTS */
	if (old_termios->c_cflag & CBAUD &&
	    !(tty->termios.c_cflag & CBAUD)) {
		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
		spin_lock_irqsave(&info->lock,flags);
	 	set_signals(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}

	/* Handle transition away from B0 status: reassert DTR (and RTS
	 * unless hardware flow control is holding the line throttled) */
	if (!(old_termios->c_cflag & CBAUD) &&
	    tty->termios.c_cflag & CBAUD) {
		info->serial_signals |= SerialSignal_DTR;
 		if (!(tty->termios.c_cflag & CRTSCTS) ||
 		    !test_bit(TTY_THROTTLED, &tty->flags)) {
			info->serial_signals |= SerialSignal_RTS;
 		}
		spin_lock_irqsave(&info->lock,flags);
	 	set_signals(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}

	/* Handle turning off CRTSCTS */
	if (old_termios->c_cflag & CRTSCTS &&
	    !(tty->termios.c_cflag & CRTSCTS)) {
		tty->hw_stopped = 0;
		tx_release(tty);
	}
}

/* Send a block of data
 *
 * Arguments:
 *
 * 	tty	pointer to tty information structure
 * 	buf	pointer to buffer containing send data
 * 	count	size of send data in bytes
 *
 * Return Value:	number of characters written
 */
static int write(struct tty_struct *tty,
		 const unsigned char *buf, int count)
{
	int c, ret = 0;
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s write() count=%d\n",
		       __FILE__,__LINE__,info->device_name,count);

	if (sanity_check(info, tty->name, "write"))
		goto cleanup;

	if (!info->tx_buf)
		goto cleanup;

	if (info->params.mode == MGSL_MODE_HDLC) {
		/* HDLC: one frame per write(); reject oversized frames
		 * and refuse new data while a frame is transmitting */
		if (count > info->max_frame_size) {
			ret = -EIO;
			goto cleanup;
		}
		if (info->tx_active)
			goto cleanup;
		if (info->tx_count) {
			/* send accumulated data from send_char() calls */
			/* as frame and wait before accepting more data. */
			tx_load_dma_buffer(info, info->tx_buf, info->tx_count);
			goto start;
		}
		ret = info->tx_count = count;
		tx_load_dma_buffer(info, buf, count);
		goto start;
	}

	/* async: copy into the circular tx_buf, bounded both by free
	 * space and by distance to the buffer wrap point */
	for (;;) {
		c = min_t(int, count,
			min(info->max_frame_size - info->tx_count - 1,
			    info->max_frame_size - info->tx_put));
		if (c <= 0)
			break;
			
		memcpy(info->tx_buf + info->tx_put, buf, c);

		spin_lock_irqsave(&info->lock,flags);
		info->tx_put += c;
		if (info->tx_put >= info->max_frame_size)
			info->tx_put -= info->max_frame_size;
		info->tx_count += c;
		spin_unlock_irqrestore(&info->lock,flags);

		buf += c;
		count -= c;
		ret += c;
	}

	if (info->params.mode == MGSL_MODE_HDLC) {
		if (count) {
			ret = info->tx_count = 0;
			goto cleanup;
		}
		tx_load_dma_buffer(info, info->tx_buf, info->tx_count);
	}

start:
	/* kick the transmitter unless flow control has us stopped */
 	if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
		spin_lock_irqsave(&info->lock,flags);
		if (!info->tx_active)
		 	tx_start(info);
		spin_unlock_irqrestore(&info->lock,flags);
 	}

cleanup:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk( "%s(%d):%s write() returning=%d\n",
			__FILE__,__LINE__,info->device_name,ret);

	return ret;
}

/* Add a character to the transmit buffer.
 */
static int put_char(struct tty_struct *tty, unsigned char ch)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;
	int ret = 0;

	if ( debug_level >= DEBUG_LEVEL_INFO ) {
		printk( "%s(%d):%s put_char(%d)\n",
			__FILE__,__LINE__,info->device_name,ch);
	}

	if (sanity_check(info, tty->name, "put_char"))
		return 0;

	if (!info->tx_buf)
		return 0;

	spin_lock_irqsave(&info->lock,flags);

	/* refuse new data while an HDLC frame is actively transmitting */
	if ( (info->params.mode != MGSL_MODE_HDLC) ||
	     !info->tx_active ) {

		if (info->tx_count < info->max_frame_size - 1) {
			info->tx_buf[info->tx_put++] = ch;
			if (info->tx_put >= info->max_frame_size)
				info->tx_put -= info->max_frame_size;
			info->tx_count++;
			ret = 1;
		}
	}

	spin_unlock_irqrestore(&info->lock,flags);
	return ret;
}

/* Send a high-priority XON/XOFF character
 */
static void send_xchar(struct tty_struct *tty, char ch)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s send_xchar(%d)\n",
			 __FILE__,__LINE__, info->device_name, ch );

	if (sanity_check(info, tty->name, "send_xchar"))
		return;

	info->x_char = ch;
	if (ch) {
		/* Make sure transmit interrupts are on */
		spin_lock_irqsave(&info->lock,flags);
		if (!info->tx_enabled)
		 	tx_start(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}
}

/* Wait until the transmitter is empty.
 */
static void wait_until_sent(struct tty_struct *tty, int timeout)
{
	SLMP_INFO * info = tty->driver_data;
	unsigned long orig_jiffies, char_time;

	if (!info )
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s wait_until_sent() entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (sanity_check(info, tty->name, "wait_until_sent"))
		return;

	if (!test_bit(ASYNCB_INITIALIZED, &info->port.flags))
		goto exit;

	orig_jiffies = jiffies;

	/* Set check interval to 1/5 of estimated time to
	 * send a character, and make it at least 1. The check
	 * interval should also be less than the timeout.
	 * Note: use tight timings here to satisfy the NIST-PCTS.
	 */

	if ( info->params.data_rate ) {
	       	char_time = info->timeout/(32 * 5);
		if (!char_time)
			char_time++;
	} else
		char_time = 1;

	if (timeout)
		char_time = min_t(unsigned long, char_time, timeout);

	if ( info->params.mode == MGSL_MODE_HDLC ) {
		/* poll until the in-flight frame has gone out */
		while (info->tx_active) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	} else {
		/*
		 * TODO: determine if there is something similar to USC16C32
		 * 	 TXSTATUS_ALL_SENT status
		 */
		while ( info->tx_active && info->tx_enabled) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	}

exit:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s wait_until_sent() exit\n",
			 __FILE__,__LINE__, info->device_name );
}

/* Return the count of free bytes in transmit buffer
 */
static int write_room(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;
	int ret;

	if (sanity_check(info, tty->name, "write_room"))
		return 0;

	if (info->params.mode == MGSL_MODE_HDLC) {
		/* HDLC accepts a whole frame only when idle */
		ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE;
	} else {
		ret = info->max_frame_size - info->tx_count - 1;
		if (ret < 0)
			ret = 0;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s write_room()=%d\n",
		       __FILE__, __LINE__, info->device_name, ret);

	return ret;
}

/* enable transmitter and send remaining buffered characters
 */
static void flush_chars(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):%s flush_chars() entry tx_count=%d\n",
			__FILE__,__LINE__,info->device_name,info->tx_count);

	if (sanity_check(info, tty->name, "flush_chars"))
		return;

	if (info->tx_count <= 0 || tty->stopped || tty->hw_stopped ||
	    !info->tx_buf)
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):%s flush_chars() entry, starting transmitter\n",
			__FILE__,__LINE__,info->device_name );

	spin_lock_irqsave(&info->lock,flags);

	if (!info->tx_active) {
		if ( (info->params.mode == MGSL_MODE_HDLC) &&
			info->tx_count ) {
			/* operating in synchronous (frame oriented) mode */
			/* copy data from circular tx_buf to */
			/* transmit DMA buffer. */
			tx_load_dma_buffer(info,
				 info->tx_buf,info->tx_count);
		}
	 	tx_start(info);
	}

	spin_unlock_irqrestore(&info->lock,flags);
}

/* Discard all data in the send buffer
 */
static void flush_buffer(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s flush_buffer() entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (sanity_check(info, tty->name, "flush_buffer"))
		return;

	spin_lock_irqsave(&info->lock,flags);
	info->tx_count = info->tx_put = info->tx_get = 0;
	del_timer(&info->tx_timer);
	spin_unlock_irqrestore(&info->lock,flags);

	tty_wakeup(tty);
}

/* throttle (stop) transmitter
 */
static void tx_hold(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "tx_hold"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):%s tx_hold()\n",
			__FILE__,__LINE__,info->device_name);

	spin_lock_irqsave(&info->lock,flags);
	if (info->tx_enabled)
	 	tx_stop(info);
	spin_unlock_irqrestore(&info->lock,flags);
}

/* release (start) transmitter
 */
static void tx_release(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "tx_release"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):%s tx_release()\n",
			__FILE__,__LINE__,info->device_name);

	spin_lock_irqsave(&info->lock,flags);
	if (!info->tx_enabled)
	 	tx_start(info);
	spin_unlock_irqrestore(&info->lock,flags);
}

/* Service an IOCTL request
 *
 * Arguments:
 *
 * 	tty	pointer to tty instance data
 * 	cmd	IOCTL command code
 * 	arg	command argument/context
 *
 * Return Value:	0 if success, otherwise error code
 */
static int ioctl(struct tty_struct *tty,
		 unsigned int cmd, unsigned long arg)
{
	SLMP_INFO *info = tty->driver_data;
	void __user *argp = (void __user *)arg;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s ioctl() cmd=%08X\n", __FILE__,__LINE__,
			info->device_name, cmd );

	if (sanity_check(info, tty->name, "ioctl"))
		return -ENODEV;

	/* only the serial-config/wait ioctls are valid while the port
	 * is in an I/O error state */
	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
	    (cmd != TIOCMIWAIT)) {
		if (tty->flags & (1 << TTY_IO_ERROR))
		    return -EIO;
	}

	switch (cmd) {
	case MGSL_IOCGPARAMS:
		return get_params(info, argp);
	case MGSL_IOCSPARAMS:
		return set_params(info, argp);
	case MGSL_IOCGTXIDLE:
		return get_txidle(info, argp);
	case MGSL_IOCSTXIDLE:
		return set_txidle(info, (int)arg);
	case MGSL_IOCTXENABLE:
		return tx_enable(info, (int)arg);
	case MGSL_IOCRXENABLE:
		return rx_enable(info, (int)arg);
	case MGSL_IOCTXABORT:
		return tx_abort(info);
	case MGSL_IOCGSTATS:
		return get_stats(info, argp);
	case MGSL_IOCWAITEVENT:
		return wait_mgsl_event(info, argp);
	case MGSL_IOCLOOPTXDONE:
		return 0; // TODO: Not supported, need to document
	/* Wait for modem input (DCD,RI,DSR,CTS) change
	 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
	 */
	case TIOCMIWAIT:
		return modem_input_wait(info,(int)arg);
	/*
	 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
	 * Return: write counters to the user passed counter struct
	 * NB: both 1->0 and 0->1 transitions are counted except for
	 *     RI where only 0->1 is counted.
	 */
	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

/* Copy the interrupt counters into the tty layer's icount structure.
 * Counters are snapshotted under info->lock for a consistent view.
 * Always returns 0.
 */
static int get_icount(struct tty_struct *tty,
				struct serial_icounter_struct *icount)
{
	SLMP_INFO *info = tty->driver_data;
	struct mgsl_icount cnow;	/* kernel counter temps */
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);
	cnow = info->icount;
	spin_unlock_irqrestore(&info->lock,flags);

	icount->cts = cnow.cts;
	icount->dsr = cnow.dsr;
	icount->rng = cnow.rng;
	icount->dcd = cnow.dcd;
	icount->rx = cnow.rx;
	icount->tx = cnow.tx;
	icount->frame = cnow.frame;
	icount->overrun = cnow.overrun;
	icount->parity = cnow.parity;
	icount->brk = cnow.brk;
	icount->buf_overrun = cnow.buf_overrun;

	return 0;
}

/*
 * /proc fs routines....
*/ static inline void line_info(struct seq_file *m, SLMP_INFO *info) { char stat_buf[30]; unsigned long flags; seq_printf(m, "%s: SCABase=%08x Mem=%08X StatusControl=%08x LCR=%08X\n" "\tIRQ=%d MaxFrameSize=%u\n", info->device_name, info->phys_sca_base, info->phys_memory_base, info->phys_statctrl_base, info->phys_lcr_base, info->irq_level, info->max_frame_size ); /* output current serial signal states */ spin_lock_irqsave(&info->lock,flags); get_signals(info); spin_unlock_irqrestore(&info->lock,flags); stat_buf[0] = 0; stat_buf[1] = 0; if (info->serial_signals & SerialSignal_RTS) strcat(stat_buf, "|RTS"); if (info->serial_signals & SerialSignal_CTS) strcat(stat_buf, "|CTS"); if (info->serial_signals & SerialSignal_DTR) strcat(stat_buf, "|DTR"); if (info->serial_signals & SerialSignal_DSR) strcat(stat_buf, "|DSR"); if (info->serial_signals & SerialSignal_DCD) strcat(stat_buf, "|CD"); if (info->serial_signals & SerialSignal_RI) strcat(stat_buf, "|RI"); if (info->params.mode == MGSL_MODE_HDLC) { seq_printf(m, "\tHDLC txok:%d rxok:%d", info->icount.txok, info->icount.rxok); if (info->icount.txunder) seq_printf(m, " txunder:%d", info->icount.txunder); if (info->icount.txabort) seq_printf(m, " txabort:%d", info->icount.txabort); if (info->icount.rxshort) seq_printf(m, " rxshort:%d", info->icount.rxshort); if (info->icount.rxlong) seq_printf(m, " rxlong:%d", info->icount.rxlong); if (info->icount.rxover) seq_printf(m, " rxover:%d", info->icount.rxover); if (info->icount.rxcrc) seq_printf(m, " rxlong:%d", info->icount.rxcrc); } else { seq_printf(m, "\tASYNC tx:%d rx:%d", info->icount.tx, info->icount.rx); if (info->icount.frame) seq_printf(m, " fe:%d", info->icount.frame); if (info->icount.parity) seq_printf(m, " pe:%d", info->icount.parity); if (info->icount.brk) seq_printf(m, " brk:%d", info->icount.brk); if (info->icount.overrun) seq_printf(m, " oe:%d", info->icount.overrun); } /* Append serial signal status to end */ seq_printf(m, " %s\n", stat_buf+1); seq_printf(m, 
"\ttxactive=%d bh_req=%d bh_run=%d pending_bh=%x\n", info->tx_active,info->bh_requested,info->bh_running, info->pending_bh); } /* Called to print information about devices */ static int synclinkmp_proc_show(struct seq_file *m, void *v) { SLMP_INFO *info; seq_printf(m, "synclinkmp driver:%s\n", driver_version); info = synclinkmp_device_list; while( info ) { line_info(m, info); info = info->next_device; } return 0; } static int synclinkmp_proc_open(struct inode *inode, struct file *file) { return single_open(file, synclinkmp_proc_show, NULL); } static const struct file_operations synclinkmp_proc_fops = { .owner = THIS_MODULE, .open = synclinkmp_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* Return the count of bytes in transmit buffer */ static int chars_in_buffer(struct tty_struct *tty) { SLMP_INFO *info = tty->driver_data; if (sanity_check(info, tty->name, "chars_in_buffer")) return 0; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s chars_in_buffer()=%d\n", __FILE__, __LINE__, info->device_name, info->tx_count); return info->tx_count; } /* Signal remote device to throttle send data (our receive data) */ static void throttle(struct tty_struct * tty) { SLMP_INFO *info = tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s throttle() entry\n", __FILE__,__LINE__, info->device_name ); if (sanity_check(info, tty->name, "throttle")) return; if (I_IXOFF(tty)) send_xchar(tty, STOP_CHAR(tty)); if (tty->termios.c_cflag & CRTSCTS) { spin_lock_irqsave(&info->lock,flags); info->serial_signals &= ~SerialSignal_RTS; set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } } /* Signal remote device to stop throttling send data (our receive data) */ static void unthrottle(struct tty_struct * tty) { SLMP_INFO *info = tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s unthrottle() entry\n", __FILE__,__LINE__, info->device_name ); if (sanity_check(info, 
tty->name, "unthrottle")) return; if (I_IXOFF(tty)) { if (info->x_char) info->x_char = 0; else send_xchar(tty, START_CHAR(tty)); } if (tty->termios.c_cflag & CRTSCTS) { spin_lock_irqsave(&info->lock,flags); info->serial_signals |= SerialSignal_RTS; set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } } /* set or clear transmit break condition * break_state -1=set break condition, 0=clear */ static int set_break(struct tty_struct *tty, int break_state) { unsigned char RegValue; SLMP_INFO * info = tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s set_break(%d)\n", __FILE__,__LINE__, info->device_name, break_state); if (sanity_check(info, tty->name, "set_break")) return -EINVAL; spin_lock_irqsave(&info->lock,flags); RegValue = read_reg(info, CTL); if (break_state == -1) RegValue |= BIT3; else RegValue &= ~BIT3; write_reg(info, CTL, RegValue); spin_unlock_irqrestore(&info->lock,flags); return 0; } #if SYNCLINK_GENERIC_HDLC /** * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) 
 * set encoding and frame check sequence (FCS) options
 *
 * dev       pointer to network device structure
 * encoding  serial encoding setting
 * parity    FCS setting
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
			  unsigned short parity)
{
	SLMP_INFO *info = dev_to_port(dev);
	unsigned char  new_encoding;
	unsigned short new_crctype;

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	/* map generic-HDLC encoding constants to driver constants */
	switch (encoding)
	{
	case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
	case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
	case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
	case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
	case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
	default: return -EINVAL;
	}

	/* map generic-HDLC parity (FCS) constants to driver constants */
	switch (parity)
	{
	case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
	case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
	case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
	default: return -EINVAL;
	}

	info->params.encoding = new_encoding;
	info->params.crc_type = new_crctype;

	/* if network interface up, reprogram hardware */
	if (info->netcount)
		program_hw(info);

	return 0;
}

/**
 * called by generic HDLC layer to send frame
 *
 * skb  socket buffer containing HDLC frame
 * dev  pointer to network device structure
 */
static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	SLMP_INFO *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);

	/* stop sending until this frame completes */
	netif_stop_queue(dev);

	/* copy data to device buffers */
	info->tx_count = skb->len;
	tx_load_dma_buffer(info, skb->data, skb->len);

	/* update network statistics */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* done with socket buffer, so free it */
	dev_kfree_skb(skb);

	/* save start time for transmit timeout detection */
	dev->trans_start = jiffies;

	/* start hardware transmitter if necessary */
	spin_lock_irqsave(&info->lock,flags);
	if (!info->tx_active)
	 	tx_start(info);
	spin_unlock_irqrestore(&info->lock,flags);

	return NETDEV_TX_OK;
}

/**
 * called by network layer when interface enabled
 * claim resources and initialize hardware
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_open(struct net_device *dev)
{
	SLMP_INFO *info = dev_to_port(dev);
	int rc;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);

	/* generic HDLC layer open processing */
	if ((rc = hdlc_open(dev)))
		return rc;

	/* arbitrate between network and tty opens */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->port.count != 0 || info->netcount != 0) {
		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
		spin_unlock_irqrestore(&info->netlock, flags);
		return -EBUSY;
	}
	info->netcount=1;
	spin_unlock_irqrestore(&info->netlock, flags);

	/* claim resources and init adapter */
	if ((rc = startup(info)) != 0) {
		/* undo the netcount claim on failure */
		spin_lock_irqsave(&info->netlock, flags);
		info->netcount=0;
		spin_unlock_irqrestore(&info->netlock, flags);
		return rc;
	}

	/* assert RTS and DTR, apply hardware settings */
	info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
	program_hw(info);

	/* enable network layer transmit */
	dev->trans_start = jiffies;
	netif_start_queue(dev);

	/* inform generic HDLC layer of current DCD status */
	spin_lock_irqsave(&info->lock, flags);
	get_signals(info);
	spin_unlock_irqrestore(&info->lock, flags);
	if (info->serial_signals & SerialSignal_DCD)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

/**
 * called by network layer when interface is disabled
 * shutdown hardware and release resources
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_close(struct net_device *dev)
{
	SLMP_INFO *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);

	netif_stop_queue(dev);

	/* shutdown adapter and release resources */
	shutdown(info);

	hdlc_close(dev);

	/* release the net-vs-tty arbitration claim */
	spin_lock_irqsave(&info->netlock, flags);
	info->netcount=0;
	spin_unlock_irqrestore(&info->netlock, flags);

	return 0;
}

/**
 * called by network layer to process IOCTL call to network device
 *
 * dev  pointer to network device structure
 * ifr  pointer to network interface request structure
 * cmd  IOCTL command code
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	SLMP_INFO *info = dev_to_port(dev);
	unsigned int flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE: /* return current sync_serial_settings */

		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}

		/* derive the reported clock_type from the rx/tx clock
		 * source flag combination */
		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);

		memset(&new_line, 0, sizeof(new_line));
		switch (flags){
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
		case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
		default: new_line.clock_type = CLOCK_DEFAULT;
		}

		new_line.clock_rate = info->params.clock_speed;
		new_line.loopback   = info->params.loopback ? 1:0;

		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */

		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		/* map requested clock_type back to clock source flags */
		switch (new_line.clock_type)
		{
		case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
		case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_DEFAULT:  flags = info->params.flags &
					     (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
		default: return -EINVAL;
		}

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
		info->params.flags |= flags;

		info->params.loopback = new_line.loopback;

		/* clock_rate only meaningful when the BRG supplies a clock */
		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
			info->params.clock_speed = new_line.clock_rate;
		else
			info->params.clock_speed = 0;

		/* if network interface up, reprogram hardware */
		if (info->netcount)
			program_hw(info);
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

/**
 * called by network layer when transmit timeout is detected
 *
 * dev  pointer to network device structure
 */
static void hdlcdev_tx_timeout(struct net_device *dev)
{
	SLMP_INFO *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_tx_timeout(%s)\n",dev->name);

	dev->stats.tx_errors++;
	dev->stats.tx_aborted_errors++;

	spin_lock_irqsave(&info->lock,flags);
	tx_stop(info);
	spin_unlock_irqrestore(&info->lock,flags);

	netif_wake_queue(dev);
}

/**
 * called by device driver when transmit completes
 * reenable network layer transmit if stopped
 *
 * info  pointer to device instance information
 */
static void hdlcdev_tx_done(SLMP_INFO *info)
{
	if (netif_queue_stopped(info->netdev))
		netif_wake_queue(info->netdev);
}

/**
 * called by device driver when frame received
 * pass frame to network layer
 *
 * info  pointer to device instance information
 * buf   pointer to buffer containing frame data
 * size  count of data bytes in buf
 */
static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size)
{
	struct sk_buff *skb = dev_alloc_skb(size);
	struct net_device *dev = info->netdev;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_rx(%s)\n",dev->name);

	if (skb == NULL) {
		printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
		       dev->name);
		dev->stats.rx_dropped++;
		return;
	}

	memcpy(skb_put(skb, size), buf, size);

	skb->protocol = hdlc_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += size;

	netif_rx(skb);
}

static const struct net_device_ops hdlcdev_ops = {
	.ndo_open       = hdlcdev_open,
	.ndo_stop       = hdlcdev_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hdlcdev_ioctl,
	.ndo_tx_timeout = hdlcdev_tx_timeout,
};

/**
 * called by device driver when adding device instance
 * do generic HDLC initialization
 *
 * info  pointer to device instance information
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_init(SLMP_INFO *info)
{
	int rc;
	struct net_device *dev;
	hdlc_device *hdlc;

	/* allocate and initialize network and HDLC layer objects */

	if (!(dev = alloc_hdlcdev(info))) {
		printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
		return -ENOMEM;
	}

	/* for network layer reporting purposes only */
	dev->mem_start = info->phys_sca_base;
	dev->mem_end   = info->phys_sca_base + SCA_BASE_SIZE - 1;
	dev->irq       = info->irq_level;

	/* network layer callbacks and settings */
	dev->netdev_ops	    = &hdlcdev_ops;
	dev->watchdog_timeo = 10 * HZ;
	dev->tx_queue_len   = 50;

	/* generic HDLC layer callbacks and settings */
	hdlc         = dev_to_hdlc(dev);
	hdlc->attach = hdlcdev_attach;
	hdlc->xmit   = hdlcdev_xmit;

	/* register objects with HDLC layer */
	if ((rc = register_hdlc_device(dev))) {
		printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
		free_netdev(dev);
		return rc;
	}

	info->netdev = dev;
	return 0;
}

/**
 * called by device driver when removing device instance
 * do generic HDLC cleanup
 *
 * info  pointer to device instance information
 */
static void hdlcdev_exit(SLMP_INFO *info)
{
	unregister_hdlc_device(info->netdev);
	free_netdev(info->netdev);
	info->netdev = NULL;
}

#endif /* CONFIG_HDLC */

/* Return next bottom half action to perform.
 * Return Value:	BH action code or 0 if nothing to do.
 */
static int bh_action(SLMP_INFO *info)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&info->lock,flags);

	/* fixed priority: receive, then transmit, then status */
	if (info->pending_bh & BH_RECEIVE) {
		info->pending_bh &= ~BH_RECEIVE;
		rc = BH_RECEIVE;
	} else if (info->pending_bh & BH_TRANSMIT) {
		info->pending_bh &= ~BH_TRANSMIT;
		rc = BH_TRANSMIT;
	} else if (info->pending_bh & BH_STATUS) {
		info->pending_bh &= ~BH_STATUS;
		rc = BH_STATUS;
	}

	if (!rc) {
		/* Mark BH routine as complete */
		info->bh_running = false;
		info->bh_requested = false;
	}

	spin_unlock_irqrestore(&info->lock,flags);

	return rc;
}

/* Perform bottom half processing of work items queued by ISR.
 */
static void bh_handler(struct work_struct *work)
{
	SLMP_INFO *info = container_of(work, SLMP_INFO, task);
	int action;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):%s bh_handler() entry\n",
			__FILE__,__LINE__,info->device_name);

	info->bh_running = true;

	/* drain pending work items until bh_action() reports none left */
	while((action = bh_action(info)) != 0) {

		/* Process work item */
		if ( debug_level >= DEBUG_LEVEL_BH )
			printk( "%s(%d):%s bh_handler() work item action=%d\n",
				__FILE__,__LINE__,info->device_name, action);

		switch (action) {

		case BH_RECEIVE:
			bh_receive(info);
			break;
		case BH_TRANSMIT:
			bh_transmit(info);
			break;
		case BH_STATUS:
			bh_status(info);
			break;
		default:
			/* unknown work item ID */
			printk("%s(%d):%s Unknown work item ID=%08X!\n",
				__FILE__,__LINE__,info->device_name,action);
			break;
		}
	}

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):%s bh_handler() exit\n",
			__FILE__,__LINE__,info->device_name);
}

/* Bottom-half receive: hand completed frames up until none remain. */
static void bh_receive(SLMP_INFO *info)
{
	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):%s bh_receive()\n",
			__FILE__,__LINE__,info->device_name);

	while( rx_get_frame(info) );
}

/* Bottom-half transmit: wake writers blocked on buffer space. */
static void bh_transmit(SLMP_INFO *info)
{
	struct tty_struct *tty = info->port.tty;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):%s bh_transmit() entry\n",
			__FILE__,__LINE__,info->device_name);

	if (tty)
		tty_wakeup(tty);
}

/* Bottom-half status: reset the per-signal interrupt storm counters. */
static void bh_status(SLMP_INFO *info)
{
	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):%s bh_status() entry\n",
			__FILE__,__LINE__,info->device_name);

	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
}

/* Timer interrupt: disable the timer and record that an IRQ occurred. */
static void isr_timer(SLMP_INFO * info)
{
	/* odd ports use TIMER2, even ports use TIMER0 */
	unsigned char timer = (info->port_num & 1) ? TIMER2 : TIMER0;

	/* IER2<7..4> = timer<3..0> interrupt enables (0=disabled) */
	write_reg(info, IER2, 0);

	/* TMCS, Timer Control/Status Register
	 *
	 * 07      CMF, Compare match flag (read only) 1=match
	 * 06      ECMI, CMF Interrupt Enable: 0=disabled
	 * 05      Reserved, must be 0
	 * 04      TME, Timer Enable
	 * 03..00  Reserved, must be 0
	 *
	 * 0000 0000
	 */
	write_reg(info, (unsigned char)(timer + TMCS), 0);

	info->irq_occurred = true;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):%s isr_timer()\n",
			__FILE__,__LINE__,info->device_name);
}

/* Receive status interrupt: break/flag/idle/DCD events from SR1/SR2. */
static void isr_rxint(SLMP_INFO * info)
{
 	struct tty_struct *tty = info->port.tty;
 	struct	mgsl_icount *icount = &info->icount;
	unsigned char status = read_reg(info, SR1) & info->ie1_value & (FLGD + IDLD + CDCD + BRKD);
	unsigned char status2 = read_reg(info, SR2) & info->ie2_value & OVRN;

	/* clear status bits */
	if (status)
		write_reg(info, SR1, status);

	if (status2)
		write_reg(info, SR2, status2);

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):%s isr_rxint status=%02X %02x\n",
			__FILE__,__LINE__,info->device_name,status,status2);

	if (info->params.mode == MGSL_MODE_ASYNC) {
		if (status & BRKD) {
			icount->brk++;

			/* process break detection if tty control
			 * is not set to ignore it
			 */
			if (!(status & info->ignore_status_mask1)) {
				if (info->read_status_mask1 & BRKD) {
					tty_insert_flip_char(&info->port, 0, TTY_BREAK);
					if (tty && (info->port.flags & ASYNC_SAK))
						do_SAK(tty);
				}
			}
		}
	}
	else {
		/* sync modes: flag/idle detection feeds the event wait queue */
		if (status & (FLGD|IDLD)) {
			if (status & FLGD)
				info->icount.exithunt++;
			else if (status & IDLD)
				info->icount.rxidle++;
			wake_up_interruptible(&info->event_wait_q);
		}
	}

	if (status & CDCD) {
		/* simulate a common modem status change interrupt
		 * for our handler
		 */
		get_signals( info );
		isr_io_pin(info,
			MISCSTATUS_DCD_LATCHED|(info->serial_signals&SerialSignal_DCD));
	}
}

/*
 * handle async rx data interrupts
 */
static void isr_rxrdy(SLMP_INFO * info)
{
	u16 status;
	unsigned char DataByte;
 	struct	mgsl_icount *icount = &info->icount;

	if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):%s isr_rxrdy\n", __FILE__,__LINE__,info->device_name); while((status = read_reg(info,CST0)) & BIT0) { int flag = 0; bool over = false; DataByte = read_reg(info,TRB); icount->rx++; if ( status & (PE + FRME + OVRN) ) { printk("%s(%d):%s rxerr=%04X\n", __FILE__,__LINE__,info->device_name,status); /* update error statistics */ if (status & PE) icount->parity++; else if (status & FRME) icount->frame++; else if (status & OVRN) icount->overrun++; /* discard char if tty control flags say so */ if (status & info->ignore_status_mask2) continue; status &= info->read_status_mask2; if (status & PE) flag = TTY_PARITY; else if (status & FRME) flag = TTY_FRAME; if (status & OVRN) { /* Overrun is special, since it's * reported immediately, and doesn't * affect the current character */ over = true; } } /* end of if (error) */ tty_insert_flip_char(&info->port, DataByte, flag); if (over) tty_insert_flip_char(&info->port, 0, TTY_OVERRUN); } if ( debug_level >= DEBUG_LEVEL_ISR ) { printk("%s(%d):%s rx=%d brk=%d parity=%d frame=%d overrun=%d\n", __FILE__,__LINE__,info->device_name, icount->rx,icount->brk,icount->parity, icount->frame,icount->overrun); } tty_flip_buffer_push(&info->port); } static void isr_txeom(SLMP_INFO * info, unsigned char status) { if ( debug_level >= DEBUG_LEVEL_ISR ) printk("%s(%d):%s isr_txeom status=%02x\n", __FILE__,__LINE__,info->device_name,status); write_reg(info, TXDMA + DIR, 0x00); /* disable Tx DMA IRQs */ write_reg(info, TXDMA + DSR, 0xc0); /* clear IRQs and disable DMA */ write_reg(info, TXDMA + DCMD, SWABORT); /* reset/init DMA channel */ if (status & UDRN) { write_reg(info, CMD, TXRESET); write_reg(info, CMD, TXENABLE); } else write_reg(info, CMD, TXBUFCLR); /* disable and clear tx interrupts */ info->ie0_value &= ~TXRDYE; info->ie1_value &= ~(IDLE + UDRN); write_reg16(info, IE0, (unsigned short)((info->ie1_value << 8) + info->ie0_value)); write_reg(info, SR1, (unsigned char)(UDRN + IDLE)); if ( info->tx_active ) { if (info->params.mode 
!= MGSL_MODE_ASYNC) { if (status & UDRN) info->icount.txunder++; else if (status & IDLE) info->icount.txok++; } info->tx_active = false; info->tx_count = info->tx_put = info->tx_get = 0; del_timer(&info->tx_timer); if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done ) { info->serial_signals &= ~SerialSignal_RTS; info->drop_rts_on_tx_done = false; set_signals(info); } #if SYNCLINK_GENERIC_HDLC if (info->netcount) hdlcdev_tx_done(info); else #endif { if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) { tx_stop(info); return; } info->pending_bh |= BH_TRANSMIT; } } } /* * handle tx status interrupts */ static void isr_txint(SLMP_INFO * info) { unsigned char status = read_reg(info, SR1) & info->ie1_value & (UDRN + IDLE + CCTS); /* clear status bits */ write_reg(info, SR1, status); if ( debug_level >= DEBUG_LEVEL_ISR ) printk("%s(%d):%s isr_txint status=%02x\n", __FILE__,__LINE__,info->device_name,status); if (status & (UDRN + IDLE)) isr_txeom(info, status); if (status & CCTS) { /* simulate a common modem status change interrupt * for our handler */ get_signals( info ); isr_io_pin(info, MISCSTATUS_CTS_LATCHED|(info->serial_signals&SerialSignal_CTS)); } } /* * handle async tx data interrupts */ static void isr_txrdy(SLMP_INFO * info) { if ( debug_level >= DEBUG_LEVEL_ISR ) printk("%s(%d):%s isr_txrdy() tx_count=%d\n", __FILE__,__LINE__,info->device_name,info->tx_count); if (info->params.mode != MGSL_MODE_ASYNC) { /* disable TXRDY IRQ, enable IDLE IRQ */ info->ie0_value &= ~TXRDYE; info->ie1_value |= IDLE; write_reg16(info, IE0, (unsigned short)((info->ie1_value << 8) + info->ie0_value)); return; } if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) { tx_stop(info); return; } if ( info->tx_count ) tx_load_fifo( info ); else { info->tx_active = false; info->ie0_value &= ~TXRDYE; write_reg(info, IE0, info->ie0_value); } if (info->tx_count < WAKEUP_CHARS) info->pending_bh |= BH_TRANSMIT; } static void 
isr_rxdmaok(SLMP_INFO * info)
{
	/* BIT7 = EOT (end of transfer)
	 * BIT6 = EOM (end of message/frame)
	 */
	unsigned char status = read_reg(info,RXDMA + DSR) & 0xc0;

	/* clear IRQ (BIT0 must be 1 to prevent clearing DE bit) */
	write_reg(info, RXDMA + DSR, (unsigned char)(status | 1));

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):%s isr_rxdmaok(), status=%02x\n",
			__FILE__,__LINE__,info->device_name,status);

	/* frame data is in DMA buffers; let the bottom half collect it */
	info->pending_bh |= BH_RECEIVE;
}

/* Receive DMA error interrupt: buffer/counter overflow. */
static void isr_rxdmaerror(SLMP_INFO * info)
{
	/* BIT5 = BOF (buffer overflow)
	 * BIT4 = COF (counter overflow)
	 */
	unsigned char status = read_reg(info,RXDMA + DSR) & 0x30;

	/* clear IRQ (BIT0 must be 1 to prevent clearing DE bit) */
	write_reg(info, RXDMA + DSR, (unsigned char)(status | 1));

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):%s isr_rxdmaerror(), status=%02x\n",
			__FILE__,__LINE__,info->device_name,status);

	info->rx_overflow = true;
	info->pending_bh |= BH_RECEIVE;
}

/* Transmit DMA completion: all buffered data handed to the serial
 * controller; switch to TXRDY (FIFO empty) interrupts to finish up.
 */
static void isr_txdmaok(SLMP_INFO * info)
{
	unsigned char status_reg1 = read_reg(info, SR1);

	write_reg(info, TXDMA + DIR, 0x00);	/* disable Tx DMA IRQs */
	write_reg(info, TXDMA + DSR, 0xc0);	/* clear IRQs and disable DMA */
	write_reg(info, TXDMA + DCMD, SWABORT);	/* reset/init DMA channel */

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):%s isr_txdmaok(), status=%02x\n",
			__FILE__,__LINE__,info->device_name,status_reg1);

	/* program TXRDY as FIFO empty flag, enable TXRDY IRQ */
	write_reg16(info, TRC0, 0);
	info->ie0_value |= TXRDYE;
	write_reg(info, IE0, info->ie0_value);
}

/* Transmit DMA error interrupt: buffer/counter overflow (logged only). */
static void isr_txdmaerror(SLMP_INFO * info)
{
	/* BIT5 = BOF (buffer overflow)
	 * BIT4 = COF (counter overflow)
	 */
	unsigned char status = read_reg(info,TXDMA + DSR) & 0x30;

	/* clear IRQ (BIT0 must be 1 to prevent clearing DE bit) */
	write_reg(info, TXDMA + DSR, (unsigned char)(status | 1));

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):%s isr_txdmaerror(), status=%02x\n",
			__FILE__,__LINE__,info->device_name,status);
}

/* handle input serial signal changes */
static void isr_io_pin( SLMP_INFO *info, u16 status )
{
	struct mgsl_icount *icount;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):isr_io_pin status=%04X\n",
			__FILE__,__LINE__,status);

	if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
	              MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
		icount = &info->icount;
		/* update input line counters */
		if (status & MISCSTATUS_RI_LATCHED) {
			icount->rng++;
			if ( status & SerialSignal_RI )
				info->input_signal_events.ri_up++;
			else
				info->input_signal_events.ri_down++;
		}
		if (status & MISCSTATUS_DSR_LATCHED) {
			icount->dsr++;
			if ( status & SerialSignal_DSR )
				info->input_signal_events.dsr_up++;
			else
				info->input_signal_events.dsr_down++;
		}
		if (status & MISCSTATUS_DCD_LATCHED) {
			/* rate-limit: disable DCD IRQ after too many changes;
			 * counter is reset by bh_status()
			 */
			if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) {
				info->ie1_value &= ~CDCD;
				write_reg(info, IE1, info->ie1_value);
			}
			icount->dcd++;
			if (status & SerialSignal_DCD) {
				info->input_signal_events.dcd_up++;
			} else
				info->input_signal_events.dcd_down++;
#if SYNCLINK_GENERIC_HDLC
			if (info->netcount) {
				if (status & SerialSignal_DCD)
					netif_carrier_on(info->netdev);
				else
					netif_carrier_off(info->netdev);
			}
#endif
		}
		if (status & MISCSTATUS_CTS_LATCHED)
		{
			/* same rate-limit scheme as DCD above */
			if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) {
				info->ie1_value &= ~CCTS;
				write_reg(info, IE1, info->ie1_value);
			}
			icount->cts++;
			if ( status & SerialSignal_CTS )
				info->input_signal_events.cts_up++;
			else
				info->input_signal_events.cts_down++;
		}
		wake_up_interruptible(&info->status_event_wait_q);
		wake_up_interruptible(&info->event_wait_q);

		if ( (info->port.flags & ASYNC_CHECK_CD) &&
		     (status & MISCSTATUS_DCD_LATCHED) ) {
			if ( debug_level >= DEBUG_LEVEL_ISR )
				printk("%s CD now %s...", info->device_name,
				       (status & SerialSignal_DCD) ? "on" : "off");
			if (status & SerialSignal_DCD)
				wake_up_interruptible(&info->port.open_wait);
			else {
				if ( debug_level >= DEBUG_LEVEL_ISR )
					printk("doing serial hangup...");
				if (info->port.tty)
					tty_hangup(info->port.tty);
			}
		}

		if (tty_port_cts_enabled(&info->port) &&
		     (status & MISCSTATUS_CTS_LATCHED) ) {
			/* hardware flow control: CTS gates the transmitter */
			if ( info->port.tty ) {
				if (info->port.tty->hw_stopped) {
					if (status & SerialSignal_CTS) {
						if ( debug_level >= DEBUG_LEVEL_ISR )
							printk("CTS tx start...");
						info->port.tty->hw_stopped = 0;
						tx_start(info);
						info->pending_bh |= BH_TRANSMIT;
						return;
					}
				} else {
					if (!(status & SerialSignal_CTS)) {
						if ( debug_level >= DEBUG_LEVEL_ISR )
							printk("CTS tx stop...");
						info->port.tty->hw_stopped = 1;
						tx_stop(info);
					}
				}
			}
		}
	}

	info->pending_bh |= BH_STATUS;
}

/* Interrupt service routine entry point.
 *
 * Arguments:
 * 	irq	interrupt number that caused interrupt
 * 	dev_id	device ID supplied during interrupt registration
 * 	regs	interrupted processor context
 */
static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
{
	SLMP_INFO *info = dev_id;
	unsigned char status, status0, status1=0;
	unsigned char dmastatus, dmastatus0, dmastatus1=0;
	unsigned char timerstatus0, timerstatus1=0;
	unsigned char shift;
	unsigned int i;
	unsigned short tmp;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk(KERN_DEBUG "%s(%d): synclinkmp_interrupt(%d)entry.\n",
			__FILE__, __LINE__, info->irq_level);

	spin_lock(&info->lock);

	/* loop until no interrupt source on either SCA reports pending */
	for(;;) {

		/* get status for SCA0 (ports 0-1) */
		tmp = read_reg16(info, ISR0);	/* get ISR0 and ISR1 in one read */
		status0 = (unsigned char)tmp;
		dmastatus0 = (unsigned char)(tmp>>8);
		timerstatus0 = read_reg(info, ISR2);

		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk(KERN_DEBUG "%s(%d):%s status0=%02x, dmastatus0=%02x, timerstatus0=%02x\n",
				__FILE__, __LINE__, info->device_name,
				status0, dmastatus0, timerstatus0);

		if (info->port_count == 4) {
			/* get status for SCA1 (ports 2-3) */
			tmp = read_reg16(info->port_array[2], ISR0);
			status1 = (unsigned char)tmp;
			dmastatus1 = (unsigned char)(tmp>>8);
			timerstatus1 = read_reg(info->port_array[2], ISR2);

			if ( debug_level >= DEBUG_LEVEL_ISR )
				printk("%s(%d):%s status1=%02x, dmastatus1=%02x, timerstatus1=%02x\n",
					__FILE__,__LINE__,info->device_name,
					status1,dmastatus1,timerstatus1);
		}

		if (!status0 && !dmastatus0 && !timerstatus0 &&
			 !status1 && !dmastatus1 && !timerstatus1)
			break;

		for(i=0; i < info->port_count ; i++) {
			if (info->port_array[i] == NULL)
				continue;
			if (i < 2) {
				status = status0;
				dmastatus = dmastatus0;
			} else {
				status = status1;
				dmastatus = dmastatus1;
			}

			/* odd port of each SCA uses the upper nibble */
			shift = i & 1 ? 4 :0;

			if (status & BIT0 << shift)
				isr_rxrdy(info->port_array[i]);
			if (status & BIT1 << shift)
				isr_txrdy(info->port_array[i]);
			if (status & BIT2 << shift)
				isr_rxint(info->port_array[i]);
			if (status & BIT3 << shift)
				isr_txint(info->port_array[i]);

			if (dmastatus & BIT0 << shift)
				isr_rxdmaerror(info->port_array[i]);
			if (dmastatus & BIT1 << shift)
				isr_rxdmaok(info->port_array[i]);
			if (dmastatus & BIT2 << shift)
				isr_txdmaerror(info->port_array[i]);
			if (dmastatus & BIT3 << shift)
				isr_txdmaok(info->port_array[i]);
		}

		if (timerstatus0 & (BIT5 | BIT4))
			isr_timer(info->port_array[0]);
		if (timerstatus0 & (BIT7 | BIT6))
			isr_timer(info->port_array[1]);
		if (timerstatus1 & (BIT5 | BIT4))
			isr_timer(info->port_array[2]);
		if (timerstatus1 & (BIT7 | BIT6))
			isr_timer(info->port_array[3]);
	}

	for(i=0; i < info->port_count ; i++) {
		SLMP_INFO * port = info->port_array[i];

		/* Request bottom half processing if there's something
		 * for it to do and the bh is not already running.
		 *
		 * Note: startup adapter diags require interrupts.
		 * do not request bottom half processing if the
		 * device is not open in a normal mode.
		 */
		if ( port && (port->port.count || port->netcount) &&
		     port->pending_bh && !port->bh_running &&
		     !port->bh_requested ) {
			if ( debug_level >= DEBUG_LEVEL_ISR )
				printk("%s(%d):%s queueing bh task.\n",
					__FILE__,__LINE__,port->device_name);
			schedule_work(&port->task);
			port->bh_requested = true;
		}
	}

	spin_unlock(&info->lock);

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk(KERN_DEBUG "%s(%d):synclinkmp_interrupt(%d)exit.\n",
			__FILE__, __LINE__, info->irq_level);
	return IRQ_HANDLED;
}

/* Initialize and start device.
 */
static int startup(SLMP_INFO * info)
{
	/* NOTE(review): debug message says tx_releaseup() — copy/paste artifact */
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):%s tx_releaseup()\n",__FILE__,__LINE__,info->device_name);

	if (info->port.flags & ASYNC_INITIALIZED)
		return 0;

	if (!info->tx_buf) {
		info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
		if (!info->tx_buf) {
			printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
				__FILE__,__LINE__,info->device_name);
			return -ENOMEM;
		}
	}

	info->pending_bh = 0;

	memset(&info->icount, 0, sizeof(info->icount));

	/* program hardware for current parameters */
	reset_port(info);

	change_params(info);

	mod_timer(&info->status_timer, jiffies + msecs_to_jiffies(10));

	if (info->port.tty)
		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);

	info->port.flags |= ASYNC_INITIALIZED;

	return 0;
}

/* Called by close() and hangup() to shutdown hardware
 */
static void shutdown(SLMP_INFO * info)
{
	unsigned long flags;

	if (!(info->port.flags & ASYNC_INITIALIZED))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s synclinkmp_shutdown()\n",
			 __FILE__,__LINE__, info->device_name );

	/* clear status wait queue because status changes */
	/* can't happen after shutting down the hardware */
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);

	del_timer(&info->tx_timer);
	del_timer(&info->status_timer);

	kfree(info->tx_buf);
	info->tx_buf = NULL;

	spin_lock_irqsave(&info->lock,flags);

	reset_port(info);

	/* drop RTS/DTR if HUPCL is set (or no tty attached) */
	if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
		set_signals(info);
	}

	spin_unlock_irqrestore(&info->lock,flags);

	if (info->port.tty)
		set_bit(TTY_IO_ERROR, &info->port.tty->flags);

	info->port.flags &= ~ASYNC_INITIALIZED;
}

/* Program the hardware for the current parameters (mode, signals,
 * interrupt enables) and start the receiver if appropriate.
 */
static void program_hw(SLMP_INFO *info)
{
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);

	rx_stop(info);
	tx_stop(info);

	info->tx_count = info->tx_put = info->tx_get = 0;

	if (info->params.mode == MGSL_MODE_HDLC || info->netcount)
		hdlc_mode(info);
	else
		async_mode(info);

	set_signals(info);

	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;

	/* enable DCD and CTS change interrupts */
	info->ie1_value |= (CDCD|CCTS);
	write_reg(info, IE1, info->ie1_value);

	get_signals(info);

	if (info->netcount || (info->port.tty && info->port.tty->termios.c_cflag & CREAD) )
		rx_start(info);

	spin_unlock_irqrestore(&info->lock,flags);
}

/* Reconfigure adapter based on new parameters
 */
static void change_params(SLMP_INFO *info)
{
	unsigned cflag;
	int bits_per_char;

	if (!info->port.tty)
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s change_params()\n",
			 __FILE__,__LINE__, info->device_name );

	cflag = info->port.tty->termios.c_cflag;

	/* if B0 rate (hangup) specified then negate RTS and DTR */
	/* otherwise assert RTS and DTR */
	if (cflag & CBAUD)
		info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
	else
		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);

	/* byte size and parity */

	switch (cflag & CSIZE) {
	case CS5: info->params.data_bits = 5; break;
	case CS6: info->params.data_bits = 6; break;
	case CS7: info->params.data_bits = 7; break;
	case CS8: info->params.data_bits = 8; break;
	/* Never happens, but GCC is too dumb to figure it out */
	default:  info->params.data_bits = 7; break;
	}

	if (cflag & CSTOPB)
		info->params.stop_bits = 2;
	else
		info->params.stop_bits = 1;

	info->params.parity = ASYNC_PARITY_NONE;
	if (cflag & PARENB) {
		if (cflag & PARODD)
			info->params.parity = ASYNC_PARITY_ODD;
		else
			info->params.parity = ASYNC_PARITY_EVEN;
#ifdef CMSPAR
		if (cflag & CMSPAR)
			info->params.parity = ASYNC_PARITY_SPACE;
#endif
	}

	/* calculate number of jiffies to transmit a full
	 * FIFO (32 bytes) at specified data rate
	 */
	bits_per_char = info->params.data_bits +
			info->params.stop_bits + 1;

	/* if port data rate is set to 460800 or less then
	 * allow tty settings to override, otherwise keep the
	 * current data rate.
	 */
	if (info->params.data_rate <= 460800) {
		info->params.data_rate = tty_get_baud_rate(info->port.tty);
	}

	if ( info->params.data_rate ) {
		info->timeout = (32*HZ*bits_per_char) /
				info->params.data_rate;
	}
	info->timeout += HZ/50;		/* Add .02 seconds of slop */

	if (cflag & CRTSCTS)
		info->port.flags |= ASYNC_CTS_FLOW;
	else
		info->port.flags &= ~ASYNC_CTS_FLOW;

	if (cflag & CLOCAL)
		info->port.flags &= ~ASYNC_CHECK_CD;
	else
		info->port.flags |= ASYNC_CHECK_CD;

	/* process tty input control flags */

	info->read_status_mask2 = OVRN;
	if (I_INPCK(info->port.tty))
		info->read_status_mask2 |= PE | FRME;
	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
		info->read_status_mask1 |= BRKD;
	if (I_IGNPAR(info->port.tty))
		info->ignore_status_mask2 |= PE | FRME;
	if (I_IGNBRK(info->port.tty)) {
		info->ignore_status_mask1 |= BRKD;
		/* If ignoring parity and break indicators, ignore
		 * overruns too.  (For real raw support).
		 */
		if (I_IGNPAR(info->port.tty))
			info->ignore_status_mask2 |= OVRN;
	}

	program_hw(info);
}

/* Copy interrupt/error counters to userspace; a NULL pointer clears them. */
static int get_stats(SLMP_INFO * info, struct mgsl_icount __user *user_icount)
{
	int err;

	/* NOTE(review): debug message says get_params() — copy/paste artifact */
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s get_params()\n",
			 __FILE__,__LINE__, info->device_name);

	if (!user_icount) {
		memset(&info->icount, 0, sizeof(info->icount));
	} else {
		mutex_lock(&info->port.mutex);
		COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
		mutex_unlock(&info->port.mutex);
		if (err)
			return -EFAULT;
	}

	return 0;
}

/* Copy the current device parameters to userspace. */
static int get_params(SLMP_INFO * info, MGSL_PARAMS __user *user_params)
{
	int err;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s get_params()\n",
			 __FILE__,__LINE__, info->device_name);

	mutex_lock(&info->port.mutex);
	COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
	mutex_unlock(&info->port.mutex);
	if (err) {
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk( "%s(%d):%s get_params() user buffer copy failed\n",
				__FILE__,__LINE__,info->device_name);
		return -EFAULT;
	}

	return 0;
}

/* Install new device parameters from userspace and reprogram hardware. */
static int set_params(SLMP_INFO * info, MGSL_PARAMS __user *new_params)
{
	unsigned long flags;
	MGSL_PARAMS tmp_params;
	int err;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s set_params\n",
			__FILE__,__LINE__,info->device_name );

	COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
	if (err) {
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk( "%s(%d):%s set_params() user buffer copy failed\n",
				__FILE__,__LINE__,info->device_name);
		return -EFAULT;
	}

	mutex_lock(&info->port.mutex);
	spin_lock_irqsave(&info->lock,flags);
	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
	spin_unlock_irqrestore(&info->lock,flags);

	change_params(info);
	mutex_unlock(&info->port.mutex);

	return 0;
}

/* Copy the transmit idle mode to userspace. */
static int get_txidle(SLMP_INFO * info, int __user *idle_mode)
{
	int err;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s get_txidle()=%d\n",
			 __FILE__,__LINE__, info->device_name, info->idle_mode);

	COPY_TO_USER(err,idle_mode,
&info->idle_mode, sizeof(int));
	if (err) {
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk( "%s(%d):%s get_txidle() user buffer copy failed\n",
				__FILE__,__LINE__,info->device_name);
		return -EFAULT;
	}

	return 0;
}

/* Set the transmit idle mode and program it into the hardware. */
static int set_txidle(SLMP_INFO * info, int idle_mode)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s set_txidle(%d)\n",
			__FILE__,__LINE__,info->device_name, idle_mode );

	spin_lock_irqsave(&info->lock,flags);
	info->idle_mode = idle_mode;
	tx_set_idle( info );
	spin_unlock_irqrestore(&info->lock,flags);

	return 0;
}

/* Enable or disable the transmitter (idempotent). */
static int tx_enable(SLMP_INFO * info, int enable)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s tx_enable(%d)\n",
			__FILE__,__LINE__,info->device_name, enable);

	spin_lock_irqsave(&info->lock,flags);
	if ( enable ) {
		if ( !info->tx_enabled ) {
			tx_start(info);
		}
	} else {
		if ( info->tx_enabled )
			tx_stop(info);
	}
	spin_unlock_irqrestore(&info->lock,flags);

	return 0;
}

/* abort send HDLC frame
 */
static int tx_abort(SLMP_INFO * info)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s tx_abort()\n",
			__FILE__,__LINE__,info->device_name);

	spin_lock_irqsave(&info->lock,flags);
	if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC ) {
		/* switch from underrun to idle interrupt so the normal
		 * end-of-frame path completes the abort
		 */
		info->ie1_value &= ~UDRN;
		info->ie1_value |= IDLE;
		write_reg(info, IE1, info->ie1_value);	/* disable tx status interrupts */
		write_reg(info, SR1, (unsigned char)(IDLE + UDRN));	/* clear pending */

		write_reg(info, TXDMA + DSR, 0);		/* disable DMA channel */
		write_reg(info, TXDMA + DCMD, SWABORT);	/* reset/init DMA channel */

		write_reg(info, CMD, TXABORT);
	}
	spin_unlock_irqrestore(&info->lock,flags);

	return 0;
}

/* Enable or disable the receiver (idempotent). */
static int rx_enable(SLMP_INFO * info, int enable)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s rx_enable(%d)\n",
			__FILE__,__LINE__,info->device_name,enable);

	spin_lock_irqsave(&info->lock,flags);
	if ( enable ) {
		if ( !info->rx_enabled )
			rx_start(info);
	} else {
		if ( info->rx_enabled )
			rx_stop(info);
	}
	spin_unlock_irqrestore(&info->lock,flags);

	return 0;
}

/* wait for specified event to occur
 */
static int wait_mgsl_event(SLMP_INFO * info, int __user *mask_ptr)
{
	unsigned long flags;
	int s;
	int rc=0;
	struct mgsl_icount cprev, cnow;
	int events;
	int mask;
	struct	_input_signal_events oldsigs, newsigs;
	DECLARE_WAITQUEUE(wait, current);

	COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
	if (rc) {
		return  -EFAULT;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s wait_mgsl_event(%d)\n",
			__FILE__,__LINE__,info->device_name,mask);

	spin_lock_irqsave(&info->lock,flags);

	/* return immediately if state matches requested events */
	get_signals(info);
	s = info->serial_signals;

	events = mask &
		( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
		  ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
		  ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
		  ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
	if (events) {
		spin_unlock_irqrestore(&info->lock,flags);
		goto exit;
	}

	/* save current irq counts */
	cprev = info->icount;
	oldsigs = info->input_signal_events;

	/* enable hunt and idle irqs if needed */
	if (mask & (MgslEvent_ExitHuntMode+MgslEvent_IdleReceived)) {
		unsigned char oldval = info->ie1_value;
		unsigned char newval = oldval +
			 (mask & MgslEvent_ExitHuntMode ? FLGD:0) +
			 (mask & MgslEvent_IdleReceived ? IDLD:0);
		if ( oldval != newval ) {
			info->ie1_value = newval;
			write_reg(info, IE1, info->ie1_value);
		}
	}

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&info->event_wait_q, &wait);

	spin_unlock_irqrestore(&info->lock,flags);

	for(;;) {
		schedule();
		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		/* get current irq counts */
		spin_lock_irqsave(&info->lock,flags);
		cnow = info->icount;
		newsigs = info->input_signal_events;
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&info->lock,flags);

		/* if no change, wait aborted for some reason */
		if (newsigs.dsr_up   == oldsigs.dsr_up   &&
		    newsigs.dsr_down == oldsigs.dsr_down &&
		    newsigs.dcd_up   == oldsigs.dcd_up   &&
		    newsigs.dcd_down == oldsigs.dcd_down &&
		    newsigs.cts_up   == oldsigs.cts_up   &&
		    newsigs.cts_down == oldsigs.cts_down &&
		    newsigs.ri_up    == oldsigs.ri_up    &&
		    newsigs.ri_down  == oldsigs.ri_down  &&
		    cnow.exithunt    == cprev.exithunt   &&
		    cnow.rxidle      == cprev.rxidle) {
			rc = -EIO;
			break;
		}

		/* translate counter deltas into the caller's event mask */
		events = mask &
			( (newsigs.dsr_up   != oldsigs.dsr_up   ? MgslEvent_DsrActive:0)   +
			  (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
			  (newsigs.dcd_up   != oldsigs.dcd_up   ? MgslEvent_DcdActive:0)   +
			  (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
			  (newsigs.cts_up   != oldsigs.cts_up   ? MgslEvent_CtsActive:0)   +
			  (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
			  (newsigs.ri_up    != oldsigs.ri_up    ? MgslEvent_RiActive:0)    +
			  (newsigs.ri_down  != oldsigs.ri_down  ? MgslEvent_RiInactive:0)  +
			  (cnow.exithunt    != cprev.exithunt   ? MgslEvent_ExitHuntMode:0) +
			  (cnow.rxidle      != cprev.rxidle     ? MgslEvent_IdleReceived:0) );
		if (events)
			break;

		cprev = cnow;
		oldsigs = newsigs;
	}

	remove_wait_queue(&info->event_wait_q, &wait);
	set_current_state(TASK_RUNNING);

	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
		spin_lock_irqsave(&info->lock,flags);
		if (!waitqueue_active(&info->event_wait_q)) {
			/* disable enable exit hunt mode/idle rcvd IRQs */
			info->ie1_value &= ~(FLGD|IDLD);
			write_reg(info, IE1, info->ie1_value);
		}
		spin_unlock_irqrestore(&info->lock,flags);
	}
exit:
	if ( rc == 0 )
		PUT_USER(rc, events, mask_ptr);

	return rc;
}

/* Sleep until one of the modem inputs selected by TIOCM_* bits in arg
 * changes state (counter-based, so no transitions are missed).
 */
static int modem_input_wait(SLMP_INFO *info,int arg)
{
	unsigned long flags;
	int rc;
	struct mgsl_icount cprev, cnow;
	DECLARE_WAITQUEUE(wait, current);

	/* save current irq counts */
	spin_lock_irqsave(&info->lock,flags);
	cprev = info->icount;
	add_wait_queue(&info->status_event_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irqrestore(&info->lock,flags);

	for(;;) {
		schedule();
		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		/* get new irq counts */
		spin_lock_irqsave(&info->lock,flags);
		cnow = info->icount;
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&info->lock,flags);

		/* if no change, wait aborted for some reason */
		if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
		    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
			rc = -EIO;
			break;
		}

		/* check for change in caller specified modem input */
		if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
		    (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
		    (arg & TIOCM_CD  && cnow.dcd != cprev.dcd) ||
		    (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
			rc = 0;
			break;
		}

		cprev = cnow;
	}
	remove_wait_queue(&info->status_event_wait_q, &wait);
	set_current_state(TASK_RUNNING);
	return rc;
}

/* return the state of the serial control and status signals
 */
static int tiocmget(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned int result;
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);
	get_signals(info);
	spin_unlock_irqrestore(&info->lock,flags);

	result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS : 0) |
		 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR : 0) |
		 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR : 0) |
		 ((info->serial_signals & SerialSignal_RI)  ? TIOCM_RNG : 0) |
		 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR : 0) |
		 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS : 0);

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s tiocmget() value=%08X\n",
			 __FILE__,__LINE__, info->device_name, result );
	return result;
}

/* set modem control signals (DTR/RTS)
 */
static int tiocmset(struct tty_struct *tty,
		    unsigned int set, unsigned int clear)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s tiocmset(%x,%x)\n",
			__FILE__,__LINE__,info->device_name, set, clear);

	if (set & TIOCM_RTS)
		info->serial_signals |= SerialSignal_RTS;
	if (set & TIOCM_DTR)
		info->serial_signals |= SerialSignal_DTR;
	if (clear & TIOCM_RTS)
		info->serial_signals &= ~SerialSignal_RTS;
	if (clear & TIOCM_DTR)
		info->serial_signals &= ~SerialSignal_DTR;

	spin_lock_irqsave(&info->lock,flags);
	set_signals(info);
	spin_unlock_irqrestore(&info->lock,flags);

	return 0;
}

/* tty_port callback: report whether carrier (DCD) is currently raised. */
static int carrier_raised(struct tty_port *port)
{
	SLMP_INFO *info = container_of(port, SLMP_INFO, port);
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);
	get_signals(info);
	spin_unlock_irqrestore(&info->lock,flags);

	return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
}

/* tty_port callback: assert or drop DTR and RTS together. */
static void dtr_rts(struct tty_port *port, int on)
{
	SLMP_INFO *info = container_of(port, SLMP_INFO, port);
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);
	if (on)
		info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
	else
		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
	set_signals(info);
	spin_unlock_irqrestore(&info->lock,flags);
}

/* Block the current process until the specified port is ready to open.
 */
static int block_til_ready(struct tty_struct *tty, struct file *filp,
			   SLMP_INFO *info)
{
	DECLARE_WAITQUEUE(wait, current);
	int		retval;
	bool		do_clocal = false;
	unsigned long	flags;
	int		cd;
	struct tty_port *port = &info->port;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s block_til_ready()\n",
			 __FILE__,__LINE__, tty->driver->name );

	if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
		/* nonblock mode is set or port is not enabled */
		/* just verify that callout device is not active */
		port->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}

	if (tty->termios.c_cflag & CLOCAL)
		do_clocal = true;

	/* Wait for carrier detect and the line to become
	 * free (i.e., not in use by the callout).  While we are in
	 * this loop, port->count is dropped by one, so that
	 * close() knows when to free things.  We restore it upon
	 * exit, either normal or abnormal.
	 */

	retval = 0;
	add_wait_queue(&port->open_wait, &wait);

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s block_til_ready() before block, count=%d\n",
			 __FILE__,__LINE__, tty->driver->name, port->count );

	spin_lock_irqsave(&info->lock, flags);
	port->count--;
	spin_unlock_irqrestore(&info->lock, flags);
	port->blocked_open++;

	while (1) {
		if (C_BAUD(tty) && test_bit(ASYNCB_INITIALIZED, &port->flags))
			tty_port_raise_dtr_rts(port);

		set_current_state(TASK_INTERRUPTIBLE);

		if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
			retval = (port->flags & ASYNC_HUP_NOTIFY) ?
					-EAGAIN : -ERESTARTSYS;
			break;
		}

		cd = tty_port_carrier_raised(port);

		if (!(port->flags & ASYNC_CLOSING) && (do_clocal || cd))
			break;

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}

		if (debug_level >= DEBUG_LEVEL_INFO)
			printk("%s(%d):%s block_til_ready() count=%d\n",
				 __FILE__,__LINE__, tty->driver->name, port->count );

		/* drop the tty lock while sleeping so close()/hangup() can run */
		tty_unlock(tty);
		schedule();
		tty_lock(tty);
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&port->open_wait, &wait);

	if (!tty_hung_up_p(filp))
		port->count++;
	port->blocked_open--;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s block_til_ready() after, count=%d\n",
			 __FILE__,__LINE__, tty->driver->name, port->count );

	if (!retval)
		port->flags |= ASYNC_NORMAL_ACTIVE;

	return retval;
}

/* Partition a port's quarter of shared adapter memory into transmit and
 * receive DMA buffers plus the descriptor list, then allocate them.
 */
static int alloc_dma_bufs(SLMP_INFO *info)
{
	unsigned short BuffersPerFrame;
	unsigned short BufferCount;

	// Force allocation to start at 64K boundary for each port.
	// This is necessary because *all* buffer descriptors for a port
	// *must* be in the same 64K block. All descriptors on a port
	// share a common 'base' address (upper 8 bits of 24 bits) programmed
	// into the CBP register.
	info->port_array[0]->last_mem_alloc = (SCA_MEM_SIZE/4) * info->port_num;

	/* Calculate the number of DMA buffers necessary to hold the */
	/* largest allowable frame size. Note: If the max frame size is */
	/* not an even multiple of the DMA buffer size then we need to */
	/* round the buffer count per frame up one. */

	BuffersPerFrame = (unsigned short)(info->max_frame_size/SCABUFSIZE);
	if ( info->max_frame_size % SCABUFSIZE )
		BuffersPerFrame++;

	/* calculate total number of data buffers (SCABUFSIZE) possible
	 * in one ports memory (SCA_MEM_SIZE/4) after allocating memory
	 * for the descriptor list (BUFFERLISTSIZE).
*/ BufferCount = (SCA_MEM_SIZE/4 - BUFFERLISTSIZE)/SCABUFSIZE; /* limit number of buffers to maximum amount of descriptors */ if (BufferCount > BUFFERLISTSIZE/sizeof(SCADESC)) BufferCount = BUFFERLISTSIZE/sizeof(SCADESC); /* use enough buffers to transmit one max size frame */ info->tx_buf_count = BuffersPerFrame + 1; /* never use more than half the available buffers for transmit */ if (info->tx_buf_count > (BufferCount/2)) info->tx_buf_count = BufferCount/2; if (info->tx_buf_count > SCAMAXDESC) info->tx_buf_count = SCAMAXDESC; /* use remaining buffers for receive */ info->rx_buf_count = BufferCount - info->tx_buf_count; if (info->rx_buf_count > SCAMAXDESC) info->rx_buf_count = SCAMAXDESC; if ( debug_level >= DEBUG_LEVEL_INFO ) printk("%s(%d):%s Allocating %d TX and %d RX DMA buffers.\n", __FILE__,__LINE__, info->device_name, info->tx_buf_count,info->rx_buf_count); if ( alloc_buf_list( info ) < 0 || alloc_frame_bufs(info, info->rx_buf_list, info->rx_buf_list_ex, info->rx_buf_count) < 0 || alloc_frame_bufs(info, info->tx_buf_list, info->tx_buf_list_ex, info->tx_buf_count) < 0 || alloc_tmp_rx_buf(info) < 0 ) { printk("%s(%d):%s Can't allocate DMA buffer memory\n", __FILE__,__LINE__, info->device_name); return -ENOMEM; } rx_reset_buffers( info ); return 0; } /* Allocate DMA buffers for the transmit and receive descriptor lists. */ static int alloc_buf_list(SLMP_INFO *info) { unsigned int i; /* build list in adapter shared memory */ info->buffer_list = info->memory_base + info->port_array[0]->last_mem_alloc; info->buffer_list_phys = info->port_array[0]->last_mem_alloc; info->port_array[0]->last_mem_alloc += BUFFERLISTSIZE; memset(info->buffer_list, 0, BUFFERLISTSIZE); /* Save virtual address pointers to the receive and */ /* transmit buffer lists. (Receive 1st). These pointers will */ /* be used by the processor to access the lists. 
*/ info->rx_buf_list = (SCADESC *)info->buffer_list; info->tx_buf_list = (SCADESC *)info->buffer_list; info->tx_buf_list += info->rx_buf_count; /* Build links for circular buffer entry lists (tx and rx) * * Note: links are physical addresses read by the SCA device * to determine the next buffer entry to use. */ for ( i = 0; i < info->rx_buf_count; i++ ) { /* calculate and store physical address of this buffer entry */ info->rx_buf_list_ex[i].phys_entry = info->buffer_list_phys + (i * SCABUFSIZE); /* calculate and store physical address of */ /* next entry in cirular list of entries */ info->rx_buf_list[i].next = info->buffer_list_phys; if ( i < info->rx_buf_count - 1 ) info->rx_buf_list[i].next += (i + 1) * sizeof(SCADESC); info->rx_buf_list[i].length = SCABUFSIZE; } for ( i = 0; i < info->tx_buf_count; i++ ) { /* calculate and store physical address of this buffer entry */ info->tx_buf_list_ex[i].phys_entry = info->buffer_list_phys + ((info->rx_buf_count + i) * sizeof(SCADESC)); /* calculate and store physical address of */ /* next entry in cirular list of entries */ info->tx_buf_list[i].next = info->buffer_list_phys + info->rx_buf_count * sizeof(SCADESC); if ( i < info->tx_buf_count - 1 ) info->tx_buf_list[i].next += (i + 1) * sizeof(SCADESC); } return 0; } /* Allocate the frame DMA buffers used by the specified buffer list. 
 */
static int alloc_frame_bufs(SLMP_INFO *info, SCADESC *buf_list,SCADESC_EX *buf_list_ex,int count)
{
	int i;
	unsigned long phys_addr;

	/* carve 'count' SCABUFSIZE data buffers out of the port's share of
	 * adapter shared memory; record both virtual (CPU) and split
	 * physical (SCA DMA: 16-bit pointer + 8-bit base) addresses */
	for ( i = 0; i < count; i++ ) {
		buf_list_ex[i].virt_addr = info->memory_base +
			info->port_array[0]->last_mem_alloc;
		phys_addr = info->port_array[0]->last_mem_alloc;
		info->port_array[0]->last_mem_alloc += SCABUFSIZE;

		/* SCA device addresses are 24 bits: low 16 in buf_ptr,
		 * upper 8 in buf_base */
		buf_list[i].buf_ptr = (unsigned short)phys_addr;
		buf_list[i].buf_base = (unsigned char)(phys_addr >> 16);
	}

	return 0;
}

/* "Free" the DMA buffers.
 * The buffers live in adapter shared memory, not the kernel heap, so
 * only the pointers are cleared here — nothing is actually released.
 */
static void free_dma_bufs(SLMP_INFO *info)
{
	info->buffer_list = NULL;
	info->rx_buf_list = NULL;
	info->tx_buf_list = NULL;
}

/* allocate buffer large enough to hold max_frame_size.
 * This buffer is used to pass an assembled frame to the line discipline.
 */
static int alloc_tmp_rx_buf(SLMP_INFO *info)
{
	info->tmp_rx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
	if (info->tmp_rx_buf == NULL)
		return -ENOMEM;
	/* unused flag buffer to satisfy receive_buf calling interface */
	info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
	if (!info->flag_buf) {
		kfree(info->tmp_rx_buf);
		info->tmp_rx_buf = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* Release the temporary receive buffers allocated by alloc_tmp_rx_buf(). */
static void free_tmp_rx_buf(SLMP_INFO *info)
{
	kfree(info->tmp_rx_buf);
	info->tmp_rx_buf = NULL;
	kfree(info->flag_buf);
	info->flag_buf = NULL;
}

/* Claim and map all PCI memory regions used by the adapter
 * (shared memory, LCR, SCA registers, status/control registers)
 * and run the shared memory diagnostic.
 *
 * Returns 0 on success, -ENODEV on any failure (partially acquired
 * resources are released via release_resources()).
 */
static int claim_resources(SLMP_INFO *info)
{
	if (request_mem_region(info->phys_memory_base,SCA_MEM_SIZE,"synclinkmp") == NULL) {
		printk( "%s(%d):%s mem addr conflict, Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_memory_base);
		info->init_error = DiagStatus_AddressConflict;
		goto errout;
	}
	else
		info->shared_mem_requested = true;

	if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclinkmp") == NULL) {
		printk( "%s(%d):%s lcr mem addr conflict, Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_lcr_base);
		info->init_error = DiagStatus_AddressConflict;
		goto errout;
	}
	else
		info->lcr_mem_requested = true;

	if (request_mem_region(info->phys_sca_base + info->sca_offset,SCA_BASE_SIZE,"synclinkmp") == NULL) {
		printk( "%s(%d):%s sca mem addr conflict, Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_sca_base);
		info->init_error = DiagStatus_AddressConflict;
		goto errout;
	}
	else
		info->sca_base_requested = true;

	if (request_mem_region(info->phys_statctrl_base + info->statctrl_offset,SCA_REG_SIZE,"synclinkmp") == NULL) {
		printk( "%s(%d):%s stat/ctrl mem addr conflict, Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_statctrl_base);
		info->init_error = DiagStatus_AddressConflict;
		goto errout;
	}
	else
		info->sca_statctrl_requested = true;

	info->memory_base = ioremap_nocache(info->phys_memory_base,
								SCA_MEM_SIZE);
	if (!info->memory_base) {
		printk( "%s(%d):%s Can't map shared memory, MemAddr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_memory_base );
		info->init_error = DiagStatus_CantAssignPciResources;
		goto errout;
	}

	/* phys_lcr_base was rounded down to a page boundary in alloc_dev();
	 * map the full page, then advance the virtual pointer by the saved
	 * in-page offset to reach the real registers */
	info->lcr_base = ioremap_nocache(info->phys_lcr_base, PAGE_SIZE);
	if (!info->lcr_base) {
		printk( "%s(%d):%s Can't map LCR memory, MemAddr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_lcr_base );
		info->init_error = DiagStatus_CantAssignPciResources;
		goto errout;
	}
	info->lcr_base += info->lcr_offset;

	info->sca_base = ioremap_nocache(info->phys_sca_base, PAGE_SIZE);
	if (!info->sca_base) {
		printk( "%s(%d):%s Can't map SCA memory, MemAddr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_sca_base );
		info->init_error = DiagStatus_CantAssignPciResources;
		goto errout;
	}
	info->sca_base += info->sca_offset;

	info->statctrl_base = ioremap_nocache(info->phys_statctrl_base,
								PAGE_SIZE);
	if (!info->statctrl_base) {
		printk( "%s(%d):%s Can't map SCA Status/Control memory, MemAddr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_statctrl_base );
		info->init_error = DiagStatus_CantAssignPciResources;
		goto errout;
	}
	info->statctrl_base += info->statctrl_offset;

	if ( !memory_test(info) ) {
		printk( "%s(%d):Shared Memory Test failed for device %s MemAddr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_memory_base );
		info->init_error = DiagStatus_MemoryError;
		goto errout;
	}

	return 0;

errout:
	release_resources( info );
	return -ENODEV;
}

/* Undo everything claim_resources() (and the IRQ request) did.
 * Safe to call on a partially initialized port: each release is
 * guarded by its corresponding "requested"/non-NULL flag.
 */
static void release_resources(SLMP_INFO *info)
{
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):%s release_resources() entry\n",
			__FILE__,__LINE__,info->device_name );

	if ( info->irq_requested ) {
		free_irq(info->irq_level, info);
		info->irq_requested = false;
	}

	if ( info->shared_mem_requested ) {
		release_mem_region(info->phys_memory_base,SCA_MEM_SIZE);
		info->shared_mem_requested = false;
	}
	if ( info->lcr_mem_requested ) {
		release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
		info->lcr_mem_requested = false;
	}
	if ( info->sca_base_requested ) {
		release_mem_region(info->phys_sca_base + info->sca_offset,SCA_BASE_SIZE);
		info->sca_base_requested = false;
	}
	if ( info->sca_statctrl_requested ) {
		release_mem_region(info->phys_statctrl_base + info->statctrl_offset,SCA_REG_SIZE);
		info->sca_statctrl_requested = false;
	}

	/* unmap using the original (un-offset) addresses returned by
	 * ioremap; the stored pointers were advanced by the offsets */
	if (info->memory_base){
		iounmap(info->memory_base);
		info->memory_base = NULL;
	}

	if (info->sca_base) {
		iounmap(info->sca_base - info->sca_offset);
		info->sca_base=NULL;
	}

	if (info->statctrl_base) {
		iounmap(info->statctrl_base - info->statctrl_offset);
		info->statctrl_base=NULL;
	}

	if (info->lcr_base){
		iounmap(info->lcr_base - info->lcr_offset);
		info->lcr_base = NULL;
	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):%s release_resources() exit\n",
			__FILE__,__LINE__,info->device_name );
}

/* Add the specified device instance data structure to the
 * global linked list of devices and increment the device count.
 */
static void add_device(SLMP_INFO *info)
{
	info->next_device = NULL;
	info->line = synclinkmp_device_count;
	sprintf(info->device_name,"ttySLM%dp%d",info->adapter_num,info->port_num);

	/* apply a per-line max_frame_size override from the module
	 * parameter array, if one was given */
	if (info->line < MAX_DEVICES) {
		if (maxframe[info->line])
			info->max_frame_size = maxframe[info->line];
	}

	synclinkmp_device_count++;

	/* append to the end of the global singly-linked device list */
	if ( !synclinkmp_device_list )
		synclinkmp_device_list = info;
	else {
		SLMP_INFO *current_dev = synclinkmp_device_list;
		while( current_dev->next_device )
			current_dev = current_dev->next_device;
		current_dev->next_device = info;
	}

	/* clamp max_frame_size to the supported range */
	if ( info->max_frame_size < 4096 )
		info->max_frame_size = 4096;
	else if ( info->max_frame_size > 65535 )
		info->max_frame_size = 65535;

	printk( "SyncLink MultiPort %s: "
		"Mem=(%08x %08X %08x %08X) IRQ=%d MaxFrameSize=%u\n",
		info->device_name,
		info->phys_sca_base,
		info->phys_memory_base,
		info->phys_statctrl_base,
		info->phys_lcr_base,
		info->irq_level,
		info->max_frame_size );

#if SYNCLINK_GENERIC_HDLC
	hdlcdev_init(info);
#endif
}

static const struct tty_port_operations port_ops = {
	.carrier_raised = carrier_raised,
	.dtr_rts = dtr_rts,
};

/* Allocate and initialize a device instance structure
 *
 * adapter_num	index of the adapter this port belongs to
 * port_num	index of the port on that adapter (0..SCA_MAX_PORTS-1)
 * pdev		PCI device supplying IRQ and BAR addresses
 *
 * Return Value:	pointer to SLMP_INFO if success, otherwise NULL
 */
static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
{
	SLMP_INFO *info;

	info = kzalloc(sizeof(SLMP_INFO), GFP_KERNEL);

	if (!info) {
		printk("%s(%d) Error can't allocate device instance data for adapter %d, port %d\n",
			__FILE__,__LINE__, adapter_num, port_num);
	} else {
		tty_port_init(&info->port);
		info->port.ops = &port_ops;
		info->magic = MGSL_MAGIC;
		INIT_WORK(&info->task, bh_handler);
		info->max_frame_size = 4096;
		info->port.close_delay = 5*HZ/10;
		info->port.closing_wait = 30*HZ;
		init_waitqueue_head(&info->status_event_wait_q);
		init_waitqueue_head(&info->event_wait_q);
		spin_lock_init(&info->netlock);
		memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
		info->idle_mode = HDLC_TXIDLE_FLAGS;
		info->adapter_num = adapter_num;
		info->port_num = port_num;

		/* Copy configuration info to device instance data */
		info->irq_level = pdev->irq;
		info->phys_lcr_base = pci_resource_start(pdev,0);
		info->phys_sca_base = pci_resource_start(pdev,2);
		info->phys_memory_base = pci_resource_start(pdev,3);
		info->phys_statctrl_base = pci_resource_start(pdev,4);

		/* Because ioremap only works on page boundaries we must map
		 * a larger area than is actually implemented for the LCR
		 * memory range. We map a full page starting at the page boundary.
		 */
		info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
		info->phys_lcr_base &= ~(PAGE_SIZE-1);

		info->sca_offset = info->phys_sca_base & (PAGE_SIZE-1);
		info->phys_sca_base &= ~(PAGE_SIZE-1);

		info->statctrl_offset = info->phys_statctrl_base & (PAGE_SIZE-1);
		info->phys_statctrl_base &= ~(PAGE_SIZE-1);

		info->bus_type = MGSL_BUS_TYPE_PCI;
		info->irq_flags = IRQF_SHARED;

		setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
		setup_timer(&info->status_timer, status_timeout,
				(unsigned long)info);

		/* Store the PCI9050 misc control register value because a flaw
		 * in the PCI9050 prevents LCR registers from being read if
		 * BIOS assigns an LCR base address with bit 7 set.
		 *
		 * Only the misc control register is accessed for which only
		 * write access is needed, so set an initial value and change
		 * bits to the device instance data as we write the value
		 * to the actual misc control register.
		 */
		info->misc_ctrl_value = 0x087e4546;

		/* initial port state is unknown - if startup errors
		 * occur, init_error will be set to indicate the
		 * problem. Once the port is fully initialized,
		 * this value will be set to 0 to indicate the
		 * port is available.
		 */
		info->init_error = -1;
	}

	return info;
}

/* Allocate, register and initialize all SCA_MAX_PORTS ports of one
 * adapter: shared resources (memory maps, IRQ, locks) are claimed by
 * port 0 and copied to the other ports.
 */
static void device_init(int adapter_num, struct pci_dev *pdev)
{
	SLMP_INFO *port_array[SCA_MAX_PORTS];
	int port;

	/* allocate device instances for up to SCA_MAX_PORTS devices */
	for ( port = 0; port < SCA_MAX_PORTS; ++port ) {
		port_array[port] = alloc_dev(adapter_num,port,pdev);
		if( port_array[port] == NULL ) {
			/* unwind previously allocated ports */
			for (--port; port >= 0; --port) {
				tty_port_destroy(&port_array[port]->port);
				kfree(port_array[port]);
			}
			return;
		}
	}

	/* give copy of port_array to all ports and add to device list  */
	for ( port = 0; port < SCA_MAX_PORTS; ++port ) {
		memcpy(port_array[port]->port_array,port_array,sizeof(port_array));
		add_device( port_array[port] );
		spin_lock_init(&port_array[port]->lock);
	}

	/* Allocate and claim adapter resources */
	if ( !claim_resources(port_array[0]) ) {

		alloc_dma_bufs(port_array[0]);

		/* copy resource information from first port to others */
		for ( port = 1; port < SCA_MAX_PORTS; ++port ) {
			port_array[port]->lock = port_array[0]->lock;
			port_array[port]->irq_level = port_array[0]->irq_level;
			port_array[port]->memory_base = port_array[0]->memory_base;
			port_array[port]->sca_base = port_array[0]->sca_base;
			port_array[port]->statctrl_base = port_array[0]->statctrl_base;
			port_array[port]->lcr_base = port_array[0]->lcr_base;
			alloc_dma_bufs(port_array[port]);
		}

		/* one shared interrupt for the whole adapter, owned by port 0 */
		if ( request_irq(port_array[0]->irq_level,
					synclinkmp_interrupt,
					port_array[0]->irq_flags,
					port_array[0]->device_name,
					port_array[0]) < 0 ) {
			printk( "%s(%d):%s Can't request interrupt, IRQ=%d\n",
				__FILE__,__LINE__,
				port_array[0]->device_name,
				port_array[0]->irq_level );
		}
		else {
			port_array[0]->irq_requested = true;
			adapter_test(port_array[0]);
		}
	}
}

static const struct tty_operations ops = {
	.install = install,
	.open = open,
	.close = close,
	.write = write,
	.put_char = put_char,
	.flush_chars = flush_chars,
	.write_room = write_room,
	.chars_in_buffer = chars_in_buffer,
	.flush_buffer = flush_buffer,
	.ioctl = ioctl,
	.throttle = throttle,
	.unthrottle = unthrottle,
	.send_xchar = send_xchar,
	.break_ctl = set_break,
	.wait_until_sent = wait_until_sent,
	.set_termios = set_termios,
	.stop = tx_hold,
	.start = tx_release,
	.hangup = hangup,
	.tiocmget = tiocmget,
	.tiocmset = tiocmset,
	.get_icount = get_icount,
	.proc_fops = &synclinkmp_proc_fops,
};

/* Module unload / init-failure teardown: unregister the tty driver,
 * reset every port, free per-port resources (adapter-wide resources
 * are released by port 0), and unregister the PCI driver.
 */
static void synclinkmp_cleanup(void)
{
	int rc;
	SLMP_INFO *info;
	SLMP_INFO *tmp;

	printk("Unloading %s %s\n", driver_name, driver_version);

	if (serial_driver) {
		if ((rc = tty_unregister_driver(serial_driver)))
			printk("%s(%d) failed to unregister tty driver err=%d\n",
			       __FILE__,__LINE__,rc);
		put_tty_driver(serial_driver);
	}

	/* reset devices */
	info = synclinkmp_device_list;
	while(info) {
		reset_port(info);
		info = info->next_device;
	}

	/* release devices */
	info = synclinkmp_device_list;
	while(info) {
#if SYNCLINK_GENERIC_HDLC
		hdlcdev_exit(info);
#endif
		free_dma_bufs(info);
		free_tmp_rx_buf(info);
		/* port 0 owns the adapter-wide resources */
		if ( info->port_num == 0 ) {
			if (info->sca_base)
				write_reg(info, LPR, 1); /* set low power mode */
			release_resources(info);
		}
		tmp = info;
		info = info->next_device;
		tty_port_destroy(&tmp->port);
		kfree(tmp);
	}

	pci_unregister_driver(&synclinkmp_pci_driver);
}

/* Driver initialization entry point.
*/ static int __init synclinkmp_init(void) { int rc; if (break_on_load) { synclinkmp_get_text_ptr(); BREAKPOINT(); } printk("%s %s\n", driver_name, driver_version); if ((rc = pci_register_driver(&synclinkmp_pci_driver)) < 0) { printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc); return rc; } serial_driver = alloc_tty_driver(128); if (!serial_driver) { rc = -ENOMEM; goto error; } /* Initialize the tty_driver structure */ serial_driver->driver_name = "synclinkmp"; serial_driver->name = "ttySLM"; serial_driver->major = ttymajor; serial_driver->minor_start = 64; serial_driver->type = TTY_DRIVER_TYPE_SERIAL; serial_driver->subtype = SERIAL_TYPE_NORMAL; serial_driver->init_termios = tty_std_termios; serial_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; serial_driver->init_termios.c_ispeed = 9600; serial_driver->init_termios.c_ospeed = 9600; serial_driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(serial_driver, &ops); if ((rc = tty_register_driver(serial_driver)) < 0) { printk("%s(%d):Couldn't register serial driver\n", __FILE__,__LINE__); put_tty_driver(serial_driver); serial_driver = NULL; goto error; } printk("%s %s, tty major#%d\n", driver_name, driver_version, serial_driver->major); return 0; error: synclinkmp_cleanup(); return rc; } static void __exit synclinkmp_exit(void) { synclinkmp_cleanup(); } module_init(synclinkmp_init); module_exit(synclinkmp_exit); /* Set the port for internal loopback mode. * The TxCLK and RxCLK signals are generated from the BRG and * the TxD is looped back to the RxD internally. 
 */
static void enable_loopback(SLMP_INFO *info, int enable)
{
	if (enable) {
		/* MD2 (Mode Register 2)
		 * 01..00  CNCT<1..0> Channel Connection 11=Local Loopback
		 */
		write_reg(info, MD2,
			(unsigned char)(read_reg(info, MD2) | (BIT1 + BIT0)));

		/* degate external TxC clock source */
		info->port_array[0]->ctrlreg_value |= (BIT0 << (info->port_num * 2));
		write_control_reg(info);

		/* RXS/TXS (Rx/Tx clock source)
		 * 07      Reserved, must be 0
		 * 06..04  Clock Source, 100=BRG
		 * 03..00  Clock Divisor, 0000=1
		 */
		write_reg(info, RXS, 0x40);
		write_reg(info, TXS, 0x40);

	} else {
		/* MD2 (Mode Register 2)
		 * 01..00  CNCT<1..0> Channel connection, 0=normal
		 */
		write_reg(info, MD2,
			(unsigned char)(read_reg(info, MD2) & ~(BIT1 + BIT0)));

		/* RXS/TXS (Rx/Tx clock source)
		 * 07      Reserved, must be 0
		 * 06..04  Clock Source, 000=RxC/TxC Pin
		 * 03..00  Clock Divisor, 0000=1
		 */
		write_reg(info, RXS, 0x00);
		write_reg(info, TXS, 0x00);
	}

	/* set LinkSpeed if available, otherwise default to 2Mbps */
	/* NOTE(review): comment says 2Mbps but the fallback value is
	 * 3686400 — confirm which is intended. */
	if (info->params.clock_speed)
		set_rate(info, info->params.clock_speed);
	else
		set_rate(info, 3686400);
}

/* Set the baud rate register to the desired speed
 *
 *	data_rate	data rate of clock in bits per second
 *			A data rate of 0 disables the AUX clock.
 */
static void set_rate( SLMP_INFO *info, u32 data_rate )
{
	u32 TMCValue;
	unsigned char BRValue;
	u32 Divisor=0;

	/* fBRG = fCLK/(TMC * 2^BR)
	 */
	if (data_rate != 0) {
		Divisor = 14745600/data_rate;
		if (!Divisor)
			Divisor = 1;

		TMCValue = Divisor;

		BRValue = 0;
		if (TMCValue != 1 && TMCValue != 2) {
			/* BRValue of 0 provides 50/50 duty cycle *only* when
			 * TMCValue is 1 or 2. BRValue of 1 to 9 always provides
			 * 50/50 duty cycle.
			 */
			BRValue = 1;
			TMCValue >>= 1;
		}

		/* while TMCValue is too big for TMC register, divide
		 * by 2 and increment BR exponent.
		 */
		for(; TMCValue > 256 && BRValue < 10; BRValue++)
			TMCValue >>= 1;

		/* keep the upper nibble (clock source) and set the
		 * divisor exponent in the low nibble of TXS/RXS */
		write_reg(info, TXS,
			(unsigned char)((read_reg(info, TXS) & 0xf0) | BRValue));
		write_reg(info, RXS,
			(unsigned char)((read_reg(info, RXS) & 0xf0) | BRValue));
		write_reg(info, TMC, (unsigned char)TMCValue);
	}
	else {
		write_reg(info, TXS,0);
		write_reg(info, RXS,0);
		write_reg(info, TMC, 0);
	}
}

/* Disable receiver
 */
static void rx_stop(SLMP_INFO *info)
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):%s rx_stop()\n",
			 __FILE__,__LINE__, info->device_name );

	write_reg(info, CMD, RXRESET);

	info->ie0_value &= ~RXRDYE;
	write_reg(info, IE0, info->ie0_value);	/* disable Rx data interrupts */

	write_reg(info, RXDMA + DSR, 0);	/* disable Rx DMA */
	write_reg(info, RXDMA + DCMD, SWABORT);	/* reset/init Rx DMA */
	write_reg(info, RXDMA + DIR, 0);	/* disable Rx DMA interrupts */

	info->rx_enabled = false;
	info->rx_overflow = false;
}

/* enable the receiver
 */
static void rx_start(SLMP_INFO *info)
{
	int i;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):%s rx_start()\n",
			 __FILE__,__LINE__, info->device_name );

	write_reg(info, CMD, RXRESET);

	if ( info->params.mode == MGSL_MODE_HDLC ) {
		/* HDLC, disabe IRQ on rxdata */
		info->ie0_value &= ~RXRDYE;
		write_reg(info, IE0, info->ie0_value);

		/* Reset all Rx DMA buffers and program rx dma */
		write_reg(info, RXDMA + DSR, 0);		/* disable Rx DMA */
		write_reg(info, RXDMA + DCMD, SWABORT);	/* reset/init Rx DMA */

		for (i = 0; i < info->rx_buf_count; i++) {
			info->rx_buf_list[i].status = 0xff;

			// throttle to 4 shared memory writes at a time to prevent
			// hogging local bus (keep latency time for DMA requests low).
			if (!(i % 4))
				read_status_reg(info);
		}
		info->current_rx_buf = 0;

		/* set current/1st descriptor address */
		write_reg16(info, RXDMA + CDA,
			info->rx_buf_list_ex[0].phys_entry);

		/* set new last rx descriptor address */
		write_reg16(info, RXDMA + EDA,
			info->rx_buf_list_ex[info->rx_buf_count - 1].phys_entry);

		/* set buffer length (shared by all rx dma data buffers) */
		write_reg16(info, RXDMA + BFL, SCABUFSIZE);

		write_reg(info, RXDMA + DIR, 0x60);	/* enable Rx DMA interrupts (EOM/BOF) */
		write_reg(info, RXDMA + DSR, 0xf2);	/* clear Rx DMA IRQs, enable Rx DMA */
	}
	else {
		/* async, enable IRQ on rxdata */
		info->ie0_value |= RXRDYE;
		write_reg(info, IE0, info->ie0_value);
	}

	write_reg(info, CMD, RXENABLE);

	info->rx_overflow = false;
	info->rx_enabled = true;
}

/* Enable the transmitter and send a transmit frame if
 * one is loaded in the DMA buffers.
 */
static void tx_start(SLMP_INFO *info)
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):%s tx_start() tx_count=%d\n",
			 __FILE__,__LINE__, info->device_name,info->tx_count );

	if (!info->tx_enabled ) {
		write_reg(info, CMD, TXRESET);
		write_reg(info, CMD, TXENABLE);
		info->tx_enabled = true;
	}

	if ( info->tx_count ) {

		/* If auto RTS enabled and RTS is inactive, then assert */
		/* RTS and set a flag indicating that the driver should */
		/* negate RTS when the transmission completes. */

		info->drop_rts_on_tx_done = false;

		if (info->params.mode != MGSL_MODE_ASYNC) {
			/* HDLC: program and kick the Tx DMA channel */

			if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
				get_signals( info );
				if ( !(info->serial_signals & SerialSignal_RTS) ) {
					info->serial_signals |= SerialSignal_RTS;
					set_signals( info );
					info->drop_rts_on_tx_done = true;
				}
			}

			write_reg16(info, TRC0,
				(unsigned short)(((tx_negate_fifo_level-1)<<8) + tx_active_fifo_level));

			write_reg(info, TXDMA + DSR, 0); 		/* disable DMA channel */
			write_reg(info, TXDMA + DCMD, SWABORT);	/* reset/init DMA channel */

			/* set TX CDA (current descriptor address) */
			write_reg16(info, TXDMA + CDA,
				info->tx_buf_list_ex[0].phys_entry);

			/* set TX EDA (last descriptor address) */
			write_reg16(info, TXDMA + EDA,
				info->tx_buf_list_ex[info->last_tx_buf].phys_entry);

			/* enable underrun IRQ */
			info->ie1_value &= ~IDLE;
			info->ie1_value |= UDRN;
			write_reg(info, IE1, info->ie1_value);
			write_reg(info, SR1, (unsigned char)(IDLE + UDRN));

			write_reg(info, TXDMA + DIR, 0x40);		/* enable Tx DMA interrupts (EOM) */
			write_reg(info, TXDMA + DSR, 0xf2);		/* clear Tx DMA IRQs, enable Tx DMA */

			/* watchdog in case the frame never completes */
			mod_timer(&info->tx_timer, jiffies +
					msecs_to_jiffies(5000));
		}
		else {
			tx_load_fifo(info);
			/* async, enable IRQ on txdata */
			info->ie0_value |= TXRDYE;
			write_reg(info, IE0, info->ie0_value);
		}

		info->tx_active = true;
	}
}

/* stop the transmitter and DMA
 */
static void tx_stop( SLMP_INFO *info )
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):%s tx_stop()\n",
			 __FILE__,__LINE__, info->device_name );

	del_timer(&info->tx_timer);

	write_reg(info, TXDMA + DSR, 0); 		/* disable DMA channel */
	write_reg(info, TXDMA + DCMD, SWABORT);	/* reset/init DMA channel */

	write_reg(info, CMD, TXRESET);

	info->ie1_value &= ~(UDRN + IDLE);
	write_reg(info, IE1, info->ie1_value);	/* disable tx status interrupts */
	write_reg(info, SR1, (unsigned char)(IDLE + UDRN));	/* clear pending */

	info->ie0_value &= ~TXRDYE;
	write_reg(info, IE0, info->ie0_value);	/* disable tx data interrupts */

	info->tx_enabled = false;
	info->tx_active = false;
}

/* Fill the transmit FIFO until the FIFO is full or
 * there is no more data to load.
 */
static void tx_load_fifo(SLMP_INFO *info)
{
	u8 TwoBytes[2];

	/* do nothing if no tx data available and no XON/XOFF pending */
	if ( !info->tx_count && !info->x_char )
		return;

	/* load the Transmit FIFO until FIFOs full or all data sent */

	while( info->tx_count && (read_reg(info,SR0) & BIT1) ) {

		/* there is more space in the transmit FIFO and */
		/* there is more data in transmit buffer */

		if ( (info->tx_count > 1) && !info->x_char ) {
			/* write 16-bits; tx_get wraps around the circular
			 * tx buffer of size max_frame_size */
 			TwoBytes[0] = info->tx_buf[info->tx_get++];
			if (info->tx_get >= info->max_frame_size)
				info->tx_get -= info->max_frame_size;
			TwoBytes[1] = info->tx_buf[info->tx_get++];
			if (info->tx_get >= info->max_frame_size)
				info->tx_get -= info->max_frame_size;

			/* NOTE(review): type-puns a u8[2] as u16 — relies on
			 * host endianness matching the device's expectation */
			write_reg16(info, TRB, *((u16 *)TwoBytes));

			info->tx_count -= 2;
			info->icount.tx += 2;
		} else {
			/* only 1 byte left to transmit or 1 FIFO slot left */

			if (info->x_char) {
				/* transmit pending high priority char */
				write_reg(info, TRB, info->x_char);
				info->x_char = 0;
			} else {
				write_reg(info, TRB, info->tx_buf[info->tx_get++]);
				if (info->tx_get >= info->max_frame_size)
					info->tx_get -= info->max_frame_size;
				info->tx_count--;
			}
			info->icount.tx++;
		}
	}
}

/* Reset a port to a known state
 */
static void reset_port(SLMP_INFO *info)
{
	if (info->sca_base) {

		tx_stop(info);
		rx_stop(info);

		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
		set_signals(info);

		/* disable all port interrupts */
		info->ie0_value = 0;
		info->ie1_value = 0;
		info->ie2_value = 0;
		write_reg(info, IE0, info->ie0_value);
		write_reg(info, IE1, info->ie1_value);
		write_reg(info, IE2, info->ie2_value);

		write_reg(info, CMD, CHRESET);
	}
}

/* Reset all the ports to a known state.
 */
static void reset_adapter(SLMP_INFO *info)
{
	int i;

	for ( i=0; i < SCA_MAX_PORTS; ++i) {
		if (info->port_array[i])
			reset_port(info->port_array[i]);
	}
}

/* Program port for asynchronous communications.
 */
static void async_mode(SLMP_INFO *info)
{
  	unsigned char RegValue;

	tx_stop(info);
	rx_stop(info);

	/* MD0, Mode Register 0
	 *
	 * 07..05  PRCTL<2..0>, Protocol Mode, 000=async
	 * 04      AUTO, Auto-enable (RTS/CTS/DCD)
	 * 03      Reserved, must be 0
	 * 02      CRCCC, CRC Calculation, 0=disabled
	 * 01..00  STOP<1..0> Stop bits (00=1,10=2)
	 *
	 * 0000 0000
	 */
	RegValue = 0x00;
	if (info->params.stop_bits != 1)
		RegValue |= BIT1;
	write_reg(info, MD0, RegValue);

	/* MD1, Mode Register 1
	 *
	 * 07..06  BRATE<1..0>, bit rate, 00=1/1 01=1/16 10=1/32 11=1/64
	 * 05..04  TXCHR<1..0>, tx char size, 00=8 bits,01=7,10=6,11=5
	 * 03..02  RXCHR<1..0>, rx char size
	 * 01..00  PMPM<1..0>, Parity mode, 00=none 10=even 11=odd
	 *
	 * 0100 0000
	 */
	RegValue = 0x40;
	switch (info->params.data_bits) {
	case 7:
		RegValue |= BIT4 + BIT2;
		break;
	case 6:
		RegValue |= BIT5 + BIT3;
		break;
	case 5:
		RegValue |= BIT5 + BIT4 + BIT3 + BIT2;
		break;
	}
	if (info->params.parity != ASYNC_PARITY_NONE) {
		RegValue |= BIT1;
		if (info->params.parity == ASYNC_PARITY_ODD)
			RegValue |= BIT0;
	}
	write_reg(info, MD1, RegValue);

	/* MD2, Mode Register 2
	 *
	 * 07..02  Reserved, must be 0
	 * 01..00  CNCT<1..0> Channel connection, 00=normal 11=local loopback
	 *
	 * 0000 0000
	 */
	RegValue = 0x00;
	if (info->params.loopback)
		RegValue |= (BIT1 + BIT0);
	write_reg(info, MD2, RegValue);

	/* RXS, Receive clock source
	 *
	 * 07      Reserved, must be 0
	 * 06..04  RXCS<2..0>, clock source, 000=RxC Pin, 100=BRG, 110=DPLL
	 * 03..00  RXBR<3..0>, rate divisor, 0000=1
	 */
	RegValue=BIT6;
	write_reg(info, RXS, RegValue);

	/* TXS, Transmit clock source
	 *
	 * 07      Reserved, must be 0
	 * 06..04  RXCS<2..0>, clock source, 000=TxC Pin, 100=BRG, 110=Receive Clock
	 * 03..00  RXBR<3..0>, rate divisor, 0000=1
	 */
	RegValue=BIT6;
	write_reg(info, TXS, RegValue);

	/* Control Register
	 *
	 * 6,4,2,0  CLKSEL<3..0>, 0 = TcCLK in, 1 = Auxclk out
	 */
	info->port_array[0]->ctrlreg_value |= (BIT0 << (info->port_num * 2));
	write_control_reg(info);

	tx_set_idle(info);

	/* RRC Receive Ready Control 0
	 *
	 * 07..05  Reserved, must be 0
	 * 04..00  RRC<4..0> Rx FIFO trigger active 0x00 = 1 byte
	 */
	write_reg(info, RRC, 0x00);

	/* TRC0 Transmit Ready Control 0
	 *
	 * 07..05  Reserved, must be 0
	 * 04..00  TRC<4..0> Tx FIFO trigger active 0x10 = 16 bytes
	 */
	write_reg(info, TRC0, 0x10);

	/* TRC1 Transmit Ready Control 1
	 *
	 * 07..05  Reserved, must be 0
	 * 04..00  TRC<4..0> Tx FIFO trigger inactive 0x1e = 31 bytes (full-1)
	 */
	write_reg(info, TRC1, 0x1e);

	/* CTL, MSCI control register
	 *
	 * 07..06  Reserved, set to 0
	 * 05      UDRNC, underrun control, 0=abort 1=CRC+flag (HDLC/BSC)
	 * 04      IDLC, idle control, 0=mark 1=idle register
	 * 03      BRK, break, 0=off 1 =on (async)
	 * 02      SYNCLD, sync char load enable (BSC) 1=enabled
	 * 01      GOP, go active on poll (LOOP mode) 1=enabled
	 * 00      RTS, RTS output control, 0=active 1=inactive
	 *
	 * 0001 0001
	 */
	RegValue = 0x10;
	if (!(info->serial_signals & SerialSignal_RTS))
		RegValue |= 0x01;
	write_reg(info, CTL, RegValue);

	/* enable status interrupts */
	info->ie0_value |= TXINTE + RXINTE;
	write_reg(info, IE0, info->ie0_value);

	/* enable break detect interrupt */
	info->ie1_value = BRKD;
	write_reg(info, IE1, info->ie1_value);

	/* enable rx overrun interrupt */
	info->ie2_value = OVRN;
	write_reg(info, IE2, info->ie2_value);

	/* BRG runs at 16x the async data rate */
	set_rate( info, info->params.data_rate * 16 );
}

/* Program the SCA for HDLC communications.
 */
static void hdlc_mode(SLMP_INFO *info)
{
	unsigned char RegValue;
	u32 DpllDivisor;

	// Can't use DPLL because SCA outputs recovered clock on RxC when
	// DPLL mode selected. This causes output contention with RxC receiver.
	// Use of DPLL would require external hardware to disable RxC receiver
	// when DPLL mode selected.
	info->params.flags &= ~(HDLC_FLAG_TXC_DPLL + HDLC_FLAG_RXC_DPLL);

	/* disable DMA interrupts */
	write_reg(info, TXDMA + DIR, 0);
	write_reg(info, RXDMA + DIR, 0);

	/* MD0, Mode Register 0
	 *
	 * 07..05  PRCTL<2..0>, Protocol Mode, 100=HDLC
	 * 04      AUTO, Auto-enable (RTS/CTS/DCD)
	 * 03      Reserved, must be 0
	 * 02      CRCCC, CRC Calculation, 1=enabled
	 * 01      CRC1, CRC selection, 0=CRC-16,1=CRC-CCITT-16
	 * 00      CRC0, CRC initial value, 1 = all 1s
	 *
	 * 1000 0001
	 */
	RegValue = 0x81;
	/* AUTO (BIT4) covers both CTS and DCD auto-enable */
	if (info->params.flags & HDLC_FLAG_AUTO_CTS)
		RegValue |= BIT4;
	if (info->params.flags & HDLC_FLAG_AUTO_DCD)
		RegValue |= BIT4;
	if (info->params.crc_type == HDLC_CRC_16_CCITT)
		RegValue |= BIT2 + BIT1;
	write_reg(info, MD0, RegValue);

	/* MD1, Mode Register 1
	 *
	 * 07..06  ADDRS<1..0>, Address detect, 00=no addr check
	 * 05..04  TXCHR<1..0>, tx char size, 00=8 bits
	 * 03..02  RXCHR<1..0>, rx char size, 00=8 bits
	 * 01..00  PMPM<1..0>, Parity mode, 00=no parity
	 *
	 * 0000 0000
	 */
	RegValue = 0x00;
	write_reg(info, MD1, RegValue);

	/* MD2, Mode Register 2
	 *
	 * 07      NRZFM, 0=NRZ, 1=FM
	 * 06..05  CODE<1..0> Encoding, 00=NRZ
	 * 04..03  DRATE<1..0> DPLL Divisor, 00=8
	 * 02      Reserved, must be 0
	 * 01..00  CNCT<1..0> Channel connection, 0=normal
	 *
	 * 0000 0000
	 */
	RegValue = 0x00;
	switch(info->params.encoding) {
	case HDLC_ENCODING_NRZI:	  RegValue |= BIT5; break;
	case HDLC_ENCODING_BIPHASE_MARK:  RegValue |= BIT7 + BIT5; break; /* aka FM1 */
	case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT7 + BIT6; break; /* aka FM0 */
	case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT7; break; 	/* aka Manchester */
#if 0
	case HDLC_ENCODING_NRZB:	       			/* not supported */
	case HDLC_ENCODING_NRZI_MARK:          			/* not supported */
	case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: 			/* not supported */
#endif
	}
	if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
		DpllDivisor = 16;
		RegValue |= BIT3;
	} else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
		DpllDivisor = 8;
	} else {
		DpllDivisor = 32;
		RegValue |= BIT4;
	}
	write_reg(info, MD2, RegValue);

	/* RXS, Receive clock source
	 *
	 * 07      Reserved, must be 0
	 * 06..04  RXCS<2..0>, clock source, 000=RxC Pin, 100=BRG, 110=DPLL
	 * 03..00  RXBR<3..0>, rate divisor, 0000=1
	 */
	RegValue=0;
	if (info->params.flags & HDLC_FLAG_RXC_BRG)
		RegValue |= BIT6;
	if (info->params.flags & HDLC_FLAG_RXC_DPLL)
		RegValue |= BIT6 + BIT5;
	write_reg(info, RXS, RegValue);

	/* TXS, Transmit clock source
	 *
	 * 07      Reserved, must be 0
	 * 06..04  RXCS<2..0>, clock source, 000=TxC Pin, 100=BRG, 110=Receive Clock
	 * 03..00  RXBR<3..0>, rate divisor, 0000=1
	 */
	RegValue=0;
	if (info->params.flags & HDLC_FLAG_TXC_BRG)
		RegValue |= BIT6;
	if (info->params.flags & HDLC_FLAG_TXC_DPLL)
		RegValue |= BIT6 + BIT5;
	write_reg(info, TXS, RegValue);

	if (info->params.flags & HDLC_FLAG_RXC_DPLL)
		set_rate(info, info->params.clock_speed * DpllDivisor);
	else
		set_rate(info, info->params.clock_speed);

	/* GPDATA (General Purpose I/O Data Register)
	 *
	 * 6,4,2,0  CLKSEL<3..0>, 0 = TcCLK in, 1 = Auxclk out
	 */
	if (info->params.flags & HDLC_FLAG_TXC_BRG)
		info->port_array[0]->ctrlreg_value |= (BIT0 << (info->port_num * 2));
	else
		info->port_array[0]->ctrlreg_value &= ~(BIT0 << (info->port_num * 2));
	write_control_reg(info);

	/* RRC Receive Ready Control 0
	 *
	 * 07..05  Reserved, must be 0
	 * 04..00  RRC<4..0> Rx FIFO trigger active
	 */
	write_reg(info, RRC, rx_active_fifo_level);

	/* TRC0 Transmit Ready Control 0
	 *
	 * 07..05  Reserved, must be 0
	 * 04..00  TRC<4..0> Tx FIFO trigger active
	 */
	write_reg(info, TRC0, tx_active_fifo_level);

	/* TRC1 Transmit Ready Control 1
	 *
	 * 07..05  Reserved, must be 0
	 * 04..00  TRC<4..0> Tx FIFO trigger inactive 0x1f = 32 bytes (full)
	 */
	write_reg(info, TRC1, (unsigned char)(tx_negate_fifo_level - 1));

	/* DMR, DMA Mode Register
	 *
	 * 07..05  Reserved, must be 0
	 * 04      TMOD, Transfer Mode: 1=chained-block
	 * 03      Reserved, must be 0
	 * 02      NF, Number of Frames: 1=multi-frame
	 * 01      CNTE, Frame End IRQ Counter enable: 0=disabled
	 * 00      Reserved, must be 0
	 *
	 * 0001 0100
	 */
	write_reg(info, TXDMA + DMR, 0x14);
	write_reg(info, RXDMA + DMR, 0x14);

	/* Set chain pointer base (upper 8 bits of 24 bit addr) */
	write_reg(info, RXDMA + CPB,
		(unsigned char)(info->buffer_list_phys >> 16));

	/* Set chain pointer base (upper 8 bits of 24 bit addr) */
	write_reg(info, TXDMA + CPB,
		(unsigned char)(info->buffer_list_phys >> 16));

	/* enable status interrupts. other code enables/disables
	 * the individual sources for these two interrupt classes.
	 */
	info->ie0_value |= TXINTE + RXINTE;
	write_reg(info, IE0, info->ie0_value);

	/* CTL, MSCI control register
	 *
	 * 07..06  Reserved, set to 0
	 * 05      UDRNC, underrun control, 0=abort 1=CRC+flag (HDLC/BSC)
	 * 04      IDLC, idle control, 0=mark 1=idle register
	 * 03      BRK, break, 0=off 1 =on (async)
	 * 02      SYNCLD, sync char load enable (BSC) 1=enabled
	 * 01      GOP, go active on poll (LOOP mode) 1=enabled
	 * 00      RTS, RTS output control, 0=active 1=inactive
	 *
	 * 0001 0001
	 */
	RegValue = 0x10;
	if (!(info->serial_signals & SerialSignal_RTS))
		RegValue |= 0x01;
	write_reg(info, CTL, RegValue);

	/* preamble not supported ! */

	tx_set_idle(info);
	tx_stop(info);
	rx_stop(info);

	set_rate(info, info->params.clock_speed);

	if (info->params.loopback)
		enable_loopback(info,1);
}

/* Set the transmit HDLC idle mode
 */
static void tx_set_idle(SLMP_INFO *info)
{
	unsigned char RegValue = 0xff;

	/* Map API idle mode to SCA register bits */
	switch(info->idle_mode) {
	case HDLC_TXIDLE_FLAGS:		RegValue = 0x7e; break;
	case HDLC_TXIDLE_ALT_ZEROS_ONES:	RegValue = 0xaa; break;
	case HDLC_TXIDLE_ZEROS:		RegValue = 0x00; break;
	case HDLC_TXIDLE_ONES:		RegValue = 0xff; break;
	case HDLC_TXIDLE_ALT_MARK_SPACE:	RegValue = 0xaa; break;
	case HDLC_TXIDLE_SPACE:		RegValue = 0x00; break;
	case HDLC_TXIDLE_MARK:		RegValue = 0xff; break;
	}

	write_reg(info, IDL, RegValue);
}

/* Query the adapter for the state of the V24 status (input) signals.
*/ static void get_signals(SLMP_INFO *info) { u16 status = read_reg(info, SR3); u16 gpstatus = read_status_reg(info); u16 testbit; /* clear all serial signals except RTS and DTR */ info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR; /* set serial signal bits to reflect MISR */ if (!(status & BIT3)) info->serial_signals |= SerialSignal_CTS; if ( !(status & BIT2)) info->serial_signals |= SerialSignal_DCD; testbit = BIT1 << (info->port_num * 2); // Port 0..3 RI is GPDATA<1,3,5,7> if (!(gpstatus & testbit)) info->serial_signals |= SerialSignal_RI; testbit = BIT0 << (info->port_num * 2); // Port 0..3 DSR is GPDATA<0,2,4,6> if (!(gpstatus & testbit)) info->serial_signals |= SerialSignal_DSR; } /* Set the state of RTS and DTR based on contents of * serial_signals member of device context. */ static void set_signals(SLMP_INFO *info) { unsigned char RegValue; u16 EnableBit; RegValue = read_reg(info, CTL); if (info->serial_signals & SerialSignal_RTS) RegValue &= ~BIT0; else RegValue |= BIT0; write_reg(info, CTL, RegValue); // Port 0..3 DTR is ctrl reg <1,3,5,7> EnableBit = BIT1 << (info->port_num*2); if (info->serial_signals & SerialSignal_DTR) info->port_array[0]->ctrlreg_value &= ~EnableBit; else info->port_array[0]->ctrlreg_value |= EnableBit; write_control_reg(info); } /*******************/ /* DMA Buffer Code */ /*******************/ /* Set the count for all receive buffers to SCABUFSIZE * and set the current buffer to the first buffer. This effectively * makes all buffers free and discards any data in buffers. 
*/ static void rx_reset_buffers(SLMP_INFO *info) { rx_free_frame_buffers(info, 0, info->rx_buf_count - 1); } /* Free the buffers used by a received frame * * info pointer to device instance data * first index of 1st receive buffer of frame * last index of last receive buffer of frame */ static void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last) { bool done = false; while(!done) { /* reset current buffer for reuse */ info->rx_buf_list[first].status = 0xff; if (first == last) { done = true; /* set new last rx descriptor address */ write_reg16(info, RXDMA + EDA, info->rx_buf_list_ex[first].phys_entry); } first++; if (first == info->rx_buf_count) first = 0; } /* set current buffer to next buffer after last buffer of frame */ info->current_rx_buf = first; } /* Return a received frame from the receive DMA buffers. * Only frames received without errors are returned. * * Return Value: true if frame returned, otherwise false */ static bool rx_get_frame(SLMP_INFO *info) { unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */ unsigned short status; unsigned int framesize = 0; bool ReturnCode = false; unsigned long flags; struct tty_struct *tty = info->port.tty; unsigned char addr_field = 0xff; SCADESC *desc; SCADESC_EX *desc_ex; CheckAgain: /* assume no frame returned, set zero length */ framesize = 0; addr_field = 0xff; /* * current_rx_buf points to the 1st buffer of the next available * receive frame. To find the last buffer of the frame look for * a non-zero status field in the buffer entries. (The status * field is set by the 16C32 after completing a receive frame. 
*/ StartIndex = EndIndex = info->current_rx_buf; for ( ;; ) { desc = &info->rx_buf_list[EndIndex]; desc_ex = &info->rx_buf_list_ex[EndIndex]; if (desc->status == 0xff) goto Cleanup; /* current desc still in use, no frames available */ if (framesize == 0 && info->params.addr_filter != 0xff) addr_field = desc_ex->virt_addr[0]; framesize += desc->length; /* Status != 0 means last buffer of frame */ if (desc->status) break; EndIndex++; if (EndIndex == info->rx_buf_count) EndIndex = 0; if (EndIndex == info->current_rx_buf) { /* all buffers have been 'used' but none mark */ /* the end of a frame. Reset buffers and receiver. */ if ( info->rx_enabled ){ spin_lock_irqsave(&info->lock,flags); rx_start(info); spin_unlock_irqrestore(&info->lock,flags); } goto Cleanup; } } /* check status of receive frame */ /* frame status is byte stored after frame data * * 7 EOM (end of msg), 1 = last buffer of frame * 6 Short Frame, 1 = short frame * 5 Abort, 1 = frame aborted * 4 Residue, 1 = last byte is partial * 3 Overrun, 1 = overrun occurred during frame reception * 2 CRC, 1 = CRC error detected * */ status = desc->status; /* ignore CRC bit if not using CRC (bit is undefined) */ /* Note:CRC is not save to data buffer */ if (info->params.crc_type == HDLC_CRC_NONE) status &= ~BIT2; if (framesize == 0 || (addr_field != 0xff && addr_field != info->params.addr_filter)) { /* discard 0 byte frames, this seems to occur sometime * when remote is idling flags. 
*/ rx_free_frame_buffers(info, StartIndex, EndIndex); goto CheckAgain; } if (framesize < 2) status |= BIT6; if (status & (BIT6+BIT5+BIT3+BIT2)) { /* received frame has errors, * update counts and mark frame size as 0 */ if (status & BIT6) info->icount.rxshort++; else if (status & BIT5) info->icount.rxabort++; else if (status & BIT3) info->icount.rxover++; else info->icount.rxcrc++; framesize = 0; #if SYNCLINK_GENERIC_HDLC { info->netdev->stats.rx_errors++; info->netdev->stats.rx_frame_errors++; } #endif } if ( debug_level >= DEBUG_LEVEL_BH ) printk("%s(%d):%s rx_get_frame() status=%04X size=%d\n", __FILE__,__LINE__,info->device_name,status,framesize); if ( debug_level >= DEBUG_LEVEL_DATA ) trace_block(info,info->rx_buf_list_ex[StartIndex].virt_addr, min_t(unsigned int, framesize, SCABUFSIZE), 0); if (framesize) { if (framesize > info->max_frame_size) info->icount.rxlong++; else { /* copy dma buffer(s) to contiguous intermediate buffer */ int copy_count = framesize; int index = StartIndex; unsigned char *ptmp = info->tmp_rx_buf; info->tmp_rx_buf_count = framesize; info->icount.rxok++; while(copy_count) { int partial_count = min(copy_count,SCABUFSIZE); memcpy( ptmp, info->rx_buf_list_ex[index].virt_addr, partial_count ); ptmp += partial_count; copy_count -= partial_count; if ( ++index == info->rx_buf_count ) index = 0; } #if SYNCLINK_GENERIC_HDLC if (info->netcount) hdlcdev_rx(info,info->tmp_rx_buf,framesize); else #endif ldisc_receive_buf(tty,info->tmp_rx_buf, info->flag_buf, framesize); } } /* Free the buffers used by this frame. */ rx_free_frame_buffers( info, StartIndex, EndIndex ); ReturnCode = true; Cleanup: if ( info->rx_enabled && info->rx_overflow ) { /* Receiver is enabled, but needs to restarted due to * rx buffer overflow. If buffers are empty, restart receiver. 
*/ if (info->rx_buf_list[EndIndex].status == 0xff) { spin_lock_irqsave(&info->lock,flags); rx_start(info); spin_unlock_irqrestore(&info->lock,flags); } } return ReturnCode; } /* load the transmit DMA buffer with data */ static void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count) { unsigned short copy_count; unsigned int i = 0; SCADESC *desc; SCADESC_EX *desc_ex; if ( debug_level >= DEBUG_LEVEL_DATA ) trace_block(info, buf, min_t(unsigned int, count, SCABUFSIZE), 1); /* Copy source buffer to one or more DMA buffers, starting with * the first transmit dma buffer. */ for(i=0;;) { copy_count = min_t(unsigned int, count, SCABUFSIZE); desc = &info->tx_buf_list[i]; desc_ex = &info->tx_buf_list_ex[i]; load_pci_memory(info, desc_ex->virt_addr,buf,copy_count); desc->length = copy_count; desc->status = 0; buf += copy_count; count -= copy_count; if (!count) break; i++; if (i >= info->tx_buf_count) i = 0; } info->tx_buf_list[i].status = 0x81; /* set EOM and EOT status */ info->last_tx_buf = ++i; } static bool register_test(SLMP_INFO *info) { static unsigned char testval[] = {0x00, 0xff, 0xaa, 0x55, 0x69, 0x96}; static unsigned int count = ARRAY_SIZE(testval); unsigned int i; bool rc = true; unsigned long flags; spin_lock_irqsave(&info->lock,flags); reset_port(info); /* assume failure */ info->init_error = DiagStatus_AddressFailure; /* Write bit patterns to various registers but do it out of */ /* sync, then read back and verify values. 
*/ for (i = 0 ; i < count ; i++) { write_reg(info, TMC, testval[i]); write_reg(info, IDL, testval[(i+1)%count]); write_reg(info, SA0, testval[(i+2)%count]); write_reg(info, SA1, testval[(i+3)%count]); if ( (read_reg(info, TMC) != testval[i]) || (read_reg(info, IDL) != testval[(i+1)%count]) || (read_reg(info, SA0) != testval[(i+2)%count]) || (read_reg(info, SA1) != testval[(i+3)%count]) ) { rc = false; break; } } reset_port(info); spin_unlock_irqrestore(&info->lock,flags); return rc; } static bool irq_test(SLMP_INFO *info) { unsigned long timeout; unsigned long flags; unsigned char timer = (info->port_num & 1) ? TIMER2 : TIMER0; spin_lock_irqsave(&info->lock,flags); reset_port(info); /* assume failure */ info->init_error = DiagStatus_IrqFailure; info->irq_occurred = false; /* setup timer0 on SCA0 to interrupt */ /* IER2<7..4> = timer<3..0> interrupt enables (1=enabled) */ write_reg(info, IER2, (unsigned char)((info->port_num & 1) ? BIT6 : BIT4)); write_reg(info, (unsigned char)(timer + TEPR), 0); /* timer expand prescale */ write_reg16(info, (unsigned char)(timer + TCONR), 1); /* timer constant */ /* TMCS, Timer Control/Status Register * * 07 CMF, Compare match flag (read only) 1=match * 06 ECMI, CMF Interrupt Enable: 1=enabled * 05 Reserved, must be 0 * 04 TME, Timer Enable * 03..00 Reserved, must be 0 * * 0101 0000 */ write_reg(info, (unsigned char)(timer + TMCS), 0x50); spin_unlock_irqrestore(&info->lock,flags); timeout=100; while( timeout-- && !info->irq_occurred ) { msleep_interruptible(10); } spin_lock_irqsave(&info->lock,flags); reset_port(info); spin_unlock_irqrestore(&info->lock,flags); return info->irq_occurred; } /* initialize individual SCA device (2 ports) */ static bool sca_init(SLMP_INFO *info) { /* set wait controller to single mem partition (low), no wait states */ write_reg(info, PABR0, 0); /* wait controller addr boundary 0 */ write_reg(info, PABR1, 0); /* wait controller addr boundary 1 */ write_reg(info, WCRL, 0); /* wait controller low range */ 
write_reg(info, WCRM, 0); /* wait controller mid range */ write_reg(info, WCRH, 0); /* wait controller high range */ /* DPCR, DMA Priority Control * * 07..05 Not used, must be 0 * 04 BRC, bus release condition: 0=all transfers complete * 03 CCC, channel change condition: 0=every cycle * 02..00 PR<2..0>, priority 100=round robin * * 00000100 = 0x04 */ write_reg(info, DPCR, dma_priority); /* DMA Master Enable, BIT7: 1=enable all channels */ write_reg(info, DMER, 0x80); /* enable all interrupt classes */ write_reg(info, IER0, 0xff); /* TxRDY,RxRDY,TxINT,RxINT (ports 0-1) */ write_reg(info, IER1, 0xff); /* DMIB,DMIA (channels 0-3) */ write_reg(info, IER2, 0xf0); /* TIRQ (timers 0-3) */ /* ITCR, interrupt control register * 07 IPC, interrupt priority, 0=MSCI->DMA * 06..05 IAK<1..0>, Acknowledge cycle, 00=non-ack cycle * 04 VOS, Vector Output, 0=unmodified vector * 03..00 Reserved, must be 0 */ write_reg(info, ITCR, 0); return true; } /* initialize adapter hardware */ static bool init_adapter(SLMP_INFO *info) { int i; /* Set BIT30 of Local Control Reg 0x50 to reset SCA */ volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50); u32 readval; info->misc_ctrl_value |= BIT30; *MiscCtrl = info->misc_ctrl_value; /* * Force at least 170ns delay before clearing * reset bit. Each read from LCR takes at least * 30ns so 10 times for 300ns to be safe. 
*/ for(i=0;i<10;i++) readval = *MiscCtrl; info->misc_ctrl_value &= ~BIT30; *MiscCtrl = info->misc_ctrl_value; /* init control reg (all DTRs off, all clksel=input) */ info->ctrlreg_value = 0xaa; write_control_reg(info); { volatile u32 *LCR1BRDR = (u32 *)(info->lcr_base + 0x2c); lcr1_brdr_value &= ~(BIT5 + BIT4 + BIT3); switch(read_ahead_count) { case 16: lcr1_brdr_value |= BIT5 + BIT4 + BIT3; break; case 8: lcr1_brdr_value |= BIT5 + BIT4; break; case 4: lcr1_brdr_value |= BIT5 + BIT3; break; case 0: lcr1_brdr_value |= BIT5; break; } *LCR1BRDR = lcr1_brdr_value; *MiscCtrl = misc_ctrl_value; } sca_init(info->port_array[0]); sca_init(info->port_array[2]); return true; } /* Loopback an HDLC frame to test the hardware * interrupt and DMA functions. */ static bool loopback_test(SLMP_INFO *info) { #define TESTFRAMESIZE 20 unsigned long timeout; u16 count = TESTFRAMESIZE; unsigned char buf[TESTFRAMESIZE]; bool rc = false; unsigned long flags; struct tty_struct *oldtty = info->port.tty; u32 speed = info->params.clock_speed; info->params.clock_speed = 3686400; info->port.tty = NULL; /* assume failure */ info->init_error = DiagStatus_DmaFailure; /* build and send transmit frame */ for (count = 0; count < TESTFRAMESIZE;++count) buf[count] = (unsigned char)count; memset(info->tmp_rx_buf,0,TESTFRAMESIZE); /* program hardware for HDLC and enabled receiver */ spin_lock_irqsave(&info->lock,flags); hdlc_mode(info); enable_loopback(info,1); rx_start(info); info->tx_count = count; tx_load_dma_buffer(info,buf,count); tx_start(info); spin_unlock_irqrestore(&info->lock,flags); /* wait for receive complete */ /* Set a timeout for waiting for interrupt. 
*/ for ( timeout = 100; timeout; --timeout ) { msleep_interruptible(10); if (rx_get_frame(info)) { rc = true; break; } } /* verify received frame length and contents */ if (rc && ( info->tmp_rx_buf_count != count || memcmp(buf, info->tmp_rx_buf,count))) { rc = false; } spin_lock_irqsave(&info->lock,flags); reset_adapter(info); spin_unlock_irqrestore(&info->lock,flags); info->params.clock_speed = speed; info->port.tty = oldtty; return rc; } /* Perform diagnostics on hardware */ static int adapter_test( SLMP_INFO *info ) { unsigned long flags; if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):Testing device %s\n", __FILE__,__LINE__,info->device_name ); spin_lock_irqsave(&info->lock,flags); init_adapter(info); spin_unlock_irqrestore(&info->lock,flags); info->port_array[0]->port_count = 0; if ( register_test(info->port_array[0]) && register_test(info->port_array[1])) { info->port_array[0]->port_count = 2; if ( register_test(info->port_array[2]) && register_test(info->port_array[3]) ) info->port_array[0]->port_count += 2; } else { printk( "%s(%d):Register test failure for device %s Addr=%08lX\n", __FILE__,__LINE__,info->device_name, (unsigned long)(info->phys_sca_base)); return -ENODEV; } if ( !irq_test(info->port_array[0]) || !irq_test(info->port_array[1]) || (info->port_count == 4 && !irq_test(info->port_array[2])) || (info->port_count == 4 && !irq_test(info->port_array[3]))) { printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n", __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) ); return -ENODEV; } if (!loopback_test(info->port_array[0]) || !loopback_test(info->port_array[1]) || (info->port_count == 4 && !loopback_test(info->port_array[2])) || (info->port_count == 4 && !loopback_test(info->port_array[3]))) { printk( "%s(%d):DMA test failure for device %s\n", __FILE__,__LINE__,info->device_name); return -ENODEV; } if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):device %s passed diagnostics\n", 
__FILE__,__LINE__,info->device_name ); info->port_array[0]->init_error = 0; info->port_array[1]->init_error = 0; if ( info->port_count > 2 ) { info->port_array[2]->init_error = 0; info->port_array[3]->init_error = 0; } return 0; } /* Test the shared memory on a PCI adapter. */ static bool memory_test(SLMP_INFO *info) { static unsigned long testval[] = { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 }; unsigned long count = ARRAY_SIZE(testval); unsigned long i; unsigned long limit = SCA_MEM_SIZE/sizeof(unsigned long); unsigned long * addr = (unsigned long *)info->memory_base; /* Test data lines with test pattern at one location. */ for ( i = 0 ; i < count ; i++ ) { *addr = testval[i]; if ( *addr != testval[i] ) return false; } /* Test address lines with incrementing pattern over */ /* entire address range. */ for ( i = 0 ; i < limit ; i++ ) { *addr = i * 4; addr++; } addr = (unsigned long *)info->memory_base; for ( i = 0 ; i < limit ; i++ ) { if ( *addr != i * 4 ) return false; addr++; } memset( info->memory_base, 0, SCA_MEM_SIZE ); return true; } /* Load data into PCI adapter shared memory. * * The PCI9050 releases control of the local bus * after completing the current read or write operation. * * While the PCI9050 write FIFO not empty, the * PCI9050 treats all of the writes as a single transaction * and does not release the bus. This causes DMA latency problems * at high speeds when copying large data blocks to the shared memory. * * This function breaks a write into multiple transations by * interleaving a read which flushes the write FIFO and 'completes' * the write transation. This allows any pending DMA request to gain control * of the local bus in a timely fasion. 
*/ static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count) { /* A load interval of 16 allows for 4 32-bit writes at */ /* 136ns each for a maximum latency of 542ns on the local bus.*/ unsigned short interval = count / sca_pci_load_interval; unsigned short i; for ( i = 0 ; i < interval ; i++ ) { memcpy(dest, src, sca_pci_load_interval); read_status_reg(info); dest += sca_pci_load_interval; src += sca_pci_load_interval; } memcpy(dest, src, count % sca_pci_load_interval); } static void trace_block(SLMP_INFO *info,const char* data, int count, int xmit) { int i; int linecount; if (xmit) printk("%s tx data:\n",info->device_name); else printk("%s rx data:\n",info->device_name); while(count) { if (count > 16) linecount = 16; else linecount = count; for(i=0;i<linecount;i++) printk("%02X ",(unsigned char)data[i]); for(;i<17;i++) printk(" "); for(i=0;i<linecount;i++) { if (data[i]>=040 && data[i]<=0176) printk("%c",data[i]); else printk("."); } printk("\n"); data += linecount; count -= linecount; } } /* end of trace_block() */ /* called when HDLC frame times out * update stats and do tx completion processing */ static void tx_timeout(unsigned long context) { SLMP_INFO *info = (SLMP_INFO*)context; unsigned long flags; if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):%s tx_timeout()\n", __FILE__,__LINE__,info->device_name); if(info->tx_active && info->params.mode == MGSL_MODE_HDLC) { info->icount.txtimeout++; } spin_lock_irqsave(&info->lock,flags); info->tx_active = false; info->tx_count = info->tx_put = info->tx_get = 0; spin_unlock_irqrestore(&info->lock,flags); #if SYNCLINK_GENERIC_HDLC if (info->netcount) hdlcdev_tx_done(info); else #endif bh_transmit(info); } /* called to periodically check the DSR/RI modem signal input status */ static void status_timeout(unsigned long context) { u16 status = 0; SLMP_INFO *info = (SLMP_INFO*)context; unsigned long flags; unsigned char delta; spin_lock_irqsave(&info->lock,flags); 
get_signals(info); spin_unlock_irqrestore(&info->lock,flags); /* check for DSR/RI state change */ delta = info->old_signals ^ info->serial_signals; info->old_signals = info->serial_signals; if (delta & SerialSignal_DSR) status |= MISCSTATUS_DSR_LATCHED|(info->serial_signals&SerialSignal_DSR); if (delta & SerialSignal_RI) status |= MISCSTATUS_RI_LATCHED|(info->serial_signals&SerialSignal_RI); if (delta & SerialSignal_DCD) status |= MISCSTATUS_DCD_LATCHED|(info->serial_signals&SerialSignal_DCD); if (delta & SerialSignal_CTS) status |= MISCSTATUS_CTS_LATCHED|(info->serial_signals&SerialSignal_CTS); if (status) isr_io_pin(info,status); mod_timer(&info->status_timer, jiffies + msecs_to_jiffies(10)); } /* Register Access Routines - * All registers are memory mapped */ #define CALC_REGADDR() \ unsigned char * RegAddr = (unsigned char*)(info->sca_base + Addr); \ if (info->port_num > 1) \ RegAddr += 256; /* port 0-1 SCA0, 2-3 SCA1 */ \ if ( info->port_num & 1) { \ if (Addr > 0x7f) \ RegAddr += 0x40; /* DMA access */ \ else if (Addr > 0x1f && Addr < 0x60) \ RegAddr += 0x20; /* MSCI access */ \ } static unsigned char read_reg(SLMP_INFO * info, unsigned char Addr) { CALC_REGADDR(); return *RegAddr; } static void write_reg(SLMP_INFO * info, unsigned char Addr, unsigned char Value) { CALC_REGADDR(); *RegAddr = Value; } static u16 read_reg16(SLMP_INFO * info, unsigned char Addr) { CALC_REGADDR(); return *((u16 *)RegAddr); } static void write_reg16(SLMP_INFO * info, unsigned char Addr, u16 Value) { CALC_REGADDR(); *((u16 *)RegAddr) = Value; } static unsigned char read_status_reg(SLMP_INFO * info) { unsigned char *RegAddr = (unsigned char *)info->statctrl_base; return *RegAddr; } static void write_control_reg(SLMP_INFO * info) { unsigned char *RegAddr = (unsigned char *)info->statctrl_base; *RegAddr = info->port_array[0]->ctrlreg_value; } static int synclinkmp_init_one (struct pci_dev *dev, const struct pci_device_id *ent) { if (pci_enable_device(dev)) { printk("error enabling pci 
device %p\n", dev); return -EIO; } device_init( ++synclinkmp_adapter_count, dev ); return 0; } static void synclinkmp_remove_one (struct pci_dev *dev) { }
gpl-2.0
SamYaple/bcache-dev
drivers/tty/synclinkmp.c
846
150498
/* * $Id: synclinkmp.c,v 4.38 2005/07/15 13:29:44 paulkf Exp $ * * Device driver for Microgate SyncLink Multiport * high speed multiprotocol serial adapter. * * written by Paul Fulghum for Microgate Corporation * paulkf@microgate.com * * Microgate and SyncLink are trademarks of Microgate Corporation * * Derived from serial.c written by Theodore Ts'o and Linus Torvalds * This code is released under the GNU General Public License (GPL) * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #define VERSION(ver,rel,seq) (((ver)<<16) | ((rel)<<8) | (seq)) #if defined(__i386__) # define BREAKPOINT() asm(" int $3"); #else # define BREAKPOINT() { } #endif #define MAX_DEVICES 12 #include <linux/module.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial.h> #include <linux/major.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/ioctl.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/dma.h> #include <linux/bitops.h> #include <asm/types.h> #include <linux/termios.h> #include <linux/workqueue.h> #include <linux/hdlc.h> #include <linux/synclink.h> #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINKMP_MODULE)) #define SYNCLINK_GENERIC_HDLC 1 #else #define SYNCLINK_GENERIC_HDLC 0 #endif #define GET_USER(error,value,addr) error = get_user(value,addr) #define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0 #define PUT_USER(error,value,addr) error = put_user(value,addr) #define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? 
-EFAULT : 0 #include <asm/uaccess.h> static MGSL_PARAMS default_params = { MGSL_MODE_HDLC, /* unsigned long mode */ 0, /* unsigned char loopback; */ HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */ HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */ 0, /* unsigned long clock_speed; */ 0xff, /* unsigned char addr_filter; */ HDLC_CRC_16_CCITT, /* unsigned short crc_type; */ HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */ HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */ 9600, /* unsigned long data_rate; */ 8, /* unsigned char data_bits; */ 1, /* unsigned char stop_bits; */ ASYNC_PARITY_NONE /* unsigned char parity; */ }; /* size in bytes of DMA data buffers */ #define SCABUFSIZE 1024 #define SCA_MEM_SIZE 0x40000 #define SCA_BASE_SIZE 512 #define SCA_REG_SIZE 16 #define SCA_MAX_PORTS 4 #define SCAMAXDESC 128 #define BUFFERLISTSIZE 4096 /* SCA-I style DMA buffer descriptor */ typedef struct _SCADESC { u16 next; /* lower l6 bits of next descriptor addr */ u16 buf_ptr; /* lower 16 bits of buffer addr */ u8 buf_base; /* upper 8 bits of buffer addr */ u8 pad1; u16 length; /* length of buffer */ u8 status; /* status of buffer */ u8 pad2; } SCADESC, *PSCADESC; typedef struct _SCADESC_EX { /* device driver bookkeeping section */ char *virt_addr; /* virtual address of data buffer */ u16 phys_entry; /* lower 16-bits of physical address of this descriptor */ } SCADESC_EX, *PSCADESC_EX; /* The queue of BH actions to be performed */ #define BH_RECEIVE 1 #define BH_TRANSMIT 2 #define BH_STATUS 4 #define IO_PIN_SHUTDOWN_LIMIT 100 struct _input_signal_events { int ri_up; int ri_down; int dsr_up; int dsr_down; int dcd_up; int dcd_down; int cts_up; int cts_down; }; /* * Device instance data structure */ typedef struct _synclinkmp_info { void *if_ptr; /* General purpose pointer (used by SPPP) */ int magic; struct tty_port port; int line; unsigned short close_delay; unsigned short closing_wait; /* time to wait before closing */ struct mgsl_icount 
icount; int timeout; int x_char; /* xon/xoff character */ u16 read_status_mask1; /* break detection (SR1 indications) */ u16 read_status_mask2; /* parity/framing/overun (SR2 indications) */ unsigned char ignore_status_mask1; /* break detection (SR1 indications) */ unsigned char ignore_status_mask2; /* parity/framing/overun (SR2 indications) */ unsigned char *tx_buf; int tx_put; int tx_get; int tx_count; wait_queue_head_t status_event_wait_q; wait_queue_head_t event_wait_q; struct timer_list tx_timer; /* HDLC transmit timeout timer */ struct _synclinkmp_info *next_device; /* device list link */ struct timer_list status_timer; /* input signal status check timer */ spinlock_t lock; /* spinlock for synchronizing with ISR */ struct work_struct task; /* task structure for scheduling bh */ u32 max_frame_size; /* as set by device config */ u32 pending_bh; bool bh_running; /* Protection from multiple */ int isr_overflow; bool bh_requested; int dcd_chkcount; /* check counts to prevent */ int cts_chkcount; /* too many IRQs if a signal */ int dsr_chkcount; /* is floating */ int ri_chkcount; char *buffer_list; /* virtual address of Rx & Tx buffer lists */ unsigned long buffer_list_phys; unsigned int rx_buf_count; /* count of total allocated Rx buffers */ SCADESC *rx_buf_list; /* list of receive buffer entries */ SCADESC_EX rx_buf_list_ex[SCAMAXDESC]; /* list of receive buffer entries */ unsigned int current_rx_buf; unsigned int tx_buf_count; /* count of total allocated Tx buffers */ SCADESC *tx_buf_list; /* list of transmit buffer entries */ SCADESC_EX tx_buf_list_ex[SCAMAXDESC]; /* list of transmit buffer entries */ unsigned int last_tx_buf; unsigned char *tmp_rx_buf; unsigned int tmp_rx_buf_count; bool rx_enabled; bool rx_overflow; bool tx_enabled; bool tx_active; u32 idle_mode; unsigned char ie0_value; unsigned char ie1_value; unsigned char ie2_value; unsigned char ctrlreg_value; unsigned char old_signals; char device_name[25]; /* device instance name */ int port_count; int 
adapter_num; int port_num; struct _synclinkmp_info *port_array[SCA_MAX_PORTS]; unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */ unsigned int irq_level; /* interrupt level */ unsigned long irq_flags; bool irq_requested; /* true if IRQ requested */ MGSL_PARAMS params; /* communications parameters */ unsigned char serial_signals; /* current serial signal states */ bool irq_occurred; /* for diagnostics use */ unsigned int init_error; /* Initialization startup error */ u32 last_mem_alloc; unsigned char* memory_base; /* shared memory address (PCI only) */ u32 phys_memory_base; int shared_mem_requested; unsigned char* sca_base; /* HD64570 SCA Memory address */ u32 phys_sca_base; u32 sca_offset; bool sca_base_requested; unsigned char* lcr_base; /* local config registers (PCI only) */ u32 phys_lcr_base; u32 lcr_offset; int lcr_mem_requested; unsigned char* statctrl_base; /* status/control register memory */ u32 phys_statctrl_base; u32 statctrl_offset; bool sca_statctrl_requested; u32 misc_ctrl_value; char *flag_buf; bool drop_rts_on_tx_done; struct _input_signal_events input_signal_events; /* SPPP/Cisco HDLC device parts */ int netcount; spinlock_t netlock; #if SYNCLINK_GENERIC_HDLC struct net_device *netdev; #endif } SLMP_INFO; #define MGSL_MAGIC 0x5401 /* * define serial signal status change macros */ #define MISCSTATUS_DCD_LATCHED (SerialSignal_DCD<<8) /* indicates change in DCD */ #define MISCSTATUS_RI_LATCHED (SerialSignal_RI<<8) /* indicates change in RI */ #define MISCSTATUS_CTS_LATCHED (SerialSignal_CTS<<8) /* indicates change in CTS */ #define MISCSTATUS_DSR_LATCHED (SerialSignal_DSR<<8) /* change in DSR */ /* Common Register macros */ #define LPR 0x00 #define PABR0 0x02 #define PABR1 0x03 #define WCRL 0x04 #define WCRM 0x05 #define WCRH 0x06 #define DPCR 0x08 #define DMER 0x09 #define ISR0 0x10 #define ISR1 0x11 #define ISR2 0x12 #define IER0 0x14 #define IER1 0x15 #define IER2 0x16 #define ITCR 0x18 #define INTVR 0x1a #define IMVR 0x1c /* MSCI 
Register macros */ #define TRB 0x20 #define TRBL 0x20 #define TRBH 0x21 #define SR0 0x22 #define SR1 0x23 #define SR2 0x24 #define SR3 0x25 #define FST 0x26 #define IE0 0x28 #define IE1 0x29 #define IE2 0x2a #define FIE 0x2b #define CMD 0x2c #define MD0 0x2e #define MD1 0x2f #define MD2 0x30 #define CTL 0x31 #define SA0 0x32 #define SA1 0x33 #define IDL 0x34 #define TMC 0x35 #define RXS 0x36 #define TXS 0x37 #define TRC0 0x38 #define TRC1 0x39 #define RRC 0x3a #define CST0 0x3c #define CST1 0x3d /* Timer Register Macros */ #define TCNT 0x60 #define TCNTL 0x60 #define TCNTH 0x61 #define TCONR 0x62 #define TCONRL 0x62 #define TCONRH 0x63 #define TMCS 0x64 #define TEPR 0x65 /* DMA Controller Register macros */ #define DARL 0x80 #define DARH 0x81 #define DARB 0x82 #define BAR 0x80 #define BARL 0x80 #define BARH 0x81 #define BARB 0x82 #define SAR 0x84 #define SARL 0x84 #define SARH 0x85 #define SARB 0x86 #define CPB 0x86 #define CDA 0x88 #define CDAL 0x88 #define CDAH 0x89 #define EDA 0x8a #define EDAL 0x8a #define EDAH 0x8b #define BFL 0x8c #define BFLL 0x8c #define BFLH 0x8d #define BCR 0x8e #define BCRL 0x8e #define BCRH 0x8f #define DSR 0x90 #define DMR 0x91 #define FCT 0x93 #define DIR 0x94 #define DCMD 0x95 /* combine with timer or DMA register address */ #define TIMER0 0x00 #define TIMER1 0x08 #define TIMER2 0x10 #define TIMER3 0x18 #define RXDMA 0x00 #define TXDMA 0x20 /* SCA Command Codes */ #define NOOP 0x00 #define TXRESET 0x01 #define TXENABLE 0x02 #define TXDISABLE 0x03 #define TXCRCINIT 0x04 #define TXCRCEXCL 0x05 #define TXEOM 0x06 #define TXABORT 0x07 #define MPON 0x08 #define TXBUFCLR 0x09 #define RXRESET 0x11 #define RXENABLE 0x12 #define RXDISABLE 0x13 #define RXCRCINIT 0x14 #define RXREJECT 0x15 #define SEARCHMP 0x16 #define RXCRCEXCL 0x17 #define RXCRCCALC 0x18 #define CHRESET 0x21 #define HUNT 0x31 /* DMA command codes */ #define SWABORT 0x01 #define FEICLEAR 0x02 /* IE0 */ #define TXINTE BIT7 #define RXINTE BIT6 #define TXRDYE BIT1 #define RXRDYE 
BIT0 /* IE1 & SR1 */ #define UDRN BIT7 #define IDLE BIT6 #define SYNCD BIT4 #define FLGD BIT4 #define CCTS BIT3 #define CDCD BIT2 #define BRKD BIT1 #define ABTD BIT1 #define GAPD BIT1 #define BRKE BIT0 #define IDLD BIT0 /* IE2 & SR2 */ #define EOM BIT7 #define PMP BIT6 #define SHRT BIT6 #define PE BIT5 #define ABT BIT5 #define FRME BIT4 #define RBIT BIT4 #define OVRN BIT3 #define CRCE BIT2 /* * Global linked list of SyncLink devices */ static SLMP_INFO *synclinkmp_device_list = NULL; static int synclinkmp_adapter_count = -1; static int synclinkmp_device_count = 0; /* * Set this param to non-zero to load eax with the * .text section address and breakpoint on module load. * This is useful for use with gdb and add-symbol-file command. */ static bool break_on_load = 0; /* * Driver major number, defaults to zero to get auto * assigned major number. May be forced as module parameter. */ static int ttymajor = 0; /* * Array of user specified options for ISA adapters. */ static int debug_level = 0; static int maxframe[MAX_DEVICES] = {0,}; module_param(break_on_load, bool, 0); module_param(ttymajor, int, 0); module_param(debug_level, int, 0); module_param_array(maxframe, int, NULL, 0); static char *driver_name = "SyncLink MultiPort driver"; static char *driver_version = "$Revision: 4.38 $"; static int synclinkmp_init_one(struct pci_dev *dev,const struct pci_device_id *ent); static void synclinkmp_remove_one(struct pci_dev *dev); static struct pci_device_id synclinkmp_pci_tbl[] = { { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_SCA, PCI_ANY_ID, PCI_ANY_ID, }, { 0, }, /* terminate list */ }; MODULE_DEVICE_TABLE(pci, synclinkmp_pci_tbl); MODULE_LICENSE("GPL"); static struct pci_driver synclinkmp_pci_driver = { .name = "synclinkmp", .id_table = synclinkmp_pci_tbl, .probe = synclinkmp_init_one, .remove = synclinkmp_remove_one, }; static struct tty_driver *serial_driver; /* number of characters left in xmit buffer before we ask for more */ #define WAKEUP_CHARS 256 /* tty 
callbacks */ static int open(struct tty_struct *tty, struct file * filp); static void close(struct tty_struct *tty, struct file * filp); static void hangup(struct tty_struct *tty); static void set_termios(struct tty_struct *tty, struct ktermios *old_termios); static int write(struct tty_struct *tty, const unsigned char *buf, int count); static int put_char(struct tty_struct *tty, unsigned char ch); static void send_xchar(struct tty_struct *tty, char ch); static void wait_until_sent(struct tty_struct *tty, int timeout); static int write_room(struct tty_struct *tty); static void flush_chars(struct tty_struct *tty); static void flush_buffer(struct tty_struct *tty); static void tx_hold(struct tty_struct *tty); static void tx_release(struct tty_struct *tty); static int ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); static int chars_in_buffer(struct tty_struct *tty); static void throttle(struct tty_struct * tty); static void unthrottle(struct tty_struct * tty); static int set_break(struct tty_struct *tty, int break_state); #if SYNCLINK_GENERIC_HDLC #define dev_to_port(D) (dev_to_hdlc(D)->priv) static void hdlcdev_tx_done(SLMP_INFO *info); static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size); static int hdlcdev_init(SLMP_INFO *info); static void hdlcdev_exit(SLMP_INFO *info); #endif /* ioctl handlers */ static int get_stats(SLMP_INFO *info, struct mgsl_icount __user *user_icount); static int get_params(SLMP_INFO *info, MGSL_PARAMS __user *params); static int set_params(SLMP_INFO *info, MGSL_PARAMS __user *params); static int get_txidle(SLMP_INFO *info, int __user *idle_mode); static int set_txidle(SLMP_INFO *info, int idle_mode); static int tx_enable(SLMP_INFO *info, int enable); static int tx_abort(SLMP_INFO *info); static int rx_enable(SLMP_INFO *info, int enable); static int modem_input_wait(SLMP_INFO *info,int arg); static int wait_mgsl_event(SLMP_INFO *info, int __user *mask_ptr); static int tiocmget(struct tty_struct *tty); static int 
tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static int set_break(struct tty_struct *tty, int break_state); static void add_device(SLMP_INFO *info); static void device_init(int adapter_num, struct pci_dev *pdev); static int claim_resources(SLMP_INFO *info); static void release_resources(SLMP_INFO *info); static int startup(SLMP_INFO *info); static int block_til_ready(struct tty_struct *tty, struct file * filp,SLMP_INFO *info); static int carrier_raised(struct tty_port *port); static void shutdown(SLMP_INFO *info); static void program_hw(SLMP_INFO *info); static void change_params(SLMP_INFO *info); static bool init_adapter(SLMP_INFO *info); static bool register_test(SLMP_INFO *info); static bool irq_test(SLMP_INFO *info); static bool loopback_test(SLMP_INFO *info); static int adapter_test(SLMP_INFO *info); static bool memory_test(SLMP_INFO *info); static void reset_adapter(SLMP_INFO *info); static void reset_port(SLMP_INFO *info); static void async_mode(SLMP_INFO *info); static void hdlc_mode(SLMP_INFO *info); static void rx_stop(SLMP_INFO *info); static void rx_start(SLMP_INFO *info); static void rx_reset_buffers(SLMP_INFO *info); static void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last); static bool rx_get_frame(SLMP_INFO *info); static void tx_start(SLMP_INFO *info); static void tx_stop(SLMP_INFO *info); static void tx_load_fifo(SLMP_INFO *info); static void tx_set_idle(SLMP_INFO *info); static void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count); static void get_signals(SLMP_INFO *info); static void set_signals(SLMP_INFO *info); static void enable_loopback(SLMP_INFO *info, int enable); static void set_rate(SLMP_INFO *info, u32 data_rate); static int bh_action(SLMP_INFO *info); static void bh_handler(struct work_struct *work); static void bh_receive(SLMP_INFO *info); static void bh_transmit(SLMP_INFO *info); static void bh_status(SLMP_INFO *info); static void isr_timer(SLMP_INFO 
*info); static void isr_rxint(SLMP_INFO *info); static void isr_rxrdy(SLMP_INFO *info); static void isr_txint(SLMP_INFO *info); static void isr_txrdy(SLMP_INFO *info); static void isr_rxdmaok(SLMP_INFO *info); static void isr_rxdmaerror(SLMP_INFO *info); static void isr_txdmaok(SLMP_INFO *info); static void isr_txdmaerror(SLMP_INFO *info); static void isr_io_pin(SLMP_INFO *info, u16 status); static int alloc_dma_bufs(SLMP_INFO *info); static void free_dma_bufs(SLMP_INFO *info); static int alloc_buf_list(SLMP_INFO *info); static int alloc_frame_bufs(SLMP_INFO *info, SCADESC *list, SCADESC_EX *list_ex,int count); static int alloc_tmp_rx_buf(SLMP_INFO *info); static void free_tmp_rx_buf(SLMP_INFO *info); static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count); static void trace_block(SLMP_INFO *info, const char* data, int count, int xmit); static void tx_timeout(unsigned long context); static void status_timeout(unsigned long context); static unsigned char read_reg(SLMP_INFO *info, unsigned char addr); static void write_reg(SLMP_INFO *info, unsigned char addr, unsigned char val); static u16 read_reg16(SLMP_INFO *info, unsigned char addr); static void write_reg16(SLMP_INFO *info, unsigned char addr, u16 val); static unsigned char read_status_reg(SLMP_INFO * info); static void write_control_reg(SLMP_INFO * info); static unsigned char rx_active_fifo_level = 16; // rx request FIFO activation level in bytes static unsigned char tx_active_fifo_level = 16; // tx request FIFO activation level in bytes static unsigned char tx_negate_fifo_level = 32; // tx request FIFO negation level in bytes static u32 misc_ctrl_value = 0x007e4040; static u32 lcr1_brdr_value = 0x00800028; static u32 read_ahead_count = 8; /* DPCR, DMA Priority Control * * 07..05 Not used, must be 0 * 04 BRC, bus release condition: 0=all transfers complete * 1=release after 1 xfer on all channels * 03 CCC, channel change condition: 0=every cycle * 1=after each channel 
completes all xfers
 *	02..00	PR<2..0>, priority	100=round robin
 *
 * 00000100 = 0x04
 */
static unsigned char dma_priority = 0x04;

// Number of bytes that can be written to shared RAM
// in a single write operation
static u32 sca_pci_load_interval = 64;

/*
 * 1st function defined in .text section. Calling this function in
 * init_module() followed by a breakpoint allows a remote debugger
 * (gdb) to get the .text address for the add-symbol-file command.
 * This allows remote debugging of dynamically loadable modules.
 */
static void* synclinkmp_get_text_ptr(void);
static void* synclinkmp_get_text_ptr(void) {return synclinkmp_get_text_ptr;}

/*
 * Validate an SLMP_INFO pointer before dereferencing it in a tty
 * callback.  Returns 0 if usable, 1 if NULL (or, with SANITY_CHECK
 * defined, if the magic number does not match MGSL_MAGIC).
 */
static inline int sanity_check(SLMP_INFO *info,
			       char *name, const char *routine)
{
#ifdef SANITY_CHECK
	static const char *badmagic =
		"Warning: bad magic number for synclinkmp_struct (%s) in %s\n";
	static const char *badinfo =
		"Warning: null synclinkmp_struct for (%s) in %s\n";

	if (!info) {
		printk(badinfo, name, routine);
		return 1;
	}
	if (info->magic != MGSL_MAGIC) {
		printk(badmagic, name, routine);
		return 1;
	}
#else
	if (!info)
		return 1;
#endif
	return 0;
}

/**
 * line discipline callback wrappers
 *
 * The wrappers maintain line discipline references
 * while calling into the line discipline.
 *
 * ldisc_receive_buf  - pass receive data to line discipline
 */
/* Forward received data to the tty's line discipline, holding an
 * ldisc reference across the callback so it cannot be torn down. */
static void ldisc_receive_buf(struct tty_struct *tty,
			      const __u8 *data, char *flags, int count)
{
	struct tty_ldisc *ld;
	if (!tty)
		return;
	ld = tty_ldisc_ref(tty);
	if (ld) {
		if (ld->ops->receive_buf)
			ld->ops->receive_buf(tty, data, flags, count);
		tty_ldisc_deref(ld);
	}
}

/* tty callbacks */

/* Look up the SLMP_INFO for tty->index in the global device list and
 * bind it to the tty; fails with -ENODEV for a bad line number or a
 * device whose init failed. */
static int install(struct tty_driver *driver, struct tty_struct *tty)
{
	SLMP_INFO *info;
	int line = tty->index;

	if (line >= synclinkmp_device_count) {
		printk("%s(%d): open with invalid line #%d.\n",
			__FILE__,__LINE__,line);
		return -ENODEV;
	}

	info = synclinkmp_device_list;
	while (info && info->line != line)
		info = info->next_device;
	if (sanity_check(info, tty->name, "open"))
		return -ENODEV;
	if (info->init_error) {
		printk("%s(%d):%s device is not allocated, init error=%d\n",
			__FILE__, __LINE__, info->device_name,
			info->init_error);
		return -ENODEV;
	}

	tty->driver_data = info;

	return tty_port_install(&info->port, driver, tty);
}

/* Called when a port is opened.  Init and enable port.
 */
static int open(struct tty_struct *tty, struct file *filp)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;
	int retval;

	info->port.tty = tty;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s open(), old ref count = %d\n",
			 __FILE__,__LINE__,tty->driver->name, info->port.count);

	/* If port is closing, signal caller to try again */
	if (info->port.flags & ASYNC_CLOSING){
		wait_event_interruptible_tty(tty, info->port.close_wait,
					     !(info->port.flags & ASYNC_CLOSING));
		retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
			-EAGAIN : -ERESTARTSYS);
		goto cleanup;
	}

	info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ?
1 : 0;

	/* netlock arbitrates between tty open and netdevice (HDLC) use */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->netcount) {
		retval = -EBUSY;
		spin_unlock_irqrestore(&info->netlock, flags);
		goto cleanup;
	}
	info->port.count++;
	spin_unlock_irqrestore(&info->netlock, flags);

	if (info->port.count == 1) {
		/* 1st open on this device, init hardware */
		retval = startup(info);
		if (retval < 0)
			goto cleanup;
	}

	retval = block_til_ready(tty, filp, info);
	if (retval) {
		if (debug_level >= DEBUG_LEVEL_INFO)
			printk("%s(%d):%s block_til_ready() returned %d\n",
				 __FILE__,__LINE__, info->device_name, retval);
		goto cleanup;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s open() success\n",
			 __FILE__,__LINE__, info->device_name);
	retval = 0;

cleanup:
	/* on failure, undo the reference taken above */
	if (retval) {
		if (tty->count == 1)
			info->port.tty = NULL; /* tty layer will release tty struct */
		if(info->port.count)
			info->port.count--;
	}

	return retval;
}

/* Called when port is closed.  Wait for remaining data to be
 * sent.  Disable port and free resources.
 */
static void close(struct tty_struct *tty, struct file *filp)
{
	SLMP_INFO * info = tty->driver_data;

	if (sanity_check(info, tty->name, "close"))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s close() entry, count=%d\n",
			 __FILE__,__LINE__, info->device_name, info->port.count);

	if (tty_port_close_start(&info->port, tty, filp) == 0)
		goto cleanup;

	mutex_lock(&info->port.mutex);
	if (info->port.flags & ASYNC_INITIALIZED)
		wait_until_sent(tty, info->timeout);

	flush_buffer(tty);
	tty_ldisc_flush(tty);
	shutdown(info);
	mutex_unlock(&info->port.mutex);

	tty_port_close_end(&info->port, tty);
	info->port.tty = NULL;

cleanup:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
			tty->driver->name, info->port.count);
}

/* Called by tty_hangup() when a hangup is signaled.
 * This is the same as closing all open descriptors for the port.
 */
static void hangup(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s hangup()\n",
			 __FILE__,__LINE__, info->device_name );

	if (sanity_check(info, tty->name, "hangup"))
		return;

	mutex_lock(&info->port.mutex);
	flush_buffer(tty);
	shutdown(info);

	/* drop the port reference state under the port lock */
	spin_lock_irqsave(&info->port.lock, flags);
	info->port.count = 0;
	info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
	info->port.tty = NULL;
	spin_unlock_irqrestore(&info->port.lock, flags);
	mutex_unlock(&info->port.mutex);

	wake_up_interruptible(&info->port.open_wait);
}

/* Set new termios settings
 */
static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s set_termios()\n", __FILE__,__LINE__,
			tty->driver->name );

	change_params(info);

	/* Handle transition to B0 status */
	if (old_termios->c_cflag & CBAUD &&
	    !(tty->termios.c_cflag & CBAUD)) {
		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
		spin_lock_irqsave(&info->lock,flags);
		set_signals(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}

	/* Handle transition away from B0 status */
	if (!(old_termios->c_cflag & CBAUD) &&
	    tty->termios.c_cflag & CBAUD) {
		info->serial_signals |= SerialSignal_DTR;
		if (!(tty->termios.c_cflag & CRTSCTS) ||
		    !test_bit(TTY_THROTTLED, &tty->flags)) {
			info->serial_signals |= SerialSignal_RTS;
		}
		spin_lock_irqsave(&info->lock,flags);
		set_signals(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}

	/* Handle turning off CRTSCTS */
	if (old_termios->c_cflag & CRTSCTS &&
	    !(tty->termios.c_cflag & CRTSCTS)) {
		tty->hw_stopped = 0;
		tx_release(tty);
	}
}

/* Send a block of data
 *
 * Arguments:
 *
 * 	tty		pointer to tty information structure
 * 	buf		pointer to buffer containing send data
 * 	count		size of send data in bytes
 *
 * Return Value:	number of characters written
 */
static int write(struct tty_struct *tty,
		 const unsigned char *buf, int count)
{
	int	c, ret = 0;
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s write() count=%d\n",
		       __FILE__,__LINE__,info->device_name,count);

	if (sanity_check(info, tty->name, "write"))
		goto cleanup;

	if (!info->tx_buf)
		goto cleanup;

	/* HDLC mode: one frame at a time, loaded directly to DMA */
	if (info->params.mode == MGSL_MODE_HDLC) {
		if (count > info->max_frame_size) {
			ret = -EIO;
			goto cleanup;
		}
		if (info->tx_active)
			goto cleanup;
		if (info->tx_count) {
			/* send accumulated data from send_char() calls */
			/* as frame and wait before accepting more data. */
			tx_load_dma_buffer(info, info->tx_buf, info->tx_count);
			goto start;
		}
		ret = info->tx_count = count;
		tx_load_dma_buffer(info, buf, count);
		goto start;
	}

	/* async mode: copy into the circular tx_buf, possibly in two
	 * pieces around the wrap point */
	for (;;) {
		c = min_t(int, count,
			min(info->max_frame_size - info->tx_count - 1,
			    info->max_frame_size - info->tx_put));
		if (c <= 0)
			break;

		memcpy(info->tx_buf + info->tx_put, buf, c);

		spin_lock_irqsave(&info->lock,flags);
		info->tx_put += c;
		if (info->tx_put >= info->max_frame_size)
			info->tx_put -= info->max_frame_size;
		info->tx_count += c;
		spin_unlock_irqrestore(&info->lock,flags);

		buf += c;
		count -= c;
		ret += c;
	}

	/* NOTE(review): in HDLC mode the code above always jumps to
	 * start/cleanup before the copy loop, so this branch looks
	 * unreachable — kept as-is, verify against full driver history */
	if (info->params.mode == MGSL_MODE_HDLC) {
		if (count) {
			ret = info->tx_count = 0;
			goto cleanup;
		}
		tx_load_dma_buffer(info, info->tx_buf, info->tx_count);
	}

start:
	if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
		spin_lock_irqsave(&info->lock,flags);
		if (!info->tx_active)
			tx_start(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}

cleanup:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk( "%s(%d):%s write() returning=%d\n",
			__FILE__,__LINE__,info->device_name,ret);
	return ret;
}

/* Add a character to the transmit buffer.
 */
static int put_char(struct tty_struct *tty, unsigned char ch)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;
	int ret = 0;

	if ( debug_level >= DEBUG_LEVEL_INFO ) {
		printk( "%s(%d):%s put_char(%d)\n",
			__FILE__,__LINE__,info->device_name,ch);
	}

	if (sanity_check(info, tty->name, "put_char"))
		return 0;

	if (!info->tx_buf)
		return 0;

	spin_lock_irqsave(&info->lock,flags);

	/* in HDLC mode only accumulate while no frame is being sent */
	if ( (info->params.mode != MGSL_MODE_HDLC) ||
	     !info->tx_active ) {

		if (info->tx_count < info->max_frame_size - 1) {
			info->tx_buf[info->tx_put++] = ch;
			if (info->tx_put >= info->max_frame_size)
				info->tx_put -= info->max_frame_size;
			info->tx_count++;
			ret = 1;
		}
	}

	spin_unlock_irqrestore(&info->lock,flags);
	return ret;
}

/* Send a high-priority XON/XOFF character
 */
static void send_xchar(struct tty_struct *tty, char ch)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s send_xchar(%d)\n",
			 __FILE__,__LINE__, info->device_name, ch );

	if (sanity_check(info, tty->name, "send_xchar"))
		return;

	info->x_char = ch;
	if (ch) {
		/* Make sure transmit interrupts are on */
		spin_lock_irqsave(&info->lock,flags);
		if (!info->tx_enabled)
			tx_start(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}
}

/* Wait until the transmitter is empty.
 */
static void wait_until_sent(struct tty_struct *tty, int timeout)
{
	SLMP_INFO * info = tty->driver_data;
	unsigned long orig_jiffies, char_time;

	if (!info )
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s wait_until_sent() entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (sanity_check(info, tty->name, "wait_until_sent"))
		return;

	if (!test_bit(ASYNCB_INITIALIZED, &info->port.flags))
		goto exit;

	orig_jiffies = jiffies;

	/* Set check interval to 1/5 of estimated time to
	 * send a character, and make it at least 1. The check
	 * interval should also be less than the timeout.
	 * Note: use tight timings here to satisfy the NIST-PCTS.
	 */

	if ( info->params.data_rate ) {
		char_time = info->timeout/(32 * 5);
		if (!char_time)
			char_time++;
	} else
		char_time = 1;

	if (timeout)
		char_time = min_t(unsigned long, char_time, timeout);

	/* poll the transmitter state, sleeping one char_time per check */
	if ( info->params.mode == MGSL_MODE_HDLC ) {
		while (info->tx_active) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	} else {
		/*
		 * TODO: determine if there is something similar to USC16C32
		 * 	 TXSTATUS_ALL_SENT status
		 */
		while ( info->tx_active && info->tx_enabled) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	}

exit:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s wait_until_sent() exit\n",
			 __FILE__,__LINE__, info->device_name );
}

/* Return the count of free bytes in transmit buffer
 */
static int write_room(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;
	int ret;

	if (sanity_check(info, tty->name, "write_room"))
		return 0;

	if (info->params.mode == MGSL_MODE_HDLC) {
		/* room for a whole frame only when no frame is active */
		ret = (info->tx_active) ?
0 : HDLC_MAX_FRAME_SIZE;
	} else {
		/* async: circular buffer keeps one slot free */
		ret = info->max_frame_size - info->tx_count - 1;
		if (ret < 0)
			ret = 0;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s write_room()=%d\n",
		       __FILE__, __LINE__, info->device_name, ret);

	return ret;
}

/* enable transmitter and send remaining buffered characters
 */
static void flush_chars(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):%s flush_chars() entry tx_count=%d\n",
			__FILE__,__LINE__,info->device_name,info->tx_count);

	if (sanity_check(info, tty->name, "flush_chars"))
		return;

	if (info->tx_count <= 0 || tty->stopped || tty->hw_stopped ||
	    !info->tx_buf)
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):%s flush_chars() entry, starting transmitter\n",
			__FILE__,__LINE__,info->device_name );

	spin_lock_irqsave(&info->lock,flags);

	if (!info->tx_active) {
		if ( (info->params.mode == MGSL_MODE_HDLC) &&
			info->tx_count ) {
			/* operating in synchronous (frame oriented) mode */
			/* copy data from circular tx_buf to */
			/* transmit DMA buffer.
			 */
			tx_load_dma_buffer(info,
				 info->tx_buf,info->tx_count);
		}
		tx_start(info);
	}

	spin_unlock_irqrestore(&info->lock,flags);
}

/* Discard all data in the send buffer
 */
static void flush_buffer(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s flush_buffer() entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (sanity_check(info, tty->name, "flush_buffer"))
		return;

	/* reset circular buffer indices and cancel pending tx timer */
	spin_lock_irqsave(&info->lock,flags);
	info->tx_count = info->tx_put = info->tx_get = 0;
	del_timer(&info->tx_timer);
	spin_unlock_irqrestore(&info->lock,flags);

	tty_wakeup(tty);
}

/* throttle (stop) transmitter
 */
static void tx_hold(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "tx_hold"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):%s tx_hold()\n",
			__FILE__,__LINE__,info->device_name);

	spin_lock_irqsave(&info->lock,flags);
	if (info->tx_enabled)
	 	tx_stop(info);
	spin_unlock_irqrestore(&info->lock,flags);
}

/* release (start) transmitter
 */
static void tx_release(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "tx_release"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):%s tx_release()\n",
			__FILE__,__LINE__,info->device_name);

	spin_lock_irqsave(&info->lock,flags);
	if (!info->tx_enabled)
	 	tx_start(info);
	spin_unlock_irqrestore(&info->lock,flags);
}

/* Service an IOCTL request
 *
 * Arguments:
 *
 * 	tty	pointer to tty instance data
 * 	cmd	IOCTL command code
 * 	arg	command argument/context
 *
 * Return Value:	0 if success, otherwise error code
 */
static int ioctl(struct tty_struct *tty,
		 unsigned int cmd, unsigned long arg)
{
	SLMP_INFO *info = tty->driver_data;
	void __user *argp = (void __user *)arg;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s ioctl() cmd=%08X\n", __FILE__,__LINE__,
			info->device_name, cmd );

	if (sanity_check(info, tty->name, "ioctl"))
		return -ENODEV;

	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
	    (cmd != TIOCMIWAIT)) {
		if (tty->flags & (1 << TTY_IO_ERROR))
		    return -EIO;
	}

	/* dispatch driver-private MGSL_IOC* commands */
	switch (cmd) {
	case MGSL_IOCGPARAMS:
		return get_params(info, argp);
	case MGSL_IOCSPARAMS:
		return set_params(info, argp);
	case MGSL_IOCGTXIDLE:
		return get_txidle(info, argp);
	case MGSL_IOCSTXIDLE:
		return set_txidle(info, (int)arg);
	case MGSL_IOCTXENABLE:
		return tx_enable(info, (int)arg);
	case MGSL_IOCRXENABLE:
		return rx_enable(info, (int)arg);
	case MGSL_IOCTXABORT:
		return tx_abort(info);
	case MGSL_IOCGSTATS:
		return get_stats(info, argp);
	case MGSL_IOCWAITEVENT:
		return wait_mgsl_event(info, argp);
	case MGSL_IOCLOOPTXDONE:
		return 0; // TODO: Not supported, need to document
		/* Wait for modem input (DCD,RI,DSR,CTS) change
		 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
		 */
	case TIOCMIWAIT:
		return modem_input_wait(info,(int)arg);
		
		/*
		 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
		 * Return: write counters to the user passed counter struct
		 * NB: both 1->0 and 0->1 transitions are counted except for
		 *     RI where only 0->1 is counted.
		 */
	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

/* Copy a consistent snapshot of the interrupt counters to the
 * serial_icounter_struct supplied by the tty core. */
static int get_icount(struct tty_struct *tty,
				struct serial_icounter_struct *icount)
{
	SLMP_INFO *info = tty->driver_data;
	struct mgsl_icount cnow;	/* kernel counter temps */
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);
	cnow = info->icount;
	spin_unlock_irqrestore(&info->lock,flags);

	icount->cts = cnow.cts;
	icount->dsr = cnow.dsr;
	icount->rng = cnow.rng;
	icount->dcd = cnow.dcd;
	icount->rx = cnow.rx;
	icount->tx = cnow.tx;
	icount->frame = cnow.frame;
	icount->overrun = cnow.overrun;
	icount->parity = cnow.parity;
	icount->brk = cnow.brk;
	icount->buf_overrun = cnow.buf_overrun;

	return 0;
}

/*
 * /proc fs routines....
*/ static inline void line_info(struct seq_file *m, SLMP_INFO *info) { char stat_buf[30]; unsigned long flags; seq_printf(m, "%s: SCABase=%08x Mem=%08X StatusControl=%08x LCR=%08X\n" "\tIRQ=%d MaxFrameSize=%u\n", info->device_name, info->phys_sca_base, info->phys_memory_base, info->phys_statctrl_base, info->phys_lcr_base, info->irq_level, info->max_frame_size ); /* output current serial signal states */ spin_lock_irqsave(&info->lock,flags); get_signals(info); spin_unlock_irqrestore(&info->lock,flags); stat_buf[0] = 0; stat_buf[1] = 0; if (info->serial_signals & SerialSignal_RTS) strcat(stat_buf, "|RTS"); if (info->serial_signals & SerialSignal_CTS) strcat(stat_buf, "|CTS"); if (info->serial_signals & SerialSignal_DTR) strcat(stat_buf, "|DTR"); if (info->serial_signals & SerialSignal_DSR) strcat(stat_buf, "|DSR"); if (info->serial_signals & SerialSignal_DCD) strcat(stat_buf, "|CD"); if (info->serial_signals & SerialSignal_RI) strcat(stat_buf, "|RI"); if (info->params.mode == MGSL_MODE_HDLC) { seq_printf(m, "\tHDLC txok:%d rxok:%d", info->icount.txok, info->icount.rxok); if (info->icount.txunder) seq_printf(m, " txunder:%d", info->icount.txunder); if (info->icount.txabort) seq_printf(m, " txabort:%d", info->icount.txabort); if (info->icount.rxshort) seq_printf(m, " rxshort:%d", info->icount.rxshort); if (info->icount.rxlong) seq_printf(m, " rxlong:%d", info->icount.rxlong); if (info->icount.rxover) seq_printf(m, " rxover:%d", info->icount.rxover); if (info->icount.rxcrc) seq_printf(m, " rxlong:%d", info->icount.rxcrc); } else { seq_printf(m, "\tASYNC tx:%d rx:%d", info->icount.tx, info->icount.rx); if (info->icount.frame) seq_printf(m, " fe:%d", info->icount.frame); if (info->icount.parity) seq_printf(m, " pe:%d", info->icount.parity); if (info->icount.brk) seq_printf(m, " brk:%d", info->icount.brk); if (info->icount.overrun) seq_printf(m, " oe:%d", info->icount.overrun); } /* Append serial signal status to end */ seq_printf(m, " %s\n", stat_buf+1); seq_printf(m, 
"\ttxactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
	 info->tx_active,info->bh_requested,info->bh_running,
	 info->pending_bh);
}

/* Called to print information about devices
 */
static int synclinkmp_proc_show(struct seq_file *m, void *v)
{
	SLMP_INFO *info;

	seq_printf(m, "synclinkmp driver:%s\n", driver_version);

	/* walk the global singly-linked device list */
	info = synclinkmp_device_list;
	while( info ) {
		line_info(m, info);
		info = info->next_device;
	}
	return 0;
}

static int synclinkmp_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, synclinkmp_proc_show, NULL);
}

static const struct file_operations synclinkmp_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= synclinkmp_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Return the count of bytes in transmit buffer
 */
static int chars_in_buffer(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;

	if (sanity_check(info, tty->name, "chars_in_buffer"))
		return 0;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s chars_in_buffer()=%d\n",
		       __FILE__, __LINE__, info->device_name, info->tx_count);

	return info->tx_count;
}

/* Signal remote device to throttle send data (our receive data)
 */
static void throttle(struct tty_struct * tty)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s throttle() entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (sanity_check(info, tty->name, "throttle"))
		return;

	if (I_IXOFF(tty))
		send_xchar(tty, STOP_CHAR(tty));

	/* with hardware flow control, drop RTS to throttle the sender */
	if (tty->termios.c_cflag & CRTSCTS) {
		spin_lock_irqsave(&info->lock,flags);
		info->serial_signals &= ~SerialSignal_RTS;
	 	set_signals(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}
}

/* Signal remote device to stop throttling send data (our receive data)
 */
static void unthrottle(struct tty_struct * tty)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s unthrottle() entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (sanity_check(info,
tty->name, "unthrottle"))
		return;

	if (I_IXOFF(tty)) {
		/* cancel a pending XOFF, or actively send XON */
		if (info->x_char)
			info->x_char = 0;
		else
			send_xchar(tty, START_CHAR(tty));
	}

	/* with hardware flow control, raise RTS again */
	if (tty->termios.c_cflag & CRTSCTS) {
		spin_lock_irqsave(&info->lock,flags);
		info->serial_signals |= SerialSignal_RTS;
	 	set_signals(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}
}

/* set or clear transmit break condition
 * break_state	-1=set break condition, 0=clear
 */
static int set_break(struct tty_struct *tty, int break_state)
{
	unsigned char RegValue;
	SLMP_INFO * info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s set_break(%d)\n",
			 __FILE__,__LINE__, info->device_name, break_state);

	if (sanity_check(info, tty->name, "set_break"))
		return -EINVAL;

	/* BIT3 of the CTL register controls the break condition
	 * (see the SCA register macros earlier in this file) */
	spin_lock_irqsave(&info->lock,flags);
	RegValue = read_reg(info, CTL);
 	if (break_state == -1)
		RegValue |= BIT3;
	else
		RegValue &= ~BIT3;
	write_reg(info, CTL, RegValue);
	spin_unlock_irqrestore(&info->lock,flags);
	return 0;
}

#if SYNCLINK_GENERIC_HDLC

/**
 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
 * set encoding and frame check sequence (FCS) options
 *
 * dev       pointer to network device structure
 * encoding  serial encoding setting
 * parity    FCS setting
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
			  unsigned short parity)
{
	SLMP_INFO *info = dev_to_port(dev);
	unsigned char  new_encoding;
	unsigned short new_crctype;

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	/* map generic-HDLC encoding constants to driver constants */
	switch (encoding)
	{
	case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
	case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
	case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
	case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
	case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
	default: return -EINVAL;
	}

	switch (parity)
	{
	case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
	case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
	case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
	default: return -EINVAL;
	}

	info->params.encoding = new_encoding;
	info->params.crc_type = new_crctype;

	/* if network interface up, reprogram hardware */
	if (info->netcount)
		program_hw(info);

	return 0;
}

/**
 * called by generic HDLC layer to send frame
 *
 * skb  socket buffer containing HDLC frame
 * dev  pointer to network device structure
 */
static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	SLMP_INFO *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);

	/* stop sending until this frame completes */
	netif_stop_queue(dev);

	/* copy data to device buffers */
	info->tx_count = skb->len;
	tx_load_dma_buffer(info, skb->data, skb->len);

	/* update network statistics */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* done with socket buffer, so free it */
	dev_kfree_skb(skb);

	/* save start
time for transmit timeout detection */
	dev->trans_start = jiffies;

	/* start hardware transmitter if necessary */
	spin_lock_irqsave(&info->lock,flags);
	if (!info->tx_active)
	 	tx_start(info);
	spin_unlock_irqrestore(&info->lock,flags);

	return NETDEV_TX_OK;
}

/**
 * called by network layer when interface enabled
 * claim resources and initialize hardware
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_open(struct net_device *dev)
{
	SLMP_INFO *info = dev_to_port(dev);
	int rc;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);

	/* generic HDLC layer open processing */
	if ((rc = hdlc_open(dev)))
		return rc;

	/* arbitrate between network and tty opens */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->port.count != 0 || info->netcount != 0) {
		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
		spin_unlock_irqrestore(&info->netlock, flags);
		return -EBUSY;
	}
	info->netcount=1;
	spin_unlock_irqrestore(&info->netlock, flags);

	/* claim resources and init adapter */
	if ((rc = startup(info)) != 0) {
		/* undo the netcount claim taken above */
		spin_lock_irqsave(&info->netlock, flags);
		info->netcount=0;
		spin_unlock_irqrestore(&info->netlock, flags);
		return rc;
	}

	/* assert RTS and DTR, apply hardware settings */
	info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
	program_hw(info);

	/* enable network layer transmit */
	dev->trans_start = jiffies;
	netif_start_queue(dev);

	/* inform generic HDLC layer of current DCD status */
	spin_lock_irqsave(&info->lock, flags);
	get_signals(info);
	spin_unlock_irqrestore(&info->lock, flags);
	if (info->serial_signals & SerialSignal_DCD)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

/**
 * called by network layer when interface is disabled
 * shutdown hardware and release resources
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_close(struct net_device *dev)
{
	SLMP_INFO *info =
dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);

	netif_stop_queue(dev);

	/* shutdown adapter and release resources */
	shutdown(info);

	hdlc_close(dev);

	/* release the netdevice claim so the tty side may open */
	spin_lock_irqsave(&info->netlock, flags);
	info->netcount=0;
	spin_unlock_irqrestore(&info->netlock, flags);

	return 0;
}

/**
 * called by network layer to process IOCTL call to network device
 *
 * dev  pointer to network device structure
 * ifr  pointer to network interface request structure
 * cmd  IOCTL command code
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	SLMP_INFO *info = dev_to_port(dev);
	unsigned int flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE: /* return current sync_serial_settings */

		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}

		/* translate the driver's clocking flags to a generic
		 * HDLC clock_type for the caller */
		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);

		memset(&new_line, 0, sizeof(new_line));
		switch (flags){
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
		case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
		default:
new_line.clock_type = CLOCK_DEFAULT; } new_line.clock_rate = info->params.clock_speed; new_line.loopback = info->params.loopback ? 1:0; if (copy_to_user(line, &new_line, size)) return -EFAULT; return 0; case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */ if(!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&new_line, line, size)) return -EFAULT; switch (new_line.clock_type) { case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break; case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break; case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break; case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break; case CLOCK_DEFAULT: flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break; default: return -EINVAL; } if (new_line.loopback != 0 && new_line.loopback != 1) return -EINVAL; info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); info->params.flags |= flags; info->params.loopback = new_line.loopback; if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG)) info->params.clock_speed = new_line.clock_rate; else info->params.clock_speed = 0; /* if network interface up, reprogram hardware */ if (info->netcount) program_hw(info); return 0; default: return hdlc_ioctl(dev, ifr, cmd); } } /** * called by network layer when transmit timeout is detected * * dev pointer to network device structure */ static void hdlcdev_tx_timeout(struct net_device *dev) { SLMP_INFO *info = dev_to_port(dev); unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("hdlcdev_tx_timeout(%s)\n",dev->name); dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; spin_lock_irqsave(&info->lock,flags); tx_stop(info); 
	spin_unlock_irqrestore(&info->lock,flags);
	netif_wake_queue(dev);
}

/**
 * called by device driver when transmit completes
 * reenable network layer transmit if stopped
 *
 * info  pointer to device instance information
 */
static void hdlcdev_tx_done(SLMP_INFO *info)
{
	/* only wake the queue if the network layer actually stopped it */
	if (netif_queue_stopped(info->netdev))
		netif_wake_queue(info->netdev);
}

/**
 * called by device driver when frame received
 * pass frame to network layer
 *
 * info  pointer to device instance information
 * buf   pointer to buffer containing frame data
 * size  count of data bytes in buf
 */
static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size)
{
	struct sk_buff *skb = dev_alloc_skb(size);
	struct net_device *dev = info->netdev;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_rx(%s)\n",dev->name);

	if (skb == NULL) {
		/* out of memory: count the drop and discard the frame */
		printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
		       dev->name);
		dev->stats.rx_dropped++;
		return;
	}

	/* copy frame data into the skb, then hand it to the HDLC layer
	 * for protocol identification and on to the network stack */
	memcpy(skb_put(skb, size), buf, size);

	skb->protocol = hdlc_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += size;

	netif_rx(skb);
}

/* network-layer callbacks for the HDLC network interface */
static const struct net_device_ops hdlcdev_ops = {
	.ndo_open       = hdlcdev_open,
	.ndo_stop       = hdlcdev_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hdlcdev_ioctl,
	.ndo_tx_timeout = hdlcdev_tx_timeout,
};

/**
 * called by device driver when adding device instance
 * do generic HDLC initialization
 *
 * info  pointer to device instance information
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_init(SLMP_INFO *info)
{
	int rc;
	struct net_device *dev;
	hdlc_device *hdlc;

	/* allocate and initialize network and HDLC layer objects */

	if (!(dev = alloc_hdlcdev(info))) {
		printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
		return -ENOMEM;
	}

	/* for network layer reporting purposes only */
	dev->mem_start = info->phys_sca_base;
	dev->mem_end   = info->phys_sca_base + SCA_BASE_SIZE - 1;
	dev->irq       = info->irq_level;

	/* network layer callbacks and settings */
	dev->netdev_ops =
&hdlcdev_ops; dev->watchdog_timeo = 10 * HZ; dev->tx_queue_len = 50; /* generic HDLC layer callbacks and settings */ hdlc = dev_to_hdlc(dev); hdlc->attach = hdlcdev_attach; hdlc->xmit = hdlcdev_xmit; /* register objects with HDLC layer */ if ((rc = register_hdlc_device(dev))) { printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__); free_netdev(dev); return rc; } info->netdev = dev; return 0; } /** * called by device driver when removing device instance * do generic HDLC cleanup * * info pointer to device instance information */ static void hdlcdev_exit(SLMP_INFO *info) { unregister_hdlc_device(info->netdev); free_netdev(info->netdev); info->netdev = NULL; } #endif /* CONFIG_HDLC */ /* Return next bottom half action to perform. * Return Value: BH action code or 0 if nothing to do. */ static int bh_action(SLMP_INFO *info) { unsigned long flags; int rc = 0; spin_lock_irqsave(&info->lock,flags); if (info->pending_bh & BH_RECEIVE) { info->pending_bh &= ~BH_RECEIVE; rc = BH_RECEIVE; } else if (info->pending_bh & BH_TRANSMIT) { info->pending_bh &= ~BH_TRANSMIT; rc = BH_TRANSMIT; } else if (info->pending_bh & BH_STATUS) { info->pending_bh &= ~BH_STATUS; rc = BH_STATUS; } if (!rc) { /* Mark BH routine as complete */ info->bh_running = false; info->bh_requested = false; } spin_unlock_irqrestore(&info->lock,flags); return rc; } /* Perform bottom half processing of work items queued by ISR. 
 */
static void bh_handler(struct work_struct *work)
{
	SLMP_INFO *info = container_of(work, SLMP_INFO, task);
	int action;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):%s bh_handler() entry\n",
			__FILE__,__LINE__,info->device_name);

	info->bh_running = true;

	/* drain every work item the ISR queued; bh_action() clears
	 * bh_running/bh_requested under the lock when nothing is left */
	while((action = bh_action(info)) != 0) {

		/* Process work item */

		if ( debug_level >= DEBUG_LEVEL_BH )
			printk( "%s(%d):%s bh_handler() work item action=%d\n",
				__FILE__,__LINE__,info->device_name, action);

		switch (action) {

		case BH_RECEIVE:
			bh_receive(info);
			break;
		case BH_TRANSMIT:
			bh_transmit(info);
			break;
		case BH_STATUS:
			bh_status(info);
			break;
		default:
			/* unknown work item ID */
			printk("%s(%d):%s Unknown work item ID=%08X!\n",
				__FILE__,__LINE__,info->device_name,action);
			break;
		}
	}

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):%s bh_handler() exit\n",
			__FILE__,__LINE__,info->device_name);
}

/* bottom half receive work: pull all completed frames from the device */
static void bh_receive(SLMP_INFO *info)
{
	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):%s bh_receive()\n",
			__FILE__,__LINE__,info->device_name);

	while( rx_get_frame(info) );
}

/* bottom half transmit work: wake any tty writer waiting for buffer space */
static void bh_transmit(SLMP_INFO *info)
{
	struct tty_struct *tty = info->port.tty;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):%s bh_transmit() entry\n",
			__FILE__,__LINE__,info->device_name);

	if (tty)
		tty_wakeup(tty);
}

/* bottom half status work: reset the modem-input interrupt rate-limit
 * counters (incremented in isr_io_pin() up to IO_PIN_SHUTDOWN_LIMIT) */
static void bh_status(SLMP_INFO *info)
{
	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):%s bh_status() entry\n",
			__FILE__,__LINE__,info->device_name);

	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
}

/* handle a timer interrupt: disable the timer and record the event */
static void isr_timer(SLMP_INFO * info)
{
	/* odd-numbered ports use TIMER2, even-numbered ports use TIMER0 */
	unsigned char timer = (info->port_num & 1) ?
TIMER2 : TIMER0; /* IER2<7..4> = timer<3..0> interrupt enables (0=disabled) */ write_reg(info, IER2, 0); /* TMCS, Timer Control/Status Register * * 07 CMF, Compare match flag (read only) 1=match * 06 ECMI, CMF Interrupt Enable: 0=disabled * 05 Reserved, must be 0 * 04 TME, Timer Enable * 03..00 Reserved, must be 0 * * 0000 0000 */ write_reg(info, (unsigned char)(timer + TMCS), 0); info->irq_occurred = true; if ( debug_level >= DEBUG_LEVEL_ISR ) printk("%s(%d):%s isr_timer()\n", __FILE__,__LINE__,info->device_name); } static void isr_rxint(SLMP_INFO * info) { struct tty_struct *tty = info->port.tty; struct mgsl_icount *icount = &info->icount; unsigned char status = read_reg(info, SR1) & info->ie1_value & (FLGD + IDLD + CDCD + BRKD); unsigned char status2 = read_reg(info, SR2) & info->ie2_value & OVRN; /* clear status bits */ if (status) write_reg(info, SR1, status); if (status2) write_reg(info, SR2, status2); if ( debug_level >= DEBUG_LEVEL_ISR ) printk("%s(%d):%s isr_rxint status=%02X %02x\n", __FILE__,__LINE__,info->device_name,status,status2); if (info->params.mode == MGSL_MODE_ASYNC) { if (status & BRKD) { icount->brk++; /* process break detection if tty control * is not set to ignore it */ if (!(status & info->ignore_status_mask1)) { if (info->read_status_mask1 & BRKD) { tty_insert_flip_char(&info->port, 0, TTY_BREAK); if (tty && (info->port.flags & ASYNC_SAK)) do_SAK(tty); } } } } else { if (status & (FLGD|IDLD)) { if (status & FLGD) info->icount.exithunt++; else if (status & IDLD) info->icount.rxidle++; wake_up_interruptible(&info->event_wait_q); } } if (status & CDCD) { /* simulate a common modem status change interrupt * for our handler */ get_signals( info ); isr_io_pin(info, MISCSTATUS_DCD_LATCHED|(info->serial_signals&SerialSignal_DCD)); } } /* * handle async rx data interrupts */ static void isr_rxrdy(SLMP_INFO * info) { u16 status; unsigned char DataByte; struct mgsl_icount *icount = &info->icount; if ( debug_level >= DEBUG_LEVEL_ISR ) 
printk("%s(%d):%s isr_rxrdy\n", __FILE__,__LINE__,info->device_name); while((status = read_reg(info,CST0)) & BIT0) { int flag = 0; bool over = false; DataByte = read_reg(info,TRB); icount->rx++; if ( status & (PE + FRME + OVRN) ) { printk("%s(%d):%s rxerr=%04X\n", __FILE__,__LINE__,info->device_name,status); /* update error statistics */ if (status & PE) icount->parity++; else if (status & FRME) icount->frame++; else if (status & OVRN) icount->overrun++; /* discard char if tty control flags say so */ if (status & info->ignore_status_mask2) continue; status &= info->read_status_mask2; if (status & PE) flag = TTY_PARITY; else if (status & FRME) flag = TTY_FRAME; if (status & OVRN) { /* Overrun is special, since it's * reported immediately, and doesn't * affect the current character */ over = true; } } /* end of if (error) */ tty_insert_flip_char(&info->port, DataByte, flag); if (over) tty_insert_flip_char(&info->port, 0, TTY_OVERRUN); } if ( debug_level >= DEBUG_LEVEL_ISR ) { printk("%s(%d):%s rx=%d brk=%d parity=%d frame=%d overrun=%d\n", __FILE__,__LINE__,info->device_name, icount->rx,icount->brk,icount->parity, icount->frame,icount->overrun); } tty_flip_buffer_push(&info->port); } static void isr_txeom(SLMP_INFO * info, unsigned char status) { if ( debug_level >= DEBUG_LEVEL_ISR ) printk("%s(%d):%s isr_txeom status=%02x\n", __FILE__,__LINE__,info->device_name,status); write_reg(info, TXDMA + DIR, 0x00); /* disable Tx DMA IRQs */ write_reg(info, TXDMA + DSR, 0xc0); /* clear IRQs and disable DMA */ write_reg(info, TXDMA + DCMD, SWABORT); /* reset/init DMA channel */ if (status & UDRN) { write_reg(info, CMD, TXRESET); write_reg(info, CMD, TXENABLE); } else write_reg(info, CMD, TXBUFCLR); /* disable and clear tx interrupts */ info->ie0_value &= ~TXRDYE; info->ie1_value &= ~(IDLE + UDRN); write_reg16(info, IE0, (unsigned short)((info->ie1_value << 8) + info->ie0_value)); write_reg(info, SR1, (unsigned char)(UDRN + IDLE)); if ( info->tx_active ) { if (info->params.mode 
!= MGSL_MODE_ASYNC) { if (status & UDRN) info->icount.txunder++; else if (status & IDLE) info->icount.txok++; } info->tx_active = false; info->tx_count = info->tx_put = info->tx_get = 0; del_timer(&info->tx_timer); if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done ) { info->serial_signals &= ~SerialSignal_RTS; info->drop_rts_on_tx_done = false; set_signals(info); } #if SYNCLINK_GENERIC_HDLC if (info->netcount) hdlcdev_tx_done(info); else #endif { if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) { tx_stop(info); return; } info->pending_bh |= BH_TRANSMIT; } } } /* * handle tx status interrupts */ static void isr_txint(SLMP_INFO * info) { unsigned char status = read_reg(info, SR1) & info->ie1_value & (UDRN + IDLE + CCTS); /* clear status bits */ write_reg(info, SR1, status); if ( debug_level >= DEBUG_LEVEL_ISR ) printk("%s(%d):%s isr_txint status=%02x\n", __FILE__,__LINE__,info->device_name,status); if (status & (UDRN + IDLE)) isr_txeom(info, status); if (status & CCTS) { /* simulate a common modem status change interrupt * for our handler */ get_signals( info ); isr_io_pin(info, MISCSTATUS_CTS_LATCHED|(info->serial_signals&SerialSignal_CTS)); } } /* * handle async tx data interrupts */ static void isr_txrdy(SLMP_INFO * info) { if ( debug_level >= DEBUG_LEVEL_ISR ) printk("%s(%d):%s isr_txrdy() tx_count=%d\n", __FILE__,__LINE__,info->device_name,info->tx_count); if (info->params.mode != MGSL_MODE_ASYNC) { /* disable TXRDY IRQ, enable IDLE IRQ */ info->ie0_value &= ~TXRDYE; info->ie1_value |= IDLE; write_reg16(info, IE0, (unsigned short)((info->ie1_value << 8) + info->ie0_value)); return; } if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) { tx_stop(info); return; } if ( info->tx_count ) tx_load_fifo( info ); else { info->tx_active = false; info->ie0_value &= ~TXRDYE; write_reg(info, IE0, info->ie0_value); } if (info->tx_count < WAKEUP_CHARS) info->pending_bh |= BH_TRANSMIT; } static void 
isr_rxdmaok(SLMP_INFO * info)
{
	/* BIT7 = EOT (end of transfer)
	 * BIT6 = EOM (end of message/frame)
	 */
	unsigned char status = read_reg(info,RXDMA + DSR) & 0xc0;

	/* clear IRQ (BIT0 must be 1 to prevent clearing DE bit) */
	write_reg(info, RXDMA + DSR, (unsigned char)(status | 1));

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):%s isr_rxdmaok(), status=%02x\n",
			__FILE__,__LINE__,info->device_name,status);

	/* received data is ready; let the bottom half pull the frames */
	info->pending_bh |= BH_RECEIVE;
}

/* receive DMA error interrupt: buffer or counter overflow */
static void isr_rxdmaerror(SLMP_INFO * info)
{
	/* BIT5 = BOF (buffer overflow)
	 * BIT4 = COF (counter overflow)
	 */
	unsigned char status = read_reg(info,RXDMA + DSR) & 0x30;

	/* clear IRQ (BIT0 must be 1 to prevent clearing DE bit) */
	write_reg(info, RXDMA + DSR, (unsigned char)(status | 1));

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):%s isr_rxdmaerror(), status=%02x\n",
			__FILE__,__LINE__,info->device_name,status);

	/* flag the overflow and let the bottom half recover */
	info->rx_overflow = true;
	info->pending_bh |= BH_RECEIVE;
}

/* transmit DMA complete: stop DMA and switch to TXRDY (FIFO) interrupts */
static void isr_txdmaok(SLMP_INFO * info)
{
	unsigned char status_reg1 = read_reg(info, SR1);

	write_reg(info, TXDMA + DIR, 0x00);	/* disable Tx DMA IRQs */
	write_reg(info, TXDMA + DSR, 0xc0);	/* clear IRQs and disable DMA */
	write_reg(info, TXDMA + DCMD, SWABORT);	/* reset/init DMA channel */

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):%s isr_txdmaok(), status=%02x\n",
			__FILE__,__LINE__,info->device_name,status_reg1);

	/* program TXRDY as FIFO empty flag, enable TXRDY IRQ */
	write_reg16(info, TRC0, 0);
	info->ie0_value |= TXRDYE;
	write_reg(info, IE0, info->ie0_value);
}

/* transmit DMA error interrupt: clear and (optionally) log the status */
static void isr_txdmaerror(SLMP_INFO * info)
{
	/* BIT5 = BOF (buffer overflow)
	 * BIT4 = COF (counter overflow)
	 */
	unsigned char status = read_reg(info,TXDMA + DSR) & 0x30;

	/* clear IRQ (BIT0 must be 1 to prevent clearing DE bit) */
	write_reg(info, TXDMA + DSR, (unsigned char)(status | 1));

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):%s isr_txdmaerror(), status=%02x\n",
			__FILE__,__LINE__,info->device_name,status);
}

/* handle input serial signal changes */
static void
isr_io_pin( SLMP_INFO *info, u16 status ) { struct mgsl_icount *icount; if ( debug_level >= DEBUG_LEVEL_ISR ) printk("%s(%d):isr_io_pin status=%04X\n", __FILE__,__LINE__,status); if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED | MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) { icount = &info->icount; /* update input line counters */ if (status & MISCSTATUS_RI_LATCHED) { icount->rng++; if ( status & SerialSignal_RI ) info->input_signal_events.ri_up++; else info->input_signal_events.ri_down++; } if (status & MISCSTATUS_DSR_LATCHED) { icount->dsr++; if ( status & SerialSignal_DSR ) info->input_signal_events.dsr_up++; else info->input_signal_events.dsr_down++; } if (status & MISCSTATUS_DCD_LATCHED) { if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) { info->ie1_value &= ~CDCD; write_reg(info, IE1, info->ie1_value); } icount->dcd++; if (status & SerialSignal_DCD) { info->input_signal_events.dcd_up++; } else info->input_signal_events.dcd_down++; #if SYNCLINK_GENERIC_HDLC if (info->netcount) { if (status & SerialSignal_DCD) netif_carrier_on(info->netdev); else netif_carrier_off(info->netdev); } #endif } if (status & MISCSTATUS_CTS_LATCHED) { if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) { info->ie1_value &= ~CCTS; write_reg(info, IE1, info->ie1_value); } icount->cts++; if ( status & SerialSignal_CTS ) info->input_signal_events.cts_up++; else info->input_signal_events.cts_down++; } wake_up_interruptible(&info->status_event_wait_q); wake_up_interruptible(&info->event_wait_q); if ( (info->port.flags & ASYNC_CHECK_CD) && (status & MISCSTATUS_DCD_LATCHED) ) { if ( debug_level >= DEBUG_LEVEL_ISR ) printk("%s CD now %s...", info->device_name, (status & SerialSignal_DCD) ? 
"on" : "off"); if (status & SerialSignal_DCD) wake_up_interruptible(&info->port.open_wait); else { if ( debug_level >= DEBUG_LEVEL_ISR ) printk("doing serial hangup..."); if (info->port.tty) tty_hangup(info->port.tty); } } if (tty_port_cts_enabled(&info->port) && (status & MISCSTATUS_CTS_LATCHED) ) { if ( info->port.tty ) { if (info->port.tty->hw_stopped) { if (status & SerialSignal_CTS) { if ( debug_level >= DEBUG_LEVEL_ISR ) printk("CTS tx start..."); info->port.tty->hw_stopped = 0; tx_start(info); info->pending_bh |= BH_TRANSMIT; return; } } else { if (!(status & SerialSignal_CTS)) { if ( debug_level >= DEBUG_LEVEL_ISR ) printk("CTS tx stop..."); info->port.tty->hw_stopped = 1; tx_stop(info); } } } } } info->pending_bh |= BH_STATUS; } /* Interrupt service routine entry point. * * Arguments: * irq interrupt number that caused interrupt * dev_id device ID supplied during interrupt registration * regs interrupted processor context */ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id) { SLMP_INFO *info = dev_id; unsigned char status, status0, status1=0; unsigned char dmastatus, dmastatus0, dmastatus1=0; unsigned char timerstatus0, timerstatus1=0; unsigned char shift; unsigned int i; unsigned short tmp; if ( debug_level >= DEBUG_LEVEL_ISR ) printk(KERN_DEBUG "%s(%d): synclinkmp_interrupt(%d)entry.\n", __FILE__, __LINE__, info->irq_level); spin_lock(&info->lock); for(;;) { /* get status for SCA0 (ports 0-1) */ tmp = read_reg16(info, ISR0); /* get ISR0 and ISR1 in one read */ status0 = (unsigned char)tmp; dmastatus0 = (unsigned char)(tmp>>8); timerstatus0 = read_reg(info, ISR2); if ( debug_level >= DEBUG_LEVEL_ISR ) printk(KERN_DEBUG "%s(%d):%s status0=%02x, dmastatus0=%02x, timerstatus0=%02x\n", __FILE__, __LINE__, info->device_name, status0, dmastatus0, timerstatus0); if (info->port_count == 4) { /* get status for SCA1 (ports 2-3) */ tmp = read_reg16(info->port_array[2], ISR0); status1 = (unsigned char)tmp; dmastatus1 = (unsigned char)(tmp>>8); 
timerstatus1 = read_reg(info->port_array[2], ISR2); if ( debug_level >= DEBUG_LEVEL_ISR ) printk("%s(%d):%s status1=%02x, dmastatus1=%02x, timerstatus1=%02x\n", __FILE__,__LINE__,info->device_name, status1,dmastatus1,timerstatus1); } if (!status0 && !dmastatus0 && !timerstatus0 && !status1 && !dmastatus1 && !timerstatus1) break; for(i=0; i < info->port_count ; i++) { if (info->port_array[i] == NULL) continue; if (i < 2) { status = status0; dmastatus = dmastatus0; } else { status = status1; dmastatus = dmastatus1; } shift = i & 1 ? 4 :0; if (status & BIT0 << shift) isr_rxrdy(info->port_array[i]); if (status & BIT1 << shift) isr_txrdy(info->port_array[i]); if (status & BIT2 << shift) isr_rxint(info->port_array[i]); if (status & BIT3 << shift) isr_txint(info->port_array[i]); if (dmastatus & BIT0 << shift) isr_rxdmaerror(info->port_array[i]); if (dmastatus & BIT1 << shift) isr_rxdmaok(info->port_array[i]); if (dmastatus & BIT2 << shift) isr_txdmaerror(info->port_array[i]); if (dmastatus & BIT3 << shift) isr_txdmaok(info->port_array[i]); } if (timerstatus0 & (BIT5 | BIT4)) isr_timer(info->port_array[0]); if (timerstatus0 & (BIT7 | BIT6)) isr_timer(info->port_array[1]); if (timerstatus1 & (BIT5 | BIT4)) isr_timer(info->port_array[2]); if (timerstatus1 & (BIT7 | BIT6)) isr_timer(info->port_array[3]); } for(i=0; i < info->port_count ; i++) { SLMP_INFO * port = info->port_array[i]; /* Request bottom half processing if there's something * for it to do and the bh is not already running. * * Note: startup adapter diags require interrupts. * do not request bottom half processing if the * device is not open in a normal mode. 
*/ if ( port && (port->port.count || port->netcount) && port->pending_bh && !port->bh_running && !port->bh_requested ) { if ( debug_level >= DEBUG_LEVEL_ISR ) printk("%s(%d):%s queueing bh task.\n", __FILE__,__LINE__,port->device_name); schedule_work(&port->task); port->bh_requested = true; } } spin_unlock(&info->lock); if ( debug_level >= DEBUG_LEVEL_ISR ) printk(KERN_DEBUG "%s(%d):synclinkmp_interrupt(%d)exit.\n", __FILE__, __LINE__, info->irq_level); return IRQ_HANDLED; } /* Initialize and start device. */ static int startup(SLMP_INFO * info) { if ( debug_level >= DEBUG_LEVEL_INFO ) printk("%s(%d):%s tx_releaseup()\n",__FILE__,__LINE__,info->device_name); if (info->port.flags & ASYNC_INITIALIZED) return 0; if (!info->tx_buf) { info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL); if (!info->tx_buf) { printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n", __FILE__,__LINE__,info->device_name); return -ENOMEM; } } info->pending_bh = 0; memset(&info->icount, 0, sizeof(info->icount)); /* program hardware for current parameters */ reset_port(info); change_params(info); mod_timer(&info->status_timer, jiffies + msecs_to_jiffies(10)); if (info->port.tty) clear_bit(TTY_IO_ERROR, &info->port.tty->flags); info->port.flags |= ASYNC_INITIALIZED; return 0; } /* Called by close() and hangup() to shutdown hardware */ static void shutdown(SLMP_INFO * info) { unsigned long flags; if (!(info->port.flags & ASYNC_INITIALIZED)) return; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s synclinkmp_shutdown()\n", __FILE__,__LINE__, info->device_name ); /* clear status wait queue because status changes */ /* can't happen after shutting down the hardware */ wake_up_interruptible(&info->status_event_wait_q); wake_up_interruptible(&info->event_wait_q); del_timer(&info->tx_timer); del_timer(&info->status_timer); kfree(info->tx_buf); info->tx_buf = NULL; spin_lock_irqsave(&info->lock,flags); reset_port(info); if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) { 
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); set_signals(info); } spin_unlock_irqrestore(&info->lock,flags); if (info->port.tty) set_bit(TTY_IO_ERROR, &info->port.tty->flags); info->port.flags &= ~ASYNC_INITIALIZED; } static void program_hw(SLMP_INFO *info) { unsigned long flags; spin_lock_irqsave(&info->lock,flags); rx_stop(info); tx_stop(info); info->tx_count = info->tx_put = info->tx_get = 0; if (info->params.mode == MGSL_MODE_HDLC || info->netcount) hdlc_mode(info); else async_mode(info); set_signals(info); info->dcd_chkcount = 0; info->cts_chkcount = 0; info->ri_chkcount = 0; info->dsr_chkcount = 0; info->ie1_value |= (CDCD|CCTS); write_reg(info, IE1, info->ie1_value); get_signals(info); if (info->netcount || (info->port.tty && info->port.tty->termios.c_cflag & CREAD) ) rx_start(info); spin_unlock_irqrestore(&info->lock,flags); } /* Reconfigure adapter based on new parameters */ static void change_params(SLMP_INFO *info) { unsigned cflag; int bits_per_char; if (!info->port.tty) return; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s change_params()\n", __FILE__,__LINE__, info->device_name ); cflag = info->port.tty->termios.c_cflag; /* if B0 rate (hangup) specified then negate RTS and DTR */ /* otherwise assert RTS and DTR */ if (cflag & CBAUD) info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR; else info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); /* byte size and parity */ switch (cflag & CSIZE) { case CS5: info->params.data_bits = 5; break; case CS6: info->params.data_bits = 6; break; case CS7: info->params.data_bits = 7; break; case CS8: info->params.data_bits = 8; break; /* Never happens, but GCC is too dumb to figure it out */ default: info->params.data_bits = 7; break; } if (cflag & CSTOPB) info->params.stop_bits = 2; else info->params.stop_bits = 1; info->params.parity = ASYNC_PARITY_NONE; if (cflag & PARENB) { if (cflag & PARODD) info->params.parity = ASYNC_PARITY_ODD; else info->params.parity = 
ASYNC_PARITY_EVEN; #ifdef CMSPAR if (cflag & CMSPAR) info->params.parity = ASYNC_PARITY_SPACE; #endif } /* calculate number of jiffies to transmit a full * FIFO (32 bytes) at specified data rate */ bits_per_char = info->params.data_bits + info->params.stop_bits + 1; /* if port data rate is set to 460800 or less then * allow tty settings to override, otherwise keep the * current data rate. */ if (info->params.data_rate <= 460800) { info->params.data_rate = tty_get_baud_rate(info->port.tty); } if ( info->params.data_rate ) { info->timeout = (32*HZ*bits_per_char) / info->params.data_rate; } info->timeout += HZ/50; /* Add .02 seconds of slop */ if (cflag & CRTSCTS) info->port.flags |= ASYNC_CTS_FLOW; else info->port.flags &= ~ASYNC_CTS_FLOW; if (cflag & CLOCAL) info->port.flags &= ~ASYNC_CHECK_CD; else info->port.flags |= ASYNC_CHECK_CD; /* process tty input control flags */ info->read_status_mask2 = OVRN; if (I_INPCK(info->port.tty)) info->read_status_mask2 |= PE | FRME; if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty)) info->read_status_mask1 |= BRKD; if (I_IGNPAR(info->port.tty)) info->ignore_status_mask2 |= PE | FRME; if (I_IGNBRK(info->port.tty)) { info->ignore_status_mask1 |= BRKD; /* If ignoring parity and break indicators, ignore * overruns too. (For real raw support). 
*/ if (I_IGNPAR(info->port.tty)) info->ignore_status_mask2 |= OVRN; } program_hw(info); } static int get_stats(SLMP_INFO * info, struct mgsl_icount __user *user_icount) { int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s get_params()\n", __FILE__,__LINE__, info->device_name); if (!user_icount) { memset(&info->icount, 0, sizeof(info->icount)); } else { mutex_lock(&info->port.mutex); COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount)); mutex_unlock(&info->port.mutex); if (err) return -EFAULT; } return 0; } static int get_params(SLMP_INFO * info, MGSL_PARAMS __user *user_params) { int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s get_params()\n", __FILE__,__LINE__, info->device_name); mutex_lock(&info->port.mutex); COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS)); mutex_unlock(&info->port.mutex); if (err) { if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):%s get_params() user buffer copy failed\n", __FILE__,__LINE__,info->device_name); return -EFAULT; } return 0; } static int set_params(SLMP_INFO * info, MGSL_PARAMS __user *new_params) { unsigned long flags; MGSL_PARAMS tmp_params; int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s set_params\n", __FILE__,__LINE__,info->device_name ); COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS)); if (err) { if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):%s set_params() user buffer copy failed\n", __FILE__,__LINE__,info->device_name); return -EFAULT; } mutex_lock(&info->port.mutex); spin_lock_irqsave(&info->lock,flags); memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); spin_unlock_irqrestore(&info->lock,flags); change_params(info); mutex_unlock(&info->port.mutex); return 0; } static int get_txidle(SLMP_INFO * info, int __user *idle_mode) { int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s get_txidle()=%d\n", __FILE__,__LINE__, info->device_name, info->idle_mode); COPY_TO_USER(err,idle_mode, 
			&info->idle_mode, sizeof(int));
	if (err) {
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk( "%s(%d):%s get_txidle() user buffer copy failed\n",
				__FILE__,__LINE__,info->device_name);
		return -EFAULT;
	}
	return 0;
}

/* set the transmit idle pattern and apply it to the hardware */
static int set_txidle(SLMP_INFO * info, int idle_mode)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s set_txidle(%d)\n",
			__FILE__,__LINE__,info->device_name, idle_mode );

	spin_lock_irqsave(&info->lock,flags);
	info->idle_mode = idle_mode;
	tx_set_idle( info );
	spin_unlock_irqrestore(&info->lock,flags);
	return 0;
}

/* enable or disable the transmitter (no-op if already in that state) */
static int tx_enable(SLMP_INFO * info, int enable)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s tx_enable(%d)\n",
			__FILE__,__LINE__,info->device_name, enable);

	spin_lock_irqsave(&info->lock,flags);
	if ( enable ) {
		if ( !info->tx_enabled ) {
			tx_start(info);
		}
	} else {
		if ( info->tx_enabled )
			tx_stop(info);
	}
	spin_unlock_irqrestore(&info->lock,flags);
	return 0;
}

/* abort send HDLC frame */
static int tx_abort(SLMP_INFO * info)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s tx_abort()\n",
			__FILE__,__LINE__,info->device_name);

	spin_lock_irqsave(&info->lock,flags);
	/* only meaningful while an HDLC frame is actively being sent */
	if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC ) {
		/* mask underrun, unmask idle status interrupt */
		info->ie1_value &= ~UDRN;
		info->ie1_value |= IDLE;
		write_reg(info, IE1, info->ie1_value);	/* disable tx status interrupts */
		write_reg(info, SR1, (unsigned char)(IDLE + UDRN));	/* clear pending */

		write_reg(info, TXDMA + DSR, 0);		/* disable DMA channel */
		write_reg(info, TXDMA + DCMD, SWABORT);	/* reset/init DMA channel */

		write_reg(info, CMD, TXABORT);
	}
	spin_unlock_irqrestore(&info->lock,flags);
	return 0;
}

/* enable or disable the receiver (no-op if already in that state) */
static int rx_enable(SLMP_INFO * info, int enable)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s rx_enable(%d)\n",
			__FILE__,__LINE__,info->device_name,enable);

	spin_lock_irqsave(&info->lock,flags);
	if ( enable ) {
		if ( !info->rx_enabled )
			rx_start(info);
	} else {
		if ( info->rx_enabled )
			rx_stop(info);
	}
	spin_unlock_irqrestore(&info->lock,flags);
	return 0;
}

/* Block until one of the events in *mask_ptr occurs, then write the
 * triggering MgslEvent_* bits back to user space through mask_ptr.
 *
 * info		device instance data
 * mask_ptr	user pointer to int holding the MgslEvent_* bits to wait for
 *
 * Returns 0 on success (event bits written back), -EFAULT on a bad user
 * pointer, -ERESTARTSYS if interrupted by a signal, or -EIO if woken
 * without any observable state change.
 */
static int wait_mgsl_event(SLMP_INFO * info, int __user *mask_ptr)
{
	unsigned long flags;
	int s;
	int rc=0;
	struct mgsl_icount cprev, cnow;
	int events;
	int mask;
	struct _input_signal_events oldsigs, newsigs;
	DECLARE_WAITQUEUE(wait, current);

	COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
	if (rc) {
		return -EFAULT;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s wait_mgsl_event(%d)\n",
			__FILE__,__LINE__,info->device_name,mask);

	spin_lock_irqsave(&info->lock,flags);

	/* return immediately if state matches requested events */
	get_signals(info);
	s = info->serial_signals;
	events = mask &
		( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
		  ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
		  ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
		  ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
	if (events) {
		spin_unlock_irqrestore(&info->lock,flags);
		goto exit;
	}

	/* save current irq counts so later wakeups can detect a change */
	cprev = info->icount;
	oldsigs = info->input_signal_events;

	/* enable hunt and idle irqs if needed */
	if (mask & (MgslEvent_ExitHuntMode+MgslEvent_IdleReceived)) {
		unsigned char oldval = info->ie1_value;
		unsigned char newval = oldval +
			 (mask & MgslEvent_ExitHuntMode ? FLGD:0) +
			 (mask & MgslEvent_IdleReceived ? IDLD:0);
		if ( oldval != newval ) {
			info->ie1_value = newval;
			write_reg(info, IE1, info->ie1_value);
		}
	}

	/* must set state before releasing lock to avoid missed wakeup */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&info->event_wait_q, &wait);

	spin_unlock_irqrestore(&info->lock,flags);

	for(;;) {
		schedule();
		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		/* get current irq counts */
		spin_lock_irqsave(&info->lock,flags);
		cnow = info->icount;
		newsigs = info->input_signal_events;
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&info->lock,flags);

		/* if no change, wait aborted for some reason */
		if (newsigs.dsr_up == oldsigs.dsr_up &&
		    newsigs.dsr_down == oldsigs.dsr_down &&
		    newsigs.dcd_up == oldsigs.dcd_up &&
		    newsigs.dcd_down == oldsigs.dcd_down &&
		    newsigs.cts_up == oldsigs.cts_up &&
		    newsigs.cts_down == oldsigs.cts_down &&
		    newsigs.ri_up == oldsigs.ri_up &&
		    newsigs.ri_down == oldsigs.ri_down &&
		    cnow.exithunt == cprev.exithunt &&
		    cnow.rxidle == cprev.rxidle) {
			rc = -EIO;
			break;
		}

		/* map each changed counter to its event bit */
		events = mask &
			( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
			  (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
			  (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
			  (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
			  (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
			  (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
			  (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
			  (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
			  (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
			  (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
		if (events)
			break;

		cprev = cnow;
		oldsigs = newsigs;
	}

	remove_wait_queue(&info->event_wait_q, &wait);
	set_current_state(TASK_RUNNING);

	/* if no other waiter needs them, disable exit hunt mode/idle rcvd IRQs */
	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
		spin_lock_irqsave(&info->lock,flags);
		if (!waitqueue_active(&info->event_wait_q)) {
			info->ie1_value &= ~(FLGD|IDLD);
			write_reg(info, IE1, info->ie1_value);
		}
		spin_unlock_irqrestore(&info->lock,flags);
	}
exit:
	if ( rc == 0 )
		PUT_USER(rc, events, mask_ptr);

	return rc;
}

/* Block until one of the modem inputs selected by arg (TIOCM_* bits)
 * changes state.
 *
 * Returns 0 on a matching change, -ERESTARTSYS if interrupted by a
 * signal, or -EIO if woken without any counter change.
 */
static int modem_input_wait(SLMP_INFO *info,int arg)
{
	unsigned long flags;
	int rc;
	struct mgsl_icount cprev, cnow;
	DECLARE_WAITQUEUE(wait, current);

	/* save current irq counts */
	spin_lock_irqsave(&info->lock,flags);
	cprev = info->icount;
	add_wait_queue(&info->status_event_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irqrestore(&info->lock,flags);

	for(;;) {
		schedule();
		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		/* get new irq counts */
		spin_lock_irqsave(&info->lock,flags);
		cnow = info->icount;
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&info->lock,flags);

		/* if no change, wait aborted for some reason */
		if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
		    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
			rc = -EIO;
			break;
		}

		/* check for change in caller specified modem input */
		if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
		    (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
		    (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
		    (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
			rc = 0;
			break;
		}

		cprev = cnow;
	}
	remove_wait_queue(&info->status_event_wait_q, &wait);
	set_current_state(TASK_RUNNING);
	return rc;
}

/* return the state of the serial control and status signals
 * as a TIOCM_* bitmask (tty_operations.tiocmget)
 */
static int tiocmget(struct tty_struct *tty)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned int result;
	unsigned long flags;

	/* latch current signal state from the hardware */
	spin_lock_irqsave(&info->lock,flags);
	get_signals(info);
	spin_unlock_irqrestore(&info->lock,flags);

	result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS : 0) |
		((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR : 0) |
		((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR : 0) |
		((info->serial_signals & SerialSignal_RI)  ? TIOCM_RNG : 0) |
		((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR : 0) |
		((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS : 0);

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s tiocmget() value=%08X\n",
			 __FILE__,__LINE__, info->device_name, result );
	return result;
}

/* set modem control signals (DTR/RTS) per the TIOCM_* masks
 * (tty_operations.tiocmset); other signal bits are ignored
 */
static int tiocmset(struct tty_struct *tty,
		    unsigned int set, unsigned int clear)
{
	SLMP_INFO *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s tiocmset(%x,%x)\n",
			__FILE__,__LINE__,info->device_name, set, clear);

	if (set & TIOCM_RTS)
		info->serial_signals |= SerialSignal_RTS;
	if (set & TIOCM_DTR)
		info->serial_signals |= SerialSignal_DTR;
	if (clear & TIOCM_RTS)
		info->serial_signals &= ~SerialSignal_RTS;
	if (clear & TIOCM_DTR)
		info->serial_signals &= ~SerialSignal_DTR;

	/* push the updated soft state out to the hardware */
	spin_lock_irqsave(&info->lock,flags);
	set_signals(info);
	spin_unlock_irqrestore(&info->lock,flags);

	return 0;
}

/* tty_port_operations.carrier_raised: 1 if DCD asserted, else 0 */
static int carrier_raised(struct tty_port *port)
{
	SLMP_INFO *info = container_of(port, SLMP_INFO, port);
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);
	get_signals(info);
	spin_unlock_irqrestore(&info->lock,flags);

	return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
}

/* tty_port_operations.dtr_rts: assert or drop DTR and RTS together */
static void dtr_rts(struct tty_port *port, int on)
{
	SLMP_INFO *info = container_of(port, SLMP_INFO, port);
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);
	if (on)
		info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
	else
		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
	set_signals(info);
	spin_unlock_irqrestore(&info->lock,flags);
}

/* Block the current process until the specified port is ready to open.
*/ static int block_til_ready(struct tty_struct *tty, struct file *filp, SLMP_INFO *info) { DECLARE_WAITQUEUE(wait, current); int retval; bool do_clocal = false; unsigned long flags; int cd; struct tty_port *port = &info->port; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s block_til_ready()\n", __FILE__,__LINE__, tty->driver->name ); if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ /* nonblock mode is set or port is not enabled */ /* just verify that callout device is not active */ port->flags |= ASYNC_NORMAL_ACTIVE; return 0; } if (tty->termios.c_cflag & CLOCAL) do_clocal = true; /* Wait for carrier detect and the line to become * free (i.e., not in use by the callout). While we are in * this loop, port->count is dropped by one, so that * close() knows when to free things. We restore it upon * exit, either normal or abnormal. */ retval = 0; add_wait_queue(&port->open_wait, &wait); if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s block_til_ready() before block, count=%d\n", __FILE__,__LINE__, tty->driver->name, port->count ); spin_lock_irqsave(&info->lock, flags); port->count--; spin_unlock_irqrestore(&info->lock, flags); port->blocked_open++; while (1) { if (C_BAUD(tty) && test_bit(ASYNCB_INITIALIZED, &port->flags)) tty_port_raise_dtr_rts(port); set_current_state(TASK_INTERRUPTIBLE); if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){ retval = (port->flags & ASYNC_HUP_NOTIFY) ? 
-EAGAIN : -ERESTARTSYS; break; } cd = tty_port_carrier_raised(port); if (!(port->flags & ASYNC_CLOSING) && (do_clocal || cd)) break; if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s block_til_ready() count=%d\n", __FILE__,__LINE__, tty->driver->name, port->count ); tty_unlock(tty); schedule(); tty_lock(tty); } set_current_state(TASK_RUNNING); remove_wait_queue(&port->open_wait, &wait); if (!tty_hung_up_p(filp)) port->count++; port->blocked_open--; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s block_til_ready() after, count=%d\n", __FILE__,__LINE__, tty->driver->name, port->count ); if (!retval) port->flags |= ASYNC_NORMAL_ACTIVE; return retval; } static int alloc_dma_bufs(SLMP_INFO *info) { unsigned short BuffersPerFrame; unsigned short BufferCount; // Force allocation to start at 64K boundary for each port. // This is necessary because *all* buffer descriptors for a port // *must* be in the same 64K block. All descriptors on a port // share a common 'base' address (upper 8 bits of 24 bits) programmed // into the CBP register. info->port_array[0]->last_mem_alloc = (SCA_MEM_SIZE/4) * info->port_num; /* Calculate the number of DMA buffers necessary to hold the */ /* largest allowable frame size. Note: If the max frame size is */ /* not an even multiple of the DMA buffer size then we need to */ /* round the buffer count per frame up one. */ BuffersPerFrame = (unsigned short)(info->max_frame_size/SCABUFSIZE); if ( info->max_frame_size % SCABUFSIZE ) BuffersPerFrame++; /* calculate total number of data buffers (SCABUFSIZE) possible * in one ports memory (SCA_MEM_SIZE/4) after allocating memory * for the descriptor list (BUFFERLISTSIZE). 
*/ BufferCount = (SCA_MEM_SIZE/4 - BUFFERLISTSIZE)/SCABUFSIZE; /* limit number of buffers to maximum amount of descriptors */ if (BufferCount > BUFFERLISTSIZE/sizeof(SCADESC)) BufferCount = BUFFERLISTSIZE/sizeof(SCADESC); /* use enough buffers to transmit one max size frame */ info->tx_buf_count = BuffersPerFrame + 1; /* never use more than half the available buffers for transmit */ if (info->tx_buf_count > (BufferCount/2)) info->tx_buf_count = BufferCount/2; if (info->tx_buf_count > SCAMAXDESC) info->tx_buf_count = SCAMAXDESC; /* use remaining buffers for receive */ info->rx_buf_count = BufferCount - info->tx_buf_count; if (info->rx_buf_count > SCAMAXDESC) info->rx_buf_count = SCAMAXDESC; if ( debug_level >= DEBUG_LEVEL_INFO ) printk("%s(%d):%s Allocating %d TX and %d RX DMA buffers.\n", __FILE__,__LINE__, info->device_name, info->tx_buf_count,info->rx_buf_count); if ( alloc_buf_list( info ) < 0 || alloc_frame_bufs(info, info->rx_buf_list, info->rx_buf_list_ex, info->rx_buf_count) < 0 || alloc_frame_bufs(info, info->tx_buf_list, info->tx_buf_list_ex, info->tx_buf_count) < 0 || alloc_tmp_rx_buf(info) < 0 ) { printk("%s(%d):%s Can't allocate DMA buffer memory\n", __FILE__,__LINE__, info->device_name); return -ENOMEM; } rx_reset_buffers( info ); return 0; } /* Allocate DMA buffers for the transmit and receive descriptor lists. */ static int alloc_buf_list(SLMP_INFO *info) { unsigned int i; /* build list in adapter shared memory */ info->buffer_list = info->memory_base + info->port_array[0]->last_mem_alloc; info->buffer_list_phys = info->port_array[0]->last_mem_alloc; info->port_array[0]->last_mem_alloc += BUFFERLISTSIZE; memset(info->buffer_list, 0, BUFFERLISTSIZE); /* Save virtual address pointers to the receive and */ /* transmit buffer lists. (Receive 1st). These pointers will */ /* be used by the processor to access the lists. 
*/ info->rx_buf_list = (SCADESC *)info->buffer_list; info->tx_buf_list = (SCADESC *)info->buffer_list; info->tx_buf_list += info->rx_buf_count; /* Build links for circular buffer entry lists (tx and rx) * * Note: links are physical addresses read by the SCA device * to determine the next buffer entry to use. */ for ( i = 0; i < info->rx_buf_count; i++ ) { /* calculate and store physical address of this buffer entry */ info->rx_buf_list_ex[i].phys_entry = info->buffer_list_phys + (i * SCABUFSIZE); /* calculate and store physical address of */ /* next entry in cirular list of entries */ info->rx_buf_list[i].next = info->buffer_list_phys; if ( i < info->rx_buf_count - 1 ) info->rx_buf_list[i].next += (i + 1) * sizeof(SCADESC); info->rx_buf_list[i].length = SCABUFSIZE; } for ( i = 0; i < info->tx_buf_count; i++ ) { /* calculate and store physical address of this buffer entry */ info->tx_buf_list_ex[i].phys_entry = info->buffer_list_phys + ((info->rx_buf_count + i) * sizeof(SCADESC)); /* calculate and store physical address of */ /* next entry in cirular list of entries */ info->tx_buf_list[i].next = info->buffer_list_phys + info->rx_buf_count * sizeof(SCADESC); if ( i < info->tx_buf_count - 1 ) info->tx_buf_list[i].next += (i + 1) * sizeof(SCADESC); } return 0; } /* Allocate the frame DMA buffers used by the specified buffer list. 
 */
static int alloc_frame_bufs(SLMP_INFO *info, SCADESC *buf_list,SCADESC_EX *buf_list_ex,int count)
{
	int i;
	unsigned long phys_addr;

	/* carve 'count' SCABUFSIZE data buffers out of adapter shared
	 * memory, recording virtual and (split 16+8 bit) physical
	 * addresses in the descriptor entries
	 */
	for ( i = 0; i < count; i++ ) {
		buf_list_ex[i].virt_addr = info->memory_base + info->port_array[0]->last_mem_alloc;
		phys_addr = info->port_array[0]->last_mem_alloc;
		info->port_array[0]->last_mem_alloc += SCABUFSIZE;

		buf_list[i].buf_ptr = (unsigned short)phys_addr;
		buf_list[i].buf_base = (unsigned char)(phys_addr >> 16);
	}

	return 0;
}

/* "Free" the DMA buffers; they live in adapter shared memory, so only
 * the driver-side pointers are cleared (nothing is kfree'd here).
 */
static void free_dma_bufs(SLMP_INFO *info)
{
	info->buffer_list = NULL;
	info->rx_buf_list = NULL;
	info->tx_buf_list = NULL;
}

/* allocate buffer large enough to hold max_frame_size.
 * This buffer is used to pass an assembled frame to the line discipline.
 * Returns 0 on success, -ENOMEM on failure (no partial allocations left).
 */
static int alloc_tmp_rx_buf(SLMP_INFO *info)
{
	info->tmp_rx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
	if (info->tmp_rx_buf == NULL)
		return -ENOMEM;
	/* unused flag buffer to satisfy receive_buf calling interface */
	info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
	if (!info->flag_buf) {
		kfree(info->tmp_rx_buf);
		info->tmp_rx_buf = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* Release both temporary buffers; pointers NULLed so a second call is safe. */
static void free_tmp_rx_buf(SLMP_INFO *info)
{
	kfree(info->tmp_rx_buf);
	info->tmp_rx_buf = NULL;
	kfree(info->flag_buf);
	info->flag_buf = NULL;
}

/* Reserve and map the adapter's memory regions (shared memory, LCR,
 * SCA registers, status/control registers) and run the shared memory
 * test.  On any failure, init_error is set and everything acquired so
 * far is released via release_resources().
 *
 * Returns 0 on success, -ENODEV on failure.
 */
static int claim_resources(SLMP_INFO *info)
{
	if (request_mem_region(info->phys_memory_base,SCA_MEM_SIZE,"synclinkmp") == NULL) {
		printk( "%s(%d):%s mem addr conflict, Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_memory_base);
		info->init_error = DiagStatus_AddressConflict;
		goto errout;
	}
	else
		info->shared_mem_requested = true;

	if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclinkmp") == NULL) {
		printk( "%s(%d):%s lcr mem addr conflict, Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_lcr_base);
		info->init_error = DiagStatus_AddressConflict;
		goto errout;
	}
	else
		info->lcr_mem_requested = true;

	if (request_mem_region(info->phys_sca_base + info->sca_offset,SCA_BASE_SIZE,"synclinkmp") == NULL) {
		printk( "%s(%d):%s sca mem addr conflict, Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_sca_base);
		info->init_error = DiagStatus_AddressConflict;
		goto errout;
	}
	else
		info->sca_base_requested = true;

	if (request_mem_region(info->phys_statctrl_base + info->statctrl_offset,SCA_REG_SIZE,"synclinkmp") == NULL) {
		printk( "%s(%d):%s stat/ctrl mem addr conflict, Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_statctrl_base);
		info->init_error = DiagStatus_AddressConflict;
		goto errout;
	}
	else
		info->sca_statctrl_requested = true;

	info->memory_base = ioremap_nocache(info->phys_memory_base,
								SCA_MEM_SIZE);
	if (!info->memory_base) {
		printk( "%s(%d):%s Can't map shared memory, MemAddr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_memory_base );
		info->init_error = DiagStatus_CantAssignPciResources;
		goto errout;
	}

	/* physical bases were page-aligned in alloc_dev(); re-add the
	 * in-page offset after mapping a full page
	 */
	info->lcr_base = ioremap_nocache(info->phys_lcr_base, PAGE_SIZE);
	if (!info->lcr_base) {
		printk( "%s(%d):%s Can't map LCR memory, MemAddr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_lcr_base );
		info->init_error = DiagStatus_CantAssignPciResources;
		goto errout;
	}
	info->lcr_base += info->lcr_offset;

	info->sca_base = ioremap_nocache(info->phys_sca_base, PAGE_SIZE);
	if (!info->sca_base) {
		printk( "%s(%d):%s Can't map SCA memory, MemAddr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_sca_base );
		info->init_error = DiagStatus_CantAssignPciResources;
		goto errout;
	}
	info->sca_base += info->sca_offset;

	info->statctrl_base = ioremap_nocache(info->phys_statctrl_base,
								PAGE_SIZE);
	if (!info->statctrl_base) {
		printk( "%s(%d):%s Can't map SCA Status/Control memory, MemAddr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_statctrl_base );
		info->init_error = DiagStatus_CantAssignPciResources;
		goto errout;
	}
	info->statctrl_base += info->statctrl_offset;

	if ( !memory_test(info) ) {
		printk( "%s(%d):Shared Memory Test failed for device %s MemAddr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_memory_base );
		info->init_error = DiagStatus_MemoryError;
		goto errout;
	}

	return 0;

errout:
	release_resources( info );
	return -ENODEV;
}

/* Release everything claim_resources() (and request_irq) acquired.
 * Each release is guarded by its "requested" flag or pointer, so this
 * is safe to call with partially claimed resources.
 */
static void release_resources(SLMP_INFO *info)
{
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):%s release_resources() entry\n",
			__FILE__,__LINE__,info->device_name );

	if ( info->irq_requested ) {
		free_irq(info->irq_level, info);
		info->irq_requested = false;
	}

	if ( info->shared_mem_requested ) {
		release_mem_region(info->phys_memory_base,SCA_MEM_SIZE);
		info->shared_mem_requested = false;
	}
	if ( info->lcr_mem_requested ) {
		release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
		info->lcr_mem_requested = false;
	}
	if ( info->sca_base_requested ) {
		release_mem_region(info->phys_sca_base + info->sca_offset,SCA_BASE_SIZE);
		info->sca_base_requested = false;
	}
	if ( info->sca_statctrl_requested ) {
		release_mem_region(info->phys_statctrl_base + info->statctrl_offset,SCA_REG_SIZE);
		info->sca_statctrl_requested = false;
	}

	/* unmap from the page-aligned base (offsets were added after ioremap) */
	if (info->memory_base){
		iounmap(info->memory_base);
		info->memory_base = NULL;
	}
	if (info->sca_base) {
		iounmap(info->sca_base - info->sca_offset);
		info->sca_base=NULL;
	}
	if (info->statctrl_base) {
		iounmap(info->statctrl_base - info->statctrl_offset);
		info->statctrl_base=NULL;
	}
	if (info->lcr_base){
		iounmap(info->lcr_base - info->lcr_offset);
		info->lcr_base = NULL;
	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):%s release_resources() exit\n",
			__FILE__,__LINE__,info->device_name );
}

/* Add the specified device instance data structure to the
 * global linked list of devices and increment the device count.
 */
static void add_device(SLMP_INFO *info)
{
	info->next_device = NULL;
	info->line = synclinkmp_device_count;
	sprintf(info->device_name,"ttySLM%dp%d",info->adapter_num,info->port_num);

	/* apply per-line max_frame_size module parameter override */
	if (info->line < MAX_DEVICES) {
		if (maxframe[info->line])
			info->max_frame_size = maxframe[info->line];
	}

	synclinkmp_device_count++;

	/* append to the singly-linked global device list */
	if ( !synclinkmp_device_list )
		synclinkmp_device_list = info;
	else {
		SLMP_INFO *current_dev = synclinkmp_device_list;
		while( current_dev->next_device )
			current_dev = current_dev->next_device;
		current_dev->next_device = info;
	}

	/* clamp max_frame_size to the supported [4096, 65535] range */
	if ( info->max_frame_size < 4096 )
		info->max_frame_size = 4096;
	else if ( info->max_frame_size > 65535 )
		info->max_frame_size = 65535;

	printk( "SyncLink MultiPort %s: "
		"Mem=(%08x %08X %08x %08X) IRQ=%d MaxFrameSize=%u\n",
		info->device_name,
		info->phys_sca_base,
		info->phys_memory_base,
		info->phys_statctrl_base,
		info->phys_lcr_base,
		info->irq_level,
		info->max_frame_size );

#if SYNCLINK_GENERIC_HDLC
	hdlcdev_init(info);
#endif
}

static const struct tty_port_operations port_ops = {
	.carrier_raised = carrier_raised,
	.dtr_rts = dtr_rts,
};

/* Allocate and initialize a device instance structure
 *
 * Return Value:	pointer to SLMP_INFO if success, otherwise NULL
 */
static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
{
	SLMP_INFO *info;

	info = kzalloc(sizeof(SLMP_INFO), GFP_KERNEL);

	if (!info) {
		printk("%s(%d) Error can't allocate device instance data for adapter %d, port %d\n",
			__FILE__,__LINE__, adapter_num, port_num);
	} else {
		tty_port_init(&info->port);
		info->port.ops = &port_ops;
		info->magic = MGSL_MAGIC;
		INIT_WORK(&info->task, bh_handler);
		info->max_frame_size = 4096;
		info->port.close_delay = 5*HZ/10;
		info->port.closing_wait = 30*HZ;
		init_waitqueue_head(&info->status_event_wait_q);
		init_waitqueue_head(&info->event_wait_q);
		spin_lock_init(&info->netlock);
		memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
		info->idle_mode = HDLC_TXIDLE_FLAGS;
		info->adapter_num = adapter_num;
		info->port_num = port_num;

		/* Copy configuration info to device instance data */
		info->irq_level = pdev->irq;
		info->phys_lcr_base = pci_resource_start(pdev,0);
		info->phys_sca_base = pci_resource_start(pdev,2);
		info->phys_memory_base = pci_resource_start(pdev,3);
		info->phys_statctrl_base = pci_resource_start(pdev,4);

		/* Because ioremap only works on page boundaries we must map
		 * a larger area than is actually implemented for the LCR
		 * memory range. We map a full page starting at the page boundary.
		 */
		info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
		info->phys_lcr_base &= ~(PAGE_SIZE-1);

		info->sca_offset = info->phys_sca_base & (PAGE_SIZE-1);
		info->phys_sca_base &= ~(PAGE_SIZE-1);

		info->statctrl_offset = info->phys_statctrl_base & (PAGE_SIZE-1);
		info->phys_statctrl_base &= ~(PAGE_SIZE-1);

		info->bus_type = MGSL_BUS_TYPE_PCI;
		info->irq_flags = IRQF_SHARED;

		setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
		setup_timer(&info->status_timer, status_timeout,
				(unsigned long)info);

		/* Store the PCI9050 misc control register value because a flaw
		 * in the PCI9050 prevents LCR registers from being read if
		 * BIOS assigns an LCR base address with bit 7 set.
		 *
		 * Only the misc control register is accessed for which only
		 * write access is needed, so set an initial value and change
		 * bits to the device instance data as we write the value
		 * to the actual misc control register.
		 */
		info->misc_ctrl_value = 0x087e4546;

		/* initial port state is unknown - if startup errors
		 * occur, init_error will be set to indicate the
		 * problem. Once the port is fully initialized,
		 * this value will be set to 0 to indicate the
		 * port is available.
		 */
		info->init_error = -1;
	}

	return info;
}

/* Allocate all SCA_MAX_PORTS port instances for one adapter, register
 * them, claim the adapter's shared resources, and hook the (shared)
 * interrupt.  On any per-port allocation failure, everything allocated
 * so far is rolled back and the adapter is abandoned.
 */
static void device_init(int adapter_num, struct pci_dev *pdev)
{
	SLMP_INFO *port_array[SCA_MAX_PORTS];
	int port;

	/* allocate device instances for up to SCA_MAX_PORTS devices */
	for ( port = 0; port < SCA_MAX_PORTS; ++port ) {
		port_array[port] = alloc_dev(adapter_num,port,pdev);
		if( port_array[port] == NULL ) {
			/* roll back ports already allocated for this adapter */
			for (--port; port >= 0; --port) {
				tty_port_destroy(&port_array[port]->port);
				kfree(port_array[port]);
			}
			return;
		}
	}

	/* give copy of port_array to all ports and add to device list */
	for ( port = 0; port < SCA_MAX_PORTS; ++port ) {
		memcpy(port_array[port]->port_array,port_array,sizeof(port_array));
		add_device( port_array[port] );
		spin_lock_init(&port_array[port]->lock);
	}

	/* Allocate and claim adapter resources (0 == success) */
	if ( !claim_resources(port_array[0]) ) {

		alloc_dma_bufs(port_array[0]);

		/* copy resource information from first port to others */
		for ( port = 1; port < SCA_MAX_PORTS; ++port ) {
			port_array[port]->lock = port_array[0]->lock;
			port_array[port]->irq_level = port_array[0]->irq_level;
			port_array[port]->memory_base = port_array[0]->memory_base;
			port_array[port]->sca_base = port_array[0]->sca_base;
			port_array[port]->statctrl_base = port_array[0]->statctrl_base;
			port_array[port]->lcr_base = port_array[0]->lcr_base;
			alloc_dma_bufs(port_array[port]);
		}

		if ( request_irq(port_array[0]->irq_level,
					synclinkmp_interrupt,
					port_array[0]->irq_flags,
					port_array[0]->device_name,
					port_array[0]) < 0 ) {
			printk( "%s(%d):%s Can't request interrupt, IRQ=%d\n",
				__FILE__,__LINE__,
				port_array[0]->device_name,
				port_array[0]->irq_level );
		}
		else {
			port_array[0]->irq_requested = true;
			adapter_test(port_array[0]);
		}
	}
}

static const struct tty_operations ops = {
	.install = install,
	.open = open,
	.close = close,
	.write = write,
	.put_char = put_char,
	.flush_chars = flush_chars,
	.write_room = write_room,
	.chars_in_buffer = chars_in_buffer,
	.flush_buffer = flush_buffer,
	.ioctl = ioctl,
	.throttle = throttle,
	.unthrottle = unthrottle,
	.send_xchar = send_xchar,
	.break_ctl = set_break,
	.wait_until_sent = wait_until_sent,
	.set_termios = set_termios,
	.stop = tx_hold,
	.start = tx_release,
	.hangup = hangup,
	.tiocmget = tiocmget,
	.tiocmset = tiocmset,
	.get_icount = get_icount,
	.proc_fops = &synclinkmp_proc_fops,
};

/* Module teardown: unregister the tty driver, quiesce every port,
 * free per-port memory (adapter-shared resources are released via the
 * first port of each adapter), then unregister the PCI driver.
 * Also called from synclinkmp_init() on a partial-init failure.
 */
static void synclinkmp_cleanup(void)
{
	int rc;
	SLMP_INFO *info;
	SLMP_INFO *tmp;

	printk("Unloading %s %s\n", driver_name, driver_version);

	if (serial_driver) {
		if ((rc = tty_unregister_driver(serial_driver)))
			printk("%s(%d) failed to unregister tty driver err=%d\n",
			       __FILE__,__LINE__,rc);
		put_tty_driver(serial_driver);
	}

	/* reset devices */
	info = synclinkmp_device_list;
	while(info) {
		reset_port(info);
		info = info->next_device;
	}

	/* release devices */
	info = synclinkmp_device_list;
	while(info) {
#if SYNCLINK_GENERIC_HDLC
		hdlcdev_exit(info);
#endif
		free_dma_bufs(info);
		free_tmp_rx_buf(info);
		if ( info->port_num == 0 ) {
			if (info->sca_base)
				write_reg(info, LPR, 1); /* set low power mode */
			release_resources(info);
		}
		tmp = info;
		info = info->next_device;
		tty_port_destroy(&tmp->port);
		kfree(tmp);
	}

	pci_unregister_driver(&synclinkmp_pci_driver);
}

/* Driver initialization entry point.
 * Registers the PCI driver (which probes adapters) and then the tty
 * driver.  Returns 0 on success or a negative errno; on failure the
 * partial setup is undone via synclinkmp_cleanup().
 */
static int __init synclinkmp_init(void)
{
	int rc;

	if (break_on_load) {
		synclinkmp_get_text_ptr();
		BREAKPOINT();
	}

	printk("%s %s\n", driver_name, driver_version);

	if ((rc = pci_register_driver(&synclinkmp_pci_driver)) < 0) {
		printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
		return rc;
	}

	serial_driver = alloc_tty_driver(128);
	if (!serial_driver) {
		rc = -ENOMEM;
		goto error;
	}

	/* Initialize the tty_driver structure */

	serial_driver->driver_name = "synclinkmp";
	serial_driver->name = "ttySLM";
	serial_driver->major = ttymajor;
	serial_driver->minor_start = 64;
	serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
	serial_driver->subtype = SERIAL_TYPE_NORMAL;
	serial_driver->init_termios = tty_std_termios;
	serial_driver->init_termios.c_cflag =
		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	serial_driver->init_termios.c_ispeed = 9600;
	serial_driver->init_termios.c_ospeed = 9600;
	serial_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(serial_driver, &ops);
	if ((rc = tty_register_driver(serial_driver)) < 0) {
		printk("%s(%d):Couldn't register serial driver\n",
			__FILE__,__LINE__);
		put_tty_driver(serial_driver);
		serial_driver = NULL;
		goto error;
	}

	printk("%s %s, tty major#%d\n",
		driver_name, driver_version,
		serial_driver->major);

	return 0;

error:
	synclinkmp_cleanup();
	return rc;
}

static void __exit synclinkmp_exit(void)
{
	synclinkmp_cleanup();
}

module_init(synclinkmp_init);
module_exit(synclinkmp_exit);

/* Set the port for internal loopback mode.
 * The TxCLK and RxCLK signals are generated from the BRG and
 * the TxD is looped back to the RxD internally.
 */
static void enable_loopback(SLMP_INFO *info, int enable)
{
	if (enable) {
		/* MD2 (Mode Register 2)
		 * 01..00  CNCT<1..0> Channel Connection 11=Local Loopback
		 */
		write_reg(info, MD2,
			(unsigned char)(read_reg(info, MD2) | (BIT1 + BIT0)));

		/* degate external TxC clock source */
		info->port_array[0]->ctrlreg_value |= (BIT0 << (info->port_num * 2));
		write_control_reg(info);

		/* RXS/TXS (Rx/Tx clock source)
		 * 07      Reserved, must be 0
		 * 06..04  Clock Source, 100=BRG
		 * 03..00  Clock Divisor, 0000=1
		 */
		write_reg(info, RXS, 0x40);
		write_reg(info, TXS, 0x40);

	} else {
		/* MD2 (Mode Register 2)
		 * 01..00  CNCT<1..0> Channel connection, 0=normal
		 */
		write_reg(info, MD2,
			(unsigned char)(read_reg(info, MD2) & ~(BIT1 + BIT0)));

		/* RXS/TXS (Rx/Tx clock source)
		 * 07      Reserved, must be 0
		 * 06..04  Clock Source, 000=RxC/TxC Pin
		 * 03..00  Clock Divisor, 0000=1
		 */
		write_reg(info, RXS, 0x00);
		write_reg(info, TXS, 0x00);
	}

	/* set LinkSpeed if available, otherwise default to 2Mbps */
	if (info->params.clock_speed)
		set_rate(info, info->params.clock_speed);
	else
		set_rate(info, 3686400);
}

/* Set the baud rate register to the desired speed
 *
 *	data_rate	data rate of clock in bits per second
 *			A data rate of 0 disables the AUX clock.
 */
static void set_rate( SLMP_INFO *info, u32 data_rate )
{
	u32 TMCValue;
	unsigned char BRValue;
	u32 Divisor=0;

	/* fBRG = fCLK/(TMC * 2^BR)
	 */
	if (data_rate != 0) {
		Divisor = 14745600/data_rate;
		if (!Divisor)
			Divisor = 1;

		TMCValue = Divisor;

		BRValue = 0;
		if (TMCValue != 1 && TMCValue != 2) {
			/* BRValue of 0 provides 50/50 duty cycle *only* when
			 * TMCValue is 1 or 2. BRValue of 1 to 9 always provides
			 * 50/50 duty cycle.
			 */
			BRValue = 1;
			TMCValue >>= 1;
		}

		/* while TMCValue is too big for TMC register, divide
		 * by 2 and increment BR exponent.
		 */
		for(; TMCValue > 256 && BRValue < 10; BRValue++)
			TMCValue >>= 1;

		/* low nibble of TXS/RXS holds the BR exponent */
		write_reg(info, TXS,
			(unsigned char)((read_reg(info, TXS) & 0xf0) | BRValue));
		write_reg(info, RXS,
			(unsigned char)((read_reg(info, RXS) & 0xf0) | BRValue));
		write_reg(info, TMC, (unsigned char)TMCValue);
	}
	else {
		/* data_rate == 0: disable the AUX clock */
		write_reg(info, TXS,0);
		write_reg(info, RXS,0);
		write_reg(info, TMC, 0);
	}
}

/* Disable receiver: reset the receiver, mask rx interrupts,
 * and shut down/abort the Rx DMA channel.
 */
static void rx_stop(SLMP_INFO *info)
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):%s rx_stop()\n",
			 __FILE__,__LINE__, info->device_name );

	write_reg(info, CMD, RXRESET);

	info->ie0_value &= ~RXRDYE;
	write_reg(info, IE0, info->ie0_value);	/* disable Rx data interrupts */

	write_reg(info, RXDMA + DSR, 0);	/* disable Rx DMA */
	write_reg(info, RXDMA + DCMD, SWABORT);	/* reset/init Rx DMA */
	write_reg(info, RXDMA + DIR, 0);	/* disable Rx DMA interrupts */

	info->rx_enabled = false;
	info->rx_overflow = false;
}

/* enable the receiver
 *
 * HDLC mode: program the circular Rx DMA descriptor chain and use
 * DMA EOM/BOF interrupts; async mode: use per-character RXRDY IRQs.
 */
static void rx_start(SLMP_INFO *info)
{
	int i;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):%s rx_start()\n",
			 __FILE__,__LINE__, info->device_name );

	write_reg(info, CMD, RXRESET);

	if ( info->params.mode == MGSL_MODE_HDLC ) {
		/* HDLC, disabe IRQ on rxdata */
		info->ie0_value &= ~RXRDYE;
		write_reg(info, IE0, info->ie0_value);

		/* Reset all Rx DMA buffers and program rx dma */
		write_reg(info, RXDMA + DSR, 0);		/* disable Rx DMA */
		write_reg(info, RXDMA + DCMD, SWABORT);	/* reset/init Rx DMA */

		for (i = 0; i < info->rx_buf_count; i++) {
			info->rx_buf_list[i].status = 0xff;

			// throttle to 4 shared memory writes at a time to prevent
			// hogging local bus (keep latency time for DMA requests low).
			if (!(i % 4))
				read_status_reg(info);
		}
		info->current_rx_buf = 0;

		/* set current/1st descriptor address */
		write_reg16(info, RXDMA + CDA,
			info->rx_buf_list_ex[0].phys_entry);

		/* set new last rx descriptor address */
		write_reg16(info, RXDMA + EDA,
			info->rx_buf_list_ex[info->rx_buf_count - 1].phys_entry);

		/* set buffer length (shared by all rx dma data buffers) */
		write_reg16(info, RXDMA + BFL, SCABUFSIZE);

		write_reg(info, RXDMA + DIR, 0x60);	/* enable Rx DMA interrupts (EOM/BOF) */
		write_reg(info, RXDMA + DSR, 0xf2);	/* clear Rx DMA IRQs, enable Rx DMA */
	} else {
		/* async, enable IRQ on rxdata */
		info->ie0_value |= RXRDYE;
		write_reg(info, IE0, info->ie0_value);
	}

	write_reg(info, CMD, RXENABLE);

	info->rx_overflow = false;
	info->rx_enabled = true;
}

/* Enable the transmitter and send a transmit frame if
 * one is loaded in the DMA buffers.
 */
static void tx_start(SLMP_INFO *info)
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):%s tx_start() tx_count=%d\n",
			 __FILE__,__LINE__, info->device_name,info->tx_count );

	if (!info->tx_enabled ) {
		write_reg(info, CMD, TXRESET);
		write_reg(info, CMD, TXENABLE);
		info->tx_enabled = true;
	}

	if ( info->tx_count ) {

		/* If auto RTS enabled and RTS is inactive, then assert */
		/* RTS and set a flag indicating that the driver should */
		/* negate RTS when the transmission completes. */

		info->drop_rts_on_tx_done = false;

		if (info->params.mode != MGSL_MODE_ASYNC) {
			if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
				get_signals( info );
				if ( !(info->serial_signals & SerialSignal_RTS) ) {
					info->serial_signals |= SerialSignal_RTS;
					set_signals( info );
					info->drop_rts_on_tx_done = true;
				}
			}

			write_reg16(info, TRC0,
				(unsigned short)(((tx_negate_fifo_level-1)<<8) + tx_active_fifo_level));

			write_reg(info, TXDMA + DSR, 0); 		/* disable DMA channel */
			write_reg(info, TXDMA + DCMD, SWABORT);	/* reset/init DMA channel */

			/* set TX CDA (current descriptor address) */
			write_reg16(info, TXDMA + CDA,
				info->tx_buf_list_ex[0].phys_entry);

			/* set TX EDA (last descriptor address) */
			write_reg16(info, TXDMA + EDA,
				info->tx_buf_list_ex[info->last_tx_buf].phys_entry);

			/* enable underrun IRQ */
			info->ie1_value &= ~IDLE;
			info->ie1_value |= UDRN;
			write_reg(info, IE1, info->ie1_value);
			write_reg(info, SR1, (unsigned char)(IDLE + UDRN));

			write_reg(info, TXDMA + DIR, 0x40);		/* enable Tx DMA interrupts (EOM) */
			write_reg(info, TXDMA + DSR, 0xf2);		/* clear Tx DMA IRQs, enable Tx DMA */

			/* watchdog: tx_timeout fires if the frame never completes */
			mod_timer(&info->tx_timer, jiffies +
					msecs_to_jiffies(5000));
		}
		else {
			tx_load_fifo(info);
			/* async, enable IRQ on txdata */
			info->ie0_value |= TXRDYE;
			write_reg(info, IE0, info->ie0_value);
		}

		info->tx_active = true;
	}
}

/* stop the transmitter and DMA */
static void tx_stop( SLMP_INFO *info )
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):%s tx_stop()\n",
			 __FILE__,__LINE__, info->device_name );

	del_timer(&info->tx_timer);

	write_reg(info, TXDMA + DSR, 0); 		/* disable DMA channel */
	write_reg(info, TXDMA + DCMD, SWABORT);	/* reset/init DMA channel */

	write_reg(info, CMD, TXRESET);

	info->ie1_value &= ~(UDRN + IDLE);
	write_reg(info, IE1, info->ie1_value);	/* disable tx status interrupts */
	write_reg(info, SR1, (unsigned char)(IDLE + UDRN));	/* clear pending */

	info->ie0_value &= ~TXRDYE;
	write_reg(info, IE0, info->ie0_value);	/* disable tx data interrupts */

	info->tx_enabled = false;
	info->tx_active = false;
}

/* Fill the transmit FIFO until the FIFO is full or
 * there is no more data to load.
 */
static void tx_load_fifo(SLMP_INFO *info)
{
	u8 TwoBytes[2];

	/* do nothing if no tx data available and no XON/XOFF pending */
	if ( !info->tx_count && !info->x_char )
		return;

	/* load the Transmit FIFO until FIFOs full or all data sent */

	while( info->tx_count && (read_reg(info,SR0) & BIT1) ) {

		/* there is more space in the transmit FIFO and */
		/* there is more data in transmit buffer */

		if ( (info->tx_count > 1) && !info->x_char ) {
 			/* write 16-bits; tx_get wraps within the circular
 			 * max_frame_size tx buffer
 			 */
			TwoBytes[0] = info->tx_buf[info->tx_get++];
			if (info->tx_get >= info->max_frame_size)
				info->tx_get -= info->max_frame_size;
			TwoBytes[1] = info->tx_buf[info->tx_get++];
			if (info->tx_get >= info->max_frame_size)
				info->tx_get -= info->max_frame_size;

			write_reg16(info, TRB, *((u16 *)TwoBytes));

			info->tx_count -= 2;
			info->icount.tx += 2;
		} else {
			/* only 1 byte left to transmit or 1 FIFO slot left */

			if (info->x_char) {
				/* transmit pending high priority char */
				write_reg(info, TRB, info->x_char);
				info->x_char = 0;
			} else {
				write_reg(info, TRB, info->tx_buf[info->tx_get++]);
				if (info->tx_get >= info->max_frame_size)
					info->tx_get -= info->max_frame_size;
				info->tx_count--;
			}
			info->icount.tx++;
		}
	}
}

/* Reset a port to a known state: stop tx/rx, drop RTS/DTR,
 * mask all port interrupts, and issue a channel reset.
 */
static void reset_port(SLMP_INFO *info)
{
	if (info->sca_base) {

		tx_stop(info);
		rx_stop(info);

		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
		set_signals(info);

		/* disable all port interrupts */
		info->ie0_value = 0;
		info->ie1_value = 0;
		info->ie2_value = 0;
		write_reg(info, IE0, info->ie0_value);
		write_reg(info, IE1, info->ie1_value);
		write_reg(info, IE2, info->ie2_value);

		write_reg(info, CMD, CHRESET);
	}
}

/* Reset all the ports to a known state.
 */
static void reset_adapter(SLMP_INFO *info)
{
	int i;

	for ( i=0; i < SCA_MAX_PORTS; ++i) {
		if (info->port_array[i])
			reset_port(info->port_array[i]);
	}
}

/* Program port for asynchronous communications.
*/ static void async_mode(SLMP_INFO *info) { unsigned char RegValue; tx_stop(info); rx_stop(info); /* MD0, Mode Register 0 * * 07..05 PRCTL<2..0>, Protocol Mode, 000=async * 04 AUTO, Auto-enable (RTS/CTS/DCD) * 03 Reserved, must be 0 * 02 CRCCC, CRC Calculation, 0=disabled * 01..00 STOP<1..0> Stop bits (00=1,10=2) * * 0000 0000 */ RegValue = 0x00; if (info->params.stop_bits != 1) RegValue |= BIT1; write_reg(info, MD0, RegValue); /* MD1, Mode Register 1 * * 07..06 BRATE<1..0>, bit rate, 00=1/1 01=1/16 10=1/32 11=1/64 * 05..04 TXCHR<1..0>, tx char size, 00=8 bits,01=7,10=6,11=5 * 03..02 RXCHR<1..0>, rx char size * 01..00 PMPM<1..0>, Parity mode, 00=none 10=even 11=odd * * 0100 0000 */ RegValue = 0x40; switch (info->params.data_bits) { case 7: RegValue |= BIT4 + BIT2; break; case 6: RegValue |= BIT5 + BIT3; break; case 5: RegValue |= BIT5 + BIT4 + BIT3 + BIT2; break; } if (info->params.parity != ASYNC_PARITY_NONE) { RegValue |= BIT1; if (info->params.parity == ASYNC_PARITY_ODD) RegValue |= BIT0; } write_reg(info, MD1, RegValue); /* MD2, Mode Register 2 * * 07..02 Reserved, must be 0 * 01..00 CNCT<1..0> Channel connection, 00=normal 11=local loopback * * 0000 0000 */ RegValue = 0x00; if (info->params.loopback) RegValue |= (BIT1 + BIT0); write_reg(info, MD2, RegValue); /* RXS, Receive clock source * * 07 Reserved, must be 0 * 06..04 RXCS<2..0>, clock source, 000=RxC Pin, 100=BRG, 110=DPLL * 03..00 RXBR<3..0>, rate divisor, 0000=1 */ RegValue=BIT6; write_reg(info, RXS, RegValue); /* TXS, Transmit clock source * * 07 Reserved, must be 0 * 06..04 RXCS<2..0>, clock source, 000=TxC Pin, 100=BRG, 110=Receive Clock * 03..00 RXBR<3..0>, rate divisor, 0000=1 */ RegValue=BIT6; write_reg(info, TXS, RegValue); /* Control Register * * 6,4,2,0 CLKSEL<3..0>, 0 = TcCLK in, 1 = Auxclk out */ info->port_array[0]->ctrlreg_value |= (BIT0 << (info->port_num * 2)); write_control_reg(info); tx_set_idle(info); /* RRC Receive Ready Control 0 * * 07..05 Reserved, must be 0 * 04..00 RRC<4..0> Rx 
FIFO trigger active 0x00 = 1 byte */ write_reg(info, RRC, 0x00); /* TRC0 Transmit Ready Control 0 * * 07..05 Reserved, must be 0 * 04..00 TRC<4..0> Tx FIFO trigger active 0x10 = 16 bytes */ write_reg(info, TRC0, 0x10); /* TRC1 Transmit Ready Control 1 * * 07..05 Reserved, must be 0 * 04..00 TRC<4..0> Tx FIFO trigger inactive 0x1e = 31 bytes (full-1) */ write_reg(info, TRC1, 0x1e); /* CTL, MSCI control register * * 07..06 Reserved, set to 0 * 05 UDRNC, underrun control, 0=abort 1=CRC+flag (HDLC/BSC) * 04 IDLC, idle control, 0=mark 1=idle register * 03 BRK, break, 0=off 1 =on (async) * 02 SYNCLD, sync char load enable (BSC) 1=enabled * 01 GOP, go active on poll (LOOP mode) 1=enabled * 00 RTS, RTS output control, 0=active 1=inactive * * 0001 0001 */ RegValue = 0x10; if (!(info->serial_signals & SerialSignal_RTS)) RegValue |= 0x01; write_reg(info, CTL, RegValue); /* enable status interrupts */ info->ie0_value |= TXINTE + RXINTE; write_reg(info, IE0, info->ie0_value); /* enable break detect interrupt */ info->ie1_value = BRKD; write_reg(info, IE1, info->ie1_value); /* enable rx overrun interrupt */ info->ie2_value = OVRN; write_reg(info, IE2, info->ie2_value); set_rate( info, info->params.data_rate * 16 ); } /* Program the SCA for HDLC communications. */ static void hdlc_mode(SLMP_INFO *info) { unsigned char RegValue; u32 DpllDivisor; // Can't use DPLL because SCA outputs recovered clock on RxC when // DPLL mode selected. This causes output contention with RxC receiver. // Use of DPLL would require external hardware to disable RxC receiver // when DPLL mode selected. 
info->params.flags &= ~(HDLC_FLAG_TXC_DPLL + HDLC_FLAG_RXC_DPLL); /* disable DMA interrupts */ write_reg(info, TXDMA + DIR, 0); write_reg(info, RXDMA + DIR, 0); /* MD0, Mode Register 0 * * 07..05 PRCTL<2..0>, Protocol Mode, 100=HDLC * 04 AUTO, Auto-enable (RTS/CTS/DCD) * 03 Reserved, must be 0 * 02 CRCCC, CRC Calculation, 1=enabled * 01 CRC1, CRC selection, 0=CRC-16,1=CRC-CCITT-16 * 00 CRC0, CRC initial value, 1 = all 1s * * 1000 0001 */ RegValue = 0x81; if (info->params.flags & HDLC_FLAG_AUTO_CTS) RegValue |= BIT4; if (info->params.flags & HDLC_FLAG_AUTO_DCD) RegValue |= BIT4; if (info->params.crc_type == HDLC_CRC_16_CCITT) RegValue |= BIT2 + BIT1; write_reg(info, MD0, RegValue); /* MD1, Mode Register 1 * * 07..06 ADDRS<1..0>, Address detect, 00=no addr check * 05..04 TXCHR<1..0>, tx char size, 00=8 bits * 03..02 RXCHR<1..0>, rx char size, 00=8 bits * 01..00 PMPM<1..0>, Parity mode, 00=no parity * * 0000 0000 */ RegValue = 0x00; write_reg(info, MD1, RegValue); /* MD2, Mode Register 2 * * 07 NRZFM, 0=NRZ, 1=FM * 06..05 CODE<1..0> Encoding, 00=NRZ * 04..03 DRATE<1..0> DPLL Divisor, 00=8 * 02 Reserved, must be 0 * 01..00 CNCT<1..0> Channel connection, 0=normal * * 0000 0000 */ RegValue = 0x00; switch(info->params.encoding) { case HDLC_ENCODING_NRZI: RegValue |= BIT5; break; case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT7 + BIT5; break; /* aka FM1 */ case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT7 + BIT6; break; /* aka FM0 */ case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT7; break; /* aka Manchester */ #if 0 case HDLC_ENCODING_NRZB: /* not supported */ case HDLC_ENCODING_NRZI_MARK: /* not supported */ case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: /* not supported */ #endif } if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) { DpllDivisor = 16; RegValue |= BIT3; } else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) { DpllDivisor = 8; } else { DpllDivisor = 32; RegValue |= BIT4; } write_reg(info, MD2, RegValue); /* RXS, Receive clock source * * 07 Reserved, must be 0 * 
06..04 RXCS<2..0>, clock source, 000=RxC Pin, 100=BRG, 110=DPLL * 03..00 RXBR<3..0>, rate divisor, 0000=1 */ RegValue=0; if (info->params.flags & HDLC_FLAG_RXC_BRG) RegValue |= BIT6; if (info->params.flags & HDLC_FLAG_RXC_DPLL) RegValue |= BIT6 + BIT5; write_reg(info, RXS, RegValue); /* TXS, Transmit clock source * * 07 Reserved, must be 0 * 06..04 RXCS<2..0>, clock source, 000=TxC Pin, 100=BRG, 110=Receive Clock * 03..00 RXBR<3..0>, rate divisor, 0000=1 */ RegValue=0; if (info->params.flags & HDLC_FLAG_TXC_BRG) RegValue |= BIT6; if (info->params.flags & HDLC_FLAG_TXC_DPLL) RegValue |= BIT6 + BIT5; write_reg(info, TXS, RegValue); if (info->params.flags & HDLC_FLAG_RXC_DPLL) set_rate(info, info->params.clock_speed * DpllDivisor); else set_rate(info, info->params.clock_speed); /* GPDATA (General Purpose I/O Data Register) * * 6,4,2,0 CLKSEL<3..0>, 0 = TcCLK in, 1 = Auxclk out */ if (info->params.flags & HDLC_FLAG_TXC_BRG) info->port_array[0]->ctrlreg_value |= (BIT0 << (info->port_num * 2)); else info->port_array[0]->ctrlreg_value &= ~(BIT0 << (info->port_num * 2)); write_control_reg(info); /* RRC Receive Ready Control 0 * * 07..05 Reserved, must be 0 * 04..00 RRC<4..0> Rx FIFO trigger active */ write_reg(info, RRC, rx_active_fifo_level); /* TRC0 Transmit Ready Control 0 * * 07..05 Reserved, must be 0 * 04..00 TRC<4..0> Tx FIFO trigger active */ write_reg(info, TRC0, tx_active_fifo_level); /* TRC1 Transmit Ready Control 1 * * 07..05 Reserved, must be 0 * 04..00 TRC<4..0> Tx FIFO trigger inactive 0x1f = 32 bytes (full) */ write_reg(info, TRC1, (unsigned char)(tx_negate_fifo_level - 1)); /* DMR, DMA Mode Register * * 07..05 Reserved, must be 0 * 04 TMOD, Transfer Mode: 1=chained-block * 03 Reserved, must be 0 * 02 NF, Number of Frames: 1=multi-frame * 01 CNTE, Frame End IRQ Counter enable: 0=disabled * 00 Reserved, must be 0 * * 0001 0100 */ write_reg(info, TXDMA + DMR, 0x14); write_reg(info, RXDMA + DMR, 0x14); /* Set chain pointer base (upper 8 bits of 24 bit addr) */ 
write_reg(info, RXDMA + CPB, (unsigned char)(info->buffer_list_phys >> 16)); /* Set chain pointer base (upper 8 bits of 24 bit addr) */ write_reg(info, TXDMA + CPB, (unsigned char)(info->buffer_list_phys >> 16)); /* enable status interrupts. other code enables/disables * the individual sources for these two interrupt classes. */ info->ie0_value |= TXINTE + RXINTE; write_reg(info, IE0, info->ie0_value); /* CTL, MSCI control register * * 07..06 Reserved, set to 0 * 05 UDRNC, underrun control, 0=abort 1=CRC+flag (HDLC/BSC) * 04 IDLC, idle control, 0=mark 1=idle register * 03 BRK, break, 0=off 1 =on (async) * 02 SYNCLD, sync char load enable (BSC) 1=enabled * 01 GOP, go active on poll (LOOP mode) 1=enabled * 00 RTS, RTS output control, 0=active 1=inactive * * 0001 0001 */ RegValue = 0x10; if (!(info->serial_signals & SerialSignal_RTS)) RegValue |= 0x01; write_reg(info, CTL, RegValue); /* preamble not supported ! */ tx_set_idle(info); tx_stop(info); rx_stop(info); set_rate(info, info->params.clock_speed); if (info->params.loopback) enable_loopback(info,1); } /* Set the transmit HDLC idle mode */ static void tx_set_idle(SLMP_INFO *info) { unsigned char RegValue = 0xff; /* Map API idle mode to SCA register bits */ switch(info->idle_mode) { case HDLC_TXIDLE_FLAGS: RegValue = 0x7e; break; case HDLC_TXIDLE_ALT_ZEROS_ONES: RegValue = 0xaa; break; case HDLC_TXIDLE_ZEROS: RegValue = 0x00; break; case HDLC_TXIDLE_ONES: RegValue = 0xff; break; case HDLC_TXIDLE_ALT_MARK_SPACE: RegValue = 0xaa; break; case HDLC_TXIDLE_SPACE: RegValue = 0x00; break; case HDLC_TXIDLE_MARK: RegValue = 0xff; break; } write_reg(info, IDL, RegValue); } /* Query the adapter for the state of the V24 status (input) signals. 
*/ static void get_signals(SLMP_INFO *info) { u16 status = read_reg(info, SR3); u16 gpstatus = read_status_reg(info); u16 testbit; /* clear all serial signals except RTS and DTR */ info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR; /* set serial signal bits to reflect MISR */ if (!(status & BIT3)) info->serial_signals |= SerialSignal_CTS; if ( !(status & BIT2)) info->serial_signals |= SerialSignal_DCD; testbit = BIT1 << (info->port_num * 2); // Port 0..3 RI is GPDATA<1,3,5,7> if (!(gpstatus & testbit)) info->serial_signals |= SerialSignal_RI; testbit = BIT0 << (info->port_num * 2); // Port 0..3 DSR is GPDATA<0,2,4,6> if (!(gpstatus & testbit)) info->serial_signals |= SerialSignal_DSR; } /* Set the state of RTS and DTR based on contents of * serial_signals member of device context. */ static void set_signals(SLMP_INFO *info) { unsigned char RegValue; u16 EnableBit; RegValue = read_reg(info, CTL); if (info->serial_signals & SerialSignal_RTS) RegValue &= ~BIT0; else RegValue |= BIT0; write_reg(info, CTL, RegValue); // Port 0..3 DTR is ctrl reg <1,3,5,7> EnableBit = BIT1 << (info->port_num*2); if (info->serial_signals & SerialSignal_DTR) info->port_array[0]->ctrlreg_value &= ~EnableBit; else info->port_array[0]->ctrlreg_value |= EnableBit; write_control_reg(info); } /*******************/ /* DMA Buffer Code */ /*******************/ /* Set the count for all receive buffers to SCABUFSIZE * and set the current buffer to the first buffer. This effectively * makes all buffers free and discards any data in buffers. 
*/ static void rx_reset_buffers(SLMP_INFO *info) { rx_free_frame_buffers(info, 0, info->rx_buf_count - 1); } /* Free the buffers used by a received frame * * info pointer to device instance data * first index of 1st receive buffer of frame * last index of last receive buffer of frame */ static void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last) { bool done = false; while(!done) { /* reset current buffer for reuse */ info->rx_buf_list[first].status = 0xff; if (first == last) { done = true; /* set new last rx descriptor address */ write_reg16(info, RXDMA + EDA, info->rx_buf_list_ex[first].phys_entry); } first++; if (first == info->rx_buf_count) first = 0; } /* set current buffer to next buffer after last buffer of frame */ info->current_rx_buf = first; } /* Return a received frame from the receive DMA buffers. * Only frames received without errors are returned. * * Return Value: true if frame returned, otherwise false */ static bool rx_get_frame(SLMP_INFO *info) { unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */ unsigned short status; unsigned int framesize = 0; bool ReturnCode = false; unsigned long flags; struct tty_struct *tty = info->port.tty; unsigned char addr_field = 0xff; SCADESC *desc; SCADESC_EX *desc_ex; CheckAgain: /* assume no frame returned, set zero length */ framesize = 0; addr_field = 0xff; /* * current_rx_buf points to the 1st buffer of the next available * receive frame. To find the last buffer of the frame look for * a non-zero status field in the buffer entries. (The status * field is set by the 16C32 after completing a receive frame. 
*/ StartIndex = EndIndex = info->current_rx_buf; for ( ;; ) { desc = &info->rx_buf_list[EndIndex]; desc_ex = &info->rx_buf_list_ex[EndIndex]; if (desc->status == 0xff) goto Cleanup; /* current desc still in use, no frames available */ if (framesize == 0 && info->params.addr_filter != 0xff) addr_field = desc_ex->virt_addr[0]; framesize += desc->length; /* Status != 0 means last buffer of frame */ if (desc->status) break; EndIndex++; if (EndIndex == info->rx_buf_count) EndIndex = 0; if (EndIndex == info->current_rx_buf) { /* all buffers have been 'used' but none mark */ /* the end of a frame. Reset buffers and receiver. */ if ( info->rx_enabled ){ spin_lock_irqsave(&info->lock,flags); rx_start(info); spin_unlock_irqrestore(&info->lock,flags); } goto Cleanup; } } /* check status of receive frame */ /* frame status is byte stored after frame data * * 7 EOM (end of msg), 1 = last buffer of frame * 6 Short Frame, 1 = short frame * 5 Abort, 1 = frame aborted * 4 Residue, 1 = last byte is partial * 3 Overrun, 1 = overrun occurred during frame reception * 2 CRC, 1 = CRC error detected * */ status = desc->status; /* ignore CRC bit if not using CRC (bit is undefined) */ /* Note:CRC is not save to data buffer */ if (info->params.crc_type == HDLC_CRC_NONE) status &= ~BIT2; if (framesize == 0 || (addr_field != 0xff && addr_field != info->params.addr_filter)) { /* discard 0 byte frames, this seems to occur sometime * when remote is idling flags. 
*/ rx_free_frame_buffers(info, StartIndex, EndIndex); goto CheckAgain; } if (framesize < 2) status |= BIT6; if (status & (BIT6+BIT5+BIT3+BIT2)) { /* received frame has errors, * update counts and mark frame size as 0 */ if (status & BIT6) info->icount.rxshort++; else if (status & BIT5) info->icount.rxabort++; else if (status & BIT3) info->icount.rxover++; else info->icount.rxcrc++; framesize = 0; #if SYNCLINK_GENERIC_HDLC { info->netdev->stats.rx_errors++; info->netdev->stats.rx_frame_errors++; } #endif } if ( debug_level >= DEBUG_LEVEL_BH ) printk("%s(%d):%s rx_get_frame() status=%04X size=%d\n", __FILE__,__LINE__,info->device_name,status,framesize); if ( debug_level >= DEBUG_LEVEL_DATA ) trace_block(info,info->rx_buf_list_ex[StartIndex].virt_addr, min_t(unsigned int, framesize, SCABUFSIZE), 0); if (framesize) { if (framesize > info->max_frame_size) info->icount.rxlong++; else { /* copy dma buffer(s) to contiguous intermediate buffer */ int copy_count = framesize; int index = StartIndex; unsigned char *ptmp = info->tmp_rx_buf; info->tmp_rx_buf_count = framesize; info->icount.rxok++; while(copy_count) { int partial_count = min(copy_count,SCABUFSIZE); memcpy( ptmp, info->rx_buf_list_ex[index].virt_addr, partial_count ); ptmp += partial_count; copy_count -= partial_count; if ( ++index == info->rx_buf_count ) index = 0; } #if SYNCLINK_GENERIC_HDLC if (info->netcount) hdlcdev_rx(info,info->tmp_rx_buf,framesize); else #endif ldisc_receive_buf(tty,info->tmp_rx_buf, info->flag_buf, framesize); } } /* Free the buffers used by this frame. */ rx_free_frame_buffers( info, StartIndex, EndIndex ); ReturnCode = true; Cleanup: if ( info->rx_enabled && info->rx_overflow ) { /* Receiver is enabled, but needs to restarted due to * rx buffer overflow. If buffers are empty, restart receiver. 
*/ if (info->rx_buf_list[EndIndex].status == 0xff) { spin_lock_irqsave(&info->lock,flags); rx_start(info); spin_unlock_irqrestore(&info->lock,flags); } } return ReturnCode; } /* load the transmit DMA buffer with data */ static void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count) { unsigned short copy_count; unsigned int i = 0; SCADESC *desc; SCADESC_EX *desc_ex; if ( debug_level >= DEBUG_LEVEL_DATA ) trace_block(info, buf, min_t(unsigned int, count, SCABUFSIZE), 1); /* Copy source buffer to one or more DMA buffers, starting with * the first transmit dma buffer. */ for(i=0;;) { copy_count = min_t(unsigned int, count, SCABUFSIZE); desc = &info->tx_buf_list[i]; desc_ex = &info->tx_buf_list_ex[i]; load_pci_memory(info, desc_ex->virt_addr,buf,copy_count); desc->length = copy_count; desc->status = 0; buf += copy_count; count -= copy_count; if (!count) break; i++; if (i >= info->tx_buf_count) i = 0; } info->tx_buf_list[i].status = 0x81; /* set EOM and EOT status */ info->last_tx_buf = ++i; } static bool register_test(SLMP_INFO *info) { static unsigned char testval[] = {0x00, 0xff, 0xaa, 0x55, 0x69, 0x96}; static unsigned int count = ARRAY_SIZE(testval); unsigned int i; bool rc = true; unsigned long flags; spin_lock_irqsave(&info->lock,flags); reset_port(info); /* assume failure */ info->init_error = DiagStatus_AddressFailure; /* Write bit patterns to various registers but do it out of */ /* sync, then read back and verify values. 
*/ for (i = 0 ; i < count ; i++) { write_reg(info, TMC, testval[i]); write_reg(info, IDL, testval[(i+1)%count]); write_reg(info, SA0, testval[(i+2)%count]); write_reg(info, SA1, testval[(i+3)%count]); if ( (read_reg(info, TMC) != testval[i]) || (read_reg(info, IDL) != testval[(i+1)%count]) || (read_reg(info, SA0) != testval[(i+2)%count]) || (read_reg(info, SA1) != testval[(i+3)%count]) ) { rc = false; break; } } reset_port(info); spin_unlock_irqrestore(&info->lock,flags); return rc; } static bool irq_test(SLMP_INFO *info) { unsigned long timeout; unsigned long flags; unsigned char timer = (info->port_num & 1) ? TIMER2 : TIMER0; spin_lock_irqsave(&info->lock,flags); reset_port(info); /* assume failure */ info->init_error = DiagStatus_IrqFailure; info->irq_occurred = false; /* setup timer0 on SCA0 to interrupt */ /* IER2<7..4> = timer<3..0> interrupt enables (1=enabled) */ write_reg(info, IER2, (unsigned char)((info->port_num & 1) ? BIT6 : BIT4)); write_reg(info, (unsigned char)(timer + TEPR), 0); /* timer expand prescale */ write_reg16(info, (unsigned char)(timer + TCONR), 1); /* timer constant */ /* TMCS, Timer Control/Status Register * * 07 CMF, Compare match flag (read only) 1=match * 06 ECMI, CMF Interrupt Enable: 1=enabled * 05 Reserved, must be 0 * 04 TME, Timer Enable * 03..00 Reserved, must be 0 * * 0101 0000 */ write_reg(info, (unsigned char)(timer + TMCS), 0x50); spin_unlock_irqrestore(&info->lock,flags); timeout=100; while( timeout-- && !info->irq_occurred ) { msleep_interruptible(10); } spin_lock_irqsave(&info->lock,flags); reset_port(info); spin_unlock_irqrestore(&info->lock,flags); return info->irq_occurred; } /* initialize individual SCA device (2 ports) */ static bool sca_init(SLMP_INFO *info) { /* set wait controller to single mem partition (low), no wait states */ write_reg(info, PABR0, 0); /* wait controller addr boundary 0 */ write_reg(info, PABR1, 0); /* wait controller addr boundary 1 */ write_reg(info, WCRL, 0); /* wait controller low range */ 
write_reg(info, WCRM, 0); /* wait controller mid range */ write_reg(info, WCRH, 0); /* wait controller high range */ /* DPCR, DMA Priority Control * * 07..05 Not used, must be 0 * 04 BRC, bus release condition: 0=all transfers complete * 03 CCC, channel change condition: 0=every cycle * 02..00 PR<2..0>, priority 100=round robin * * 00000100 = 0x04 */ write_reg(info, DPCR, dma_priority); /* DMA Master Enable, BIT7: 1=enable all channels */ write_reg(info, DMER, 0x80); /* enable all interrupt classes */ write_reg(info, IER0, 0xff); /* TxRDY,RxRDY,TxINT,RxINT (ports 0-1) */ write_reg(info, IER1, 0xff); /* DMIB,DMIA (channels 0-3) */ write_reg(info, IER2, 0xf0); /* TIRQ (timers 0-3) */ /* ITCR, interrupt control register * 07 IPC, interrupt priority, 0=MSCI->DMA * 06..05 IAK<1..0>, Acknowledge cycle, 00=non-ack cycle * 04 VOS, Vector Output, 0=unmodified vector * 03..00 Reserved, must be 0 */ write_reg(info, ITCR, 0); return true; } /* initialize adapter hardware */ static bool init_adapter(SLMP_INFO *info) { int i; /* Set BIT30 of Local Control Reg 0x50 to reset SCA */ volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50); u32 readval; info->misc_ctrl_value |= BIT30; *MiscCtrl = info->misc_ctrl_value; /* * Force at least 170ns delay before clearing * reset bit. Each read from LCR takes at least * 30ns so 10 times for 300ns to be safe. 
*/ for(i=0;i<10;i++) readval = *MiscCtrl; info->misc_ctrl_value &= ~BIT30; *MiscCtrl = info->misc_ctrl_value; /* init control reg (all DTRs off, all clksel=input) */ info->ctrlreg_value = 0xaa; write_control_reg(info); { volatile u32 *LCR1BRDR = (u32 *)(info->lcr_base + 0x2c); lcr1_brdr_value &= ~(BIT5 + BIT4 + BIT3); switch(read_ahead_count) { case 16: lcr1_brdr_value |= BIT5 + BIT4 + BIT3; break; case 8: lcr1_brdr_value |= BIT5 + BIT4; break; case 4: lcr1_brdr_value |= BIT5 + BIT3; break; case 0: lcr1_brdr_value |= BIT5; break; } *LCR1BRDR = lcr1_brdr_value; *MiscCtrl = misc_ctrl_value; } sca_init(info->port_array[0]); sca_init(info->port_array[2]); return true; } /* Loopback an HDLC frame to test the hardware * interrupt and DMA functions. */ static bool loopback_test(SLMP_INFO *info) { #define TESTFRAMESIZE 20 unsigned long timeout; u16 count = TESTFRAMESIZE; unsigned char buf[TESTFRAMESIZE]; bool rc = false; unsigned long flags; struct tty_struct *oldtty = info->port.tty; u32 speed = info->params.clock_speed; info->params.clock_speed = 3686400; info->port.tty = NULL; /* assume failure */ info->init_error = DiagStatus_DmaFailure; /* build and send transmit frame */ for (count = 0; count < TESTFRAMESIZE;++count) buf[count] = (unsigned char)count; memset(info->tmp_rx_buf,0,TESTFRAMESIZE); /* program hardware for HDLC and enabled receiver */ spin_lock_irqsave(&info->lock,flags); hdlc_mode(info); enable_loopback(info,1); rx_start(info); info->tx_count = count; tx_load_dma_buffer(info,buf,count); tx_start(info); spin_unlock_irqrestore(&info->lock,flags); /* wait for receive complete */ /* Set a timeout for waiting for interrupt. 
*/ for ( timeout = 100; timeout; --timeout ) { msleep_interruptible(10); if (rx_get_frame(info)) { rc = true; break; } } /* verify received frame length and contents */ if (rc && ( info->tmp_rx_buf_count != count || memcmp(buf, info->tmp_rx_buf,count))) { rc = false; } spin_lock_irqsave(&info->lock,flags); reset_adapter(info); spin_unlock_irqrestore(&info->lock,flags); info->params.clock_speed = speed; info->port.tty = oldtty; return rc; } /* Perform diagnostics on hardware */ static int adapter_test( SLMP_INFO *info ) { unsigned long flags; if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):Testing device %s\n", __FILE__,__LINE__,info->device_name ); spin_lock_irqsave(&info->lock,flags); init_adapter(info); spin_unlock_irqrestore(&info->lock,flags); info->port_array[0]->port_count = 0; if ( register_test(info->port_array[0]) && register_test(info->port_array[1])) { info->port_array[0]->port_count = 2; if ( register_test(info->port_array[2]) && register_test(info->port_array[3]) ) info->port_array[0]->port_count += 2; } else { printk( "%s(%d):Register test failure for device %s Addr=%08lX\n", __FILE__,__LINE__,info->device_name, (unsigned long)(info->phys_sca_base)); return -ENODEV; } if ( !irq_test(info->port_array[0]) || !irq_test(info->port_array[1]) || (info->port_count == 4 && !irq_test(info->port_array[2])) || (info->port_count == 4 && !irq_test(info->port_array[3]))) { printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n", __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) ); return -ENODEV; } if (!loopback_test(info->port_array[0]) || !loopback_test(info->port_array[1]) || (info->port_count == 4 && !loopback_test(info->port_array[2])) || (info->port_count == 4 && !loopback_test(info->port_array[3]))) { printk( "%s(%d):DMA test failure for device %s\n", __FILE__,__LINE__,info->device_name); return -ENODEV; } if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):device %s passed diagnostics\n", 
__FILE__,__LINE__,info->device_name ); info->port_array[0]->init_error = 0; info->port_array[1]->init_error = 0; if ( info->port_count > 2 ) { info->port_array[2]->init_error = 0; info->port_array[3]->init_error = 0; } return 0; } /* Test the shared memory on a PCI adapter. */ static bool memory_test(SLMP_INFO *info) { static unsigned long testval[] = { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 }; unsigned long count = ARRAY_SIZE(testval); unsigned long i; unsigned long limit = SCA_MEM_SIZE/sizeof(unsigned long); unsigned long * addr = (unsigned long *)info->memory_base; /* Test data lines with test pattern at one location. */ for ( i = 0 ; i < count ; i++ ) { *addr = testval[i]; if ( *addr != testval[i] ) return false; } /* Test address lines with incrementing pattern over */ /* entire address range. */ for ( i = 0 ; i < limit ; i++ ) { *addr = i * 4; addr++; } addr = (unsigned long *)info->memory_base; for ( i = 0 ; i < limit ; i++ ) { if ( *addr != i * 4 ) return false; addr++; } memset( info->memory_base, 0, SCA_MEM_SIZE ); return true; } /* Load data into PCI adapter shared memory. * * The PCI9050 releases control of the local bus * after completing the current read or write operation. * * While the PCI9050 write FIFO not empty, the * PCI9050 treats all of the writes as a single transaction * and does not release the bus. This causes DMA latency problems * at high speeds when copying large data blocks to the shared memory. * * This function breaks a write into multiple transations by * interleaving a read which flushes the write FIFO and 'completes' * the write transation. This allows any pending DMA request to gain control * of the local bus in a timely fasion. 
*/ static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count) { /* A load interval of 16 allows for 4 32-bit writes at */ /* 136ns each for a maximum latency of 542ns on the local bus.*/ unsigned short interval = count / sca_pci_load_interval; unsigned short i; for ( i = 0 ; i < interval ; i++ ) { memcpy(dest, src, sca_pci_load_interval); read_status_reg(info); dest += sca_pci_load_interval; src += sca_pci_load_interval; } memcpy(dest, src, count % sca_pci_load_interval); } static void trace_block(SLMP_INFO *info,const char* data, int count, int xmit) { int i; int linecount; if (xmit) printk("%s tx data:\n",info->device_name); else printk("%s rx data:\n",info->device_name); while(count) { if (count > 16) linecount = 16; else linecount = count; for(i=0;i<linecount;i++) printk("%02X ",(unsigned char)data[i]); for(;i<17;i++) printk(" "); for(i=0;i<linecount;i++) { if (data[i]>=040 && data[i]<=0176) printk("%c",data[i]); else printk("."); } printk("\n"); data += linecount; count -= linecount; } } /* end of trace_block() */ /* called when HDLC frame times out * update stats and do tx completion processing */ static void tx_timeout(unsigned long context) { SLMP_INFO *info = (SLMP_INFO*)context; unsigned long flags; if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):%s tx_timeout()\n", __FILE__,__LINE__,info->device_name); if(info->tx_active && info->params.mode == MGSL_MODE_HDLC) { info->icount.txtimeout++; } spin_lock_irqsave(&info->lock,flags); info->tx_active = false; info->tx_count = info->tx_put = info->tx_get = 0; spin_unlock_irqrestore(&info->lock,flags); #if SYNCLINK_GENERIC_HDLC if (info->netcount) hdlcdev_tx_done(info); else #endif bh_transmit(info); } /* called to periodically check the DSR/RI modem signal input status */ static void status_timeout(unsigned long context) { u16 status = 0; SLMP_INFO *info = (SLMP_INFO*)context; unsigned long flags; unsigned char delta; spin_lock_irqsave(&info->lock,flags); 
get_signals(info); spin_unlock_irqrestore(&info->lock,flags); /* check for DSR/RI state change */ delta = info->old_signals ^ info->serial_signals; info->old_signals = info->serial_signals; if (delta & SerialSignal_DSR) status |= MISCSTATUS_DSR_LATCHED|(info->serial_signals&SerialSignal_DSR); if (delta & SerialSignal_RI) status |= MISCSTATUS_RI_LATCHED|(info->serial_signals&SerialSignal_RI); if (delta & SerialSignal_DCD) status |= MISCSTATUS_DCD_LATCHED|(info->serial_signals&SerialSignal_DCD); if (delta & SerialSignal_CTS) status |= MISCSTATUS_CTS_LATCHED|(info->serial_signals&SerialSignal_CTS); if (status) isr_io_pin(info,status); mod_timer(&info->status_timer, jiffies + msecs_to_jiffies(10)); } /* Register Access Routines - * All registers are memory mapped */ #define CALC_REGADDR() \ unsigned char * RegAddr = (unsigned char*)(info->sca_base + Addr); \ if (info->port_num > 1) \ RegAddr += 256; /* port 0-1 SCA0, 2-3 SCA1 */ \ if ( info->port_num & 1) { \ if (Addr > 0x7f) \ RegAddr += 0x40; /* DMA access */ \ else if (Addr > 0x1f && Addr < 0x60) \ RegAddr += 0x20; /* MSCI access */ \ } static unsigned char read_reg(SLMP_INFO * info, unsigned char Addr) { CALC_REGADDR(); return *RegAddr; } static void write_reg(SLMP_INFO * info, unsigned char Addr, unsigned char Value) { CALC_REGADDR(); *RegAddr = Value; } static u16 read_reg16(SLMP_INFO * info, unsigned char Addr) { CALC_REGADDR(); return *((u16 *)RegAddr); } static void write_reg16(SLMP_INFO * info, unsigned char Addr, u16 Value) { CALC_REGADDR(); *((u16 *)RegAddr) = Value; } static unsigned char read_status_reg(SLMP_INFO * info) { unsigned char *RegAddr = (unsigned char *)info->statctrl_base; return *RegAddr; } static void write_control_reg(SLMP_INFO * info) { unsigned char *RegAddr = (unsigned char *)info->statctrl_base; *RegAddr = info->port_array[0]->ctrlreg_value; } static int synclinkmp_init_one (struct pci_dev *dev, const struct pci_device_id *ent) { if (pci_enable_device(dev)) { printk("error enabling pci 
device %p\n", dev); return -EIO; } device_init( ++synclinkmp_adapter_count, dev ); return 0; } static void synclinkmp_remove_one (struct pci_dev *dev) { }
gpl-2.0
Marvellousteam/android_kernel_htc_msm7227
arch/x86/vdso/vdso32-setup.c
846
9947
/* * (C) Copyright 2002 Linus Torvalds * Portions based on the vdso-randomization code from exec-shield: * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar * * This file contains the needed initializations to support sysenter. */ #include <linux/init.h> #include <linux/smp.h> #include <linux/thread_info.h> #include <linux/sched.h> #include <linux/gfp.h> #include <linux/string.h> #include <linux/elf.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/module.h> #include <asm/cpufeature.h> #include <asm/msr.h> #include <asm/pgtable.h> #include <asm/unistd.h> #include <asm/elf.h> #include <asm/tlbflush.h> #include <asm/vdso.h> #include <asm/proto.h> enum { VDSO_DISABLED = 0, VDSO_ENABLED = 1, VDSO_COMPAT = 2, }; #ifdef CONFIG_COMPAT_VDSO #define VDSO_DEFAULT VDSO_COMPAT #else #define VDSO_DEFAULT VDSO_ENABLED #endif #ifdef CONFIG_X86_64 #define vdso_enabled sysctl_vsyscall32 #define arch_setup_additional_pages syscall32_setup_pages #endif /* * This is the difference between the prelinked addresses in the vDSO images * and the VDSO_HIGH_BASE address where CONFIG_COMPAT_VDSO places the vDSO * in the user address space. */ #define VDSO_ADDR_ADJUST (VDSO_HIGH_BASE - (unsigned long)VDSO32_PRELINK) /* * Should the kernel map a VDSO page into processes and pass its * address down to glibc upon exec()? */ unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT; static int __init vdso_setup(char *s) { vdso_enabled = simple_strtoul(s, NULL, 0); return 1; } /* * For consistency, the argument vdso32=[012] affects the 32-bit vDSO * behavior on both 64-bit and 32-bit kernels. * On 32-bit kernels, vdso=[012] means the same thing. 
*/ __setup("vdso32=", vdso_setup); #ifdef CONFIG_X86_32 __setup_param("vdso=", vdso32_setup, vdso_setup, 0); EXPORT_SYMBOL_GPL(vdso_enabled); #endif static __init void reloc_symtab(Elf32_Ehdr *ehdr, unsigned offset, unsigned size) { Elf32_Sym *sym = (void *)ehdr + offset; unsigned nsym = size / sizeof(*sym); unsigned i; for(i = 0; i < nsym; i++, sym++) { if (sym->st_shndx == SHN_UNDEF || sym->st_shndx == SHN_ABS) continue; /* skip */ if (sym->st_shndx > SHN_LORESERVE) { printk(KERN_INFO "VDSO: unexpected st_shndx %x\n", sym->st_shndx); continue; } switch(ELF_ST_TYPE(sym->st_info)) { case STT_OBJECT: case STT_FUNC: case STT_SECTION: case STT_FILE: sym->st_value += VDSO_ADDR_ADJUST; } } } static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset) { Elf32_Dyn *dyn = (void *)ehdr + offset; for(; dyn->d_tag != DT_NULL; dyn++) switch(dyn->d_tag) { case DT_PLTGOT: case DT_HASH: case DT_STRTAB: case DT_SYMTAB: case DT_RELA: case DT_INIT: case DT_FINI: case DT_REL: case DT_DEBUG: case DT_JMPREL: case DT_VERSYM: case DT_VERDEF: case DT_VERNEED: case DT_ADDRRNGLO ... DT_ADDRRNGHI: /* definitely pointers needing relocation */ dyn->d_un.d_ptr += VDSO_ADDR_ADJUST; break; case DT_ENCODING ... OLD_DT_LOOS-1: case DT_LOOS ... DT_HIOS-1: /* Tags above DT_ENCODING are pointers if they're even */ if (dyn->d_tag >= DT_ENCODING && (dyn->d_tag & 1) == 0) dyn->d_un.d_ptr += VDSO_ADDR_ADJUST; break; case DT_VERDEFNUM: case DT_VERNEEDNUM: case DT_FLAGS_1: case DT_RELACOUNT: case DT_RELCOUNT: case DT_VALRNGLO ... DT_VALRNGHI: /* definitely not pointers */ break; case OLD_DT_LOOS ... DT_LOOS-1: case DT_HIOS ... 
DT_VALRNGLO-1: default: if (dyn->d_tag > DT_ENCODING) printk(KERN_INFO "VDSO: unexpected DT_tag %x\n", dyn->d_tag); break; } } static __init void relocate_vdso(Elf32_Ehdr *ehdr) { Elf32_Phdr *phdr; Elf32_Shdr *shdr; int i; BUG_ON(memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0 || !elf_check_arch_ia32(ehdr) || ehdr->e_type != ET_DYN); ehdr->e_entry += VDSO_ADDR_ADJUST; /* rebase phdrs */ phdr = (void *)ehdr + ehdr->e_phoff; for (i = 0; i < ehdr->e_phnum; i++) { phdr[i].p_vaddr += VDSO_ADDR_ADJUST; /* relocate dynamic stuff */ if (phdr[i].p_type == PT_DYNAMIC) reloc_dyn(ehdr, phdr[i].p_offset); } /* rebase sections */ shdr = (void *)ehdr + ehdr->e_shoff; for(i = 0; i < ehdr->e_shnum; i++) { if (!(shdr[i].sh_flags & SHF_ALLOC)) continue; shdr[i].sh_addr += VDSO_ADDR_ADJUST; if (shdr[i].sh_type == SHT_SYMTAB || shdr[i].sh_type == SHT_DYNSYM) reloc_symtab(ehdr, shdr[i].sh_offset, shdr[i].sh_size); } } static struct page *vdso32_pages[1]; #ifdef CONFIG_X86_64 #define vdso32_sysenter() (boot_cpu_has(X86_FEATURE_SYSENTER32)) #define vdso32_syscall() (boot_cpu_has(X86_FEATURE_SYSCALL32)) /* May not be __init: called during resume */ void syscall32_cpu_init(void) { /* Load these always in case some future AMD CPU supports SYSENTER from compat mode too. 
*/ checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL); checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target); wrmsrl(MSR_CSTAR, ia32_cstar_target); } #define compat_uses_vma 1 static inline void map_compat_vdso(int map) { } #else /* CONFIG_X86_32 */ #define vdso32_sysenter() (boot_cpu_has(X86_FEATURE_SEP)) #define vdso32_syscall() (0) void enable_sep_cpu(void) { int cpu = get_cpu(); struct tss_struct *tss = &per_cpu(init_tss, cpu); if (!boot_cpu_has(X86_FEATURE_SEP)) { put_cpu(); return; } tss->x86_tss.ss1 = __KERNEL_CS; tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss; wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0); wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0); put_cpu(); } static struct vm_area_struct gate_vma; static int __init gate_vma_init(void) { gate_vma.vm_mm = NULL; gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; gate_vma.vm_page_prot = __P101; /* * Make sure the vDSO gets into every core dump. * Dumping its contents makes post-mortem fully interpretable later * without matching up the same kernel and hardware config to see * what PC values meant. */ gate_vma.vm_flags |= VM_ALWAYSDUMP; return 0; } #define compat_uses_vma 0 static void map_compat_vdso(int map) { static int vdso_mapped; if (map == vdso_mapped) return; vdso_mapped = map; __set_fixmap(FIX_VDSO, page_to_pfn(vdso32_pages[0]) << PAGE_SHIFT, map ? 
PAGE_READONLY_EXEC : PAGE_NONE); /* flush stray tlbs */ flush_tlb_all(); } #endif /* CONFIG_X86_64 */ int __init sysenter_setup(void) { void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC); const void *vsyscall; size_t vsyscall_len; vdso32_pages[0] = virt_to_page(syscall_page); #ifdef CONFIG_X86_32 gate_vma_init(); #endif if (vdso32_syscall()) { vsyscall = &vdso32_syscall_start; vsyscall_len = &vdso32_syscall_end - &vdso32_syscall_start; } else if (vdso32_sysenter()){ vsyscall = &vdso32_sysenter_start; vsyscall_len = &vdso32_sysenter_end - &vdso32_sysenter_start; } else { vsyscall = &vdso32_int80_start; vsyscall_len = &vdso32_int80_end - &vdso32_int80_start; } memcpy(syscall_page, vsyscall, vsyscall_len); relocate_vdso(syscall_page); return 0; } /* Setup a VMA at program startup for the vsyscall page */ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long addr; int ret = 0; bool compat; if (vdso_enabled == VDSO_DISABLED) return 0; down_write(&mm->mmap_sem); /* Test compat mode once here, in case someone changes it via sysctl */ compat = (vdso_enabled == VDSO_COMPAT); map_compat_vdso(compat); if (compat) addr = VDSO_HIGH_BASE; else { addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } } current->mm->context.vdso = (void *)addr; if (compat_uses_vma || !compat) { /* * MAYWRITE to allow gdb to COW and set breakpoints * * Make sure the vDSO gets into every core dump. * Dumping its contents makes post-mortem fully * interpretable later without matching up the same * kernel and hardware config to see what PC values * meant. 
*/ ret = install_special_mapping(mm, addr, PAGE_SIZE, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| VM_ALWAYSDUMP, vdso32_pages); if (ret) goto up_fail; } current_thread_info()->sysenter_return = VDSO32_SYMBOL(addr, SYSENTER_RETURN); up_fail: if (ret) current->mm->context.vdso = NULL; up_write(&mm->mmap_sem); return ret; } #ifdef CONFIG_X86_64 __initcall(sysenter_setup); #ifdef CONFIG_SYSCTL /* Register vsyscall32 into the ABI table */ #include <linux/sysctl.h> static ctl_table abi_table2[] = { { .procname = "vsyscall32", .data = &sysctl_vsyscall32, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, {} }; static ctl_table abi_root_table2[] = { { .procname = "abi", .mode = 0555, .child = abi_table2 }, {} }; static __init int ia32_binfmt_init(void) { register_sysctl_table(abi_root_table2); return 0; } __initcall(ia32_binfmt_init); #endif #else /* CONFIG_X86_32 */ const char *arch_vma_name(struct vm_area_struct *vma) { if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) return "[vdso]"; return NULL; } struct vm_area_struct *get_gate_vma(struct task_struct *tsk) { struct mm_struct *mm = tsk->mm; /* Check to see if this task was created in compat vdso mode */ if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE) return &gate_vma; return NULL; } int in_gate_area(struct task_struct *task, unsigned long addr) { const struct vm_area_struct *vma = get_gate_vma(task); return vma && addr >= vma->vm_start && addr < vma->vm_end; } int in_gate_area_no_task(unsigned long addr) { return 0; } #endif /* CONFIG_X86_64 */
gpl-2.0
akuster/linux-yocto-dev
drivers/net/wireless/b43legacy/sysfs.c
2126
5618
/*

  Broadcom B43legacy wireless driver

  SYSFS support routines

  Copyright (c) 2006 Michael Buesch <m@bues.ch>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "sysfs.h"
#include "b43legacy.h"
#include "main.h"
#include "phy.h"
#include "radio.h"

#include <linux/capability.h>


#define GENERIC_FILESIZE	64

/*
 * Parse a decimal integer from a sysfs "store" buffer.
 * Only the first 10 characters are considered (copied into a
 * NUL-terminated scratch buffer before conversion).
 * Returns the parsed value, or -EINVAL if count is 0.
 * NOTE(review): simple_strtol cannot report parse failure here, so
 * non-numeric input parses as 0 rather than returning -EINVAL.
 */
static int get_integer(const char *buf, size_t count)
{
	char tmp[10 + 1] = { 0 };
	int ret = -EINVAL;

	if (count == 0)
		goto out;
	count = min_t(size_t, count, 10);
	memcpy(tmp, buf, count);
	ret = simple_strtol(tmp, NULL, 10);
out:
	return ret;
}

/*
 * Parse a boolean from a sysfs "store" buffer.
 * Accepts (by prefix match) "1"/"0", "true"/"false", "yes"/"no" and
 * "on"/"off". Returns 1 or 0 accordingly, or -EINVAL on anything else
 * (including an empty buffer).
 */
static int get_boolean(const char *buf, size_t count)
{
	if (count != 0) {
		if (buf[0] == '1')
			return 1;
		if (buf[0] == '0')
			return 0;
		if (count >= 4 && memcmp(buf, "true", 4) == 0)
			return 1;
		if (count >= 5 && memcmp(buf, "false", 5) == 0)
			return 0;
		if (count >= 3 && memcmp(buf, "yes", 3) == 0)
			return 1;
		if (count >= 2 && memcmp(buf, "no", 2) == 0)
			return 0;
		if (count >= 2 && memcmp(buf, "on", 2) == 0)
			return 1;
		if (count >= 3 && memcmp(buf, "off", 3) == 0)
			return 0;
	}
	return -EINVAL;
}

/*
 * "interference" attribute show handler: reports the current
 * interference-mitigation mode as "<number> (<description>)\n".
 * Requires CAP_NET_ADMIN; takes the wl mutex while reading the mode.
 */
static ssize_t b43legacy_attr_interfmode_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev);
	ssize_t count = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	mutex_lock(&wldev->wl->mutex);

	switch (wldev->phy.interfmode) {
	case B43legacy_INTERFMODE_NONE:
		count = snprintf(buf, PAGE_SIZE, "0 (No Interference"
				 " Mitigation)\n");
		break;
	case B43legacy_INTERFMODE_NONWLAN:
		count = snprintf(buf, PAGE_SIZE, "1 (Non-WLAN Interference"
				 " Mitigation)\n");
		break;
	case B43legacy_INTERFMODE_MANUALWLAN:
		count = snprintf(buf, PAGE_SIZE, "2 (WLAN Interference"
				 " Mitigation)\n");
		break;
	default:
		/* Unknown mode value: warn and return 0 (empty output). */
		B43legacy_WARN_ON(1);
	}

	mutex_unlock(&wldev->wl->mutex);

	return count;
}

/*
 * "interference" attribute store handler: maps user input 0..3 to the
 * corresponding B43legacy_INTERFMODE_* constant and programs it into
 * the radio. Requires CAP_NET_ADMIN. Returns count on success, -EINVAL
 * for out-of-range input, or the error from the radio layer.
 * Locking: wl mutex plus irq_lock around the hardware access.
 */
static ssize_t b43legacy_attr_interfmode_store(struct device *dev,
					       struct device_attribute *attr,
					       const char *buf, size_t count)
{
	struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev);
	unsigned long flags;
	int err;
	int mode;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	mode = get_integer(buf, count);
	switch (mode) {
	case 0:
		mode = B43legacy_INTERFMODE_NONE;
		break;
	case 1:
		mode = B43legacy_INTERFMODE_NONWLAN;
		break;
	case 2:
		mode = B43legacy_INTERFMODE_MANUALWLAN;
		break;
	case 3:
		mode = B43legacy_INTERFMODE_AUTOWLAN;
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&wldev->wl->mutex);
	spin_lock_irqsave(&wldev->wl->irq_lock, flags);

	err = b43legacy_radio_set_interference_mitigation(wldev, mode);
	if (err)
		b43legacyerr(wldev->wl, "Interference Mitigation not "
			     "supported by device\n");
	/* Order MMIO writes before dropping the spinlock. */
	mmiowb();
	spin_unlock_irqrestore(&wldev->wl->irq_lock, flags);
	mutex_unlock(&wldev->wl->mutex);

	return err ? err : count;
}

static DEVICE_ATTR(interference, 0644,
		   b43legacy_attr_interfmode_show,
		   b43legacy_attr_interfmode_store);

/*
 * "shortpreamble" attribute show handler: reports whether short
 * preamble is enabled as "1 (...)" or "0 (...)".
 * Requires CAP_NET_ADMIN; takes the wl mutex while reading the flag.
 */
static ssize_t b43legacy_attr_preamble_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev);
	ssize_t count;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	mutex_lock(&wldev->wl->mutex);

	if (wldev->short_preamble)
		count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble"
				 " enabled)\n");
	else
		count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble"
				 " disabled)\n");

	mutex_unlock(&wldev->wl->mutex);

	return count;
}

/*
 * "shortpreamble" attribute store handler: parses a boolean and updates
 * wldev->short_preamble under wl mutex + irq_lock.
 * Requires CAP_NET_ADMIN. Returns count, or a negative errno from
 * get_boolean() on unparsable input.
 */
static ssize_t b43legacy_attr_preamble_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev);
	unsigned long flags;
	int value;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	value = get_boolean(buf, count);
	if (value < 0)
		return value;
	mutex_lock(&wldev->wl->mutex);
	spin_lock_irqsave(&wldev->wl->irq_lock, flags);

	wldev->short_preamble = !!value;

	spin_unlock_irqrestore(&wldev->wl->irq_lock, flags);
	mutex_unlock(&wldev->wl->mutex);

	return count;
}

static DEVICE_ATTR(shortpreamble, 0644,
		   b43legacy_attr_preamble_show,
		   b43legacy_attr_preamble_store);

/*
 * Create the "interference" and "shortpreamble" sysfs files for an
 * initialized device. On partial failure the already-created file is
 * removed again. Returns 0 or the device_create_file() error.
 */
int b43legacy_sysfs_register(struct b43legacy_wldev *wldev)
{
	struct device *dev = wldev->dev->dev;
	int err;

	B43legacy_WARN_ON(b43legacy_status(wldev) !=
			  B43legacy_STAT_INITIALIZED);

	err = device_create_file(dev, &dev_attr_interference);
	if (err)
		goto out;
	err = device_create_file(dev, &dev_attr_shortpreamble);
	if (err)
		goto err_remove_interfmode;

out:
	return err;
err_remove_interfmode:
	device_remove_file(dev, &dev_attr_interference);
	goto out;
}

/* Remove both sysfs files; inverse of b43legacy_sysfs_register(). */
void b43legacy_sysfs_unregister(struct b43legacy_wldev *wldev)
{
	struct device *dev = wldev->dev->dev;

	device_remove_file(dev, &dev_attr_shortpreamble);
	device_remove_file(dev, &dev_attr_interference);
}
gpl-2.0
sattarvoybek/android_kernel_zte_p839f30
arch/s390/kernel/bitmap.c
2382
1701
/*
 * Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
 * See include/asm/{bitops.h|posix_types.h} for details
 *
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 */

#include <linux/bitops.h>
#include <linux/module.h>

/* _oi_bitmap[n]: byte mask with only bit n set (used to set bit n). */
const char _oi_bitmap[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
EXPORT_SYMBOL(_oi_bitmap);

/* _ni_bitmap[n]: byte mask with only bit n cleared (used to clear bit n). */
const char _ni_bitmap[] = { 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f };
EXPORT_SYMBOL(_ni_bitmap);

/*
 * _zb_findmap[b]: position of the lowest zero bit in byte value b
 * (e.g. _zb_findmap[0x01] == 1). Entry 0xff is 8: no zero bit present.
 */
const char _zb_findmap[] = {
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 };
EXPORT_SYMBOL(_zb_findmap);

/*
 * _sb_findmap[b]: position of the lowest set bit in byte value b
 * (e.g. _sb_findmap[0x02] == 1). Entry 0 is 8: no set bit present.
 */
const char _sb_findmap[] = {
	8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 };
EXPORT_SYMBOL(_sb_findmap);
gpl-2.0
TeamEOS/kernel_htc_flounder
arch/s390/kernel/bitmap.c
2382
1701
/*
 * Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
 * See include/asm/{bitops.h|posix_types.h} for details
 *
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 */

#include <linux/bitops.h>
#include <linux/module.h>

/* _oi_bitmap[n]: byte mask with only bit n set (used to set bit n). */
const char _oi_bitmap[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
EXPORT_SYMBOL(_oi_bitmap);

/* _ni_bitmap[n]: byte mask with only bit n cleared (used to clear bit n). */
const char _ni_bitmap[] = { 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f };
EXPORT_SYMBOL(_ni_bitmap);

/*
 * _zb_findmap[b]: position of the lowest zero bit in byte value b
 * (e.g. _zb_findmap[0x01] == 1). Entry 0xff is 8: no zero bit present.
 */
const char _zb_findmap[] = {
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 };
EXPORT_SYMBOL(_zb_findmap);

/*
 * _sb_findmap[b]: position of the lowest set bit in byte value b
 * (e.g. _sb_findmap[0x02] == 1). Entry 0 is 8: no set bit present.
 */
const char _sb_findmap[] = {
	8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 };
EXPORT_SYMBOL(_sb_findmap);
gpl-2.0
zephiK/android_kernel_moto_shamu_fk
arch/s390/kernel/bitmap.c
2382
1701
/*
 * Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
 * See include/asm/{bitops.h|posix_types.h} for details
 *
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 */

#include <linux/bitops.h>
#include <linux/module.h>

/* _oi_bitmap[n]: byte mask with only bit n set (used to set bit n). */
const char _oi_bitmap[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
EXPORT_SYMBOL(_oi_bitmap);

/* _ni_bitmap[n]: byte mask with only bit n cleared (used to clear bit n). */
const char _ni_bitmap[] = { 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f };
EXPORT_SYMBOL(_ni_bitmap);

/*
 * _zb_findmap[b]: position of the lowest zero bit in byte value b
 * (e.g. _zb_findmap[0x01] == 1). Entry 0xff is 8: no zero bit present.
 */
const char _zb_findmap[] = {
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 };
EXPORT_SYMBOL(_zb_findmap);

/*
 * _sb_findmap[b]: position of the lowest set bit in byte value b
 * (e.g. _sb_findmap[0x02] == 1). Entry 0 is 8: no set bit present.
 */
const char _sb_findmap[] = {
	8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 };
EXPORT_SYMBOL(_sb_findmap);
gpl-2.0
Sohamlad7/kernel
drivers/video/backlight/vgg2432a4.c
3918
6301
/* drivers/video/backlight/vgg2432a4.c
 *
 * VGG2432A4 (ILI9320) LCD controller driver.
 *
 * Copyright 2007 Simtec Electronics
 *	http://armlinux.simtec.co.uk/
 *	Ben Dooks <ben@simtec.co.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/lcd.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

#include <video/ili9320.h>

#include "ili9320.h"

/* Device initialisation sequences */

/* First power-control pass: everything programmed to its minimum
 * (AP/BT/VC/DC/VRH/VREOUT all 0 or base values) before power-up. */
static const struct ili9320_reg vgg_init1[] = {
	{
		.address = ILI9320_POWER1,
		.value	 = ILI9320_POWER1_AP(0) | ILI9320_POWER1_BT(0),
	}, {
		.address = ILI9320_POWER2,
		.value	 = (ILI9320_POWER2_VC(7) |
			    ILI9320_POWER2_DC0(0) | ILI9320_POWER2_DC1(0)),
	}, {
		.address = ILI9320_POWER3,
		.value	 = ILI9320_POWER3_VRH(0),
	}, {
		.address = ILI9320_POWER4,
		.value	 = ILI9320_POWER4_VREOUT(0),
	},
};

/* Second power-control pass: enable the power amplifiers/step-up
 * circuits (AP/APE/BT/SAP) after the settling delay. */
static const struct ili9320_reg vgg_init2[] = {
	{
		.address = ILI9320_POWER1,
		.value	 = (ILI9320_POWER1_AP(3) | ILI9320_POWER1_APE |
			    ILI9320_POWER1_BT(7) | ILI9320_POWER1_SAP),
	}, {
		.address = ILI9320_POWER2,
		.value	 = ILI9320_POWER2_VC(7) | ILI9320_POWER2_DC0(3),
	}
};

/* Gamma correction curve for this panel (registers GAMMA1..GAMMA10). */
static const struct ili9320_reg vgg_gamma[] = {
	{
		.address = ILI9320_GAMMA1,
		.value	 = 0x0000,
	}, {
		.address = ILI9320_GAMMA2,
		.value	 = 0x0505,
	}, {
		.address = ILI9320_GAMMA3,
		.value	 = 0x0004,
	}, {
		.address = ILI9320_GAMMA4,
		.value	 = 0x0006,
	}, {
		.address = ILI9320_GAMMA5,
		.value	 = 0x0707,
	}, {
		.address = ILI9320_GAMMA6,
		.value	 = 0x0105,
	}, {
		.address = ILI9320_GAMMA7,
		.value	 = 0x0002,
	}, {
		.address = ILI9320_GAMMA8,
		.value	 = 0x0707,
	}, {
		.address = ILI9320_GAMMA9,
		.value	 = 0x0704,
	}, {
		.address = ILI9320_GAMMA10,
		.value	 = 0x807,
	}
};

/* Basic controller setup: scan direction, drive waveform, entry mode
 * (BGR, address-increment direction), no resizing. */
static const struct ili9320_reg vgg_init0[] = {
	[0]	= { /* set direction and scan mode gate */
		.address = ILI9320_DRIVER,
		.value	 = ILI9320_DRIVER_SS,
	}, {
		.address = ILI9320_DRIVEWAVE,
		.value	 = (ILI9320_DRIVEWAVE_MUSTSET |
			    ILI9320_DRIVEWAVE_EOR | ILI9320_DRIVEWAVE_BC),
	}, {
		.address = ILI9320_ENTRYMODE,
		.value	 = ILI9320_ENTRYMODE_ID(3) | ILI9320_ENTRYMODE_BGR,
	}, {
		.address = ILI9320_RESIZING,
		.value	 = 0x0,
	},
};

/*
 * Full panel bring-up sequence for the VGG2432A4.
 *
 * The register ordering and the mdelay() settling times between the
 * power-control passes follow the panel's power-on sequence and must
 * not be reordered. Platform-specific values (display sizes, RGB and
 * system interface settings) come from @cfg.
 *
 * Returns 0 on success or the first ili9320_write*() error. Note that
 * only the ili9320_write_regs() calls and the two initial writes are
 * error-checked; the remaining single-register writes are fire-and-forget.
 */
static int vgg2432a4_lcd_init(struct ili9320 *lcd,
			      struct ili9320_platdata *cfg)
{
	unsigned int addr;
	int ret;

	/* Set VCore before anything else (VGG243237-6UFLWA) */
	ret = ili9320_write(lcd, 0x00e5, 0x8000);
	if (ret)
		goto err_initial;

	/* Start the oscillator up before we can do anything else. */
	ret = ili9320_write(lcd, ILI9320_OSCILATION, ILI9320_OSCILATION_OSC);
	if (ret)
		goto err_initial;

	/* must wait at least 10ms after starting the oscillator */
	mdelay(15);

	ret = ili9320_write_regs(lcd, vgg_init0, ARRAY_SIZE(vgg_init0));
	if (ret != 0)
		goto err_initial;

	/* Display timing and RGB-interface setup from platform data. */
	ili9320_write(lcd, ILI9320_DISPLAY2, cfg->display2);
	ili9320_write(lcd, ILI9320_DISPLAY3, cfg->display3);
	ili9320_write(lcd, ILI9320_DISPLAY4, cfg->display4);

	ili9320_write(lcd, ILI9320_RGB_IF1, cfg->rgb_if1);
	ili9320_write(lcd, ILI9320_FRAMEMAKER, 0x0);
	ili9320_write(lcd, ILI9320_RGB_IF2, cfg->rgb_if2);

	/* Two-stage power-on with settling delays in between. */
	ret = ili9320_write_regs(lcd, vgg_init1, ARRAY_SIZE(vgg_init1));
	if (ret != 0)
		goto err_vgg;

	mdelay(300);

	ret = ili9320_write_regs(lcd, vgg_init2, ARRAY_SIZE(vgg_init2));
	if (ret != 0)
		goto err_vgg2;

	mdelay(100);

	ili9320_write(lcd, ILI9320_POWER3, 0x13c);

	mdelay(100);

	ili9320_write(lcd, ILI9320_POWER4, 0x1c00);
	ili9320_write(lcd, ILI9320_POWER7, 0x000e);

	mdelay(100);

	/* Reset GRAM address counter to the origin. */
	ili9320_write(lcd, ILI9320_GRAM_HORIZ_ADDR, 0x00);
	ili9320_write(lcd, ILI9320_GRAM_VERT_ADD, 0x00);

	ret = ili9320_write_regs(lcd, vgg_gamma, ARRAY_SIZE(vgg_gamma));
	if (ret != 0)
		goto err_vgg3;

	/* Window the display to the panel size from platform data. */
	ili9320_write(lcd, ILI9320_HORIZ_START, 0x0);
	ili9320_write(lcd, ILI9320_HORIZ_END, cfg->hsize - 1);
	ili9320_write(lcd, ILI9320_VERT_START, 0x0);
	ili9320_write(lcd, ILI9320_VERT_END, cfg->vsize - 1);

	ili9320_write(lcd, ILI9320_DRIVER2,
		      ILI9320_DRIVER2_NL(((cfg->vsize - 240) / 8) + 0x1D));

	ili9320_write(lcd, ILI9320_BASE_IMAGE, 0x1);
	ili9320_write(lcd, ILI9320_VERT_SCROLL, 0x00);

	/* Disable partial display: zero the whole partial-image range. */
	for (addr = ILI9320_PARTIAL1_POSITION; addr <= ILI9320_PARTIAL2_END;
	     addr++) {
		ili9320_write(lcd, addr, 0x0);
	}

	ili9320_write(lcd, ILI9320_INTERFACE1, 0x10);
	ili9320_write(lcd, ILI9320_INTERFACE2, cfg->interface2);
	ili9320_write(lcd, ILI9320_INTERFACE3, cfg->interface3);
	ili9320_write(lcd, ILI9320_INTERFACE4, cfg->interface4);
	ili9320_write(lcd, ILI9320_INTERFACE5, cfg->interface5);
	ili9320_write(lcd, ILI9320_INTERFACE6, cfg->interface6);

	/* Finally turn the display on; the value is cached in lcd->display1
	 * so the core can restore it on resume. */
	lcd->display1 = (ILI9320_DISPLAY1_D(3) | ILI9320_DISPLAY1_DTE |
			 ILI9320_DISPLAY1_GON | ILI9320_DISPLAY1_BASEE |
			 0x40);

	ili9320_write(lcd, ILI9320_DISPLAY1, lcd->display1);

	return 0;

 err_vgg3:
 err_vgg2:
 err_vgg:
 err_initial:
	return ret;
}

#ifdef CONFIG_PM_SLEEP
/* Delegate system suspend/resume to the shared ili9320 core. */
static int vgg2432a4_suspend(struct device *dev)
{
	return ili9320_suspend(dev_get_drvdata(dev));
}

static int vgg2432a4_resume(struct device *dev)
{
	return ili9320_resume(dev_get_drvdata(dev));
}
#endif

/* Panel description handed to the generic ili9320 core. */
static struct ili9320_client vgg2432a4_client = {
	.name	= "VGG2432A4",
	.init	= vgg2432a4_lcd_init,
};

/* Device probe */

static int vgg2432a4_probe(struct spi_device *spi)
{
	int ret;

	ret = ili9320_probe_spi(spi, &vgg2432a4_client);
	if (ret != 0) {
		dev_err(&spi->dev, "failed to initialise ili9320\n");
		return ret;
	}

	return 0;
}

static int vgg2432a4_remove(struct spi_device *spi)
{
	return ili9320_remove(spi_get_drvdata(spi));
}

static void vgg2432a4_shutdown(struct spi_device *spi)
{
	ili9320_shutdown(spi_get_drvdata(spi));
}

static SIMPLE_DEV_PM_OPS(vgg2432a4_pm_ops, vgg2432a4_suspend, vgg2432a4_resume);

static struct spi_driver vgg2432a4_driver = {
	.driver = {
		.name		= "VGG2432A4",
		.owner		= THIS_MODULE,
		.pm		= &vgg2432a4_pm_ops,
	},
	.probe		= vgg2432a4_probe,
	.remove		= vgg2432a4_remove,
	.shutdown	= vgg2432a4_shutdown,
};

module_spi_driver(vgg2432a4_driver);

MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
MODULE_DESCRIPTION("VGG2432A4 LCD Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:VGG2432A4");
gpl-2.0
android-ia/kernel_intel-uefi
drivers/isdn/hardware/avm/b1pci.c
4686
10820
/* $Id: b1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $ * * Module for AVM B1 PCI-card. * * Copyright 1999 by Carsten Paeth <calle@calle.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/pci.h> #include <linux/capi.h> #include <asm/io.h> #include <linux/init.h> #include <linux/isdn/capicmd.h> #include <linux/isdn/capiutil.h> #include <linux/isdn/capilli.h> #include "avmcard.h" /* ------------------------------------------------------------- */ static char *revision = "$Revision: 1.1.2.2 $"; /* ------------------------------------------------------------- */ static struct pci_device_id b1pci_pci_tbl[] = { { PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_B1, PCI_ANY_ID, PCI_ANY_ID }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, b1pci_pci_tbl); MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM B1 PCI card"); MODULE_AUTHOR("Carsten Paeth"); MODULE_LICENSE("GPL"); /* ------------------------------------------------------------- */ static char *b1pci_procinfo(struct capi_ctr *ctrl) { avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); if (!cinfo) return ""; sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d", cinfo->cardname[0] ? cinfo->cardname : "-", cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-", cinfo->card ? cinfo->card->port : 0x0, cinfo->card ? cinfo->card->irq : 0, cinfo->card ? 
cinfo->card->revision : 0 ); return cinfo->infobuf; } /* ------------------------------------------------------------- */ static int b1pci_probe(struct capicardparams *p, struct pci_dev *pdev) { avmcard *card; avmctrl_info *cinfo; int retval; card = b1_alloc_card(1); if (!card) { printk(KERN_WARNING "b1pci: no memory.\n"); retval = -ENOMEM; goto err; } cinfo = card->ctrlinfo; sprintf(card->name, "b1pci-%x", p->port); card->port = p->port; card->irq = p->irq; card->cardtype = avm_b1pci; if (!request_region(card->port, AVMB1_PORTLEN, card->name)) { printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n", card->port, card->port + AVMB1_PORTLEN); retval = -EBUSY; goto err_free; } b1_reset(card->port); retval = b1_detect(card->port, card->cardtype); if (retval) { printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n", card->port, retval); retval = -ENODEV; goto err_release_region; } b1_reset(card->port); b1_getrevision(card); retval = request_irq(card->irq, b1_interrupt, IRQF_SHARED, card->name, card); if (retval) { printk(KERN_ERR "b1pci: unable to get IRQ %d.\n", card->irq); retval = -EBUSY; goto err_release_region; } cinfo->capi_ctrl.driver_name = "b1pci"; cinfo->capi_ctrl.driverdata = cinfo; cinfo->capi_ctrl.register_appl = b1_register_appl; cinfo->capi_ctrl.release_appl = b1_release_appl; cinfo->capi_ctrl.send_message = b1_send_message; cinfo->capi_ctrl.load_firmware = b1_load_firmware; cinfo->capi_ctrl.reset_ctr = b1_reset_ctr; cinfo->capi_ctrl.procinfo = b1pci_procinfo; cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops; strcpy(cinfo->capi_ctrl.name, card->name); cinfo->capi_ctrl.owner = THIS_MODULE; retval = attach_capi_ctr(&cinfo->capi_ctrl); if (retval) { printk(KERN_ERR "b1pci: attach controller failed.\n"); goto err_free_irq; } if (card->revision >= 4) { printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, revision %d (no dma)\n", card->port, card->irq, card->revision); } else { printk(KERN_INFO "b1pci: AVM B1 PCI at i/o %#x, irq %d, revision %d\n", 
card->port, card->irq, card->revision); } pci_set_drvdata(pdev, card); return 0; err_free_irq: free_irq(card->irq, card); err_release_region: release_region(card->port, AVMB1_PORTLEN); err_free: b1_free_card(card); err: return retval; } static void b1pci_remove(struct pci_dev *pdev) { avmcard *card = pci_get_drvdata(pdev); avmctrl_info *cinfo = card->ctrlinfo; unsigned int port = card->port; b1_reset(port); b1_reset(port); detach_capi_ctr(&cinfo->capi_ctrl); free_irq(card->irq, card); release_region(card->port, AVMB1_PORTLEN); b1_free_card(card); } #ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4 /* ------------------------------------------------------------- */ static char *b1pciv4_procinfo(struct capi_ctr *ctrl) { avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); if (!cinfo) return ""; sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx r%d", cinfo->cardname[0] ? cinfo->cardname : "-", cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-", cinfo->card ? cinfo->card->port : 0x0, cinfo->card ? cinfo->card->irq : 0, cinfo->card ? cinfo->card->membase : 0, cinfo->card ? 
cinfo->card->revision : 0 ); return cinfo->infobuf; } /* ------------------------------------------------------------- */ static int b1pciv4_probe(struct capicardparams *p, struct pci_dev *pdev) { avmcard *card; avmctrl_info *cinfo; int retval; card = b1_alloc_card(1); if (!card) { printk(KERN_WARNING "b1pci: no memory.\n"); retval = -ENOMEM; goto err; } card->dma = avmcard_dma_alloc("b1pci", pdev, 2048 + 128, 2048 + 128); if (!card->dma) { printk(KERN_WARNING "b1pci: dma alloc.\n"); retval = -ENOMEM; goto err_free; } cinfo = card->ctrlinfo; sprintf(card->name, "b1pciv4-%x", p->port); card->port = p->port; card->irq = p->irq; card->membase = p->membase; card->cardtype = avm_b1pci; if (!request_region(card->port, AVMB1_PORTLEN, card->name)) { printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n", card->port, card->port + AVMB1_PORTLEN); retval = -EBUSY; goto err_free_dma; } card->mbase = ioremap(card->membase, 64); if (!card->mbase) { printk(KERN_NOTICE "b1pci: can't remap memory at 0x%lx\n", card->membase); retval = -ENOMEM; goto err_release_region; } b1dma_reset(card); retval = b1pciv4_detect(card); if (retval) { printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n", card->port, retval); retval = -ENODEV; goto err_unmap; } b1dma_reset(card); b1_getrevision(card); retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card); if (retval) { printk(KERN_ERR "b1pci: unable to get IRQ %d.\n", card->irq); retval = -EBUSY; goto err_unmap; } cinfo->capi_ctrl.owner = THIS_MODULE; cinfo->capi_ctrl.driver_name = "b1pciv4"; cinfo->capi_ctrl.driverdata = cinfo; cinfo->capi_ctrl.register_appl = b1dma_register_appl; cinfo->capi_ctrl.release_appl = b1dma_release_appl; cinfo->capi_ctrl.send_message = b1dma_send_message; cinfo->capi_ctrl.load_firmware = b1dma_load_firmware; cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr; cinfo->capi_ctrl.procinfo = b1pciv4_procinfo; cinfo->capi_ctrl.proc_fops = &b1dmactl_proc_fops; strcpy(cinfo->capi_ctrl.name, card->name); retval 
= attach_capi_ctr(&cinfo->capi_ctrl); if (retval) { printk(KERN_ERR "b1pci: attach controller failed.\n"); goto err_free_irq; } card->cardnr = cinfo->capi_ctrl.cnr; printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, mem %#lx, revision %d (dma)\n", card->port, card->irq, card->membase, card->revision); pci_set_drvdata(pdev, card); return 0; err_free_irq: free_irq(card->irq, card); err_unmap: iounmap(card->mbase); err_release_region: release_region(card->port, AVMB1_PORTLEN); err_free_dma: avmcard_dma_free(card->dma); err_free: b1_free_card(card); err: return retval; } static void b1pciv4_remove(struct pci_dev *pdev) { avmcard *card = pci_get_drvdata(pdev); avmctrl_info *cinfo = card->ctrlinfo; b1dma_reset(card); detach_capi_ctr(&cinfo->capi_ctrl); free_irq(card->irq, card); iounmap(card->mbase); release_region(card->port, AVMB1_PORTLEN); avmcard_dma_free(card->dma); b1_free_card(card); } #endif /* CONFIG_ISDN_DRV_AVMB1_B1PCIV4 */ static int b1pci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct capicardparams param; int retval; if (pci_enable_device(pdev) < 0) { printk(KERN_ERR "b1pci: failed to enable AVM-B1\n"); return -ENODEV; } param.irq = pdev->irq; if (pci_resource_start(pdev, 2)) { /* B1 PCI V4 */ #ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4 pci_set_master(pdev); #endif param.membase = pci_resource_start(pdev, 0); param.port = pci_resource_start(pdev, 2); printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 V4 at i/o %#x, irq %d, mem %#x\n", param.port, param.irq, param.membase); #ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4 retval = b1pciv4_probe(&param, pdev); #else retval = b1pci_probe(&param, pdev); #endif if (retval != 0) { printk(KERN_ERR "b1pci: no AVM-B1 V4 at i/o %#x, irq %d, mem %#x detected\n", param.port, param.irq, param.membase); } } else { param.membase = 0; param.port = pci_resource_start(pdev, 1); printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 at i/o %#x, irq %d\n", param.port, param.irq); retval = b1pci_probe(&param, pdev); if 
(retval != 0) { printk(KERN_ERR "b1pci: no AVM-B1 at i/o %#x, irq %d detected\n", param.port, param.irq); } } return retval; } static void b1pci_pci_remove(struct pci_dev *pdev) { #ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4 avmcard *card = pci_get_drvdata(pdev); if (card->dma) b1pciv4_remove(pdev); else b1pci_remove(pdev); #else b1pci_remove(pdev); #endif } static struct pci_driver b1pci_pci_driver = { .name = "b1pci", .id_table = b1pci_pci_tbl, .probe = b1pci_pci_probe, .remove = b1pci_pci_remove, }; static struct capi_driver capi_driver_b1pci = { .name = "b1pci", .revision = "1.0", }; #ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4 static struct capi_driver capi_driver_b1pciv4 = { .name = "b1pciv4", .revision = "1.0", }; #endif static int __init b1pci_init(void) { char *p; char rev[32]; int err; if ((p = strchr(revision, ':')) != NULL && p[1]) { strlcpy(rev, p + 2, 32); if ((p = strchr(rev, '$')) != NULL && p > rev) *(p - 1) = 0; } else strcpy(rev, "1.0"); err = pci_register_driver(&b1pci_pci_driver); if (!err) { strlcpy(capi_driver_b1pci.revision, rev, 32); register_capi_driver(&capi_driver_b1pci); #ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4 strlcpy(capi_driver_b1pciv4.revision, rev, 32); register_capi_driver(&capi_driver_b1pciv4); #endif printk(KERN_INFO "b1pci: revision %s\n", rev); } return err; } static void __exit b1pci_exit(void) { unregister_capi_driver(&capi_driver_b1pci); #ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4 unregister_capi_driver(&capi_driver_b1pciv4); #endif pci_unregister_driver(&b1pci_pci_driver); } module_init(b1pci_init); module_exit(b1pci_exit);
gpl-2.0
MattCrystal/yolo-computing-machine
arch/tile/kernel/backtrace.c
4686
18097
/* * Copyright 2011 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/kernel.h> #include <linux/string.h> #include <asm/backtrace.h> #include <asm/tile-desc.h> #include <arch/abi.h> #ifdef __tilegx__ #define TILE_MAX_INSTRUCTIONS_PER_BUNDLE TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE #define tile_decoded_instruction tilegx_decoded_instruction #define tile_mnemonic tilegx_mnemonic #define parse_insn_tile parse_insn_tilegx #define TILE_OPC_IRET TILEGX_OPC_IRET #define TILE_OPC_ADDI TILEGX_OPC_ADDI #define TILE_OPC_ADDLI TILEGX_OPC_ADDLI #define TILE_OPC_INFO TILEGX_OPC_INFO #define TILE_OPC_INFOL TILEGX_OPC_INFOL #define TILE_OPC_JRP TILEGX_OPC_JRP #define TILE_OPC_MOVE TILEGX_OPC_MOVE #define OPCODE_STORE TILEGX_OPC_ST typedef long long bt_int_reg_t; #else #define TILE_MAX_INSTRUCTIONS_PER_BUNDLE TILEPRO_MAX_INSTRUCTIONS_PER_BUNDLE #define tile_decoded_instruction tilepro_decoded_instruction #define tile_mnemonic tilepro_mnemonic #define parse_insn_tile parse_insn_tilepro #define TILE_OPC_IRET TILEPRO_OPC_IRET #define TILE_OPC_ADDI TILEPRO_OPC_ADDI #define TILE_OPC_ADDLI TILEPRO_OPC_ADDLI #define TILE_OPC_INFO TILEPRO_OPC_INFO #define TILE_OPC_INFOL TILEPRO_OPC_INFOL #define TILE_OPC_JRP TILEPRO_OPC_JRP #define TILE_OPC_MOVE TILEPRO_OPC_MOVE #define OPCODE_STORE TILEPRO_OPC_SW typedef int bt_int_reg_t; #endif /* A decoded bundle used for backtracer analysis. 
*/ struct BacktraceBundle { tile_bundle_bits bits; int num_insns; struct tile_decoded_instruction insns[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]; }; /* Locates an instruction inside the given bundle that * has the specified mnemonic, and whose first 'num_operands_to_match' * operands exactly match those in 'operand_values'. */ static const struct tile_decoded_instruction *find_matching_insn( const struct BacktraceBundle *bundle, tile_mnemonic mnemonic, const int *operand_values, int num_operands_to_match) { int i, j; bool match; for (i = 0; i < bundle->num_insns; i++) { const struct tile_decoded_instruction *insn = &bundle->insns[i]; if (insn->opcode->mnemonic != mnemonic) continue; match = true; for (j = 0; j < num_operands_to_match; j++) { if (operand_values[j] != insn->operand_values[j]) { match = false; break; } } if (match) return insn; } return NULL; } /* Does this bundle contain an 'iret' instruction? */ static inline bool bt_has_iret(const struct BacktraceBundle *bundle) { return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL; } /* Does this bundle contain an 'addi sp, sp, OFFSET' or * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET? */ static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust) { static const int vals[2] = { TREG_SP, TREG_SP }; const struct tile_decoded_instruction *insn = find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2); if (insn == NULL) insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2); #ifdef __tilegx__ if (insn == NULL) insn = find_matching_insn(bundle, TILEGX_OPC_ADDXLI, vals, 2); if (insn == NULL) insn = find_matching_insn(bundle, TILEGX_OPC_ADDXI, vals, 2); #endif if (insn == NULL) return false; *adjust = insn->operand_values[2]; return true; } /* Does this bundle contain any 'info OP' or 'infol OP' * instruction, and if so, what are their OP? Note that OP is interpreted * as an unsigned value by this code since that's what the caller wants. * Returns the number of info ops found. 
*/ static int bt_get_info_ops(const struct BacktraceBundle *bundle, int operands[MAX_INFO_OPS_PER_BUNDLE]) { int num_ops = 0; int i; for (i = 0; i < bundle->num_insns; i++) { const struct tile_decoded_instruction *insn = &bundle->insns[i]; if (insn->opcode->mnemonic == TILE_OPC_INFO || insn->opcode->mnemonic == TILE_OPC_INFOL) { operands[num_ops++] = insn->operand_values[0]; } } return num_ops; } /* Does this bundle contain a jrp instruction, and if so, to which * register is it jumping? */ static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg) { const struct tile_decoded_instruction *insn = find_matching_insn(bundle, TILE_OPC_JRP, NULL, 0); if (insn == NULL) return false; *target_reg = insn->operand_values[0]; return true; } /* Does this bundle modify the specified register in any way? */ static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg) { int i, j; for (i = 0; i < bundle->num_insns; i++) { const struct tile_decoded_instruction *insn = &bundle->insns[i]; if (insn->opcode->implicitly_written_register == reg) return true; for (j = 0; j < insn->opcode->num_operands; j++) if (insn->operands[j]->is_dest_reg && insn->operand_values[j] == reg) return true; } return false; } /* Does this bundle modify sp? */ static inline bool bt_modifies_sp(const struct BacktraceBundle *bundle) { return bt_modifies_reg(bundle, TREG_SP); } /* Does this bundle modify lr? */ static inline bool bt_modifies_lr(const struct BacktraceBundle *bundle) { return bt_modifies_reg(bundle, TREG_LR); } /* Does this bundle contain the instruction 'move fp, sp'? */ static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle) { static const int vals[2] = { 52, TREG_SP }; return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL; } /* Does this bundle contain a store of lr to sp? 
*/ static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle) { static const int vals[2] = { TREG_SP, TREG_LR }; return find_matching_insn(bundle, OPCODE_STORE, vals, 2) != NULL; } #ifdef __tilegx__ /* Track moveli values placed into registers. */ static inline void bt_update_moveli(const struct BacktraceBundle *bundle, int moveli_args[]) { int i; for (i = 0; i < bundle->num_insns; i++) { const struct tile_decoded_instruction *insn = &bundle->insns[i]; if (insn->opcode->mnemonic == TILEGX_OPC_MOVELI) { int reg = insn->operand_values[0]; moveli_args[reg] = insn->operand_values[1]; } } } /* Does this bundle contain an 'add sp, sp, reg' instruction * from a register that we saw a moveli into, and if so, what * is the value in the register? */ static bool bt_has_add_sp(const struct BacktraceBundle *bundle, int *adjust, int moveli_args[]) { static const int vals[2] = { TREG_SP, TREG_SP }; const struct tile_decoded_instruction *insn = find_matching_insn(bundle, TILEGX_OPC_ADDX, vals, 2); if (insn) { int reg = insn->operand_values[2]; if (moveli_args[reg]) { *adjust = moveli_args[reg]; return true; } } return false; } #endif /* Locates the caller's PC and SP for a program starting at the * given address. */ static void find_caller_pc_and_caller_sp(CallerLocation *location, const unsigned long start_pc, BacktraceMemoryReader read_memory_func, void *read_memory_func_extra) { /* Have we explicitly decided what the sp is, * rather than just the default? */ bool sp_determined = false; /* Has any bundle seen so far modified lr? */ bool lr_modified = false; /* Have we seen a move from sp to fp? */ bool sp_moved_to_r52 = false; /* Have we seen a terminating bundle? */ bool seen_terminating_bundle = false; /* Cut down on round-trip reading overhead by reading several * bundles at a time. 
*/ tile_bundle_bits prefetched_bundles[32]; int num_bundles_prefetched = 0; int next_bundle = 0; unsigned long pc; #ifdef __tilegx__ /* Naively try to track moveli values to support addx for -m32. */ int moveli_args[TILEGX_NUM_REGISTERS] = { 0 }; #endif /* Default to assuming that the caller's sp is the current sp. * This is necessary to handle the case where we start backtracing * right at the end of the epilog. */ location->sp_location = SP_LOC_OFFSET; location->sp_offset = 0; /* Default to having no idea where the caller PC is. */ location->pc_location = PC_LOC_UNKNOWN; /* Don't even try if the PC is not aligned. */ if (start_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) return; for (pc = start_pc;; pc += sizeof(tile_bundle_bits)) { struct BacktraceBundle bundle; int num_info_ops, info_operands[MAX_INFO_OPS_PER_BUNDLE]; int one_ago, jrp_reg; bool has_jrp; if (next_bundle >= num_bundles_prefetched) { /* Prefetch some bytes, but don't cross a page * boundary since that might cause a read failure we * don't care about if we only need the first few * bytes. Note: we don't care what the actual page * size is; using the minimum possible page size will * prevent any problems. */ unsigned int bytes_to_prefetch = 4096 - (pc & 4095); if (bytes_to_prefetch > sizeof prefetched_bundles) bytes_to_prefetch = sizeof prefetched_bundles; if (!read_memory_func(prefetched_bundles, pc, bytes_to_prefetch, read_memory_func_extra)) { if (pc == start_pc) { /* The program probably called a bad * address, such as a NULL pointer. * So treat this as if we are at the * start of the function prolog so the * backtrace will show how we got here. */ location->pc_location = PC_LOC_IN_LR; return; } /* Unreadable address. Give up. */ break; } next_bundle = 0; num_bundles_prefetched = bytes_to_prefetch / sizeof(tile_bundle_bits); } /* Decode the next bundle. 
*/ bundle.bits = prefetched_bundles[next_bundle++]; bundle.num_insns = parse_insn_tile(bundle.bits, pc, bundle.insns); num_info_ops = bt_get_info_ops(&bundle, info_operands); /* First look at any one_ago info ops if they are interesting, * since they should shadow any non-one-ago info ops. */ for (one_ago = (pc != start_pc) ? 1 : 0; one_ago >= 0; one_ago--) { int i; for (i = 0; i < num_info_ops; i++) { int info_operand = info_operands[i]; if (info_operand < CALLER_UNKNOWN_BASE) { /* Weird; reserved value, ignore it. */ continue; } /* Skip info ops which are not in the * "one_ago" mode we want right now. */ if (((info_operand & ONE_BUNDLE_AGO_FLAG) != 0) != (one_ago != 0)) continue; /* Clear the flag to make later checking * easier. */ info_operand &= ~ONE_BUNDLE_AGO_FLAG; /* Default to looking at PC_IN_LR_FLAG. */ if (info_operand & PC_IN_LR_FLAG) location->pc_location = PC_LOC_IN_LR; else location->pc_location = PC_LOC_ON_STACK; switch (info_operand) { case CALLER_UNKNOWN_BASE: location->pc_location = PC_LOC_UNKNOWN; location->sp_location = SP_LOC_UNKNOWN; return; case CALLER_SP_IN_R52_BASE: case CALLER_SP_IN_R52_BASE | PC_IN_LR_FLAG: location->sp_location = SP_LOC_IN_R52; return; default: { const unsigned int val = info_operand - CALLER_SP_OFFSET_BASE; const unsigned int sp_offset = (val >> NUM_INFO_OP_FLAGS) * 8; if (sp_offset < 32768) { /* This is a properly encoded * SP offset. */ location->sp_location = SP_LOC_OFFSET; location->sp_offset = sp_offset; return; } else { /* This looked like an SP * offset, but it's outside * the legal range, so this * must be an unrecognized * info operand. Ignore it. */ } } break; } } } if (seen_terminating_bundle) { /* We saw a terminating bundle during the previous * iteration, so we were only looking for an info op. */ break; } if (bundle.bits == 0) { /* Wacky terminating bundle. Stop looping, and hope * we've already seen enough to find the caller. */ break; } /* * Try to determine caller's SP. 
*/ if (!sp_determined) { int adjust; if (bt_has_addi_sp(&bundle, &adjust) #ifdef __tilegx__ || bt_has_add_sp(&bundle, &adjust, moveli_args) #endif ) { location->sp_location = SP_LOC_OFFSET; if (adjust <= 0) { /* We are in prolog about to adjust * SP. */ location->sp_offset = 0; } else { /* We are in epilog restoring SP. */ location->sp_offset = adjust; } sp_determined = true; } else { if (bt_has_move_r52_sp(&bundle)) { /* Maybe in prolog, creating an * alloca-style frame. But maybe in * the middle of a fixed-size frame * clobbering r52 with SP. */ sp_moved_to_r52 = true; } if (bt_modifies_sp(&bundle)) { if (sp_moved_to_r52) { /* We saw SP get saved into * r52 earlier (or now), which * must have been in the * prolog, so we now know that * SP is still holding the * caller's sp value. */ location->sp_location = SP_LOC_OFFSET; location->sp_offset = 0; } else { /* Someone must have saved * aside the caller's SP value * into r52, so r52 holds the * current value. */ location->sp_location = SP_LOC_IN_R52; } sp_determined = true; } } #ifdef __tilegx__ /* Track moveli arguments for -m32 mode. */ bt_update_moveli(&bundle, moveli_args); #endif } if (bt_has_iret(&bundle)) { /* This is a terminating bundle. */ seen_terminating_bundle = true; continue; } /* * Try to determine caller's PC. */ jrp_reg = -1; has_jrp = bt_has_jrp(&bundle, &jrp_reg); if (has_jrp) seen_terminating_bundle = true; if (location->pc_location == PC_LOC_UNKNOWN) { if (has_jrp) { if (jrp_reg == TREG_LR && !lr_modified) { /* Looks like a leaf function, or else * lr is already restored. */ location->pc_location = PC_LOC_IN_LR; } else { location->pc_location = PC_LOC_ON_STACK; } } else if (bt_has_sw_sp_lr(&bundle)) { /* In prolog, spilling initial lr to stack. */ location->pc_location = PC_LOC_IN_LR; } else if (bt_modifies_lr(&bundle)) { lr_modified = true; } } } } /* Initializes a backtracer to start from the given location. * * If the frame pointer cannot be determined it is set to -1. 
* * state: The state to be filled in. * read_memory_func: A callback that reads memory. * read_memory_func_extra: An arbitrary argument to read_memory_func. * pc: The current PC. * lr: The current value of the 'lr' register. * sp: The current value of the 'sp' register. * r52: The current value of the 'r52' register. */ void backtrace_init(BacktraceIterator *state, BacktraceMemoryReader read_memory_func, void *read_memory_func_extra, unsigned long pc, unsigned long lr, unsigned long sp, unsigned long r52) { CallerLocation location; unsigned long fp, initial_frame_caller_pc; /* Find out where we are in the initial frame. */ find_caller_pc_and_caller_sp(&location, pc, read_memory_func, read_memory_func_extra); switch (location.sp_location) { case SP_LOC_UNKNOWN: /* Give up. */ fp = -1; break; case SP_LOC_IN_R52: fp = r52; break; case SP_LOC_OFFSET: fp = sp + location.sp_offset; break; default: /* Give up. */ fp = -1; break; } /* If the frame pointer is not aligned to the basic word size * something terrible happened and we should mark it as invalid. */ if (fp % sizeof(bt_int_reg_t) != 0) fp = -1; /* -1 means "don't know initial_frame_caller_pc". */ initial_frame_caller_pc = -1; switch (location.pc_location) { case PC_LOC_UNKNOWN: /* Give up. */ fp = -1; break; case PC_LOC_IN_LR: if (lr == 0 || lr % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) { /* Give up. */ fp = -1; } else { initial_frame_caller_pc = lr; } break; case PC_LOC_ON_STACK: /* Leave initial_frame_caller_pc as -1, * meaning check the stack. */ break; default: /* Give up. */ fp = -1; break; } state->pc = pc; state->sp = sp; state->fp = fp; state->initial_frame_caller_pc = initial_frame_caller_pc; state->read_memory_func = read_memory_func; state->read_memory_func_extra = read_memory_func_extra; } /* Handle the case where the register holds more bits than the VA. 
*/ static bool valid_addr_reg(bt_int_reg_t reg) { return ((unsigned long)reg == reg); } /* Advances the backtracing state to the calling frame, returning * true iff successful. */ bool backtrace_next(BacktraceIterator *state) { unsigned long next_fp, next_pc; bt_int_reg_t next_frame[2]; if (state->fp == -1) { /* No parent frame. */ return false; } /* Try to read the frame linkage data chaining to the next function. */ if (!state->read_memory_func(&next_frame, state->fp, sizeof next_frame, state->read_memory_func_extra)) { return false; } next_fp = next_frame[1]; if (!valid_addr_reg(next_frame[1]) || next_fp % sizeof(bt_int_reg_t) != 0) { /* Caller's frame pointer is suspect, so give up. */ return false; } if (state->initial_frame_caller_pc != -1) { /* We must be in the initial stack frame and already know the * caller PC. */ next_pc = state->initial_frame_caller_pc; /* Force reading stack next time, in case we were in the * initial frame. We don't do this above just to paranoidly * avoid changing the struct at all when we return false. */ state->initial_frame_caller_pc = -1; } else { /* Get the caller PC from the frame linkage area. */ next_pc = next_frame[0]; if (!valid_addr_reg(next_frame[0]) || next_pc == 0 || next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) { /* The PC is suspect, so give up. */ return false; } } /* Update state to become the caller's stack frame. */ state->pc = next_pc; state->sp = state->fp; state->fp = next_fp; return true; }
gpl-2.0
hernstrom/linux
drivers/hid/hid-twinhan.c
4686
4767
/* * HID driver for TwinHan IR remote control * * Based on hid-gyration.c * * Copyright (c) 2009 Bruno Prémont <bonbons@linux-vserver.org> */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License. */ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" /* Remote control key layout + listing: * * Full Screen Power * KEY_SCREEN KEY_POWER2 * * 1 2 3 * KEY_NUMERIC_1 KEY_NUMERIC_2 KEY_NUMERIC_3 * * 4 5 6 * KEY_NUMERIC_4 KEY_NUMERIC_5 KEY_NUMERIC_6 * * 7 8 9 * KEY_NUMERIC_7 KEY_NUMERIC_8 KEY_NUMERIC_9 * * REC 0 Favorite * KEY_RECORD KEY_NUMERIC_0 KEY_FAVORITES * * Rewind Forward * KEY_REWIND CH+ KEY_FORWARD * KEY_CHANNELUP * * VOL- > VOL+ * KEY_VOLUMEDOWN KEY_PLAY KEY_VOLUMEUP * * CH- * KEY_CHANNELDOWN * Recall Stop * KEY_RESTART KEY_STOP * * Timeshift/Pause Mute Cancel * KEY_PAUSE KEY_MUTE KEY_CANCEL * * Capture Preview EPG * KEY_PRINT KEY_PROGRAM KEY_EPG * * Record List Tab Teletext * KEY_LIST KEY_TAB KEY_TEXT */ #define th_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) static int twinhan_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_KEYBOARD) return 0; switch (usage->hid & HID_USAGE) { /* Map all keys from Twinhan Remote */ case 0x004: th_map_key_clear(KEY_TEXT); break; case 0x006: th_map_key_clear(KEY_RESTART); break; case 0x008: th_map_key_clear(KEY_EPG); break; case 0x00c: th_map_key_clear(KEY_REWIND); break; case 0x00e: th_map_key_clear(KEY_PROGRAM); break; case 0x00f: th_map_key_clear(KEY_LIST); break; case 0x010: th_map_key_clear(KEY_MUTE); break; case 0x011: th_map_key_clear(KEY_FORWARD); break; case 0x013: th_map_key_clear(KEY_PRINT); break; case 0x017: 
th_map_key_clear(KEY_PAUSE); break; case 0x019: th_map_key_clear(KEY_FAVORITES); break; case 0x01d: th_map_key_clear(KEY_SCREEN); break; case 0x01e: th_map_key_clear(KEY_NUMERIC_1); break; case 0x01f: th_map_key_clear(KEY_NUMERIC_2); break; case 0x020: th_map_key_clear(KEY_NUMERIC_3); break; case 0x021: th_map_key_clear(KEY_NUMERIC_4); break; case 0x022: th_map_key_clear(KEY_NUMERIC_5); break; case 0x023: th_map_key_clear(KEY_NUMERIC_6); break; case 0x024: th_map_key_clear(KEY_NUMERIC_7); break; case 0x025: th_map_key_clear(KEY_NUMERIC_8); break; case 0x026: th_map_key_clear(KEY_NUMERIC_9); break; case 0x027: th_map_key_clear(KEY_NUMERIC_0); break; case 0x028: th_map_key_clear(KEY_PLAY); break; case 0x029: th_map_key_clear(KEY_CANCEL); break; case 0x02b: th_map_key_clear(KEY_TAB); break; /* Power = 0x0e0 + 0x0e1 + 0x0e2 + 0x03f */ case 0x03f: th_map_key_clear(KEY_POWER2); break; case 0x04a: th_map_key_clear(KEY_RECORD); break; case 0x04b: th_map_key_clear(KEY_CHANNELUP); break; case 0x04d: th_map_key_clear(KEY_STOP); break; case 0x04e: th_map_key_clear(KEY_CHANNELDOWN); break; /* Volume down = 0x0e1 + 0x051 */ case 0x051: th_map_key_clear(KEY_VOLUMEDOWN); break; /* Volume up = 0x0e1 + 0x052 */ case 0x052: th_map_key_clear(KEY_VOLUMEUP); break; /* Kill the extra keys used for multi-key "power" and "volume" keys * as well as continuously to release CTRL,ALT,META,... keys */ case 0x0e0: case 0x0e1: case 0x0e2: case 0x0e3: case 0x0e4: case 0x0e5: case 0x0e6: case 0x0e7: default: return -1; } return 1; } static const struct hid_device_id twinhan_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, { } }; MODULE_DEVICE_TABLE(hid, twinhan_devices); static struct hid_driver twinhan_driver = { .name = "twinhan", .id_table = twinhan_devices, .input_mapping = twinhan_input_mapping, }; module_hid_driver(twinhan_driver); MODULE_LICENSE("GPL");
gpl-2.0
cubieboard/CC-A80-kernel-source
drivers/staging/tidspbridge/rmgr/dspdrv.c
4942
3947
/* * dspdrv.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * Interface to allocate and free bridge resources. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* ----------------------------------- Host OS */ #include <linux/types.h> #include <dspbridge/host_os.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> /* ----------------------------------- Platform Manager */ #include <dspbridge/drv.h> #include <dspbridge/dev.h> #include <dspbridge/dspapi.h> /* ----------------------------------- Resource Manager */ #include <dspbridge/mgr.h> /* ----------------------------------- This */ #include <dspbridge/dspdrv.h> /* * ======== dsp_init ======== * Allocates bridge resources. Loads a base image onto DSP, if specified. */ u32 dsp_init(u32 *init_status) { char dev_node[MAXREGPATHLENGTH] = "TIOMAP1510"; int status = -EPERM; struct drv_object *drv_obj = NULL; u32 device_node; u32 device_node_string; if (!api_init()) goto func_cont; status = drv_create(&drv_obj); if (status) { api_exit(); goto func_cont; } /* End drv_create */ /* Request Resources */ status = drv_request_resources((u32) &dev_node, &device_node_string); if (!status) { /* Attempt to Start the Device */ status = dev_start_device((struct cfg_devnode *) device_node_string); if (status) (void)drv_release_resources ((u32) device_node_string, drv_obj); } else { dev_dbg(bridge, "%s: drv_request_resources Failed\n", __func__); status = -EPERM; } /* Unwind whatever was loaded */ if (status) { /* irrespective of the status of dev_remove_device we conitinue * unloading. 
Get the Driver Object iterate through and remove. * Reset the status to E_FAIL to avoid going through * api_init_complete2. */ for (device_node = drv_get_first_dev_extension(); device_node != 0; device_node = drv_get_next_dev_extension(device_node)) { (void)dev_remove_device((struct cfg_devnode *) device_node); (void)drv_release_resources((u32) device_node, drv_obj); } /* Remove the Driver Object */ (void)drv_destroy(drv_obj); drv_obj = NULL; api_exit(); dev_dbg(bridge, "%s: Logical device failed init\n", __func__); } /* Unwinding the loaded drivers */ func_cont: /* Attempt to Start the Board */ if (!status) { /* BRD_AutoStart could fail if the dsp execuetable is not the * correct one. We should not propagate that error * into the device loader. */ (void)api_init_complete2(); } else { dev_dbg(bridge, "%s: Failed\n", __func__); } /* End api_init_complete2 */ *init_status = status; /* Return the Driver Object */ return (u32) drv_obj; } /* * ======== dsp_deinit ======== * Frees the resources allocated for bridge. */ bool dsp_deinit(u32 device_context) { bool ret = true; u32 device_node; struct mgr_object *mgr_obj = NULL; struct drv_data *drv_datap = dev_get_drvdata(bridge); while ((device_node = drv_get_first_dev_extension()) != 0) { (void)dev_remove_device((struct cfg_devnode *)device_node); (void)drv_release_resources((u32) device_node, (struct drv_object *)device_context); } (void)drv_destroy((struct drv_object *)device_context); /* Get the Manager Object from driver data * MGR Destroy will unload the DCD dll */ if (drv_datap && drv_datap->mgr_object) { mgr_obj = drv_datap->mgr_object; (void)mgr_destroy(mgr_obj); } else { pr_err("%s: Failed to retrieve the object handle\n", __func__); } api_exit(); return ret; }
gpl-2.0
Team-Blackout/Blackout-M7
drivers/i2c/busses/i2c-ali1535.c
4942
16713
/* * Copyright (c) 2000 Frodo Looijaard <frodol@dds.nl>, * Philip Edelbrock <phil@netroedge.com>, * Mark D. Studebaker <mdsxyz123@yahoo.com>, * Dan Eaton <dan.eaton@rocketlogix.com> and * Stephen Rousset <stephen.rousset@rocketlogix.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* This is the driver for the SMB Host controller on Acer Labs Inc. (ALI) M1535 South Bridge. The M1535 is a South bridge for portable systems. It is very similar to the M15x3 South bridges also produced by Acer Labs Inc. Some of the registers within the part have moved and some have been redefined slightly. Additionally, the sequencing of the SMBus transactions has been modified to be more consistent with the sequencing recommended by the manufacturer and observed through testing. These changes are reflected in this driver and can be identified by comparing this driver to the i2c-ali15x3 driver. For an overview of these chips see http://www.acerlabs.com The SMB controller is part of the 7101 device, which is an ACPI-compliant Power Management Unit (PMU). The whole 7101 device has to be enabled for the SMB to work. You can't just enable the SMB alone. The SMB and the ACPI have separate I/O spaces. We make sure that the SMB is enabled. We leave the ACPI alone. This driver controls the SMB Host only. This driver does not use interrupts. 
*/ /* Note: we assume there can only be one ALI1535, with one SMBus interface */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/io.h> /* ALI1535 SMBus address offsets */ #define SMBHSTSTS (0 + ali1535_smba) #define SMBHSTTYP (1 + ali1535_smba) #define SMBHSTPORT (2 + ali1535_smba) #define SMBHSTCMD (7 + ali1535_smba) #define SMBHSTADD (3 + ali1535_smba) #define SMBHSTDAT0 (4 + ali1535_smba) #define SMBHSTDAT1 (5 + ali1535_smba) #define SMBBLKDAT (6 + ali1535_smba) /* PCI Address Constants */ #define SMBCOM 0x004 #define SMBREV 0x008 #define SMBCFG 0x0D1 #define SMBBA 0x0E2 #define SMBHSTCFG 0x0F0 #define SMBCLK 0x0F2 /* Other settings */ #define MAX_TIMEOUT 500 /* times 1/100 sec */ #define ALI1535_SMB_IOSIZE 32 #define ALI1535_SMB_DEFAULTBASE 0x8040 /* ALI1535 address lock bits */ #define ALI1535_LOCK 0x06 /* dwe */ /* ALI1535 command constants */ #define ALI1535_QUICK 0x00 #define ALI1535_BYTE 0x10 #define ALI1535_BYTE_DATA 0x20 #define ALI1535_WORD_DATA 0x30 #define ALI1535_BLOCK_DATA 0x40 #define ALI1535_I2C_READ 0x60 #define ALI1535_DEV10B_EN 0x80 /* Enable 10-bit addressing in */ /* I2C read */ #define ALI1535_T_OUT 0x08 /* Time-out Command (write) */ #define ALI1535_A_HIGH_BIT9 0x08 /* Bit 9 of 10-bit address in */ /* Alert-Response-Address */ /* (read) */ #define ALI1535_KILL 0x04 /* Kill Command (write) */ #define ALI1535_A_HIGH_BIT8 0x04 /* Bit 8 of 10-bit address in */ /* Alert-Response-Address */ /* (read) */ #define ALI1535_D_HI_MASK 0x03 /* Mask for isolating bits 9-8 */ /* of 10-bit address in I2C */ /* Read Command */ /* ALI1535 status register bits */ #define ALI1535_STS_IDLE 0x04 #define ALI1535_STS_BUSY 0x08 /* host busy */ #define ALI1535_STS_DONE 0x10 /* transaction complete */ #define ALI1535_STS_DEV 0x20 /* device error */ #define ALI1535_STS_BUSERR 0x40 
/* bus error */ #define ALI1535_STS_FAIL 0x80 /* failed bus transaction */ #define ALI1535_STS_ERR 0xE0 /* all the bad error bits */ #define ALI1535_BLOCK_CLR 0x04 /* reset block data index */ /* ALI1535 device address register bits */ #define ALI1535_RD_ADDR 0x01 /* Read/Write Bit in Device */ /* Address field */ /* -> Write = 0 */ /* -> Read = 1 */ #define ALI1535_SMBIO_EN 0x04 /* SMB I/O Space enable */ static struct pci_driver ali1535_driver; static unsigned long ali1535_smba; static unsigned short ali1535_offset; /* Detect whether a ALI1535 can be found, and initialize it, where necessary. Note the differences between kernels with the old PCI BIOS interface and newer kernels with the real PCI interface. In compat.h some things are defined to make the transition easier. */ static int __devinit ali1535_setup(struct pci_dev *dev) { int retval; unsigned char temp; /* Check the following things: - SMB I/O address is initialized - Device is enabled - We can use the addresses */ retval = pci_enable_device(dev); if (retval) { dev_err(&dev->dev, "ALI1535_smb can't enable device\n"); goto exit; } /* Determine the address of the SMBus area */ pci_read_config_word(dev, SMBBA, &ali1535_offset); dev_dbg(&dev->dev, "ALI1535_smb is at offset 0x%04x\n", ali1535_offset); ali1535_offset &= (0xffff & ~(ALI1535_SMB_IOSIZE - 1)); if (ali1535_offset == 0) { dev_warn(&dev->dev, "ALI1535_smb region uninitialized - upgrade BIOS?\n"); retval = -ENODEV; goto exit; } if (pci_resource_flags(dev, 0) & IORESOURCE_IO) ali1535_smba = pci_resource_start(dev, 0) + ali1535_offset; else ali1535_smba = ali1535_offset; retval = acpi_check_region(ali1535_smba, ALI1535_SMB_IOSIZE, ali1535_driver.name); if (retval) goto exit; if (!request_region(ali1535_smba, ALI1535_SMB_IOSIZE, ali1535_driver.name)) { dev_err(&dev->dev, "ALI1535_smb region 0x%lx already in use!\n", ali1535_smba); retval = -EBUSY; goto exit; } /* check if whole device is enabled */ pci_read_config_byte(dev, SMBCFG, &temp); if ((temp & 
ALI1535_SMBIO_EN) == 0) { dev_err(&dev->dev, "SMB device not enabled - upgrade BIOS?\n"); retval = -ENODEV; goto exit_free; } /* Is SMB Host controller enabled? */ pci_read_config_byte(dev, SMBHSTCFG, &temp); if ((temp & 1) == 0) { dev_err(&dev->dev, "SMBus controller not enabled - upgrade BIOS?\n"); retval = -ENODEV; goto exit_free; } /* set SMB clock to 74KHz as recommended in data sheet */ pci_write_config_byte(dev, SMBCLK, 0x20); /* The interrupt routing for SMB is set up in register 0x77 in the 1533 ISA Bridge device, NOT in the 7101 device. Don't bother with finding the 1533 device and reading the register. if ((....... & 0x0F) == 1) dev_dbg(&dev->dev, "ALI1535 using Interrupt 9 for SMBus.\n"); */ pci_read_config_byte(dev, SMBREV, &temp); dev_dbg(&dev->dev, "SMBREV = 0x%X\n", temp); dev_dbg(&dev->dev, "ALI1535_smba = 0x%lx\n", ali1535_smba); return 0; exit_free: release_region(ali1535_smba, ALI1535_SMB_IOSIZE); exit: return retval; } static int ali1535_transaction(struct i2c_adapter *adap) { int temp; int result = 0; int timeout = 0; dev_dbg(&adap->dev, "Transaction (pre): STS=%02x, TYP=%02x, " "CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTSTS), inb_p(SMBHSTTYP), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); /* get status */ temp = inb_p(SMBHSTSTS); /* Make sure the SMBus host is ready to start transmitting */ /* Check the busy bit first */ if (temp & ALI1535_STS_BUSY) { /* If the host controller is still busy, it may have timed out * in the previous transaction, resulting in a "SMBus Timeout" * printk. I've tried the following to reset a stuck busy bit. * 1. Reset the controller with an KILL command. (this * doesn't seem to clear the controller if an external * device is hung) * 2. Reset the controller and the other SMBus devices with a * T_OUT command. (this clears the host busy bit if an * external device is hung, but it comes back upon a new * access to a device) * 3. 
	 * Disable and reenable the controller in SMBHSTCFG. Worst
	 * case, nothing seems to work except power reset.
	 */
	/* Try resetting entire SMB bus, including other devices - This
	 * may not work either - it clears the BUSY bit but then the
	 * BUSY bit may come back on when you try and use the chip
	 * again. If that's the case you are stuck.
	 */
		dev_info(&adap->dev,
			"Resetting entire SMB Bus to clear busy condition (%02x)\n",
			temp);
		outb_p(ALI1535_T_OUT, SMBHSTTYP);
		temp = inb_p(SMBHSTSTS);
	}

	/* now check the error bits and the busy bit */
	if (temp & (ALI1535_STS_ERR | ALI1535_STS_BUSY)) {
		/* do a clear-on-write; status bits are write-1-to-clear */
		outb_p(0xFF, SMBHSTSTS);
		temp = inb_p(SMBHSTSTS);
		if (temp & (ALI1535_STS_ERR | ALI1535_STS_BUSY)) {
			/* This is probably going to be correctable only by a
			 * power reset as one of the bits now appears to be
			 * stuck */
			/* This may be a bus or device with electrical problems. */
			dev_err(&adap->dev,
				"SMBus reset failed! (0x%02x) - controller or "
				"device on bus is probably hung\n", temp);
			return -EBUSY;
		}
	} else {
		/* check and clear done bit left over from a prior transaction */
		if (temp & ALI1535_STS_DONE)
			outb_p(temp, SMBHSTSTS);
	}

	/* start the transaction by writing anything to the start register */
	outb_p(0xFF, SMBHSTPORT);

	/* We will always wait for a fraction of a second!
	 * Poll until the controller reports idle (or no longer busy), or the
	 * timeout counter expires. */
	timeout = 0;
	do {
		usleep_range(1000, 2000);
		temp = inb_p(SMBHSTSTS);
	} while (((temp & ALI1535_STS_BUSY) && !(temp & ALI1535_STS_IDLE)) &&
		 (timeout++ < MAX_TIMEOUT));

	/* If the SMBus is still busy, we give up */
	if (timeout > MAX_TIMEOUT) {
		result = -ETIMEDOUT;
		dev_err(&adap->dev, "SMBus Timeout!\n");
	}

	/* failed bus transaction reported by the controller */
	if (temp & ALI1535_STS_FAIL) {
		result = -EIO;
		dev_dbg(&adap->dev, "Error: Failed bus transaction\n");
	}

	/* Unfortunately the ALI SMB controller maps "no response" and "bus
	 * collision" into a single bit. No response is the usual case so don't
	 * do a printk. This means that bus collisions go unreported.
*/ if (temp & ALI1535_STS_BUSERR) { result = -ENXIO; dev_dbg(&adap->dev, "Error: no response or bus collision ADD=%02x\n", inb_p(SMBHSTADD)); } /* haven't ever seen this */ if (temp & ALI1535_STS_DEV) { result = -EIO; dev_err(&adap->dev, "Error: device error\n"); } /* check to see if the "command complete" indication is set */ if (!(temp & ALI1535_STS_DONE)) { result = -ETIMEDOUT; dev_err(&adap->dev, "Error: command never completed\n"); } dev_dbg(&adap->dev, "Transaction (post): STS=%02x, TYP=%02x, " "CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTSTS), inb_p(SMBHSTTYP), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); /* take consequent actions for error conditions */ if (!(temp & ALI1535_STS_DONE)) { /* issue "kill" to reset host controller */ outb_p(ALI1535_KILL, SMBHSTTYP); outb_p(0xFF, SMBHSTSTS); } else if (temp & ALI1535_STS_ERR) { /* issue "timeout" to reset all devices on bus */ outb_p(ALI1535_T_OUT, SMBHSTTYP); outb_p(0xFF, SMBHSTSTS); } return result; } /* Return negative errno on error. */ static s32 ali1535_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int i, len; int temp; int timeout; s32 result = 0; /* make sure SMBus is idle */ temp = inb_p(SMBHSTSTS); for (timeout = 0; (timeout < MAX_TIMEOUT) && !(temp & ALI1535_STS_IDLE); timeout++) { usleep_range(1000, 2000); temp = inb_p(SMBHSTSTS); } if (timeout >= MAX_TIMEOUT) dev_warn(&adap->dev, "Idle wait Timeout! 
STS=0x%02x\n", temp); /* clear status register (clear-on-write) */ outb_p(0xFF, SMBHSTSTS); switch (size) { case I2C_SMBUS_QUICK: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); size = ALI1535_QUICK; outb_p(size, SMBHSTTYP); /* output command */ break; case I2C_SMBUS_BYTE: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); size = ALI1535_BYTE; outb_p(size, SMBHSTTYP); /* output command */ if (read_write == I2C_SMBUS_WRITE) outb_p(command, SMBHSTCMD); break; case I2C_SMBUS_BYTE_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); size = ALI1535_BYTE_DATA; outb_p(size, SMBHSTTYP); /* output command */ outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) outb_p(data->byte, SMBHSTDAT0); break; case I2C_SMBUS_WORD_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); size = ALI1535_WORD_DATA; outb_p(size, SMBHSTTYP); /* output command */ outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { outb_p(data->word & 0xff, SMBHSTDAT0); outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1); } break; case I2C_SMBUS_BLOCK_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); size = ALI1535_BLOCK_DATA; outb_p(size, SMBHSTTYP); /* output command */ outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len < 0) { len = 0; data->block[0] = len; } if (len > 32) { len = 32; data->block[0] = len; } outb_p(len, SMBHSTDAT0); /* Reset SMBBLKDAT */ outb_p(inb_p(SMBHSTTYP) | ALI1535_BLOCK_CLR, SMBHSTTYP); for (i = 1; i <= len; i++) outb_p(data->block[i], SMBBLKDAT); } break; default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); result = -EOPNOTSUPP; goto EXIT; } result = ali1535_transaction(adap); if (result) goto EXIT; if ((read_write == I2C_SMBUS_WRITE) || (size == ALI1535_QUICK)) { result = 0; goto EXIT; } switch (size) { case ALI1535_BYTE: /* Result put in SMBHSTDAT0 */ data->byte = inb_p(SMBHSTDAT0); break; case ALI1535_BYTE_DATA: data->byte = 
inb_p(SMBHSTDAT0); break; case ALI1535_WORD_DATA: data->word = inb_p(SMBHSTDAT0) + (inb_p(SMBHSTDAT1) << 8); break; case ALI1535_BLOCK_DATA: len = inb_p(SMBHSTDAT0); if (len > 32) len = 32; data->block[0] = len; /* Reset SMBBLKDAT */ outb_p(inb_p(SMBHSTTYP) | ALI1535_BLOCK_CLR, SMBHSTTYP); for (i = 1; i <= data->block[0]; i++) { data->block[i] = inb_p(SMBBLKDAT); dev_dbg(&adap->dev, "Blk: len=%d, i=%d, data=%02x\n", len, i, data->block[i]); } break; } EXIT: return result; } static u32 ali1535_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = ali1535_access, .functionality = ali1535_func, }; static struct i2c_adapter ali1535_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, }; static DEFINE_PCI_DEVICE_TABLE(ali1535_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) }, { }, }; MODULE_DEVICE_TABLE(pci, ali1535_ids); static int __devinit ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id) { if (ali1535_setup(dev)) { dev_warn(&dev->dev, "ALI1535 not detected, module not inserted.\n"); return -ENODEV; } /* set up the sysfs linkage to our parent device */ ali1535_adapter.dev.parent = &dev->dev; snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name), "SMBus ALI1535 adapter at %04x", ali1535_offset); return i2c_add_adapter(&ali1535_adapter); } static void __devexit ali1535_remove(struct pci_dev *dev) { i2c_del_adapter(&ali1535_adapter); release_region(ali1535_smba, ALI1535_SMB_IOSIZE); } static struct pci_driver ali1535_driver = { .name = "ali1535_smbus", .id_table = ali1535_ids, .probe = ali1535_probe, .remove = __devexit_p(ali1535_remove), }; static int __init i2c_ali1535_init(void) { return pci_register_driver(&ali1535_driver); } static void __exit i2c_ali1535_exit(void) { 
	/* unregister the PCI driver; ali1535_remove tears down the adapter */
	pci_unregister_driver(&ali1535_driver);
}

MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, "
	      "Philip Edelbrock <phil@netroedge.com>, "
	      "Mark D. Studebaker <mdsxyz123@yahoo.com> "
	      "and Dan Eaton <dan.eaton@rocketlogix.com>");
MODULE_DESCRIPTION("ALI1535 SMBus driver");
MODULE_LICENSE("GPL");

/* module entry/exit hooks: register/unregister the PCI driver above */
module_init(i2c_ali1535_init);
module_exit(i2c_ali1535_exit);
gpl-2.0
Team-Hydra/S5-AEL-Kernel
drivers/staging/tidspbridge/rmgr/proc.c
4942
49072
/* * proc.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * Processor interface at the driver level. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/types.h> /* ------------------------------------ Host OS */ #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <dspbridge/host_os.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> /* ----------------------------------- OS Adaptation Layer */ #include <dspbridge/ntfy.h> #include <dspbridge/sync.h> /* ----------------------------------- Bridge Driver */ #include <dspbridge/dspdefs.h> #include <dspbridge/dspdeh.h> /* ----------------------------------- Platform Manager */ #include <dspbridge/cod.h> #include <dspbridge/dev.h> #include <dspbridge/procpriv.h> #include <dspbridge/dmm.h> /* ----------------------------------- Resource Manager */ #include <dspbridge/mgr.h> #include <dspbridge/node.h> #include <dspbridge/nldr.h> #include <dspbridge/rmm.h> /* ----------------------------------- Others */ #include <dspbridge/dbdcd.h> #include <dspbridge/msg.h> #include <dspbridge/dspioctl.h> #include <dspbridge/drv.h> /* ----------------------------------- This */ #include <dspbridge/proc.h> #include <dspbridge/pwr.h> #include <dspbridge/resourcecleanup.h> /* ----------------------------------- Defines, Data Structures, Typedefs */ #define MAXCMDLINELEN 255 #define PROC_ENVPROCID "PROC_ID=%d" #define MAXPROCIDLEN (8 + 5) #define PROC_DFLT_TIMEOUT 10000 /* Time out in milliseconds */ #define PWR_TIMEOUT 500 /* Sleep/wake timout in msec */ 
#define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */ #define DSP_CACHE_LINE 128 #define BUFMODE_MASK (3 << 14) /* Buffer modes from DSP perspective */ #define RBUF 0x4000 /* Input buffer */ #define WBUF 0x8000 /* Output Buffer */ extern struct device *bridge; /* ----------------------------------- Globals */ /* The proc_object structure. */ struct proc_object { struct list_head link; /* Link to next proc_object */ struct dev_object *dev_obj; /* Device this PROC represents */ u32 process; /* Process owning this Processor */ struct mgr_object *mgr_obj; /* Manager Object Handle */ u32 attach_count; /* Processor attach count */ u32 processor_id; /* Processor number */ u32 timeout; /* Time out count */ enum dsp_procstate proc_state; /* Processor state */ u32 unit; /* DDSP unit number */ bool is_already_attached; /* * True if the Device below has * GPP Client attached */ struct ntfy_object *ntfy_obj; /* Manages notifications */ /* Bridge Context Handle */ struct bridge_dev_context *bridge_context; /* Function interface to Bridge driver */ struct bridge_drv_interface *intf_fxns; char *last_coff; struct list_head proc_list; }; DEFINE_MUTEX(proc_lock); /* For critical sections */ /* ----------------------------------- Function Prototypes */ static int proc_monitor(struct proc_object *proc_obj); static s32 get_envp_count(char **envp); static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems, s32 cnew_envp, char *sz_var); /* remember mapping information */ static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt, u32 mpu_addr, u32 dsp_addr, u32 size) { struct dmm_map_object *map_obj; u32 num_usr_pgs = size / PG_SIZE4K; pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n", __func__, mpu_addr, dsp_addr, size); map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL); if (!map_obj) { pr_err("%s: kzalloc failed\n", __func__); return NULL; } INIT_LIST_HEAD(&map_obj->link); map_obj->pages = kcalloc(num_usr_pgs, 
sizeof(struct page *), GFP_KERNEL); if (!map_obj->pages) { pr_err("%s: kzalloc failed\n", __func__); kfree(map_obj); return NULL; } map_obj->mpu_addr = mpu_addr; map_obj->dsp_addr = dsp_addr; map_obj->size = size; map_obj->num_usr_pgs = num_usr_pgs; spin_lock(&pr_ctxt->dmm_map_lock); list_add(&map_obj->link, &pr_ctxt->dmm_map_list); spin_unlock(&pr_ctxt->dmm_map_lock); return map_obj; } static int match_exact_map_obj(struct dmm_map_object *map_obj, u32 dsp_addr, u32 size) { if (map_obj->dsp_addr == dsp_addr && map_obj->size != size) pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n", __func__, dsp_addr, map_obj->size, size); return map_obj->dsp_addr == dsp_addr && map_obj->size == size; } static void remove_mapping_information(struct process_context *pr_ctxt, u32 dsp_addr, u32 size) { struct dmm_map_object *map_obj; pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__, dsp_addr, size); spin_lock(&pr_ctxt->dmm_map_lock); list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n", __func__, map_obj->mpu_addr, map_obj->dsp_addr, map_obj->size); if (match_exact_map_obj(map_obj, dsp_addr, size)) { pr_debug("%s: match, deleting map info\n", __func__); list_del(&map_obj->link); kfree(map_obj->dma_info.sg); kfree(map_obj->pages); kfree(map_obj); goto out; } pr_debug("%s: candidate didn't match\n", __func__); } pr_err("%s: failed to find given map info\n", __func__); out: spin_unlock(&pr_ctxt->dmm_map_lock); } static int match_containing_map_obj(struct dmm_map_object *map_obj, u32 mpu_addr, u32 size) { u32 map_obj_end = map_obj->mpu_addr + map_obj->size; return mpu_addr >= map_obj->mpu_addr && mpu_addr + size <= map_obj_end; } static struct dmm_map_object *find_containing_mapping( struct process_context *pr_ctxt, u32 mpu_addr, u32 size) { struct dmm_map_object *map_obj; pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__, mpu_addr, size); spin_lock(&pr_ctxt->dmm_map_lock); 
	list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
		pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
			__func__,
			map_obj->mpu_addr, map_obj->dsp_addr, map_obj->size);

		if (match_containing_map_obj(map_obj, mpu_addr, size)) {
			pr_debug("%s: match!\n", __func__);
			/* found an entry fully containing the request */
			goto out;
		}

		pr_debug("%s: no match!\n", __func__);
	}

	/* no cached mapping contains [mpu_addr, mpu_addr + size) */
	map_obj = NULL;
out:
	spin_unlock(&pr_ctxt->dmm_map_lock);
	return map_obj;
}

/*
 * Translate a user (MPU) address into a page index within a cached mapping.
 * Returns the zero-based index of the page containing mpu_addr, or -1 if
 * the address falls outside the pages recorded for this mapping.
 */
static int find_first_page_in_cache(struct dmm_map_object *map_obj,
						unsigned long mpu_addr)
{
	u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT;
	u32 requested_base_page = mpu_addr >> PAGE_SHIFT;
	int pg_index = requested_base_page - mapped_base_page;

	/* index must lie inside the mapping's recorded page array */
	if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) {
		pr_err("%s: failed (got %d)\n", __func__, pg_index);
		return -1;
	}

	pr_debug("%s: first page is %d\n", __func__, pg_index);
	return pg_index;
}

/*
 * Fetch the struct page recorded at index pg_i of a mapping, or NULL when
 * pg_i is outside the mapped range. Bounds are checked here so callers may
 * iterate up to (and past) the end safely.
 */
static inline struct page *get_mapping_page(struct dmm_map_object *map_obj,
								int pg_i)
{
	pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__,
					pg_i, map_obj->num_usr_pgs);

	if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) {
		pr_err("%s: requested pg_i %d is out of mapped range\n",
				__func__, pg_i);
		return NULL;
	}

	return map_obj->pages[pg_i];
}

/*
 *  ======== proc_attach ========
 *  Purpose:
 *      Prepare for communication with a particular DSP processor, and return
 *      a handle to the processor object.
*/ int proc_attach(u32 processor_id, const struct dsp_processorattrin *attr_in, void **ph_processor, struct process_context *pr_ctxt) { int status = 0; struct dev_object *hdev_obj; struct proc_object *p_proc_object = NULL; struct mgr_object *hmgr_obj = NULL; struct drv_object *hdrv_obj = NULL; struct drv_data *drv_datap = dev_get_drvdata(bridge); u8 dev_type; if (pr_ctxt->processor) { *ph_processor = pr_ctxt->processor; return status; } /* Get the Driver and Manager Object Handles */ if (!drv_datap || !drv_datap->drv_object || !drv_datap->mgr_object) { status = -ENODATA; pr_err("%s: Failed to get object handles\n", __func__); } else { hdrv_obj = drv_datap->drv_object; hmgr_obj = drv_datap->mgr_object; } if (!status) { /* Get the Device Object */ status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj); } if (!status) status = dev_get_dev_type(hdev_obj, &dev_type); if (status) goto func_end; /* If we made it this far, create the Proceesor object: */ p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL); /* Fill out the Processor Object: */ if (p_proc_object == NULL) { status = -ENOMEM; goto func_end; } p_proc_object->dev_obj = hdev_obj; p_proc_object->mgr_obj = hmgr_obj; p_proc_object->processor_id = dev_type; /* Store TGID instead of process handle */ p_proc_object->process = current->tgid; INIT_LIST_HEAD(&p_proc_object->proc_list); if (attr_in) p_proc_object->timeout = attr_in->timeout; else p_proc_object->timeout = PROC_DFLT_TIMEOUT; status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns); if (!status) { status = dev_get_bridge_context(hdev_obj, &p_proc_object->bridge_context); if (status) kfree(p_proc_object); } else kfree(p_proc_object); if (status) goto func_end; /* Create the Notification Object */ /* This is created with no event mask, no notify mask * and no valid handle to the notification. 
They all get * filled up when proc_register_notify is called */ p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL); if (p_proc_object->ntfy_obj) ntfy_init(p_proc_object->ntfy_obj); else status = -ENOMEM; if (!status) { /* Insert the Processor Object into the DEV List. * Return handle to this Processor Object: * Find out if the Device is already attached to a * Processor. If so, return AlreadyAttached status */ status = dev_insert_proc_object(p_proc_object->dev_obj, (u32) p_proc_object, &p_proc_object-> is_already_attached); if (!status) { if (p_proc_object->is_already_attached) status = 0; } else { if (p_proc_object->ntfy_obj) { ntfy_delete(p_proc_object->ntfy_obj); kfree(p_proc_object->ntfy_obj); } kfree(p_proc_object); } if (!status) { *ph_processor = (void *)p_proc_object; pr_ctxt->processor = *ph_processor; (void)proc_notify_clients(p_proc_object, DSP_PROCESSORATTACH); } } else { /* Don't leak memory if status is failed */ kfree(p_proc_object); } func_end: return status; } static int get_exec_file(struct cfg_devnode *dev_node_obj, struct dev_object *hdev_obj, u32 size, char *exec_file) { u8 dev_type; s32 len; struct drv_data *drv_datap = dev_get_drvdata(bridge); dev_get_dev_type(hdev_obj, (u8 *) &dev_type); if (!exec_file) return -EFAULT; if (dev_type == DSP_UNIT) { if (!drv_datap || !drv_datap->base_img) return -EFAULT; if (strlen(drv_datap->base_img) > size) return -EINVAL; strcpy(exec_file, drv_datap->base_img); } else if (dev_type == IVA_UNIT && iva_img) { len = strlen(iva_img); strncpy(exec_file, iva_img, len + 1); } else { return -ENOENT; } return 0; } /* * ======== proc_auto_start ======== = * Purpose: * A Particular device gets loaded with the default image * if the AutoStart flag is set. 
* Parameters: * hdev_obj: Handle to the Device * Returns: * 0: On Successful Loading * -EPERM General Failure * Requires: * hdev_obj != NULL * Ensures: */ int proc_auto_start(struct cfg_devnode *dev_node_obj, struct dev_object *hdev_obj) { int status = -EPERM; struct proc_object *p_proc_object; char sz_exec_file[MAXCMDLINELEN]; char *argv[2]; struct mgr_object *hmgr_obj = NULL; struct drv_data *drv_datap = dev_get_drvdata(bridge); u8 dev_type; /* Create a Dummy PROC Object */ if (!drv_datap || !drv_datap->mgr_object) { status = -ENODATA; pr_err("%s: Failed to retrieve the object handle\n", __func__); goto func_end; } else { hmgr_obj = drv_datap->mgr_object; } p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL); if (p_proc_object == NULL) { status = -ENOMEM; goto func_end; } p_proc_object->dev_obj = hdev_obj; p_proc_object->mgr_obj = hmgr_obj; status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns); if (!status) status = dev_get_bridge_context(hdev_obj, &p_proc_object->bridge_context); if (status) goto func_cont; /* Stop the Device, put it into standby mode */ status = proc_stop(p_proc_object); if (status) goto func_cont; /* Get the default executable for this board... */ dev_get_dev_type(hdev_obj, (u8 *) &dev_type); p_proc_object->processor_id = dev_type; status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file), sz_exec_file); if (!status) { argv[0] = sz_exec_file; argv[1] = NULL; /* ...and try to load it: */ status = proc_load(p_proc_object, 1, (const char **)argv, NULL); if (!status) status = proc_start(p_proc_object); } kfree(p_proc_object->last_coff); p_proc_object->last_coff = NULL; func_cont: kfree(p_proc_object); func_end: return status; } /* * ======== proc_ctrl ======== * Purpose: * Pass control information to the GPP device driver managing the * DSP processor. * * This will be an OEM-only function, and not part of the DSP/BIOS Bridge * application developer's API. * Call the bridge_dev_ctrl fxn with the Argument. 
This is a Synchronous * Operation. arg can be null. */ int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata * arg) { int status = 0; struct proc_object *p_proc_object = hprocessor; u32 timeout = 0; if (p_proc_object) { /* intercept PWR deep sleep command */ if (dw_cmd == BRDIOCTL_DEEPSLEEP) { timeout = arg->cb_data; status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout); } /* intercept PWR emergency sleep command */ else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) { timeout = arg->cb_data; status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP, timeout); } else if (dw_cmd == PWR_DEEPSLEEP) { /* timeout = arg->cb_data; */ status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout); } /* intercept PWR wake commands */ else if (dw_cmd == BRDIOCTL_WAKEUP) { timeout = arg->cb_data; status = pwr_wake_dsp(timeout); } else if (dw_cmd == PWR_WAKEUP) { /* timeout = arg->cb_data; */ status = pwr_wake_dsp(timeout); } else if (!((*p_proc_object->intf_fxns->dev_cntrl) (p_proc_object->bridge_context, dw_cmd, arg))) { status = 0; } else { status = -EPERM; } } else { status = -EFAULT; } return status; } /* * ======== proc_detach ======== * Purpose: * Destroys the Processor Object. Removes the notification from the Dev * List. 
*/ int proc_detach(struct process_context *pr_ctxt) { int status = 0; struct proc_object *p_proc_object = NULL; p_proc_object = (struct proc_object *)pr_ctxt->processor; if (p_proc_object) { /* Notify the Client */ ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH); /* Remove the notification memory */ if (p_proc_object->ntfy_obj) { ntfy_delete(p_proc_object->ntfy_obj); kfree(p_proc_object->ntfy_obj); } kfree(p_proc_object->last_coff); p_proc_object->last_coff = NULL; /* Remove the Proc from the DEV List */ (void)dev_remove_proc_object(p_proc_object->dev_obj, (u32) p_proc_object); /* Free the Processor Object */ kfree(p_proc_object); pr_ctxt->processor = NULL; } else { status = -EFAULT; } return status; } /* * ======== proc_enum_nodes ======== * Purpose: * Enumerate and get configuration information about nodes allocated * on a DSP processor. */ int proc_enum_nodes(void *hprocessor, void **node_tab, u32 node_tab_size, u32 *pu_num_nodes, u32 *pu_allocated) { int status = -EPERM; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct node_mgr *hnode_mgr = NULL; if (p_proc_object) { if (!(dev_get_node_manager(p_proc_object->dev_obj, &hnode_mgr))) { if (hnode_mgr) { status = node_enum_nodes(hnode_mgr, node_tab, node_tab_size, pu_num_nodes, pu_allocated); } } } else { status = -EFAULT; } return status; } /* Cache operation against kernel address instead of users */ static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start, ssize_t len, int pg_i) { struct page *page; unsigned long offset; ssize_t rest; int ret = 0, i = 0; struct scatterlist *sg = map_obj->dma_info.sg; while (len) { page = get_mapping_page(map_obj, pg_i); if (!page) { pr_err("%s: no page for %08lx\n", __func__, start); ret = -EINVAL; goto out; } else if (IS_ERR(page)) { pr_err("%s: err page for %08lx(%lu)\n", __func__, start, PTR_ERR(page)); ret = PTR_ERR(page); goto out; } offset = start & ~PAGE_MASK; rest = min_t(ssize_t, PAGE_SIZE - offset, len); 
sg_set_page(&sg[i], page, rest, offset); len -= rest; start += rest; pg_i++, i++; } if (i != map_obj->dma_info.num_pages) { pr_err("%s: bad number of sg iterations\n", __func__); ret = -EFAULT; goto out; } out: return ret; } static int memory_regain_ownership(struct dmm_map_object *map_obj, unsigned long start, ssize_t len, enum dma_data_direction dir) { int ret = 0; unsigned long first_data_page = start >> PAGE_SHIFT; unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT); /* calculating the number of pages this area spans */ unsigned long num_pages = last_data_page - first_data_page + 1; struct bridge_dma_map_info *dma_info = &map_obj->dma_info; if (!dma_info->sg) goto out; if (dma_info->dir != dir || dma_info->num_pages != num_pages) { pr_err("%s: dma info doesn't match given params\n", __func__); return -EINVAL; } dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir); pr_debug("%s: dma_map_sg unmapped\n", __func__); kfree(dma_info->sg); map_obj->dma_info.sg = NULL; out: return ret; } /* Cache operation against kernel address instead of users */ static int memory_give_ownership(struct dmm_map_object *map_obj, unsigned long start, ssize_t len, enum dma_data_direction dir) { int pg_i, ret, sg_num; struct scatterlist *sg; unsigned long first_data_page = start >> PAGE_SHIFT; unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT); /* calculating the number of pages this area spans */ unsigned long num_pages = last_data_page - first_data_page + 1; pg_i = find_first_page_in_cache(map_obj, start); if (pg_i < 0) { pr_err("%s: failed to find first page in cache\n", __func__); ret = -EINVAL; goto out; } sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL); if (!sg) { pr_err("%s: kcalloc failed\n", __func__); ret = -ENOMEM; goto out; } sg_init_table(sg, num_pages); /* cleanup a previous sg allocation */ /* this may happen if application doesn't signal for e/o DMA */ kfree(map_obj->dma_info.sg); map_obj->dma_info.sg = sg; 
map_obj->dma_info.dir = dir; map_obj->dma_info.num_pages = num_pages; ret = build_dma_sg(map_obj, start, len, pg_i); if (ret) goto kfree_sg; sg_num = dma_map_sg(bridge, sg, num_pages, dir); if (sg_num < 1) { pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num); ret = -EFAULT; goto kfree_sg; } pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num); map_obj->dma_info.sg_num = sg_num; return 0; kfree_sg: kfree(sg); map_obj->dma_info.sg = NULL; out: return ret; } int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, enum dma_data_direction dir) { /* Keep STATUS here for future additions to this function */ int status = 0; struct process_context *pr_ctxt = (struct process_context *) hprocessor; struct dmm_map_object *map_obj; if (!pr_ctxt) { status = -EFAULT; goto err_out; } pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__, (u32)pmpu_addr, ul_size, dir); mutex_lock(&proc_lock); /* find requested memory are in cached mapping information */ map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size); if (!map_obj) { pr_err("%s: find_containing_mapping failed\n", __func__); status = -EFAULT; goto no_map; } if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) { pr_err("%s: InValid address parameters %p %x\n", __func__, pmpu_addr, ul_size); status = -EFAULT; } no_map: mutex_unlock(&proc_lock); err_out: return status; } int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, enum dma_data_direction dir) { /* Keep STATUS here for future additions to this function */ int status = 0; struct process_context *pr_ctxt = (struct process_context *) hprocessor; struct dmm_map_object *map_obj; if (!pr_ctxt) { status = -EFAULT; goto err_out; } pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__, (u32)pmpu_addr, ul_size, dir); mutex_lock(&proc_lock); /* find requested memory are in cached mapping information */ map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size); if (!map_obj) { pr_err("%s: 
find_containing_mapping failed\n", __func__); status = -EFAULT; goto no_map; } if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) { pr_err("%s: InValid address parameters %p %x\n", __func__, pmpu_addr, ul_size); status = -EFAULT; } no_map: mutex_unlock(&proc_lock); err_out: return status; } /* * ======== proc_flush_memory ======== * Purpose: * Flush cache */ int proc_flush_memory(void *hprocessor, void *pmpu_addr, u32 ul_size, u32 ul_flags) { enum dma_data_direction dir = DMA_BIDIRECTIONAL; return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir); } /* * ======== proc_invalidate_memory ======== * Purpose: * Invalidates the memory specified */ int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size) { enum dma_data_direction dir = DMA_FROM_DEVICE; return proc_begin_dma(hprocessor, pmpu_addr, size, dir); } /* * ======== proc_get_resource_info ======== * Purpose: * Enumerate the resources currently available on a processor. */ int proc_get_resource_info(void *hprocessor, u32 resource_type, struct dsp_resourceinfo *resource_info, u32 resource_info_size) { int status = -EPERM; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct node_mgr *hnode_mgr = NULL; struct nldr_object *nldr_obj = NULL; struct rmm_target_obj *rmm = NULL; struct io_mgr *hio_mgr = NULL; /* IO manager handle */ if (!p_proc_object) { status = -EFAULT; goto func_end; } switch (resource_type) { case DSP_RESOURCE_DYNDARAM: case DSP_RESOURCE_DYNSARAM: case DSP_RESOURCE_DYNEXTERNAL: case DSP_RESOURCE_DYNSRAM: status = dev_get_node_manager(p_proc_object->dev_obj, &hnode_mgr); if (!hnode_mgr) { status = -EFAULT; goto func_end; } status = node_get_nldr_obj(hnode_mgr, &nldr_obj); if (!status) { status = nldr_get_rmm_manager(nldr_obj, &rmm); if (rmm) { if (!rmm_stat(rmm, (enum dsp_memtype)resource_type, (struct dsp_memstat *) &(resource_info->result. 
mem_stat))) status = -EINVAL; } else { status = -EFAULT; } } break; case DSP_RESOURCE_PROCLOAD: status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr); if (hio_mgr) status = p_proc_object->intf_fxns-> io_get_proc_load(hio_mgr, (struct dsp_procloadstat *) &(resource_info->result. proc_load_stat)); else status = -EFAULT; break; default: status = -EPERM; break; } func_end: return status; } /* * ======== proc_get_dev_object ======== * Purpose: * Return the Dev Object handle for a given Processor. * */ int proc_get_dev_object(void *hprocessor, struct dev_object **device_obj) { int status = -EPERM; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; if (p_proc_object) { *device_obj = p_proc_object->dev_obj; status = 0; } else { *device_obj = NULL; status = -EFAULT; } return status; } /* * ======== proc_get_state ======== * Purpose: * Report the state of the specified DSP processor. */ int proc_get_state(void *hprocessor, struct dsp_processorstate *proc_state_obj, u32 state_info_size) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; int brd_status; if (p_proc_object) { /* First, retrieve BRD state information */ status = (*p_proc_object->intf_fxns->brd_status) (p_proc_object->bridge_context, &brd_status); if (!status) { switch (brd_status) { case BRD_STOPPED: proc_state_obj->proc_state = PROC_STOPPED; break; case BRD_SLEEP_TRANSITION: case BRD_DSP_HIBERNATION: /* Fall through */ case BRD_RUNNING: proc_state_obj->proc_state = PROC_RUNNING; break; case BRD_LOADED: proc_state_obj->proc_state = PROC_LOADED; break; case BRD_ERROR: proc_state_obj->proc_state = PROC_ERROR; break; default: proc_state_obj->proc_state = 0xFF; status = -EPERM; break; } } } else { status = -EFAULT; } dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n", __func__, status, proc_state_obj->proc_state); return status; } /* * ======== proc_get_trace ======== * Purpose: * Retrieve the current contents of the trace buffer, located on 
the * Processor. Predefined symbols for the trace buffer must have been * configured into the DSP executable. * Details: * We support using the symbols SYS_PUTCBEG and SYS_PUTCEND to define a * trace buffer, only. Treat it as an undocumented feature. * This call is destructive, meaning the processor is placed in the monitor * state as a result of this function. */ int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size) { int status; status = -ENOSYS; return status; } /* * ======== proc_load ======== * Purpose: * Reset a processor and load a new base program image. * This will be an OEM-only function, and not part of the DSP/BIOS Bridge * application developer's API. */ int proc_load(void *hprocessor, const s32 argc_index, const char **user_args, const char **user_envp) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct io_mgr *hio_mgr; /* IO manager handle */ struct msg_mgr *hmsg_mgr; struct cod_manager *cod_mgr; /* Code manager handle */ char *pargv0; /* temp argv[0] ptr */ char **new_envp; /* Updated envp[] array. */ char sz_proc_id[MAXPROCIDLEN]; /* Size of "PROC_ID=<n>" */ s32 envp_elems; /* Num elements in envp[]. */ s32 cnew_envp; /* " " in new_envp[] */ s32 nproc_id = 0; /* Anticipate MP version. */ struct dcd_manager *hdcd_handle; struct dmm_object *dmm_mgr; u32 dw_ext_end; u32 proc_id; int brd_state; struct drv_data *drv_datap = dev_get_drvdata(bridge); #ifdef OPT_LOAD_TIME_INSTRUMENTATION struct timeval tv1; struct timeval tv2; #endif #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) struct dspbridge_platform_data *pdata = omap_dspbridge_dev->dev.platform_data; #endif #ifdef OPT_LOAD_TIME_INSTRUMENTATION do_gettimeofday(&tv1); #endif if (!p_proc_object) { status = -EFAULT; goto func_end; } dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr); if (!cod_mgr) { status = -EPERM; goto func_end; } status = proc_stop(hprocessor); if (status) goto func_end; /* Place the board in the monitor state. 
*/ status = proc_monitor(hprocessor); if (status) goto func_end; /* Save ptr to original argv[0]. */ pargv0 = (char *)user_args[0]; /*Prepend "PROC_ID=<nproc_id>"to envp array for target. */ envp_elems = get_envp_count((char **)user_envp); cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2)); new_envp = kzalloc(cnew_envp * sizeof(char **), GFP_KERNEL); if (new_envp) { status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID, nproc_id); if (status == -1) { dev_dbg(bridge, "%s: Proc ID string overflow\n", __func__); status = -EPERM; } else { new_envp = prepend_envp(new_envp, (char **)user_envp, envp_elems, cnew_envp, sz_proc_id); /* Get the DCD Handle */ status = mgr_get_dcd_handle(p_proc_object->mgr_obj, (u32 *) &hdcd_handle); if (!status) { /* Before proceeding with new load, * check if a previously registered COFF * exists. * If yes, unregister nodes in previously * registered COFF. If any error occurred, * set previously registered COFF to NULL. */ if (p_proc_object->last_coff != NULL) { status = dcd_auto_unregister(hdcd_handle, p_proc_object-> last_coff); /* Regardless of auto unregister status, * free previously allocated * memory. */ kfree(p_proc_object->last_coff); p_proc_object->last_coff = NULL; } } /* On success, do cod_open_base() */ status = cod_open_base(cod_mgr, (char *)user_args[0], COD_SYMB); } } else { status = -ENOMEM; } if (!status) { /* Auto-register data base */ /* Get the DCD Handle */ status = mgr_get_dcd_handle(p_proc_object->mgr_obj, (u32 *) &hdcd_handle); if (!status) { /* Auto register nodes in specified COFF * file. If registration did not fail, * (status = 0 or -EACCES) * save the name of the COFF file for * de-registration in the future. 
*/ status = dcd_auto_register(hdcd_handle, (char *)user_args[0]); if (status == -EACCES) status = 0; if (status) { status = -EPERM; } else { /* Allocate memory for pszLastCoff */ p_proc_object->last_coff = kzalloc((strlen(user_args[0]) + 1), GFP_KERNEL); /* If memory allocated, save COFF file name */ if (p_proc_object->last_coff) { strncpy(p_proc_object->last_coff, (char *)user_args[0], (strlen((char *)user_args[0]) + 1)); } } } } /* Update shared memory address and size */ if (!status) { /* Create the message manager. This must be done * before calling the IOOnLoaded function. */ dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr); if (!hmsg_mgr) { status = msg_create(&hmsg_mgr, p_proc_object->dev_obj, (msg_onexit) node_on_exit); dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr); } } if (!status) { /* Set the Device object's message manager */ status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr); if (hio_mgr) status = (*p_proc_object->intf_fxns->io_on_loaded) (hio_mgr); else status = -EFAULT; } if (!status) { /* Now, attempt to load an exec: */ /* Boost the OPP level to Maximum level supported by baseport */ #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) if (pdata->cpu_set_freq) (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]); #endif status = cod_load_base(cod_mgr, argc_index, (char **)user_args, dev_brd_write_fxn, p_proc_object->dev_obj, NULL); if (status) { if (status == -EBADF) { dev_dbg(bridge, "%s: Failure to Load the EXE\n", __func__); } if (status == -ESPIPE) { pr_err("%s: Couldn't parse the file\n", __func__); } } /* Requesting the lowest opp supported */ #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) if (pdata->cpu_set_freq) (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]); #endif } if (!status) { /* Update the Processor status to loaded */ status = (*p_proc_object->intf_fxns->brd_set_state) (p_proc_object->bridge_context, BRD_LOADED); if (!status) { p_proc_object->proc_state = PROC_LOADED; if 
(p_proc_object->ntfy_obj) proc_notify_clients(p_proc_object, DSP_PROCESSORSTATECHANGE); } } if (!status) { status = proc_get_processor_id(hprocessor, &proc_id); if (proc_id == DSP_UNIT) { /* Use all available DSP address space after EXTMEM * for DMM */ if (!status) status = cod_get_sym_value(cod_mgr, EXTEND, &dw_ext_end); /* Reset DMM structs and add an initial free chunk */ if (!status) { status = dev_get_dmm_mgr(p_proc_object->dev_obj, &dmm_mgr); if (dmm_mgr) { /* Set dw_ext_end to DMM START u8 * address */ dw_ext_end = (dw_ext_end + 1) * DSPWORDSIZE; /* DMM memory is from EXT_END */ status = dmm_create_tables(dmm_mgr, dw_ext_end, DMMPOOLSIZE); } else { status = -EFAULT; } } } } /* Restore the original argv[0] */ kfree(new_envp); user_args[0] = pargv0; if (!status) { if (!((*p_proc_object->intf_fxns->brd_status) (p_proc_object->bridge_context, &brd_state))) { pr_info("%s: Processor Loaded %s\n", __func__, pargv0); kfree(drv_datap->base_img); drv_datap->base_img = kmalloc(strlen(pargv0) + 1, GFP_KERNEL); if (drv_datap->base_img) strncpy(drv_datap->base_img, pargv0, strlen(pargv0) + 1); else status = -ENOMEM; } } func_end: if (status) { pr_err("%s: Processor failed to load\n", __func__); proc_stop(p_proc_object); } #ifdef OPT_LOAD_TIME_INSTRUMENTATION do_gettimeofday(&tv2); if (tv2.tv_usec < tv1.tv_usec) { tv2.tv_usec += 1000000; tv2.tv_sec--; } dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__, tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec); #endif return status; } /* * ======== proc_map ======== * Purpose: * Maps a MPU buffer to DSP address space. 
*/ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size, void *req_addr, void **pp_map_addr, u32 ul_map_attr, struct process_context *pr_ctxt) { u32 va_align; u32 pa_align; struct dmm_object *dmm_mgr; u32 size_align; int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct dmm_map_object *map_obj; u32 tmp_addr = 0; #ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK if ((ul_map_attr & BUFMODE_MASK) != RBUF) { if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) || !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) { pr_err("%s: not aligned: 0x%x (%d)\n", __func__, (u32)pmpu_addr, ul_size); return -EFAULT; } } #endif /* Calculate the page-aligned PA, VA and size */ va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K); pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K); size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align, PG_SIZE4K); if (!p_proc_object) { status = -EFAULT; goto func_end; } /* Critical section */ mutex_lock(&proc_lock); dmm_get_handle(p_proc_object, &dmm_mgr); if (dmm_mgr) status = dmm_map_memory(dmm_mgr, va_align, size_align); else status = -EFAULT; /* Add mapping to the page tables. 
*/ if (!status) { /* Mapped address = MSB of VA | LSB of PA */ tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1))); /* mapped memory resource tracking */ map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr, size_align); if (!map_obj) status = -ENOMEM; else status = (*p_proc_object->intf_fxns->brd_mem_map) (p_proc_object->bridge_context, pa_align, va_align, size_align, ul_map_attr, map_obj->pages); } if (!status) { /* Mapped address = MSB of VA | LSB of PA */ *pp_map_addr = (void *) tmp_addr; } else { remove_mapping_information(pr_ctxt, tmp_addr, size_align); dmm_un_map_memory(dmm_mgr, va_align, &size_align); } mutex_unlock(&proc_lock); if (status) goto func_end; func_end: dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, " "req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, " "pa_align %x, size_align %x status 0x%x\n", __func__, hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr, pp_map_addr, va_align, pa_align, size_align, status); return status; } /* * ======== proc_register_notify ======== * Purpose: * Register to be notified of specific processor events. */ int proc_register_notify(void *hprocessor, u32 event_mask, u32 notify_type, struct dsp_notification * hnotification) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct deh_mgr *hdeh_mgr; /* Check processor handle */ if (!p_proc_object) { status = -EFAULT; goto func_end; } /* Check if event mask is a valid processor related event */ if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH | DSP_PROCESSORDETACH | DSP_PROCESSORRESTART | DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR | DSP_WDTOVERFLOW)) status = -EINVAL; /* Check if notify type is valid */ if (notify_type != DSP_SIGNALEVENT) status = -EINVAL; if (!status) { /* If event mask is not DSP_SYSERROR, DSP_MMUFAULT, * or DSP_PWRERROR then register event immediately. 
*/ if (event_mask & ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR | DSP_WDTOVERFLOW)) { status = ntfy_register(p_proc_object->ntfy_obj, hnotification, event_mask, notify_type); /* Special case alert, special case alert! * If we're trying to *deregister* (i.e. event_mask * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification, * we have to deregister with the DEH manager. * There's no way to know, based on event_mask which * manager the notification event was registered with, * so if we're trying to deregister and ntfy_register * failed, we'll give the deh manager a shot. */ if ((event_mask == 0) && status) { status = dev_get_deh_mgr(p_proc_object->dev_obj, &hdeh_mgr); status = bridge_deh_register_notify(hdeh_mgr, event_mask, notify_type, hnotification); } } else { status = dev_get_deh_mgr(p_proc_object->dev_obj, &hdeh_mgr); status = bridge_deh_register_notify(hdeh_mgr, event_mask, notify_type, hnotification); } } func_end: return status; } /* * ======== proc_reserve_memory ======== * Purpose: * Reserve a virtually contiguous region of DSP address space. 
*/ int proc_reserve_memory(void *hprocessor, u32 ul_size, void **pp_rsv_addr, struct process_context *pr_ctxt) { struct dmm_object *dmm_mgr; int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct dmm_rsv_object *rsv_obj; if (!p_proc_object) { status = -EFAULT; goto func_end; } status = dmm_get_handle(p_proc_object, &dmm_mgr); if (!dmm_mgr) { status = -EFAULT; goto func_end; } status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr); if (status != 0) goto func_end; /* * A successful reserve should be followed by insertion of rsv_obj * into dmm_rsv_list, so that reserved memory resource tracking * remains uptodate */ rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL); if (rsv_obj) { rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr; spin_lock(&pr_ctxt->dmm_rsv_lock); list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list); spin_unlock(&pr_ctxt->dmm_rsv_lock); } func_end: dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p " "status 0x%x\n", __func__, hprocessor, ul_size, pp_rsv_addr, status); return status; } /* * ======== proc_start ======== * Purpose: * Start a processor running. */ int proc_start(void *hprocessor) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct cod_manager *cod_mgr; /* Code manager handle */ u32 dw_dsp_addr; /* Loaded code's entry point. 
*/ int brd_state; if (!p_proc_object) { status = -EFAULT; goto func_end; } /* Call the bridge_brd_start */ if (p_proc_object->proc_state != PROC_LOADED) { status = -EBADR; goto func_end; } status = dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr); if (!cod_mgr) { status = -EFAULT; goto func_cont; } status = cod_get_entry(cod_mgr, &dw_dsp_addr); if (status) goto func_cont; status = (*p_proc_object->intf_fxns->brd_start) (p_proc_object->bridge_context, dw_dsp_addr); if (status) goto func_cont; /* Call dev_create2 */ status = dev_create2(p_proc_object->dev_obj); if (!status) { p_proc_object->proc_state = PROC_RUNNING; /* Deep sleep switces off the peripheral clocks. * we just put the DSP CPU in idle in the idle loop. * so there is no need to send a command to DSP */ if (p_proc_object->ntfy_obj) { proc_notify_clients(p_proc_object, DSP_PROCESSORSTATECHANGE); } } else { /* Failed to Create Node Manager and DISP Object * Stop the Processor from running. Put it in STOPPED State */ (void)(*p_proc_object->intf_fxns-> brd_stop) (p_proc_object->bridge_context); p_proc_object->proc_state = PROC_STOPPED; } func_cont: if (!status) { if (!((*p_proc_object->intf_fxns->brd_status) (p_proc_object->bridge_context, &brd_state))) { pr_info("%s: dsp in running state\n", __func__); } } else { pr_err("%s: Failed to start the dsp\n", __func__); proc_stop(p_proc_object); } func_end: return status; } /* * ======== proc_stop ======== * Purpose: * Stop a processor running. 
*/ int proc_stop(void *hprocessor) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct msg_mgr *hmsg_mgr; struct node_mgr *hnode_mgr; void *hnode; u32 node_tab_size = 1; u32 num_nodes = 0; u32 nodes_allocated = 0; if (!p_proc_object) { status = -EFAULT; goto func_end; } /* check if there are any running nodes */ status = dev_get_node_manager(p_proc_object->dev_obj, &hnode_mgr); if (!status && hnode_mgr) { status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size, &num_nodes, &nodes_allocated); if ((status == -EINVAL) || (nodes_allocated > 0)) { pr_err("%s: Can't stop device, active nodes = %d \n", __func__, nodes_allocated); return -EBADR; } } /* Call the bridge_brd_stop */ /* It is OK to stop a device that does n't have nodes OR not started */ status = (*p_proc_object->intf_fxns-> brd_stop) (p_proc_object->bridge_context); if (!status) { dev_dbg(bridge, "%s: processor in standby mode\n", __func__); p_proc_object->proc_state = PROC_STOPPED; /* Destroy the Node Manager, msg_ctrl Manager */ if (!(dev_destroy2(p_proc_object->dev_obj))) { /* Destroy the msg_ctrl by calling msg_delete */ dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr); if (hmsg_mgr) { msg_delete(hmsg_mgr); dev_set_msg_mgr(p_proc_object->dev_obj, NULL); } } } else { pr_err("%s: Failed to stop the processor\n", __func__); } func_end: return status; } /* * ======== proc_un_map ======== * Purpose: * Removes a MPU buffer mapping from the DSP address space. */ int proc_un_map(void *hprocessor, void *map_addr, struct process_context *pr_ctxt) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct dmm_object *dmm_mgr; u32 va_align; u32 size_align; va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K); if (!p_proc_object) { status = -EFAULT; goto func_end; } status = dmm_get_handle(hprocessor, &dmm_mgr); if (!dmm_mgr) { status = -EFAULT; goto func_end; } /* Critical section */ mutex_lock(&proc_lock); /* * Update DMM structures. 
Get the size to unmap. * This function returns error if the VA is not mapped */ status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align); /* Remove mapping from the page tables. */ if (!status) { status = (*p_proc_object->intf_fxns->brd_mem_un_map) (p_proc_object->bridge_context, va_align, size_align); } if (status) goto unmap_failed; /* * A successful unmap should be followed by removal of map_obj * from dmm_map_list, so that mapped memory resource tracking * remains uptodate */ remove_mapping_information(pr_ctxt, (u32) map_addr, size_align); unmap_failed: mutex_unlock(&proc_lock); func_end: dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", __func__, hprocessor, map_addr, status); return status; } /* * ======== proc_un_reserve_memory ======== * Purpose: * Frees a previously reserved region of DSP address space. */ int proc_un_reserve_memory(void *hprocessor, void *prsv_addr, struct process_context *pr_ctxt) { struct dmm_object *dmm_mgr; int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct dmm_rsv_object *rsv_obj; if (!p_proc_object) { status = -EFAULT; goto func_end; } status = dmm_get_handle(p_proc_object, &dmm_mgr); if (!dmm_mgr) { status = -EFAULT; goto func_end; } status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr); if (status != 0) goto func_end; /* * A successful unreserve should be followed by removal of rsv_obj * from dmm_rsv_list, so that reserved memory resource tracking * remains uptodate */ spin_lock(&pr_ctxt->dmm_rsv_lock); list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) { if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) { list_del(&rsv_obj->link); kfree(rsv_obj); break; } } spin_unlock(&pr_ctxt->dmm_rsv_lock); func_end: dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n", __func__, hprocessor, prsv_addr, status); return status; } /* * ======== = proc_monitor ======== == * Purpose: * Place the Processor in Monitor State. 
This is an internal * function and a requirement before Processor is loaded. * This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor. * In dev_destroy2 we delete the node manager. * Parameters: * p_proc_object: Pointer to Processor Object * Returns: * 0: Processor placed in monitor mode. * !0: Failed to place processor in monitor mode. * Requires: * Valid Processor Handle * Ensures: * Success: ProcObject state is PROC_IDLE */ static int proc_monitor(struct proc_object *proc_obj) { int status = -EPERM; struct msg_mgr *hmsg_mgr; /* This is needed only when Device is loaded when it is * already 'ACTIVE' */ /* Destroy the Node Manager, msg_ctrl Manager */ if (!dev_destroy2(proc_obj->dev_obj)) { /* Destroy the msg_ctrl by calling msg_delete */ dev_get_msg_mgr(proc_obj->dev_obj, &hmsg_mgr); if (hmsg_mgr) { msg_delete(hmsg_mgr); dev_set_msg_mgr(proc_obj->dev_obj, NULL); } } /* Place the Board in the Monitor State */ if (!((*proc_obj->intf_fxns->brd_monitor) (proc_obj->bridge_context))) { status = 0; } return status; } /* * ======== get_envp_count ======== * Purpose: * Return the number of elements in the envp array, including the * terminating NULL element. */ static s32 get_envp_count(char **envp) { s32 ret = 0; if (envp) { while (*envp++) ret++; ret += 1; /* Include the terminating NULL in the count. */ } return ret; } /* * ======== prepend_envp ======== * Purpose: * Prepend an environment variable=value pair to the new envp array, and * copy in the existing var=value pairs in the old envp array. */ static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems, s32 cnew_envp, char *sz_var) { char **pp_envp = new_envp; /* Prepend new environ var=value string */ *new_envp++ = sz_var; /* Copy user's environment into our own. */ while (envp_elems--) *new_envp++ = *envp++; /* Ensure NULL terminates the new environment strings array. 
*/ if (envp_elems == 0) *new_envp = NULL; return pp_envp; } /* * ======== proc_notify_clients ======== * Purpose: * Notify the processor the events. */ int proc_notify_clients(void *proc, u32 events) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)proc; if (!p_proc_object) { status = -EFAULT; goto func_end; } ntfy_notify(p_proc_object->ntfy_obj, events); func_end: return status; } /* * ======== proc_notify_all_clients ======== * Purpose: * Notify the processor the events. This includes notifying all clients * attached to a particulat DSP. */ int proc_notify_all_clients(void *proc, u32 events) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)proc; if (!p_proc_object) { status = -EFAULT; goto func_end; } dev_notify_clients(p_proc_object->dev_obj, events); func_end: return status; } /* * ======== proc_get_processor_id ======== * Purpose: * Retrieves the processor ID. */ int proc_get_processor_id(void *proc, u32 * proc_id) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)proc; if (p_proc_object) *proc_id = p_proc_object->processor_id; else status = -EFAULT; return status; }
gpl-2.0
shazzl/TW_i9205_JB
sound/drivers/mts64.c
4942
27420
/* * ALSA Driver for Ego Systems Inc. (ESI) Miditerminal 4140 * Copyright (c) 2006 by Matthias König <mk@phasorlab.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/parport.h> #include <linux/spinlock.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/rawmidi.h> #include <sound/control.h> #define CARD_NAME "Miditerminal 4140" #define DRIVER_NAME "MTS64" #define PLATFORM_DRIVER "snd_mts64" static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; static struct platform_device *platform_devices[SNDRV_CARDS]; static int device_count; module_param_array(index, int, NULL, S_IRUGO); MODULE_PARM_DESC(index, "Index value for " CARD_NAME " soundcard."); module_param_array(id, charp, NULL, S_IRUGO); MODULE_PARM_DESC(id, "ID string for " CARD_NAME " soundcard."); module_param_array(enable, bool, NULL, S_IRUGO); MODULE_PARM_DESC(enable, "Enable " CARD_NAME " soundcard."); MODULE_AUTHOR("Matthias Koenig <mk@phasorlab.de>"); MODULE_DESCRIPTION("ESI Miditerminal 4140"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{ESI,Miditerminal 4140}}"); 
/********************************************************************* * Chip specific *********************************************************************/ #define MTS64_NUM_INPUT_PORTS 5 #define MTS64_NUM_OUTPUT_PORTS 4 #define MTS64_SMPTE_SUBSTREAM 4 struct mts64 { spinlock_t lock; struct snd_card *card; struct snd_rawmidi *rmidi; struct pardevice *pardev; int pardev_claimed; int open_count; int current_midi_output_port; int current_midi_input_port; u8 mode[MTS64_NUM_INPUT_PORTS]; struct snd_rawmidi_substream *midi_input_substream[MTS64_NUM_INPUT_PORTS]; int smpte_switch; u8 time[4]; /* [0]=hh, [1]=mm, [2]=ss, [3]=ff */ u8 fps; }; static int snd_mts64_free(struct mts64 *mts) { kfree(mts); return 0; } static int __devinit snd_mts64_create(struct snd_card *card, struct pardevice *pardev, struct mts64 **rchip) { struct mts64 *mts; *rchip = NULL; mts = kzalloc(sizeof(struct mts64), GFP_KERNEL); if (mts == NULL) return -ENOMEM; /* Init chip specific data */ spin_lock_init(&mts->lock); mts->card = card; mts->pardev = pardev; mts->current_midi_output_port = -1; mts->current_midi_input_port = -1; *rchip = mts; return 0; } /********************************************************************* * HW register related constants *********************************************************************/ /* Status Bits */ #define MTS64_STAT_BSY 0x80 #define MTS64_STAT_BIT_SET 0x20 /* readout process, bit is set */ #define MTS64_STAT_PORT 0x10 /* read byte is a port number */ /* Control Bits */ #define MTS64_CTL_READOUT 0x08 /* enable readout */ #define MTS64_CTL_WRITE_CMD 0x06 #define MTS64_CTL_WRITE_DATA 0x02 #define MTS64_CTL_STROBE 0x01 /* Command */ #define MTS64_CMD_RESET 0xfe #define MTS64_CMD_PROBE 0x8f /* Used in probing procedure */ #define MTS64_CMD_SMPTE_SET_TIME 0xe8 #define MTS64_CMD_SMPTE_SET_FPS 0xee #define MTS64_CMD_SMPTE_STOP 0xef #define MTS64_CMD_SMPTE_FPS_24 0xe3 #define MTS64_CMD_SMPTE_FPS_25 0xe2 #define MTS64_CMD_SMPTE_FPS_2997 0xe4 #define 
MTS64_CMD_SMPTE_FPS_30D 0xe1 #define MTS64_CMD_SMPTE_FPS_30 0xe0 #define MTS64_CMD_COM_OPEN 0xf8 /* setting the communication mode */ #define MTS64_CMD_COM_CLOSE1 0xff /* clearing communication mode */ #define MTS64_CMD_COM_CLOSE2 0xf5 /********************************************************************* * Hardware specific functions *********************************************************************/ static void mts64_enable_readout(struct parport *p); static void mts64_disable_readout(struct parport *p); static int mts64_device_ready(struct parport *p); static int mts64_device_init(struct parport *p); static int mts64_device_open(struct mts64 *mts); static int mts64_device_close(struct mts64 *mts); static u8 mts64_map_midi_input(u8 c); static int mts64_probe(struct parport *p); static u16 mts64_read(struct parport *p); static u8 mts64_read_char(struct parport *p); static void mts64_smpte_start(struct parport *p, u8 hours, u8 minutes, u8 seconds, u8 frames, u8 idx); static void mts64_smpte_stop(struct parport *p); static void mts64_write_command(struct parport *p, u8 c); static void mts64_write_data(struct parport *p, u8 c); static void mts64_write_midi(struct mts64 *mts, u8 c, int midiport); /* Enables the readout procedure * * Before we can read a midi byte from the device, we have to set * bit 3 of control port. 
*/ static void mts64_enable_readout(struct parport *p) { u8 c; c = parport_read_control(p); c |= MTS64_CTL_READOUT; parport_write_control(p, c); } /* Disables readout * * Readout is disabled by clearing bit 3 of control */ static void mts64_disable_readout(struct parport *p) { u8 c; c = parport_read_control(p); c &= ~MTS64_CTL_READOUT; parport_write_control(p, c); } /* waits for device ready * * Checks if BUSY (Bit 7 of status) is clear * 1 device ready * 0 failure */ static int mts64_device_ready(struct parport *p) { int i; u8 c; for (i = 0; i < 0xffff; ++i) { c = parport_read_status(p); c &= MTS64_STAT_BSY; if (c != 0) return 1; } return 0; } /* Init device (LED blinking startup magic) * * Returns: * 0 init ok * -EIO failure */ static int __devinit mts64_device_init(struct parport *p) { int i; mts64_write_command(p, MTS64_CMD_RESET); for (i = 0; i < 64; ++i) { msleep(100); if (mts64_probe(p) == 0) { /* success */ mts64_disable_readout(p); return 0; } } mts64_disable_readout(p); return -EIO; } /* * Opens the device (set communication mode) */ static int mts64_device_open(struct mts64 *mts) { int i; struct parport *p = mts->pardev->port; for (i = 0; i < 5; ++i) mts64_write_command(p, MTS64_CMD_COM_OPEN); return 0; } /* * Close device (clear communication mode) */ static int mts64_device_close(struct mts64 *mts) { int i; struct parport *p = mts->pardev->port; for (i = 0; i < 5; ++i) { mts64_write_command(p, MTS64_CMD_COM_CLOSE1); mts64_write_command(p, MTS64_CMD_COM_CLOSE2); } return 0; } /* map hardware port to substream number * * When reading a byte from the device, the device tells us * on what port the byte is. This HW port has to be mapped to * the midiport (substream number). 
* substream 0-3 are Midiports 1-4 * substream 4 is SMPTE Timecode * The mapping is done by the table: * HW | 0 | 1 | 2 | 3 | 4 * SW | 0 | 1 | 4 | 2 | 3 */ static u8 mts64_map_midi_input(u8 c) { static u8 map[] = { 0, 1, 4, 2, 3 }; return map[c]; } /* Probe parport for device * * Do we have a Miditerminal 4140 on parport? * Returns: * 0 device found * -ENODEV no device */ static int __devinit mts64_probe(struct parport *p) { u8 c; mts64_smpte_stop(p); mts64_write_command(p, MTS64_CMD_PROBE); msleep(50); c = mts64_read(p); c &= 0x00ff; if (c != MTS64_CMD_PROBE) return -ENODEV; else return 0; } /* Read byte incl. status from device * * Returns: * data in lower 8 bits and status in upper 8 bits */ static u16 mts64_read(struct parport *p) { u8 data, status; mts64_device_ready(p); mts64_enable_readout(p); status = parport_read_status(p); data = mts64_read_char(p); mts64_disable_readout(p); return (status << 8) | data; } /* Read a byte from device * * Note, that readout mode has to be enabled. * readout procedure is as follows: * - Write number of the Bit to read to DATA * - Read STATUS * - Bit 5 of STATUS indicates if Bit is set * * Returns: * Byte read from device */ static u8 mts64_read_char(struct parport *p) { u8 c = 0; u8 status; u8 i; for (i = 0; i < 8; ++i) { parport_write_data(p, i); c >>= 1; status = parport_read_status(p); if (status & MTS64_STAT_BIT_SET) c |= 0x80; } return c; } /* Starts SMPTE Timecode generation * * The device creates SMPTE Timecode by hardware. 
* 0 24 fps * 1 25 fps * 2 29.97 fps * 3 30 fps (Drop-frame) * 4 30 fps */ static void mts64_smpte_start(struct parport *p, u8 hours, u8 minutes, u8 seconds, u8 frames, u8 idx) { static u8 fps[5] = { MTS64_CMD_SMPTE_FPS_24, MTS64_CMD_SMPTE_FPS_25, MTS64_CMD_SMPTE_FPS_2997, MTS64_CMD_SMPTE_FPS_30D, MTS64_CMD_SMPTE_FPS_30 }; mts64_write_command(p, MTS64_CMD_SMPTE_SET_TIME); mts64_write_command(p, frames); mts64_write_command(p, seconds); mts64_write_command(p, minutes); mts64_write_command(p, hours); mts64_write_command(p, MTS64_CMD_SMPTE_SET_FPS); mts64_write_command(p, fps[idx]); } /* Stops SMPTE Timecode generation */ static void mts64_smpte_stop(struct parport *p) { mts64_write_command(p, MTS64_CMD_SMPTE_STOP); } /* Write a command byte to device */ static void mts64_write_command(struct parport *p, u8 c) { mts64_device_ready(p); parport_write_data(p, c); parport_write_control(p, MTS64_CTL_WRITE_CMD); parport_write_control(p, MTS64_CTL_WRITE_CMD | MTS64_CTL_STROBE); parport_write_control(p, MTS64_CTL_WRITE_CMD); } /* Write a data byte to device */ static void mts64_write_data(struct parport *p, u8 c) { mts64_device_ready(p); parport_write_data(p, c); parport_write_control(p, MTS64_CTL_WRITE_DATA); parport_write_control(p, MTS64_CTL_WRITE_DATA | MTS64_CTL_STROBE); parport_write_control(p, MTS64_CTL_WRITE_DATA); } /* Write a MIDI byte to midiport * * midiport ranges from 0-3 and maps to Ports 1-4 * assumptions: communication mode is on */ static void mts64_write_midi(struct mts64 *mts, u8 c, int midiport) { struct parport *p = mts->pardev->port; /* check current midiport */ if (mts->current_midi_output_port != midiport) mts64_write_command(p, midiport); /* write midi byte */ mts64_write_data(p, c); } /********************************************************************* * Control elements *********************************************************************/ /* SMPTE Switch */ #define snd_mts64_ctl_smpte_switch_info snd_ctl_boolean_mono_info static int 
snd_mts64_ctl_smpte_switch_get(struct snd_kcontrol* kctl, struct snd_ctl_elem_value *uctl) { struct mts64 *mts = snd_kcontrol_chip(kctl); spin_lock_irq(&mts->lock); uctl->value.integer.value[0] = mts->smpte_switch; spin_unlock_irq(&mts->lock); return 0; } /* smpte_switch is not accessed from IRQ handler, so we just need to protect the HW access */ static int snd_mts64_ctl_smpte_switch_put(struct snd_kcontrol* kctl, struct snd_ctl_elem_value *uctl) { struct mts64 *mts = snd_kcontrol_chip(kctl); int changed = 0; int val = !!uctl->value.integer.value[0]; spin_lock_irq(&mts->lock); if (mts->smpte_switch == val) goto __out; changed = 1; mts->smpte_switch = val; if (mts->smpte_switch) { mts64_smpte_start(mts->pardev->port, mts->time[0], mts->time[1], mts->time[2], mts->time[3], mts->fps); } else { mts64_smpte_stop(mts->pardev->port); } __out: spin_unlock_irq(&mts->lock); return changed; } static struct snd_kcontrol_new mts64_ctl_smpte_switch __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI, .name = "SMPTE Playback Switch", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = 0, .info = snd_mts64_ctl_smpte_switch_info, .get = snd_mts64_ctl_smpte_switch_get, .put = snd_mts64_ctl_smpte_switch_put }; /* Time */ static int snd_mts64_ctl_smpte_time_h_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 23; return 0; } static int snd_mts64_ctl_smpte_time_f_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 99; return 0; } static int snd_mts64_ctl_smpte_time_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 59; return 0; } static int 
snd_mts64_ctl_smpte_time_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uctl) { struct mts64 *mts = snd_kcontrol_chip(kctl); int idx = kctl->private_value; spin_lock_irq(&mts->lock); uctl->value.integer.value[0] = mts->time[idx]; spin_unlock_irq(&mts->lock); return 0; } static int snd_mts64_ctl_smpte_time_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uctl) { struct mts64 *mts = snd_kcontrol_chip(kctl); int idx = kctl->private_value; unsigned int time = uctl->value.integer.value[0] % 60; int changed = 0; spin_lock_irq(&mts->lock); if (mts->time[idx] != time) { changed = 1; mts->time[idx] = time; } spin_unlock_irq(&mts->lock); return changed; } static struct snd_kcontrol_new mts64_ctl_smpte_time_hours __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI, .name = "SMPTE Time Hours", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = 0, .info = snd_mts64_ctl_smpte_time_h_info, .get = snd_mts64_ctl_smpte_time_get, .put = snd_mts64_ctl_smpte_time_put }; static struct snd_kcontrol_new mts64_ctl_smpte_time_minutes __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI, .name = "SMPTE Time Minutes", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = 1, .info = snd_mts64_ctl_smpte_time_info, .get = snd_mts64_ctl_smpte_time_get, .put = snd_mts64_ctl_smpte_time_put }; static struct snd_kcontrol_new mts64_ctl_smpte_time_seconds __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI, .name = "SMPTE Time Seconds", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = 2, .info = snd_mts64_ctl_smpte_time_info, .get = snd_mts64_ctl_smpte_time_get, .put = snd_mts64_ctl_smpte_time_put }; static struct snd_kcontrol_new mts64_ctl_smpte_time_frames __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI, .name = "SMPTE Time Frames", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = 3, .info = snd_mts64_ctl_smpte_time_f_info, .get = snd_mts64_ctl_smpte_time_get, .put = 
snd_mts64_ctl_smpte_time_put }; /* FPS */ static int snd_mts64_ctl_smpte_fps_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo) { static char *texts[5] = { "24", "25", "29.97", "30D", "30" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = 5; if (uinfo->value.enumerated.item > 4) uinfo->value.enumerated.item = 4; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int snd_mts64_ctl_smpte_fps_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uctl) { struct mts64 *mts = snd_kcontrol_chip(kctl); spin_lock_irq(&mts->lock); uctl->value.enumerated.item[0] = mts->fps; spin_unlock_irq(&mts->lock); return 0; } static int snd_mts64_ctl_smpte_fps_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uctl) { struct mts64 *mts = snd_kcontrol_chip(kctl); int changed = 0; if (uctl->value.enumerated.item[0] >= 5) return -EINVAL; spin_lock_irq(&mts->lock); if (mts->fps != uctl->value.enumerated.item[0]) { changed = 1; mts->fps = uctl->value.enumerated.item[0]; } spin_unlock_irq(&mts->lock); return changed; } static struct snd_kcontrol_new mts64_ctl_smpte_fps __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI, .name = "SMPTE Fps", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = 0, .info = snd_mts64_ctl_smpte_fps_info, .get = snd_mts64_ctl_smpte_fps_get, .put = snd_mts64_ctl_smpte_fps_put }; static int __devinit snd_mts64_ctl_create(struct snd_card *card, struct mts64 *mts) { int err, i; static struct snd_kcontrol_new *control[] __devinitdata = { &mts64_ctl_smpte_switch, &mts64_ctl_smpte_time_hours, &mts64_ctl_smpte_time_minutes, &mts64_ctl_smpte_time_seconds, &mts64_ctl_smpte_time_frames, &mts64_ctl_smpte_fps, NULL }; for (i = 0; control[i]; ++i) { err = snd_ctl_add(card, snd_ctl_new1(control[i], mts)); if (err < 0) { snd_printd("Cannot create control: %s\n", control[i]->name); return err; } } return 0; } 
/*********************************************************************
 * Rawmidi
 *********************************************************************/
/* Per-substream flag: input delivery to the rawmidi layer is enabled. */
#define MTS64_MODE_INPUT_TRIGGERED 0x01

/* Open a rawmidi substream.  The first open powers up the hardware;
   subsequent opens only bump the reference count. */
static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
{
	struct mts64 *mts = substream->rmidi->private_data;

	if (mts->open_count == 0) {
		/* We don't need a spinlock here, because this is just called
		   if the device has not been opened before.
		   So there aren't any IRQs from the device */
		mts64_device_open(mts);

		msleep(50);
	}
	++(mts->open_count);

	return 0;
}

/* Close a rawmidi substream; the last close shuts the hardware down. */
static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
{
	struct mts64 *mts = substream->rmidi->private_data;
	unsigned long flags;

	--(mts->open_count);
	if (mts->open_count == 0) {
		/* We need the spinlock_irqsave here because we can still
		   have IRQs at this point */
		spin_lock_irqsave(&mts->lock, flags);
		mts64_device_close(mts);
		spin_unlock_irqrestore(&mts->lock, flags);

		msleep(500);

	} else if (mts->open_count < 0)
		/* defensive: never let the counter go negative */
		mts->open_count = 0;

	return 0;
}

/* Drain the rawmidi transmit buffer to the hardware, one byte at a
   time, under the device lock. */
static void snd_mts64_rawmidi_output_trigger(struct snd_rawmidi_substream *substream,
					     int up)
{
	struct mts64 *mts = substream->rmidi->private_data;
	u8 data;
	unsigned long flags;

	spin_lock_irqsave(&mts->lock, flags);
	while (snd_rawmidi_transmit_peek(substream, &data, 1) == 1) {
		mts64_write_midi(mts, data, substream->number+1);
		snd_rawmidi_transmit_ack(substream, 1);
	}
	spin_unlock_irqrestore(&mts->lock, flags);
}

/* Enable/disable input delivery for one substream; the flag is read by
   snd_mts64_interrupt() below. */
static void snd_mts64_rawmidi_input_trigger(struct snd_rawmidi_substream *substream,
					    int up)
{
	struct mts64 *mts = substream->rmidi->private_data;
	unsigned long flags;

	spin_lock_irqsave(&mts->lock, flags);
	if (up)
		mts->mode[substream->number] |= MTS64_MODE_INPUT_TRIGGERED;
	else
		mts->mode[substream->number] &= ~MTS64_MODE_INPUT_TRIGGERED;
	spin_unlock_irqrestore(&mts->lock, flags);
}

static struct snd_rawmidi_ops snd_mts64_rawmidi_output_ops = {
	.open    = snd_mts64_rawmidi_open,
	.close   = snd_mts64_rawmidi_close,
	.trigger = snd_mts64_rawmidi_output_trigger
};

static struct snd_rawmidi_ops snd_mts64_rawmidi_input_ops = {
	.open    = snd_mts64_rawmidi_open,
	.close   = snd_mts64_rawmidi_close,
	.trigger = snd_mts64_rawmidi_input_trigger
};

/* Create and initialize the rawmidi component */
static int __devinit snd_mts64_rawmidi_create(struct snd_card *card)
{
	struct mts64 *mts = card->private_data;
	struct snd_rawmidi *rmidi;
	struct snd_rawmidi_substream *substream;
	struct list_head *list;
	int err;

	err = snd_rawmidi_new(card, CARD_NAME, 0,
			      MTS64_NUM_OUTPUT_PORTS,
			      MTS64_NUM_INPUT_PORTS,
			      &rmidi);
	if (err < 0)
		return err;

	rmidi->private_data = mts;
	strcpy(rmidi->name, CARD_NAME);
	rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT |
			    SNDRV_RAWMIDI_INFO_INPUT |
			    SNDRV_RAWMIDI_INFO_DUPLEX;

	mts->rmidi = rmidi;

	/* register rawmidi ops */
	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT,
			    &snd_mts64_rawmidi_output_ops);
	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT,
			    &snd_mts64_rawmidi_input_ops);

	/* name substreams */
	/* output */
	list_for_each(list,
		      &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT].substreams) {
		substream = list_entry(list, struct snd_rawmidi_substream, list);
		sprintf(substream->name,
			"Miditerminal %d", substream->number+1);
	}
	/* input */
	list_for_each(list,
		      &rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT].substreams) {
		substream = list_entry(list, struct snd_rawmidi_substream, list);
		/* remember input substreams for lookup in the interrupt
		   handler */
		mts->midi_input_substream[substream->number] = substream;
		switch(substream->number) {
		case MTS64_SMPTE_SUBSTREAM:
			strcpy(substream->name, "Miditerminal SMPTE");
			break;
		default:
			sprintf(substream->name,
				"Miditerminal %d", substream->number+1);
		}
	}

	/* controls */
	err = snd_mts64_ctl_create(card, mts);

	return err;
}

/*********************************************************************
 * parport stuff
 *********************************************************************/
/* Parport interrupt handler: reads one status/data word from the
   device; a "port" status byte switches the current input port,
   otherwise the data byte is fed into the active input substream. */
static void snd_mts64_interrupt(void *private)
{
	struct mts64 *mts = ((struct snd_card*)private)->private_data;
	u16 ret;
	u8 status, data;
	struct snd_rawmidi_substream *substream;

	spin_lock(&mts->lock);
	ret = mts64_read(mts->pardev->port);
	data = ret & 0x00ff;
	status = ret >> 8;

	if (status & MTS64_STAT_PORT) {
		mts->current_midi_input_port = mts64_map_midi_input(data);
	} else {
		/* -1 means no input port has been announced yet */
		if (mts->current_midi_input_port == -1)
			goto __out;
		substream = mts->midi_input_substream[mts->current_midi_input_port];
		if (mts->mode[substream->number] & MTS64_MODE_INPUT_TRIGGERED)
			snd_rawmidi_receive(substream, &data, 1);
	}
__out:
	spin_unlock(&mts->lock);
}

/* Temporarily claim the port to check whether an MTS64 is attached. */
static int __devinit snd_mts64_probe_port(struct parport *p)
{
	struct pardevice *pardev;
	int res;

	pardev = parport_register_device(p, DRIVER_NAME,
					 NULL, NULL, NULL,
					 0, NULL);
	if (!pardev)
		return -EIO;

	if (parport_claim(pardev)) {
		parport_unregister_device(pardev);
		return -EIO;
	}

	res = mts64_probe(p);

	parport_release(pardev);
	parport_unregister_device(pardev);

	return res;
}

/* parport attach callback: create a platform device for the port and
   verify that its probe succeeded. */
static void __devinit snd_mts64_attach(struct parport *p)
{
	struct platform_device *device;

	device = platform_device_alloc(PLATFORM_DRIVER, device_count);
	if (!device)
		return;

	/* Temporary assignment to forward the parport */
	platform_set_drvdata(device, p);

	if (platform_device_add(device) < 0) {
		platform_device_put(device);
		return;
	}

	/* Since we dont get the return value of probe
	 * We need to check if device probing succeeded or not */
	if (!platform_get_drvdata(device)) {
		platform_device_unregister(device);
		return;
	}

	/* register device in global table */
	platform_devices[device_count] = device;
	device_count++;
}

static void snd_mts64_detach(struct parport *p)
{
	/* nothing to do here */
}

static struct parport_driver mts64_parport_driver = {
	.name   = "mts64",
	.attach = snd_mts64_attach,
	.detach = snd_mts64_detach
};

/*********************************************************************
 * platform stuff
 *********************************************************************/
/* Card destructor: release/unregister the parport device, then free
   the driver-private data. */
static void snd_mts64_card_private_free(struct snd_card *card)
{
	struct mts64 *mts = card->private_data;
	struct pardevice *pardev = mts->pardev;

	if (pardev) {
		if (mts->pardev_claimed)
			parport_release(pardev);
		parport_unregister_device(pardev);
	}

	snd_mts64_free(mts);
}

/* Platform probe: the parport is forwarded via drvdata by
   snd_mts64_attach(); on success drvdata is replaced by the card so
   the attach callback can tell that probing succeeded. */
static int __devinit snd_mts64_probe(struct platform_device *pdev)
{
	struct pardevice *pardev;
	struct parport *p;
	int dev = pdev->id;
	struct snd_card *card = NULL;
	struct mts64 *mts = NULL;
	int err;

	p = platform_get_drvdata(pdev);
	platform_set_drvdata(pdev, NULL);

	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (!enable[dev])
		return -ENOENT;
	if ((err = snd_mts64_probe_port(p)) < 0)
		return err;

	err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
	if (err < 0) {
		snd_printd("Cannot create card\n");
		return err;
	}
	strcpy(card->driver, DRIVER_NAME);
	strcpy(card->shortname, "ESI " CARD_NAME);
	sprintf(card->longname, "%s at 0x%lx, irq %i",
		card->shortname, p->base, p->irq);

	pardev = parport_register_device(p,                   /* port */
					 DRIVER_NAME,         /* name */
					 NULL,                /* preempt */
					 NULL,                /* wakeup */
					 snd_mts64_interrupt, /* ISR */
					 PARPORT_DEV_EXCL,    /* flags */
					 (void *)card);       /* private */
	if (pardev == NULL) {
		snd_printd("Cannot register pardevice\n");
		err = -EIO;
		goto __err;
	}

	if ((err = snd_mts64_create(card, pardev, &mts)) < 0) {
		snd_printd("Cannot create main component\n");
		parport_unregister_device(pardev);
		goto __err;
	}
	card->private_data = mts;
	/* from here on the pardevice is released by the card destructor */
	card->private_free = snd_mts64_card_private_free;

	if ((err = snd_mts64_rawmidi_create(card)) < 0) {
		snd_printd("Creating Rawmidi component failed\n");
		goto __err;
	}

	/* claim parport */
	if (parport_claim(pardev)) {
		snd_printd("Cannot claim parport 0x%lx\n", pardev->port->base);
		err = -EIO;
		goto __err;
	}
	mts->pardev_claimed = 1;

	/* init device */
	if ((err = mts64_device_init(p)) < 0)
		goto __err;

	platform_set_drvdata(pdev, card);

	snd_card_set_dev(card, &pdev->dev);

	/* At this point card will be usable */
	if ((err = snd_card_register(card)) < 0) {
		snd_printd("Cannot register card\n");
		goto __err;
	}

	snd_printk(KERN_INFO "ESI Miditerminal 4140 on 0x%lx\n", p->base);
	return 0;

__err:
	snd_card_free(card);
	return err;
}

static int __devexit snd_mts64_remove(struct platform_device *pdev)
{
	struct snd_card *card = platform_get_drvdata(pdev);

	if (card)
		snd_card_free(card);

	return 0;
}

static struct platform_driver snd_mts64_driver = {
	.probe  = snd_mts64_probe,
	.remove = __devexit_p(snd_mts64_remove),
	.driver = {
		.name = PLATFORM_DRIVER
	}
};

/*********************************************************************
 * module init stuff
 *********************************************************************/
/* Unregister all platform devices created in snd_mts64_attach(), then
   the platform and parport drivers themselves. */
static void snd_mts64_unregister_all(void)
{
	int i;

	for (i = 0; i < SNDRV_CARDS; ++i) {
		if (platform_devices[i]) {
			platform_device_unregister(platform_devices[i]);
			platform_devices[i] = NULL;
		}
	}
	platform_driver_unregister(&snd_mts64_driver);
	parport_unregister_driver(&mts64_parport_driver);
}

static int __init snd_mts64_module_init(void)
{
	int err;

	if ((err = platform_driver_register(&snd_mts64_driver)) < 0)
		return err;

	/* registering the parport driver synchronously calls
	   snd_mts64_attach() for every present port, incrementing
	   device_count for each successful probe */
	if (parport_register_driver(&mts64_parport_driver) != 0) {
		platform_driver_unregister(&snd_mts64_driver);
		return -EIO;
	}

	if (device_count == 0) {
		/* no hardware found: undo everything */
		snd_mts64_unregister_all();
		return -ENODEV;
	}

	return 0;
}

static void __exit snd_mts64_module_exit(void)
{
	snd_mts64_unregister_all();
}

module_init(snd_mts64_module_init);
module_exit(snd_mts64_module_exit);
gpl-2.0
bilalliberty/android_kernel_htc_villec2-caf-based
drivers/media/video/omap3isp/ispqueue.c
5198
29507
/* * ispqueue.c * * TI OMAP3 ISP - Video buffers queue handling * * Copyright (C) 2010 Nokia Corporation * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <asm/cacheflush.h> #include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/poll.h> #include <linux/scatterlist.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include "ispqueue.h" /* ----------------------------------------------------------------------------- * Video buffers management */ /* * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP * * The typical operation required here is Cache Invalidation across * the (user space) buffer address range. And this _must_ be done * at QBUF stage (and *only* at QBUF). * * We try to use optimal cache invalidation function: * - dmac_map_area: * - used when the number of pages are _low_. * - it becomes quite slow as the number of pages increase. * - for 648x492 viewfinder (150 pages) it takes 1.3 ms. * - for 5 Mpix buffer (2491 pages) it takes between 25-50 ms. * * - flush_cache_all: * - used when the number of pages are _high_. * - time taken in the range of 500-900 us. 
* - has a higher penalty but, as whole dcache + icache is invalidated */ /* * FIXME: dmac_inv_range crashes randomly on the user space buffer * address. Fall back to flush_cache_all for now. */ #define ISP_CACHE_FLUSH_PAGES_MAX 0 static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf) { if (buf->skip_cache) return; if (buf->vbuf.m.userptr == 0 || buf->npages == 0 || buf->npages > ISP_CACHE_FLUSH_PAGES_MAX) flush_cache_all(); else { dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length, DMA_FROM_DEVICE); outer_inv_range(buf->vbuf.m.userptr, buf->vbuf.m.userptr + buf->vbuf.length); } } /* * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped * * Lock the VMAs underlying the given buffer into memory. This avoids the * userspace buffer mapping from being swapped out, making VIPT cache handling * easier. * * Note that the pages will not be freed as the buffers have been locked to * memory using by a call to get_user_pages(), but the userspace mapping could * still disappear if the VMAs are not locked. This is caused by the memory * management code trying to be as lock-less as possible, which results in the * userspace mapping manager not finding out that the pages are locked under * some conditions. */ static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock) { struct vm_area_struct *vma; unsigned long start; unsigned long end; int ret = 0; if (buf->vbuf.memory == V4L2_MEMORY_MMAP) return 0; /* We can be called from workqueue context if the current task dies to * unlock the VMAs. In that case there's no current memory management * context so unlocking can't be performed, but the VMAs have been or * are getting destroyed anyway so it doesn't really matter. */ if (!current || !current->mm) return lock ? 
-EINVAL : 0; start = buf->vbuf.m.userptr; end = buf->vbuf.m.userptr + buf->vbuf.length - 1; down_write(&current->mm->mmap_sem); spin_lock(&current->mm->page_table_lock); do { vma = find_vma(current->mm, start); if (vma == NULL) { ret = -EFAULT; goto out; } if (lock) vma->vm_flags |= VM_LOCKED; else vma->vm_flags &= ~VM_LOCKED; start = vma->vm_end + 1; } while (vma->vm_end < end); if (lock) buf->vm_flags |= VM_LOCKED; else buf->vm_flags &= ~VM_LOCKED; out: spin_unlock(&current->mm->page_table_lock); up_write(&current->mm->mmap_sem); return ret; } /* * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed buffer * * Iterate over the vmalloc'ed area and create a scatter list entry for every * page. */ static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf) { struct scatterlist *sglist; unsigned int npages; unsigned int i; void *addr; addr = buf->vaddr; npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT; sglist = vmalloc(npages * sizeof(*sglist)); if (sglist == NULL) return -ENOMEM; sg_init_table(sglist, npages); for (i = 0; i < npages; ++i, addr += PAGE_SIZE) { struct page *page = vmalloc_to_page(addr); if (page == NULL || PageHighMem(page)) { vfree(sglist); return -EINVAL; } sg_set_page(&sglist[i], page, PAGE_SIZE, 0); } buf->sglen = npages; buf->sglist = sglist; return 0; } /* * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer * * Walk the buffer pages list and create a 1:1 mapping to a scatter list. 
*/ static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf) { struct scatterlist *sglist; unsigned int offset = buf->offset; unsigned int i; sglist = vmalloc(buf->npages * sizeof(*sglist)); if (sglist == NULL) return -ENOMEM; sg_init_table(sglist, buf->npages); for (i = 0; i < buf->npages; ++i) { if (PageHighMem(buf->pages[i])) { vfree(sglist); return -EINVAL; } sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset, offset); offset = 0; } buf->sglen = buf->npages; buf->sglist = sglist; return 0; } /* * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP buffer * * Create a scatter list of physically contiguous pages starting at the buffer * memory physical address. */ static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf) { struct scatterlist *sglist; unsigned int offset = buf->offset; unsigned long pfn = buf->paddr >> PAGE_SHIFT; unsigned int i; sglist = vmalloc(buf->npages * sizeof(*sglist)); if (sglist == NULL) return -ENOMEM; sg_init_table(sglist, buf->npages); for (i = 0; i < buf->npages; ++i, ++pfn) { sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset, offset); /* PFNMAP buffers will not get DMA-mapped, set the DMA address * manually. */ sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset; offset = 0; } buf->sglen = buf->npages; buf->sglist = sglist; return 0; } /* * isp_video_buffer_cleanup - Release pages for a userspace VMA. * * Release pages locked by a call isp_video_buffer_prepare_user and free the * pages table. */ static void isp_video_buffer_cleanup(struct isp_video_buffer *buf) { enum dma_data_direction direction; unsigned int i; if (buf->queue->ops->buffer_cleanup) buf->queue->ops->buffer_cleanup(buf); if (!(buf->vm_flags & VM_PFNMAP)) { direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen, direction); } vfree(buf->sglist); buf->sglist = NULL; buf->sglen = 0; if (buf->pages != NULL) { isp_video_buffer_lock_vma(buf, 0); for (i = 0; i < buf->npages; ++i) page_cache_release(buf->pages[i]); vfree(buf->pages); buf->pages = NULL; } buf->npages = 0; buf->skip_cache = false; } /* * isp_video_buffer_prepare_user - Pin userspace VMA pages to memory. * * This function creates a list of pages for a userspace VMA. The number of * pages is first computed based on the buffer size, and pages are then * retrieved by a call to get_user_pages. * * Pages are pinned to memory by get_user_pages, making them available for DMA * transfers. However, due to memory management optimization, it seems the * get_user_pages doesn't guarantee that the pinned pages will not be written * to swap and removed from the userspace mapping(s). When this happens, a page * fault can be generated when accessing those unmapped pages. * * If the fault is triggered by a page table walk caused by VIPT cache * management operations, the page fault handler might oops if the MM semaphore * is held, as it can't handle kernel page faults in that case. To fix that, a * fixup entry needs to be added to the cache management code, or the userspace * VMA must be locked to avoid removing pages from the userspace mapping in the * first place. * * If the number of pages retrieved is smaller than the number required by the * buffer size, the function returns -EFAULT. 
*/ static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf) { unsigned long data; unsigned int first; unsigned int last; int ret; data = buf->vbuf.m.userptr; first = (data & PAGE_MASK) >> PAGE_SHIFT; last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT; buf->offset = data & ~PAGE_MASK; buf->npages = last - first + 1; buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0])); if (buf->pages == NULL) return -ENOMEM; down_read(&current->mm->mmap_sem); ret = get_user_pages(current, current->mm, data & PAGE_MASK, buf->npages, buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0, buf->pages, NULL); up_read(&current->mm->mmap_sem); if (ret != buf->npages) { buf->npages = ret < 0 ? 0 : ret; isp_video_buffer_cleanup(buf); return -EFAULT; } ret = isp_video_buffer_lock_vma(buf, 1); if (ret < 0) isp_video_buffer_cleanup(buf); return ret; } /* * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer * * Userspace VM_PFNMAP buffers are supported only if they are contiguous in * memory and if they span a single VMA. * * Return 0 if the buffer is valid, or -EFAULT otherwise. 
*/ static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf) { struct vm_area_struct *vma; unsigned long prev_pfn; unsigned long this_pfn; unsigned long start; unsigned long end; dma_addr_t pa; int ret = -EFAULT; start = buf->vbuf.m.userptr; end = buf->vbuf.m.userptr + buf->vbuf.length - 1; buf->offset = start & ~PAGE_MASK; buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; buf->pages = NULL; down_read(&current->mm->mmap_sem); vma = find_vma(current->mm, start); if (vma == NULL || vma->vm_end < end) goto done; for (prev_pfn = 0; start <= end; start += PAGE_SIZE) { ret = follow_pfn(vma, start, &this_pfn); if (ret) goto done; if (prev_pfn == 0) pa = this_pfn << PAGE_SHIFT; else if (this_pfn != prev_pfn + 1) { ret = -EFAULT; goto done; } prev_pfn = this_pfn; } buf->paddr = pa + buf->offset; ret = 0; done: up_read(&current->mm->mmap_sem); return ret; } /* * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address * * This function locates the VMAs for the buffer's userspace address and checks * that their flags match. The only flag that we need to care for at the moment * is VM_PFNMAP. * * The buffer vm_flags field is set to the first VMA flags. * * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs * have incompatible flags. 
*/ static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf) { struct vm_area_struct *vma; pgprot_t vm_page_prot; unsigned long start; unsigned long end; int ret = -EFAULT; start = buf->vbuf.m.userptr; end = buf->vbuf.m.userptr + buf->vbuf.length - 1; down_read(&current->mm->mmap_sem); do { vma = find_vma(current->mm, start); if (vma == NULL) goto done; if (start == buf->vbuf.m.userptr) { buf->vm_flags = vma->vm_flags; vm_page_prot = vma->vm_page_prot; } if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP) goto done; if (vm_page_prot != vma->vm_page_prot) goto done; start = vma->vm_end + 1; } while (vma->vm_end < end); /* Skip cache management to enhance performances for non-cached or * write-combining buffers. */ if (vm_page_prot == pgprot_noncached(vm_page_prot) || vm_page_prot == pgprot_writecombine(vm_page_prot)) buf->skip_cache = true; ret = 0; done: up_read(&current->mm->mmap_sem); return ret; } /* * isp_video_buffer_prepare - Make a buffer ready for operation * * Preparing a buffer involves: * * - validating VMAs (userspace buffers only) * - locking pages and VMAs into memory (userspace buffers only) * - building page and scatter-gather lists * - mapping buffers for DMA operation * - performing driver-specific preparation * * The function must be called in userspace context with a valid mm context * (this excludes cleanup paths such as sys_close when the userspace process * segfaults). 
*/ static int isp_video_buffer_prepare(struct isp_video_buffer *buf) { enum dma_data_direction direction; int ret; switch (buf->vbuf.memory) { case V4L2_MEMORY_MMAP: ret = isp_video_buffer_sglist_kernel(buf); break; case V4L2_MEMORY_USERPTR: ret = isp_video_buffer_prepare_vm_flags(buf); if (ret < 0) return ret; if (buf->vm_flags & VM_PFNMAP) { ret = isp_video_buffer_prepare_pfnmap(buf); if (ret < 0) return ret; ret = isp_video_buffer_sglist_pfnmap(buf); } else { ret = isp_video_buffer_prepare_user(buf); if (ret < 0) return ret; ret = isp_video_buffer_sglist_user(buf); } break; default: return -EINVAL; } if (ret < 0) goto done; if (!(buf->vm_flags & VM_PFNMAP)) { direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? DMA_FROM_DEVICE : DMA_TO_DEVICE; ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen, direction); if (ret != buf->sglen) { ret = -EFAULT; goto done; } } if (buf->queue->ops->buffer_prepare) ret = buf->queue->ops->buffer_prepare(buf); done: if (ret < 0) { isp_video_buffer_cleanup(buf); return ret; } return ret; } /* * isp_video_queue_query - Query the status of a given buffer * * Locking: must be called with the queue lock held. */ static void isp_video_buffer_query(struct isp_video_buffer *buf, struct v4l2_buffer *vbuf) { memcpy(vbuf, &buf->vbuf, sizeof(*vbuf)); if (buf->vma_use_count) vbuf->flags |= V4L2_BUF_FLAG_MAPPED; switch (buf->state) { case ISP_BUF_STATE_ERROR: vbuf->flags |= V4L2_BUF_FLAG_ERROR; case ISP_BUF_STATE_DONE: vbuf->flags |= V4L2_BUF_FLAG_DONE; case ISP_BUF_STATE_QUEUED: case ISP_BUF_STATE_ACTIVE: vbuf->flags |= V4L2_BUF_FLAG_QUEUED; break; case ISP_BUF_STATE_IDLE: default: break; } } /* * isp_video_buffer_wait - Wait for a buffer to be ready * * In non-blocking mode, return immediately with 0 if the buffer is ready or * -EAGAIN if the buffer is in the QUEUED or ACTIVE state. * * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait * queue using the same condition. 
*/ static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking) { if (nonblocking) { return (buf->state != ISP_BUF_STATE_QUEUED && buf->state != ISP_BUF_STATE_ACTIVE) ? 0 : -EAGAIN; } return wait_event_interruptible(buf->wait, buf->state != ISP_BUF_STATE_QUEUED && buf->state != ISP_BUF_STATE_ACTIVE); } /* ----------------------------------------------------------------------------- * Queue management */ /* * isp_video_queue_free - Free video buffers memory * * Buffers can only be freed if the queue isn't streaming and if no buffer is * mapped to userspace. Return -EBUSY if those conditions aren't statisfied. * * This function must be called with the queue lock held. */ static int isp_video_queue_free(struct isp_video_queue *queue) { unsigned int i; if (queue->streaming) return -EBUSY; for (i = 0; i < queue->count; ++i) { if (queue->buffers[i]->vma_use_count != 0) return -EBUSY; } for (i = 0; i < queue->count; ++i) { struct isp_video_buffer *buf = queue->buffers[i]; isp_video_buffer_cleanup(buf); vfree(buf->vaddr); buf->vaddr = NULL; kfree(buf); queue->buffers[i] = NULL; } INIT_LIST_HEAD(&queue->queue); queue->count = 0; return 0; } /* * isp_video_queue_alloc - Allocate video buffers memory * * This function must be called with the queue lock held. */ static int isp_video_queue_alloc(struct isp_video_queue *queue, unsigned int nbuffers, unsigned int size, enum v4l2_memory memory) { struct isp_video_buffer *buf; unsigned int i; void *mem; int ret; /* Start by freeing the buffers. */ ret = isp_video_queue_free(queue); if (ret < 0) return ret; /* Bail out of no buffers should be allocated. */ if (nbuffers == 0) return 0; /* Initialize the allocated buffers. */ for (i = 0; i < nbuffers; ++i) { buf = kzalloc(queue->bufsize, GFP_KERNEL); if (buf == NULL) break; if (memory == V4L2_MEMORY_MMAP) { /* Allocate video buffers memory for mmap mode. Align * the size to the page size. 
*/ mem = vmalloc_32_user(PAGE_ALIGN(size)); if (mem == NULL) { kfree(buf); break; } buf->vbuf.m.offset = i * PAGE_ALIGN(size); buf->vaddr = mem; } buf->vbuf.index = i; buf->vbuf.length = size; buf->vbuf.type = queue->type; buf->vbuf.field = V4L2_FIELD_NONE; buf->vbuf.memory = memory; buf->queue = queue; init_waitqueue_head(&buf->wait); queue->buffers[i] = buf; } if (i == 0) return -ENOMEM; queue->count = i; return nbuffers; } /** * omap3isp_video_queue_cleanup - Clean up the video buffers queue * @queue: Video buffers queue * * Free all allocated resources and clean up the video buffers queue. The queue * must not be busy (no ongoing video stream) and buffers must have been * unmapped. * * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been * unmapped. */ int omap3isp_video_queue_cleanup(struct isp_video_queue *queue) { return isp_video_queue_free(queue); } /** * omap3isp_video_queue_init - Initialize the video buffers queue * @queue: Video buffers queue * @type: V4L2 buffer type (capture or output) * @ops: Driver-specific queue operations * @dev: Device used for DMA operations * @bufsize: Size of the driver-specific buffer structure * * Initialize the video buffers queue with the supplied parameters. * * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet. * * Buffer objects will be allocated using the given buffer size to allow room * for driver-specific fields. Driver-specific buffer structures must start * with a struct isp_video_buffer field. Drivers with no driver-specific buffer * structure must pass the size of the isp_video_buffer structure in the bufsize * parameter. * * Return 0 on success. 
*/ int omap3isp_video_queue_init(struct isp_video_queue *queue, enum v4l2_buf_type type, const struct isp_video_queue_operations *ops, struct device *dev, unsigned int bufsize) { INIT_LIST_HEAD(&queue->queue); mutex_init(&queue->lock); spin_lock_init(&queue->irqlock); queue->type = type; queue->ops = ops; queue->dev = dev; queue->bufsize = bufsize; return 0; } /* ----------------------------------------------------------------------------- * V4L2 operations */ /** * omap3isp_video_queue_reqbufs - Allocate video buffers memory * * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It * allocated video buffer objects and, for MMAP buffers, buffer memory. * * If the number of buffers is 0, all buffers are freed and the function returns * without performing any allocation. * * If the number of buffers is not 0, currently allocated buffers (if any) are * freed and the requested number of buffers are allocated. Depending on * driver-specific requirements and on memory availability, a number of buffer * smaller or bigger than requested can be allocated. This isn't considered as * an error. 
* * Return 0 on success or one of the following error codes: * * -EINVAL if the buffer type or index are invalid * -EBUSY if the queue is busy (streaming or buffers mapped) * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition */ int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue, struct v4l2_requestbuffers *rb) { unsigned int nbuffers = rb->count; unsigned int size; int ret; if (rb->type != queue->type) return -EINVAL; queue->ops->queue_prepare(queue, &nbuffers, &size); if (size == 0) return -EINVAL; nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS); mutex_lock(&queue->lock); ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory); if (ret < 0) goto done; rb->count = ret; ret = 0; done: mutex_unlock(&queue->lock); return ret; } /** * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue * * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It * returns the status of a given video buffer. * * Return 0 on success or -EINVAL if the buffer type or index are invalid. */ int omap3isp_video_queue_querybuf(struct isp_video_queue *queue, struct v4l2_buffer *vbuf) { struct isp_video_buffer *buf; int ret = 0; if (vbuf->type != queue->type) return -EINVAL; mutex_lock(&queue->lock); if (vbuf->index >= queue->count) { ret = -EINVAL; goto done; } buf = queue->buffers[vbuf->index]; isp_video_buffer_query(buf, vbuf); done: mutex_unlock(&queue->lock); return ret; } /** * omap3isp_video_queue_qbuf - Queue a buffer * * This function is intended to be used as a VIDIOC_QBUF ioctl handler. * * The v4l2_buffer structure passed from userspace is first sanity tested. If * sane, the buffer is then processed and added to the main queue and, if the * queue is streaming, to the IRQ queue. * * Before being enqueued, USERPTR buffers are checked for address changes. If * the buffer has a different userspace address, the old memory area is unlocked * and the new memory area is locked. 
*/ int omap3isp_video_queue_qbuf(struct isp_video_queue *queue, struct v4l2_buffer *vbuf) { struct isp_video_buffer *buf; unsigned long flags; int ret = -EINVAL; if (vbuf->type != queue->type) goto done; mutex_lock(&queue->lock); if (vbuf->index >= queue->count) goto done; buf = queue->buffers[vbuf->index]; if (vbuf->memory != buf->vbuf.memory) goto done; if (buf->state != ISP_BUF_STATE_IDLE) goto done; if (vbuf->memory == V4L2_MEMORY_USERPTR && vbuf->length < buf->vbuf.length) goto done; if (vbuf->memory == V4L2_MEMORY_USERPTR && vbuf->m.userptr != buf->vbuf.m.userptr) { isp_video_buffer_cleanup(buf); buf->vbuf.m.userptr = vbuf->m.userptr; buf->prepared = 0; } if (!buf->prepared) { ret = isp_video_buffer_prepare(buf); if (ret < 0) goto done; buf->prepared = 1; } isp_video_buffer_cache_sync(buf); buf->state = ISP_BUF_STATE_QUEUED; list_add_tail(&buf->stream, &queue->queue); if (queue->streaming) { spin_lock_irqsave(&queue->irqlock, flags); queue->ops->buffer_queue(buf); spin_unlock_irqrestore(&queue->irqlock, flags); } ret = 0; done: mutex_unlock(&queue->lock); return ret; } /** * omap3isp_video_queue_dqbuf - Dequeue a buffer * * This function is intended to be used as a VIDIOC_DQBUF ioctl handler. * * The v4l2_buffer structure passed from userspace is first sanity tested. If * sane, the buffer is then processed and added to the main queue and, if the * queue is streaming, to the IRQ queue. * * Before being enqueued, USERPTR buffers are checked for address changes. If * the buffer has a different userspace address, the old memory area is unlocked * and the new memory area is locked. 
 */
int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
			       struct v4l2_buffer *vbuf, int nonblocking)
{
	struct isp_video_buffer *buf;
	int ret;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (list_empty(&queue->queue)) {
		ret = -EINVAL;
		goto done;
	}

	/* Wait for the oldest queued buffer to reach the DONE/ERROR state. */
	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
	ret = isp_video_buffer_wait(buf, nonblocking);
	if (ret < 0)
		goto done;

	list_del(&buf->stream);

	/* Report the buffer status and return it to the idle state. */
	isp_video_buffer_query(buf, vbuf);
	buf->state = ISP_BUF_STATE_IDLE;
	vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;

done:
	mutex_unlock(&queue->lock);
	return ret;
}

/**
 * omap3isp_video_queue_streamon - Start streaming
 *
 * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
 * starts streaming on the queue and calls the buffer_queue operation for all
 * queued buffers.
 *
 * Return 0 on success.
 */
int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned long flags;

	mutex_lock(&queue->lock);

	/* Starting an already-streaming queue is a no-op. */
	if (queue->streaming)
		goto done;

	queue->streaming = 1;

	/* Hand all buffers queued before STREAMON to the driver. */
	spin_lock_irqsave(&queue->irqlock, flags);
	list_for_each_entry(buf, &queue->queue, stream)
		queue->ops->buffer_queue(buf);
	spin_unlock_irqrestore(&queue->irqlock, flags);

done:
	mutex_unlock(&queue->lock);
	return 0;
}

/**
 * omap3isp_video_queue_streamoff - Stop streaming
 *
 * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
 * stops streaming on the queue and wakes up all the buffers.
 *
 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
 * delayed works before calling this function to make sure no buffer will be
 * touched by the driver and/or hardware.
 */
void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned long flags;
	unsigned int i;

	mutex_lock(&queue->lock);

	if (!queue->streaming)
		goto done;

	queue->streaming = 0;

	/*
	 * Force every buffer back to the idle state, waking up any waiter
	 * still blocked on a buffer owned by the hardware (ACTIVE).
	 */
	spin_lock_irqsave(&queue->irqlock, flags);
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];

		if (buf->state == ISP_BUF_STATE_ACTIVE)
			wake_up(&buf->wait);

		buf->state = ISP_BUF_STATE_IDLE;
	}
	spin_unlock_irqrestore(&queue->irqlock, flags);

	/* Drop all buffers from the stream queue. */
	INIT_LIST_HEAD(&queue->queue);

done:
	mutex_unlock(&queue->lock);
}

/**
 * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
 *
 * This function is intended to be used with suspend/resume operations. It
 * discards all 'done' buffers as they would be too old to be requested after
 * resume.
 *
 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
 * delayed works before calling this function to make sure no buffer will be
 * touched by the driver and/or hardware.
 */
void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned int i;

	mutex_lock(&queue->lock);

	if (!queue->streaming)
		goto done;

	/* Downgrade DONE buffers to ERROR so userspace knows to drop them. */
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];

		if (buf->state == ISP_BUF_STATE_DONE)
			buf->state = ISP_BUF_STATE_ERROR;
	}

done:
	mutex_unlock(&queue->lock);
}

/* Track the number of userspace mappings of a buffer via the VMA callbacks.
 * NOTE(review): vma_use_count is updated without an explicit lock here —
 * presumably serialized by mmap_sem; confirm.
 */
static void isp_video_queue_vm_open(struct vm_area_struct *vma)
{
	struct isp_video_buffer *buf = vma->vm_private_data;

	buf->vma_use_count++;
}

static void isp_video_queue_vm_close(struct vm_area_struct *vma)
{
	struct isp_video_buffer *buf = vma->vm_private_data;

	buf->vma_use_count--;
}

static const struct vm_operations_struct isp_video_queue_vm_ops = {
	.open = isp_video_queue_vm_open,
	.close = isp_video_queue_vm_close,
};

/**
 * omap3isp_video_queue_mmap - Map buffers to userspace
 *
 * This function is intended to be used as an mmap() file operation handler. It
 * maps a buffer to userspace based on the VMA offset.
 *
 * Only buffers of memory type MMAP are supported.
 */
int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
			      struct vm_area_struct *vma)
{
	struct isp_video_buffer *uninitialized_var(buf);
	unsigned long size;
	unsigned int i;
	int ret = 0;

	mutex_lock(&queue->lock);

	/* Locate the buffer whose mem offset matches the VMA offset. */
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];
		if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
			break;
	}

	if (i == queue->count) {
		ret = -EINVAL;
		goto done;
	}

	size = vma->vm_end - vma->vm_start;

	/* Only MMAP buffers can be mapped, and only in their entirety. */
	if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
	    size != PAGE_ALIGN(buf->vbuf.length)) {
		ret = -EINVAL;
		goto done;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret < 0)
		goto done;

	vma->vm_ops = &isp_video_queue_vm_ops;
	vma->vm_private_data = buf;
	/* Take the initial mapping reference on the buffer. */
	isp_video_queue_vm_open(vma);

done:
	mutex_unlock(&queue->lock);
	return ret;
}

/**
 * omap3isp_video_queue_poll - Poll video queue state
 *
 * This function is intended to be used as a poll() file operation handler. It
 * polls the state of the video buffer at the front of the queue and returns an
 * events mask.
 *
 * If no buffer is present at the front of the queue, POLLERR is returned.
 */
unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
				       struct file *file, poll_table *wait)
{
	struct isp_video_buffer *buf;
	unsigned int mask = 0;

	mutex_lock(&queue->lock);
	if (list_empty(&queue->queue)) {
		mask |= POLLERR;
		goto done;
	}
	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);

	/* Wait on the oldest buffer; report readiness once it completes. */
	poll_wait(file, &buf->wait, wait);
	if (buf->state == ISP_BUF_STATE_DONE ||
	    buf->state == ISP_BUF_STATE_ERROR) {
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			mask |= POLLIN | POLLRDNORM;
		else
			mask |= POLLOUT | POLLWRNORM;
	}

done:
	mutex_unlock(&queue->lock);
	return mask;
}
gpl-2.0
chasmodo/android_kernel_oneplus_msm8974
net/nfc/af_nfc.c
7502
2491
/* * Copyright (C) 2011 Instituto Nokia de Tecnologia * * Authors: * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> * Lauro Ramos Venancio <lauro.venancio@openbossa.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/nfc.h> #include <linux/module.h> #include "nfc.h" static DEFINE_RWLOCK(proto_tab_lock); static const struct nfc_protocol *proto_tab[NFC_SOCKPROTO_MAX]; static int nfc_sock_create(struct net *net, struct socket *sock, int proto, int kern) { int rc = -EPROTONOSUPPORT; if (net != &init_net) return -EAFNOSUPPORT; if (proto < 0 || proto >= NFC_SOCKPROTO_MAX) return -EINVAL; read_lock(&proto_tab_lock); if (proto_tab[proto] && try_module_get(proto_tab[proto]->owner)) { rc = proto_tab[proto]->create(net, sock, proto_tab[proto]); module_put(proto_tab[proto]->owner); } read_unlock(&proto_tab_lock); return rc; } static struct net_proto_family nfc_sock_family_ops = { .owner = THIS_MODULE, .family = PF_NFC, .create = nfc_sock_create, }; int nfc_proto_register(const struct nfc_protocol *nfc_proto) { int rc; if (nfc_proto->id < 0 || nfc_proto->id >= NFC_SOCKPROTO_MAX) return -EINVAL; rc = proto_register(nfc_proto->proto, 0); if (rc) return rc; write_lock(&proto_tab_lock); if (proto_tab[nfc_proto->id]) rc = -EBUSY; else proto_tab[nfc_proto->id] = nfc_proto; write_unlock(&proto_tab_lock); return rc; } 
EXPORT_SYMBOL(nfc_proto_register); void nfc_proto_unregister(const struct nfc_protocol *nfc_proto) { write_lock(&proto_tab_lock); proto_tab[nfc_proto->id] = NULL; write_unlock(&proto_tab_lock); proto_unregister(nfc_proto->proto); } EXPORT_SYMBOL(nfc_proto_unregister); int __init af_nfc_init(void) { return sock_register(&nfc_sock_family_ops); } void af_nfc_exit(void) { sock_unregister(PF_NFC); }
gpl-2.0
gokulnatha/GT-I9505
drivers/s390/char/sclp_rw.c
9806
12923
/* * driver: reading from and writing to system console on S/390 via SCLP * * Copyright IBM Corp. 1999, 2009 * * Author(s): Martin Peschke <mpeschke@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> */ #include <linux/kmod.h> #include <linux/types.h> #include <linux/err.h> #include <linux/string.h> #include <linux/spinlock.h> #include <linux/ctype.h> #include <asm/uaccess.h> #include "sclp.h" #include "sclp_rw.h" /* * The room for the SCCB (only for writing) is not equal to a pages size * (as it is specified as the maximum size in the SCLP documentation) * because of the additional data structure described above. */ #define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer)) static void sclp_rw_pm_event(struct sclp_register *reg, enum sclp_pm_event sclp_pm_event) { sclp_console_pm_event(sclp_pm_event); } /* Event type structure for write message and write priority message */ static struct sclp_register sclp_rw_event = { .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK, .pm_event_fn = sclp_rw_pm_event, }; /* * Setup a sclp write buffer. Gets a page as input (4K) and returns * a pointer to a struct sclp_buffer structure that is located at the * end of the input page. This reduces the buffer space by a few * bytes but simplifies things. */ struct sclp_buffer * sclp_make_buffer(void *page, unsigned short columns, unsigned short htab) { struct sclp_buffer *buffer; struct write_sccb *sccb; sccb = (struct write_sccb *) page; /* * We keep the struct sclp_buffer structure at the end * of the sccb page. 
*/ buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1; buffer->sccb = sccb; buffer->retry_count = 0; buffer->mto_number = 0; buffer->mto_char_sum = 0; buffer->current_line = NULL; buffer->current_length = 0; buffer->columns = columns; buffer->htab = htab; /* initialize sccb */ memset(sccb, 0, sizeof(struct write_sccb)); sccb->header.length = sizeof(struct write_sccb); sccb->msg_buf.header.length = sizeof(struct msg_buf); sccb->msg_buf.header.type = EVTYP_MSG; sccb->msg_buf.mdb.header.length = sizeof(struct mdb); sccb->msg_buf.mdb.header.type = 1; sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */ sccb->msg_buf.mdb.header.revision_code = 1; sccb->msg_buf.mdb.go.length = sizeof(struct go); sccb->msg_buf.mdb.go.type = 1; return buffer; } /* * Return a pointer to the original page that has been used to create * the buffer. */ void * sclp_unmake_buffer(struct sclp_buffer *buffer) { return buffer->sccb; } /* * Initialize a new Message Text Object (MTO) at the end of the provided buffer * with enough room for max_len characters. Return 0 on success. */ static int sclp_initialize_mto(struct sclp_buffer *buffer, int max_len) { struct write_sccb *sccb; struct mto *mto; int mto_size; /* max size of new Message Text Object including message text */ mto_size = sizeof(struct mto) + max_len; /* check if current buffer sccb can contain the mto */ sccb = buffer->sccb; if ((MAX_SCCB_ROOM - sccb->header.length) < mto_size) return -ENOMEM; /* find address of new message text object */ mto = (struct mto *)(((addr_t) sccb) + sccb->header.length); /* * fill the new Message-Text Object, * starting behind the former last byte of the SCCB */ memset(mto, 0, sizeof(struct mto)); mto->length = sizeof(struct mto); mto->type = 4; /* message text object */ mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */ /* set pointer to first byte after struct mto. 
*/ buffer->current_line = (char *) (mto + 1); buffer->current_length = 0; return 0; } /* * Finalize MTO initialized by sclp_initialize_mto(), updating the sizes of * MTO, enclosing MDB, event buffer and SCCB. */ static void sclp_finalize_mto(struct sclp_buffer *buffer) { struct write_sccb *sccb; struct mto *mto; int str_len, mto_size; str_len = buffer->current_length; buffer->current_line = NULL; buffer->current_length = 0; /* real size of new Message Text Object including message text */ mto_size = sizeof(struct mto) + str_len; /* find address of new message text object */ sccb = buffer->sccb; mto = (struct mto *)(((addr_t) sccb) + sccb->header.length); /* set size of message text object */ mto->length = mto_size; /* * update values of sizes * (SCCB, Event(Message) Buffer, Message Data Block) */ sccb->header.length += mto_size; sccb->msg_buf.header.length += mto_size; sccb->msg_buf.mdb.header.length += mto_size; /* * count number of buffered messages (= number of Message Text * Objects) and number of buffered characters * for the SCCB currently used for buffering and at all */ buffer->mto_number++; buffer->mto_char_sum += str_len; } /* * processing of a message including escape characters, * returns number of characters written to the output sccb * ("processed" means that is not guaranteed that the character have already * been sent to the SCLP but that it will be done at least next time the SCLP * is not busy) */ int sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count) { int spaces, i_msg; int rc; /* * parse msg for escape sequences (\t,\v ...) and put formated * msg into an mto (created by sclp_initialize_mto). * * We have to do this work ourselfs because there is no support for * these characters on the native machine and only partial support * under VM (Why does VM interpret \n but the native machine doesn't ?) 
* * Depending on i/o-control setting the message is always written * immediately or we wait for a final new line maybe coming with the * next message. Besides we avoid a buffer overrun by writing its * content. * * RESTRICTIONS: * * \r and \b work within one line because we are not able to modify * previous output that have already been accepted by the SCLP. * * \t combined with following \r is not correctly represented because * \t is expanded to some spaces but \r does not know about a * previous \t and decreases the current position by one column. * This is in order to a slim and quick implementation. */ for (i_msg = 0; i_msg < count; i_msg++) { switch (msg[i_msg]) { case '\n': /* new line, line feed (ASCII) */ /* check if new mto needs to be created */ if (buffer->current_line == NULL) { rc = sclp_initialize_mto(buffer, 0); if (rc) return i_msg; } sclp_finalize_mto(buffer); break; case '\a': /* bell, one for several times */ /* set SCLP sound alarm bit in General Object */ buffer->sccb->msg_buf.mdb.go.general_msg_flags |= GNRLMSGFLGS_SNDALRM; break; case '\t': /* horizontal tabulator */ /* check if new mto needs to be created */ if (buffer->current_line == NULL) { rc = sclp_initialize_mto(buffer, buffer->columns); if (rc) return i_msg; } /* "go to (next htab-boundary + 1, same line)" */ do { if (buffer->current_length >= buffer->columns) break; /* ok, add a blank */ *buffer->current_line++ = 0x40; buffer->current_length++; } while (buffer->current_length % buffer->htab); break; case '\f': /* form feed */ case '\v': /* vertical tabulator */ /* "go to (actual column, actual line + 1)" */ /* = new line, leading spaces */ if (buffer->current_line != NULL) { spaces = buffer->current_length; sclp_finalize_mto(buffer); rc = sclp_initialize_mto(buffer, buffer->columns); if (rc) return i_msg; memset(buffer->current_line, 0x40, spaces); buffer->current_line += spaces; buffer->current_length = spaces; } else { /* one an empty line this is the same as \n */ rc = 
sclp_initialize_mto(buffer, buffer->columns); if (rc) return i_msg; sclp_finalize_mto(buffer); } break; case '\b': /* backspace */ /* "go to (actual column - 1, actual line)" */ /* decrement counter indicating position, */ /* do not remove last character */ if (buffer->current_line != NULL && buffer->current_length > 0) { buffer->current_length--; buffer->current_line--; } break; case 0x00: /* end of string */ /* transfer current line to SCCB */ if (buffer->current_line != NULL) sclp_finalize_mto(buffer); /* skip the rest of the message including the 0 byte */ i_msg = count - 1; break; default: /* no escape character */ /* do not output unprintable characters */ if (!isprint(msg[i_msg])) break; /* check if new mto needs to be created */ if (buffer->current_line == NULL) { rc = sclp_initialize_mto(buffer, buffer->columns); if (rc) return i_msg; } *buffer->current_line++ = sclp_ascebc(msg[i_msg]); buffer->current_length++; break; } /* check if current mto is full */ if (buffer->current_line != NULL && buffer->current_length >= buffer->columns) sclp_finalize_mto(buffer); } /* return number of processed characters */ return i_msg; } /* * Return the number of free bytes in the sccb */ int sclp_buffer_space(struct sclp_buffer *buffer) { int count; count = MAX_SCCB_ROOM - buffer->sccb->header.length; if (buffer->current_line != NULL) count -= sizeof(struct mto) + buffer->current_length; return count; } /* * Return number of characters in buffer */ int sclp_chars_in_buffer(struct sclp_buffer *buffer) { int count; count = buffer->mto_char_sum; if (buffer->current_line != NULL) count += buffer->current_length; return count; } /* * sets or provides some values that influence the drivers behaviour */ void sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns) { buffer->columns = columns; if (buffer->current_line != NULL && buffer->current_length > buffer->columns) sclp_finalize_mto(buffer); } void sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab) { 
buffer->htab = htab; } /* * called by sclp_console_init and/or sclp_tty_init */ int sclp_rw_init(void) { static int init_done = 0; int rc; if (init_done) return 0; rc = sclp_register(&sclp_rw_event); if (rc == 0) init_done = 1; return rc; } #define SCLP_BUFFER_MAX_RETRY 1 /* * second half of Write Event Data-function that has to be done after * interruption indicating completion of Service Call. */ static void sclp_writedata_callback(struct sclp_req *request, void *data) { int rc; struct sclp_buffer *buffer; struct write_sccb *sccb; buffer = (struct sclp_buffer *) data; sccb = buffer->sccb; if (request->status == SCLP_REQ_FAILED) { if (buffer->callback != NULL) buffer->callback(buffer, -EIO); return; } /* check SCLP response code and choose suitable action */ switch (sccb->header.response_code) { case 0x0020 : /* Normal completion, buffer processed, message(s) sent */ rc = 0; break; case 0x0340: /* Contained SCLP equipment check */ if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) { rc = -EIO; break; } /* remove processed buffers and requeue rest */ if (sclp_remove_processed((struct sccb_header *) sccb) > 0) { /* not all buffers were processed */ sccb->header.response_code = 0x0000; buffer->request.status = SCLP_REQ_FILLED; rc = sclp_add_request(request); if (rc == 0) return; } else rc = 0; break; case 0x0040: /* SCLP equipment check */ case 0x05f0: /* Target resource in improper state */ if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) { rc = -EIO; break; } /* retry request */ sccb->header.response_code = 0x0000; buffer->request.status = SCLP_REQ_FILLED; rc = sclp_add_request(request); if (rc == 0) return; break; default: if (sccb->header.response_code == 0x71f0) rc = -ENOMEM; else rc = -EINVAL; break; } if (buffer->callback != NULL) buffer->callback(buffer, rc); } /* * Setup the request structure in the struct sclp_buffer to do SCLP Write * Event Data and pass the request to the core SCLP loop. Return zero on * success, non-zero otherwise. 
*/ int sclp_emit_buffer(struct sclp_buffer *buffer, void (*callback)(struct sclp_buffer *, int)) { struct write_sccb *sccb; /* add current line if there is one */ if (buffer->current_line != NULL) sclp_finalize_mto(buffer); /* Are there messages in the output buffer ? */ if (buffer->mto_number == 0) return -EIO; sccb = buffer->sccb; if (sclp_rw_event.sclp_receive_mask & EVTYP_MSG_MASK) /* Use normal write message */ sccb->msg_buf.header.type = EVTYP_MSG; else if (sclp_rw_event.sclp_receive_mask & EVTYP_PMSGCMD_MASK) /* Use write priority message */ sccb->msg_buf.header.type = EVTYP_PMSGCMD; else return -ENOSYS; buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; buffer->request.status = SCLP_REQ_FILLED; buffer->request.callback = sclp_writedata_callback; buffer->request.callback_data = buffer; buffer->request.sccb = sccb; buffer->callback = callback; return sclp_add_request(&buffer->request); }
gpl-2.0
CyanogenMod/android_kernel_lge_g3
sound/firewire/fcp.c
10574
6133
/*
 * Function Control Protocol (IEC 61883-1) helper functions
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 * Licensed under the terms of the GNU General Public License, version 2.
 */

#include <linux/device.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include "fcp.h"
#include "lib.h"

/* Command/transaction set identifier of AV/C frames (upper nibble). */
#define CTS_AVC		0x00

/* Number of timeouts tolerated before a transaction is failed. */
#define ERROR_RETRIES	3
/* Delay before retrying after a bus reset, in milliseconds. */
#define ERROR_DELAY_MS	5
/* Per-attempt FCP response timeout, in milliseconds. */
#define FCP_TIMEOUT_MS	125

/* Protects the global list of in-flight transactions below. */
static DEFINE_SPINLOCK(transactions_lock);
static LIST_HEAD(transactions);

enum fcp_state {
	STATE_PENDING,		/* command sent, waiting for the response */
	STATE_BUS_RESET,	/* interrupted by a bus reset */
	STATE_COMPLETE,		/* matching response received */
};

struct fcp_transaction {
	struct list_head list;			/* entry in 'transactions' */
	struct fw_unit *unit;			/* target unit */
	void *response_buffer;			/* caller's response buffer */
	unsigned int response_size;		/* in: max size; out: actual */
	unsigned int response_match_bytes;	/* bitmap of bytes to match */
	enum fcp_state state;
	wait_queue_head_t wait;			/* woken on state changes */
};

/**
 * fcp_avc_transaction - send an AV/C command and wait for its response
 * @unit: a unit on the target device
 * @command: a buffer containing the command frame; must be DMA-able
 * @command_size: the size of @command
 * @response: a buffer for the response frame
 * @response_size: the maximum size of @response
 * @response_match_bytes: a bitmap specifying the bytes used to detect the
 *                        correct response frame
 *
 * This function sends a FCP command frame to the target and waits for the
 * corresponding response frame to be returned.
 *
 * Because it is possible for multiple FCP transactions to be active at the
 * same time, the correct response frame is detected by the value of certain
 * bytes. These bytes must be set in @response before calling this function,
 * and the corresponding bits must be set in @response_match_bytes.
 *
 * @command and @response can point to the same buffer.
 *
 * Asynchronous operation (INTERIM, NOTIFY) is not supported at the moment.
 *
 * Returns the actual size of the response frame, or a negative error code.
*/ int fcp_avc_transaction(struct fw_unit *unit, const void *command, unsigned int command_size, void *response, unsigned int response_size, unsigned int response_match_bytes) { struct fcp_transaction t; int tcode, ret, tries = 0; t.unit = unit; t.response_buffer = response; t.response_size = response_size; t.response_match_bytes = response_match_bytes; t.state = STATE_PENDING; init_waitqueue_head(&t.wait); spin_lock_irq(&transactions_lock); list_add_tail(&t.list, &transactions); spin_unlock_irq(&transactions_lock); for (;;) { tcode = command_size == 4 ? TCODE_WRITE_QUADLET_REQUEST : TCODE_WRITE_BLOCK_REQUEST; ret = snd_fw_transaction(t.unit, tcode, CSR_REGISTER_BASE + CSR_FCP_COMMAND, (void *)command, command_size); if (ret < 0) break; wait_event_timeout(t.wait, t.state != STATE_PENDING, msecs_to_jiffies(FCP_TIMEOUT_MS)); if (t.state == STATE_COMPLETE) { ret = t.response_size; break; } else if (t.state == STATE_BUS_RESET) { msleep(ERROR_DELAY_MS); } else if (++tries >= ERROR_RETRIES) { dev_err(&t.unit->device, "FCP command timed out\n"); ret = -EIO; break; } } spin_lock_irq(&transactions_lock); list_del(&t.list); spin_unlock_irq(&transactions_lock); return ret; } EXPORT_SYMBOL(fcp_avc_transaction); /** * fcp_bus_reset - inform the target handler about a bus reset * @unit: the unit that might be used by fcp_avc_transaction() * * This function must be called from the driver's .update handler to inform * the FCP transaction handler that a bus reset has happened. Any pending FCP * transactions are retried. 
*/ void fcp_bus_reset(struct fw_unit *unit) { struct fcp_transaction *t; spin_lock_irq(&transactions_lock); list_for_each_entry(t, &transactions, list) { if (t->unit == unit && t->state == STATE_PENDING) { t->state = STATE_BUS_RESET; wake_up(&t->wait); } } spin_unlock_irq(&transactions_lock); } EXPORT_SYMBOL(fcp_bus_reset); /* checks whether the response matches the masked bytes in response_buffer */ static bool is_matching_response(struct fcp_transaction *transaction, const void *response, size_t length) { const u8 *p1, *p2; unsigned int mask, i; p1 = response; p2 = transaction->response_buffer; mask = transaction->response_match_bytes; for (i = 0; ; ++i) { if ((mask & 1) && p1[i] != p2[i]) return false; mask >>= 1; if (!mask) return true; if (--length == 0) return false; } } static void fcp_response(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source, int generation, unsigned long long offset, void *data, size_t length, void *callback_data) { struct fcp_transaction *t; unsigned long flags; if (length < 1 || (*(const u8 *)data & 0xf0) != CTS_AVC) return; spin_lock_irqsave(&transactions_lock, flags); list_for_each_entry(t, &transactions, list) { struct fw_device *device = fw_parent_device(t->unit); if (device->card != card || device->generation != generation) continue; smp_rmb(); /* node_id vs. 
generation */ if (device->node_id != source) continue; if (t->state == STATE_PENDING && is_matching_response(t, data, length)) { t->state = STATE_COMPLETE; t->response_size = min((unsigned int)length, t->response_size); memcpy(t->response_buffer, data, t->response_size); wake_up(&t->wait); } } spin_unlock_irqrestore(&transactions_lock, flags); } static struct fw_address_handler response_register_handler = { .length = 0x200, .address_callback = fcp_response, }; static int __init fcp_module_init(void) { static const struct fw_address_region response_register_region = { .start = CSR_REGISTER_BASE + CSR_FCP_RESPONSE, .end = CSR_REGISTER_BASE + CSR_FCP_END, }; fw_core_add_address_handler(&response_register_handler, &response_register_region); return 0; } static void __exit fcp_module_exit(void) { WARN_ON(!list_empty(&transactions)); fw_core_remove_address_handler(&response_register_handler); } module_init(fcp_module_init); module_exit(fcp_module_exit);
gpl-2.0
omnirom/android_kernel_oppo_msm8974
arch/parisc/kernel/pa7300lc.c
13902
1099
/* * linux/arch/parisc/kernel/pa7300lc.c * - PA7300LC-specific functions * * Copyright (C) 2000 Philipp Rumpf */ #include <linux/sched.h> #include <linux/smp.h> #include <linux/kernel.h> #include <asm/io.h> #include <asm/ptrace.h> #include <asm/machdep.h> /* CPU register indices */ #define MIOC_STATUS 0xf040 #define MIOC_CONTROL 0xf080 #define MDERRADD 0xf0e0 #define DMAERR 0xf0e8 #define DIOERR 0xf0ec #define HIDMAMEM 0xf0f4 /* this returns the HPA of the CPU it was called on */ static u32 cpu_hpa(void) { return 0xfffb0000; } static void pa7300lc_lpmc(int code, struct pt_regs *regs) { u32 hpa; printk(KERN_WARNING "LPMC on CPU %d\n", smp_processor_id()); show_regs(regs); hpa = cpu_hpa(); printk(KERN_WARNING "MIOC_CONTROL %08x\n" "MIOC_STATUS %08x\n" "MDERRADD %08x\n" "DMAERR %08x\n" "DIOERR %08x\n" "HIDMAMEM %08x\n", gsc_readl(hpa+MIOC_CONTROL), gsc_readl(hpa+MIOC_STATUS), gsc_readl(hpa+MDERRADD), gsc_readl(hpa+DMAERR), gsc_readl(hpa+DIOERR), gsc_readl(hpa+HIDMAMEM)); } void pa7300lc_init(void) { cpu_lpmc = pa7300lc_lpmc; }
gpl-2.0
Pivosgroup/buildroot-linux-kernel-m3
drivers/amlogic/wifi/rtl8xxx_CU/hal/rtl8192c/usb/rtl8192cu_xmit.c
79
34924
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * ******************************************************************************/ #define _RTL8192C_XMIT_C_ #include <drv_conf.h> #include <osdep_service.h> #include <drv_types.h> #include <rtw_byteorder.h> #include <wifi.h> #include <osdep_intf.h> #include <circ_buf.h> #include <usb_ops.h> #include <rtl8192c_hal.h> #if defined (PLATFORM_LINUX) && defined (PLATFORM_WINDOWS) #error "Shall be Linux or Windows, but not both!\n" #endif s32 rtl8192cu_init_xmit_priv(_adapter *padapter) { struct xmit_priv *pxmitpriv = &padapter->xmitpriv; #ifdef PLATFORM_LINUX tasklet_init(&pxmitpriv->xmit_tasklet, (void(*)(unsigned long))rtl8192cu_xmit_tasklet, (unsigned long)padapter); #endif return _SUCCESS; } void rtl8192cu_free_xmit_priv(_adapter *padapter) { } u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe) { u32 addr; struct pkt_attrib *pattrib = &pxmitframe->attrib; switch(pattrib->qsel) { case 0: case 3: addr = BE_QUEUE_INX; break; case 1: case 2: addr = BK_QUEUE_INX; break; case 4: case 5: addr = VI_QUEUE_INX; break; case 6: case 7: addr = VO_QUEUE_INX; break; case 0x10: addr = BCN_QUEUE_INX; break; case 0x11://BC/MC in PS (HIQ) addr = HIGH_QUEUE_INX; break; case 0x12: addr = MGT_QUEUE_INX; break; 
default: addr = BE_QUEUE_INX; break; } return addr; } int urb_zero_packet_chk(_adapter *padapter, int sz) { int blnSetTxDescOffset; struct dvobj_priv *pdvobj = (struct dvobj_priv*)&padapter->dvobjpriv; if ( pdvobj->ishighspeed ) { if ( ( (sz + TXDESC_SIZE) % 512 ) == 0 ) { blnSetTxDescOffset = 1; } else { blnSetTxDescOffset = 0; } } else { if ( ( (sz + TXDESC_SIZE) % 64 ) == 0 ) { blnSetTxDescOffset = 1; } else { blnSetTxDescOffset = 0; } } return blnSetTxDescOffset; } void rtl8192cu_cal_txdesc_chksum(struct tx_desc *ptxdesc) { u16 *usPtr = (u16*)ptxdesc; u32 count = 16; // (32 bytes / 2 bytes per XOR) => 16 times u32 index; u16 checksum = 0; //Clear first ptxdesc->txdw7 &= cpu_to_le32(0xffff0000); for(index = 0 ; index < count ; index++){ checksum = checksum ^ le16_to_cpu(*(usPtr + index)); } ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff&checksum); } void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc) { if ((pattrib->encrypt > 0) && !pattrib->bswenc) { switch (pattrib->encrypt) { //SEC_TYPE case _WEP40_: case _WEP104_: ptxdesc->txdw1 |= cpu_to_le32((0x01<<22)&0x00c00000); break; case _TKIP_: case _TKIP_WTMIC_: //ptxdesc->txdw1 |= cpu_to_le32((0x02<<22)&0x00c00000); ptxdesc->txdw1 |= cpu_to_le32((0x01<<22)&0x00c00000); break; case _AES_: ptxdesc->txdw1 |= cpu_to_le32((0x03<<22)&0x00c00000); break; case _NO_PRIVACY_: default: break; } } } void fill_txdesc_vcs(struct pkt_attrib *pattrib, u32 *pdw) { //DBG_8192C("cvs_mode=%d\n", pattrib->vcs_mode); switch(pattrib->vcs_mode) { case RTS_CTS: *pdw |= cpu_to_le32(BIT(12)); break; case CTS_TO_SELF: *pdw |= cpu_to_le32(BIT(11)); break; case NONE_VCS: default: break; } if(pattrib->vcs_mode) { *pdw |= cpu_to_le32(BIT(13));//ENABLE HW RTS } } void fill_txdesc_phy(struct pkt_attrib *pattrib, u32 *pdw) { //DBG_8192C("bwmode=%d, ch_off=%d\n", pattrib->bwmode, pattrib->ch_offset); if(pattrib->ht_en) { *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40)? 
cpu_to_le32(BIT(25)):0; if(pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER) *pdw |= cpu_to_le32((0x01<<20)&0x003f0000); else if(pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER) *pdw |= cpu_to_le32((0x02<<20)&0x003f0000); else if(pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE) *pdw |= 0; else *pdw |= cpu_to_le32((0x03<<20)&0x003f0000); } } #ifdef CONFIG_USB_TX_AGGREGATION static void _update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, int sz) { uint qsel; _adapter *padapter = pxmitframe->padapter; struct ht_priv *phtpriv = &padapter->mlmepriv.htpriv; struct mlme_ext_info *pmlmeinfo = &padapter->mlmeextpriv.mlmext_info; HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter); struct dm_priv *pdmpriv = &pHalData->dmpriv; struct pkt_attrib *pattrib = &pxmitframe->attrib; sint bmcst = IS_MCAST(pattrib->ra); struct tx_desc *ptxdesc = (struct tx_desc*)pmem; _rtw_memset(ptxdesc, 0, sizeof(struct tx_desc)); //4 offset 0 ptxdesc->txdw0 |= cpu_to_le32(sz & 0x0000ffff); ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE + OFFSET_SZ) << OFFSET_SHT) & 0x00ff0000);//32 bytes for TX Desc if (bmcst) ptxdesc->txdw0 |= cpu_to_le32(BIT(24)); RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("update_txdesc: offset0=0x%08x\n", ptxdesc->txdw0)); //4 offset 4 // pkt_offset, unit:8 bytes padding if (pxmitframe->pkt_offset > 0) ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000); #ifdef CONFIG_USB_TX_AGGREGATION if (pxmitframe->agg_num > 1) ptxdesc->txdw5 |= cpu_to_le32((pxmitframe->agg_num << 24) & 0xff000000); #endif if (pxmitframe->frame_tag == DATA_FRAMETAG) { //4 offset 4 ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id&0x1f); qsel = (uint)(pattrib->qsel & 0x0000001f); ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00); ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << 16) & 0x000f0000); fill_txdesc_sectype(pattrib, ptxdesc); if(pattrib->ampdu_en==_TRUE) ptxdesc->txdw1 |= cpu_to_le32(BIT(5));//AGG EN else 
ptxdesc->txdw1 |= cpu_to_le32(BIT(6));//AGG BK //4 offset 8 //4 offset 12 ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << 16) & 0xffff0000); //4 offset 16 , offset 20 if (pattrib->qos_en) ptxdesc->txdw4 |= cpu_to_le32(BIT(6));//QoS if ((pattrib->ether_type != 0x888e) && (pattrib->ether_type != 0x0806) && (pattrib->dhcp_pkt != 1)) { //Non EAP & ARP & DHCP type data packet fill_txdesc_vcs(pattrib, &ptxdesc->txdw4); fill_txdesc_phy(pattrib, &ptxdesc->txdw4); ptxdesc->txdw4 |= cpu_to_le32(0x00000008);//RTS Rate=24M ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00); //ptxdesc->txdw5 |= cpu_to_le32(0x0000000b);//DataRate - 54M #ifdef SUPPORT_64_STA if(pattrib->mac_id>FW_CTRL_MACID ){ ptxdesc->txdw5 |= cpu_to_le32(pattrib->psta->init_rate); ptxdesc->txdw4 |=cpu_to_le32(0x00000100); //USE RATE ptxdesc->txdw3 |=cpu_to_le32(BIT(28)); //PKT_ID //printk("%s pattrib->mac_id=%d ptxdesc->txdw3=0x%x,ptxdesc->txdw4=0x%x,ptxdesc->txdw5=0x%x\n",__FUNCTION__,pattrib->mac_id,ptxdesc->txdw3,ptxdesc->txdw4,ptxdesc->txdw5); } else //use REG_INIDATA_RATE_SEL value ptxdesc->txdw5 |= cpu_to_le32(pdmpriv->INIDATA_RATE[pattrib->mac_id]); if(pattrib->mac_id==1){ //bcmc sta ptxdesc->txdw5 |= cpu_to_le32(padapter->registrypriv.bcmc_rate); ptxdesc->txdw4 |=cpu_to_le32(0x00000100); //USE RATE } #else //SUPPORT_64_STA //use REG_INIDATA_RATE_SEL value ptxdesc->txdw5 |= cpu_to_le32(pdmpriv->INIDATA_RATE[pattrib->mac_id]); #endif //SUPPORT_64_STA if (0)//for driver dbg { ptxdesc->txdw4 |= cpu_to_le32(BIT(8));//driver uses rate if (pattrib->ht_en) ptxdesc->txdw5 |= cpu_to_le32(BIT(6));//SGI ptxdesc->txdw5 |= cpu_to_le32(0x00000013);//init rate - mcs7 } } else { // EAP data packet and ARP packet. // Use the 1M data rate to send the EAP/ARP packet. // This will maybe make the handshake smooth. 
ptxdesc->txdw1 |= cpu_to_le32(BIT(6));//AGG BK ptxdesc->txdw4 |= cpu_to_le32(BIT(8));//driver uses rate } //4 offset 24 #ifdef CONFIG_TCP_CSUM_OFFLOAD_TX if (pattrib->hw_tcp_csum == 1) { // ptxdesc->txdw6 = 0; // clear TCP_CHECKSUM and IP_CHECKSUM. It's zero already!! u8 ip_hdr_offset = 32 + pattrib->hdrlen + pattrib->iv_len + 8; ptxdesc->txdw7 = (1 << 31) | (ip_hdr_offset << 16); DBG_8192C("ptxdesc->txdw7 = %08x\n", ptxdesc->txdw7); } #endif } else if(pxmitframe->frame_tag == MGNT_FRAMETAG) { //DBG_8192C("pxmitframe->frame_tag == MGNT_FRAMETAG\n"); //4 offset 4 ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id&0x1f); qsel = (uint)(pattrib->qsel&0x0000001f); ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00); ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid<< 16) & 0x000f0000); //fill_txdesc_sectype(pattrib, ptxdesc); //4 offset 8 //4 offset 12 ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<16)&0xffff0000); //4 offset 16 ptxdesc->txdw4 |= cpu_to_le32(BIT(8));//driver uses rate //4 offset 20 } else if(pxmitframe->frame_tag == TXAGG_FRAMETAG) { DBG_8192C("pxmitframe->frame_tag == TXAGG_FRAMETAG\n"); } else { DBG_8192C("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag); //4 offset 4 ptxdesc->txdw1 |= cpu_to_le32((4)&0x1f);//CAM_ID(MAC_ID) ptxdesc->txdw1 |= cpu_to_le32((6<< 16) & 0x000f0000);//raid //4 offset 8 //4 offset 12 ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << 16) & 0xffff0000); //4 offset 16 ptxdesc->txdw4 |= cpu_to_le32(BIT(8));//driver uses rate //4 offset 20 } // 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. // (1) The sequence number of each non-Qos frame / broadcast / multicast / // mgnt frame should be controled by Hw because Fw will also send null data // which we cannot control when Fw LPS enable. // --> default enable non-Qos data sequense number. 2010.06.23. by tynli. // (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. // (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos packets. 
// 2010.06.23. Added by tynli. if(!pattrib->qos_en) { ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); // Hw set sequence number ptxdesc->txdw3 |= cpu_to_le32((8 <<28)); //set bit3 to 1. Suugested by TimChen. 2009.12.29. } rtl8192cu_cal_txdesc_chksum(ptxdesc); } #endif static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz) { int pull=0; uint qsel; _adapter *padapter = pxmitframe->padapter; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct pkt_attrib *pattrib = &pxmitframe->attrib; HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter); struct dm_priv *pdmpriv = &pHalData->dmpriv; struct tx_desc *ptxdesc = (struct tx_desc *)pmem; struct ht_priv *phtpriv = &pmlmepriv->htpriv; struct mlme_ext_info *pmlmeinfo = &padapter->mlmeextpriv.mlmext_info; sint bmcst = IS_MCAST(pattrib->ra); #ifdef CONFIG_P2P struct wifidirect_info* pwdinfo = &padapter->wdinfo; #endif //CONFIG_P2P #ifndef CONFIG_USE_USB_BUFFER_ALLOC_TX if(urb_zero_packet_chk(padapter, sz)==0) { ptxdesc = (struct tx_desc *)(pmem+PACKET_OFFSET_SZ); pull = 1; } #endif // CONFIG_USE_USB_BUFFER_ALLOC_TX _rtw_memset(ptxdesc, 0, sizeof(struct tx_desc)); if((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) { //DBG_8192C("pxmitframe->frame_tag == DATA_FRAMETAG\n"); //offset 4 ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id&0x1f); qsel = (uint)(pattrib->qsel & 0x0000001f); ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00); ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid<< 16) & 0x000f0000); fill_txdesc_sectype(pattrib, ptxdesc); if(pattrib->ampdu_en==_TRUE) ptxdesc->txdw1 |= cpu_to_le32(BIT(5));//AGG EN else ptxdesc->txdw1 |= cpu_to_le32(BIT(6));//AGG BK //offset 8 //offset 12 ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<16)&0xffff0000); //offset 16 , offset 20 if (pattrib->qos_en) ptxdesc->txdw4 |= cpu_to_le32(BIT(6));//QoS if ((pattrib->ether_type != 0x888e) && (pattrib->ether_type != 0x0806) && (pattrib->dhcp_pkt != 1)) { //Non EAP & ARP & DHCP type data packet fill_txdesc_vcs(pattrib, 
&ptxdesc->txdw4); fill_txdesc_phy(pattrib, &ptxdesc->txdw4); ptxdesc->txdw4 |= cpu_to_le32(0x00000008);//RTS Rate=24M ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00);// //ptxdesc->txdw5 |= cpu_to_le32(0x0000000b);//DataRate - 54M #ifdef SUPPORT_64_STA if(pattrib->mac_id>=FW_CTRL_MACID ){ ptxdesc->txdw5 |= cpu_to_le32(pattrib->psta->init_rate); ptxdesc->txdw4 |=cpu_to_le32(0x00000100); //USE RATE ptxdesc->txdw3 |=cpu_to_le32(BIT(28)); //PKT_ID //printk("%s pattrib->mac_id=%d\n",__FUNCTION__,pattrib->mac_id); //printk("%s pattrib->mac_id=%d ptxdesc->txdw1=0x%x,ptxdesc->txdw3=0x%x,\nptxdesc->txdw4=0x%x,ptxdesc->txdw5=0x%x\n",__FUNCTION__,pattrib->mac_id,ptxdesc->txdw1,ptxdesc->txdw3,ptxdesc->txdw4,ptxdesc->txdw5); } else //use REG_INIDATA_RATE_SEL value ptxdesc->txdw5 |= cpu_to_le32(pdmpriv->INIDATA_RATE[pattrib->mac_id]); if(pattrib->mac_id==1){ //bcmc sta ptxdesc->txdw5 |= cpu_to_le32(padapter->registrypriv.bcmc_rate); ptxdesc->txdw4 |=cpu_to_le32(0x00000100); //USE RATE } #else //SUPPORT_64_STA //use REG_INIDATA_RATE_SEL value ptxdesc->txdw5 |= cpu_to_le32(pdmpriv->INIDATA_RATE[pattrib->mac_id]); #endif //SUPPORT_64_STA if(0)//for driver dbg { ptxdesc->txdw4 |= cpu_to_le32(BIT(8));//driver uses rate if(pattrib->ht_en) ptxdesc->txdw5 |= cpu_to_le32(BIT(6));//SGI ptxdesc->txdw5 |= cpu_to_le32(0x00000013);//init rate - mcs7 } } else { // EAP data packet and ARP packet. // Use the 1M data rate to send the EAP/ARP packet. // This will maybe make the handshake smooth. ptxdesc->txdw1 |= cpu_to_le32(BIT(6));//AGG BK ptxdesc->txdw4 |= cpu_to_le32(BIT(8));//driver uses rate #ifdef CONFIG_P2P // Added by Albert 2011/03/22 // In the P2P mode, the driver should not support the b mode. // So, the Tx packet shouldn't use the CCK rate if(!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) { ptxdesc->txdw5 |= cpu_to_le32( 0x04 ); // Use the 6M data rate. 
} #endif //CONFIG_P2P } //offset 24 #ifdef CONFIG_TCP_CSUM_OFFLOAD_TX if ( pattrib->hw_tcp_csum == 1 ) { // ptxdesc->txdw6 = 0; // clear TCP_CHECKSUM and IP_CHECKSUM. It's zero already!! u8 ip_hdr_offset = 32 + pattrib->hdrlen + pattrib->iv_len + 8; ptxdesc->txdw7 = (1 << 31) | (ip_hdr_offset << 16); DBG_8192C("ptxdesc->txdw7 = %08x\n", ptxdesc->txdw7); } #endif } else if((pxmitframe->frame_tag&0x0f)== MGNT_FRAMETAG) { //DBG_8192C("pxmitframe->frame_tag == MGNT_FRAMETAG\n"); //offset 4 ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id&0x1f); qsel = (uint)(pattrib->qsel&0x0000001f); ptxdesc->txdw1 |= cpu_to_le32((qsel<<QSEL_SHT)&0x00001f00); ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid<< 16) & 0x000f0000); //fill_txdesc_sectype(pattrib, ptxdesc); //offset 8 //offset 12 ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<16)&0xffff0000); //offset 16 ptxdesc->txdw4 |= cpu_to_le32(BIT(8));//driver uses rate //offset 20 ptxdesc->txdw5 |= cpu_to_le32(BIT(17));//retry limit enable if(pattrib->retry_ctrl == _TRUE) ptxdesc->txdw5 |= cpu_to_le32(0x00180000);//retry limit = 6 else ptxdesc->txdw5 |= cpu_to_le32(0x00300000);//retry limit = 12 #ifdef CONFIG_P2P // Added by Albert 2011/03/17 // In the P2P mode, the driver should not support the b mode. // So, the Tx packet shouldn't use the CCK rate if(!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) { ptxdesc->txdw5 |= cpu_to_le32( 0x04 ); // Use the 6M data rate. 
} #endif //CONFIG_P2P #ifdef CONFIG_INTEL_PROXIM if((padapter->proximity.proxim_on==_TRUE)&&(pattrib->intel_proxim==_TRUE)){ printk("\n %s pattrib->rate=%d\n",__FUNCTION__,pattrib->rate); ptxdesc->txdw5 |= cpu_to_le32( pattrib->rate); } #endif } else if((pxmitframe->frame_tag&0x0f) == TXAGG_FRAMETAG) { DBG_8192C("pxmitframe->frame_tag == TXAGG_FRAMETAG\n"); } #ifdef CONFIG_MP_INCLUDED else if((pxmitframe->frame_tag&0x0f) == MP_FRAMETAG) { fill_txdesc_for_mp(padapter, ptxdesc); } #endif else { DBG_8192C("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag); //offset 4 ptxdesc->txdw1 |= cpu_to_le32((4)&0x1f);//CAM_ID(MAC_ID) ptxdesc->txdw1 |= cpu_to_le32((6<< 16) & 0x000f0000);//raid //offset 8 //offset 12 ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<16)&0xffff0000); //offset 16 ptxdesc->txdw4 |= cpu_to_le32(BIT(8));//driver uses rate //offset 20 } // 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. // (1) The sequence number of each non-Qos frame / broadcast / multicast / // mgnt frame should be controled by Hw because Fw will also send null data // which we cannot control when Fw LPS enable. // --> default enable non-Qos data sequense number. 2010.06.23. by tynli. // (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. // (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos packets. // 2010.06.23. Added by tynli. if(!pattrib->qos_en) { ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); // Hw set sequence number ptxdesc->txdw3 |= cpu_to_le32((8 <<28)); //set bit3 to 1. Suugested by TimChen. 2009.12.29. 
} //offset 0 ptxdesc->txdw0 |= cpu_to_le32(sz&0x0000ffff); ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000);//32 bytes for TX Desc if(bmcst) { ptxdesc->txdw0 |= cpu_to_le32(BIT(24)); } RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("offset0-txdesc=0x%x\n", ptxdesc->txdw0)); //offset 4 if(!pull) ptxdesc->txdw1 |= cpu_to_le32((0x01<<26)&0xff000000);//pkt_offset, unit:8 bytes padding rtl8192cu_cal_txdesc_chksum(ptxdesc); return pull; } static void _rtw_dump_xframe(_adapter *padapter, struct xmit_frame *pxmitframe, u8 sync) { int t, sz, w_sz, pull=0; u8 *mem_addr; u32 ff_hwaddr; struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf; struct pkt_attrib *pattrib = &pxmitframe->attrib; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct security_priv *psecuritypriv = &padapter->securitypriv; if ((pxmitframe->frame_tag == DATA_FRAMETAG) && (pxmitframe->attrib.ether_type != 0x0806) && (pxmitframe->attrib.ether_type != 0x888e) && (pxmitframe->attrib.dhcp_pkt != 1)) { rtw_issue_addbareq_cmd(padapter, pxmitframe); } mem_addr = pxmitframe->buf_addr; RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("rtw_dump_xframe()\n")); for (t = 0; t < pattrib->nr_frags; t++) { if (t != (pattrib->nr_frags - 1)) { RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("pattrib->nr_frags=%d\n", pattrib->nr_frags)); sz = pxmitpriv->frag_len; sz = sz - 4 - (psecuritypriv->sw_encrypt ? 
0 : pattrib->icv_len); } else //no frag { sz = pattrib->last_txcmdsz; } pull = update_txdesc(pxmitframe, mem_addr, sz); if(pull) { mem_addr += PACKET_OFFSET_SZ; //pull txdesc head //pxmitbuf ->pbuf = mem_addr; pxmitframe->buf_addr = mem_addr; w_sz = sz + TXDESC_SIZE; } else { w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ; } ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe); if(sync == _TRUE) rtw_write_port_sync(padapter, ff_hwaddr, w_sz, (unsigned char*)pxmitbuf); else rtw_write_port(padapter, ff_hwaddr, w_sz, (unsigned char*)pxmitbuf); rtw_count_tx_stats(padapter, pxmitframe, sz); RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("rtw_write_port, w_sz=%d\n", w_sz)); //DBG_8192C("rtw_write_port, w_sz=%d, sz=%d, txdesc_sz=%d, tid=%d\n", w_sz, sz, w_sz-sz, pattrib->priority); mem_addr += w_sz; mem_addr = (u8 *)RND4(((SIZE_PTR)(mem_addr))); } rtw_free_xmitframe_ex(pxmitpriv, pxmitframe); } inline void rtw_dump_xframe(_adapter *padapter, struct xmit_frame *pxmitframe) { _rtw_dump_xframe(padapter, pxmitframe, _FALSE); } inline void rtw_dump_xframe_sync(_adapter *padapter, struct xmit_frame *pxmitframe) { _rtw_dump_xframe(padapter, pxmitframe, _TRUE); } #ifdef CONFIG_USB_TX_AGGREGATION static u32 xmitframe_need_length(struct xmit_frame *pxmitframe) { struct pkt_attrib *pattrib = &pxmitframe->attrib; u32 len = 0; // no consider fragement len = pattrib->hdrlen + pattrib->iv_len + SNAP_SIZE + sizeof(u16) + pattrib->pktlen + ((pattrib->bswenc) ? 
pattrib->icv_len : 0); if(pattrib->encrypt ==_TKIP_) len += 8; return len; } #define IDEA_CONDITION 1 // check all packets before enqueue s32 rtl8192cu_xmitframe_complete(_adapter *padapter, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf) { HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter); struct xmit_frame *pxmitframe = NULL; struct xmit_frame *pfirstframe = NULL; // aggregate variable // struct hw_xmit *phwxmit; struct sta_info *psta = NULL; struct tx_servq *ptxservq = NULL; _irqL irqL; _list *xmitframe_plist = NULL, *xmitframe_phead = NULL; u32 pbuf; // next pkt address u32 pbuf_tail; // last pkt tail u32 len; // packet length, except TXDESC_SIZE and PKT_OFFSET u32 bulkSize = pHalData->UsbBulkOutSize; u8 descCount; u32 bulkPtr; // dump frame variable u32 ff_hwaddr; #ifndef IDEA_CONDITION int res = _SUCCESS; #endif RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n")); // check xmitbuffer is ok if (pxmitbuf == NULL) { pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv); if (pxmitbuf == NULL) return _FALSE; } //3 1. 
pick up first frame do { rtw_free_xmitframe_ex(pxmitpriv, pxmitframe); pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); if (pxmitframe == NULL) { // no more xmit frame, release xmit buffer rtw_free_xmitbuf(pxmitpriv, pxmitbuf); return _FALSE; } #ifndef IDEA_CONDITION if (pxmitframe->frame_tag != DATA_FRAMETAG) { RT_TRACE(_module_rtl8192c_xmit_c_, _drv_err_, ("xmitframe_complete: frame tag(%d) is not DATA_FRAMETAG(%d)!\n", pxmitframe->frame_tag, DATA_FRAMETAG)); // rtw_free_xmitframe_ex(pxmitpriv, pxmitframe); continue; } // TID 0~15 if ((pxmitframe->attrib.priority < 0) || (pxmitframe->attrib.priority > 15)) { RT_TRACE(_module_rtl8192c_xmit_c_, _drv_err_, ("xmitframe_complete: TID(%d) should be 0~15!\n", pxmitframe->attrib.priority)); // rtw_free_xmitframe_ex(pxmitpriv, pxmitframe); continue; } #endif pxmitframe->pxmitbuf = pxmitbuf; pxmitframe->buf_addr = pxmitbuf->pbuf; pxmitbuf->priv_data = pxmitframe; //pxmitframe->agg_num = 1; // alloc xmitframe should assign to 1. pxmitframe->pkt_offset = 1; // first frame of aggregation, reserve offset #ifdef IDEA_CONDITION rtw_xmitframe_coalesce(padapter, pxmitframe->pkt, pxmitframe); #else res = rtw_xmitframe_coalesce(padapter, pxmitframe->pkt, pxmitframe); if (res == _FALSE) { // rtw_free_xmitframe_ex(pxmitpriv, pxmitframe); continue; } #endif // always return ndis_packet after rtw_xmitframe_coalesce rtw_os_xmit_complete(padapter, pxmitframe); break; } while (1); //3 2. 
aggregate same priority and same DA(AP or STA) frames pfirstframe = pxmitframe; len = xmitframe_need_length(pfirstframe) + TXDESC_OFFSET; pbuf_tail = len; pbuf = _RND8(pbuf_tail); // check pkt amount in one bluk descCount = 0; bulkPtr = bulkSize; if (pbuf < bulkPtr) descCount++; else { descCount = 0; bulkPtr = ((pbuf / bulkSize) + 1) * bulkSize; // round to next bulkSize } // dequeue same priority packet from station tx queue psta = pfirstframe->attrib.psta; switch (pfirstframe->attrib.priority) { case 1: case 2: ptxservq = &(psta->sta_xmitpriv.bk_q); // phwxmit = pxmitpriv->hwxmits + 3; break; case 4: case 5: ptxservq = &(psta->sta_xmitpriv.vi_q); // phwxmit = pxmitpriv->hwxmits + 1; break; case 6: case 7: ptxservq = &(psta->sta_xmitpriv.vo_q); // phwxmit = pxmitpriv->hwxmits; break; case 0: case 3: default: ptxservq = &(psta->sta_xmitpriv.be_q); // phwxmit = pxmitpriv->hwxmits + 2; break; } _enter_critical_bh(&pxmitpriv->lock, &irqL); xmitframe_phead = get_list_head(&ptxservq->sta_pending); xmitframe_plist = get_next(xmitframe_phead); while (rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist) == _FALSE) { pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list); xmitframe_plist = get_next(xmitframe_plist); len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE; // no offset if (pbuf + len > MAX_XMITBUF_SZ) break; rtw_list_delete(&pxmitframe->list); ptxservq->qcnt--; #ifndef IDEA_CONDITION // suppose only data frames would be in queue if (pxmitframe->frame_tag != DATA_FRAMETAG) { RT_TRACE(_module_rtl8192c_xmit_c_, _drv_err_, ("xmitframe_complete: frame tag(%d) is not DATA_FRAMETAG(%d)!\n", pxmitframe->frame_tag, DATA_FRAMETAG)); rtw_free_xmitframe_ex(pxmitpriv, pxmitframe); continue; } // TID 0~15 if ((pxmitframe->attrib.priority < 0) || (pxmitframe->attrib.priority > 15)) { RT_TRACE(_module_rtl8192c_xmit_c_, _drv_err_, ("xmitframe_complete: TID(%d) should be 0~15!\n", pxmitframe->attrib.priority)); rtw_free_xmitframe_ex(pxmitpriv, pxmitframe); 
continue; } #endif // pxmitframe->pxmitbuf = pxmitbuf; pxmitframe->buf_addr = pxmitbuf->pbuf + pbuf; pxmitframe->agg_num = 0; // not first frame of aggregation pxmitframe->pkt_offset = 0; // not first frame of aggregation, no need to reserve offset #ifdef IDEA_CONDITION rtw_xmitframe_coalesce(padapter, pxmitframe->pkt, pxmitframe); #else res = rtw_xmitframe_coalesce(padapter, pxmitframe->pkt, pxmitframe); if (res == _FALSE) { rtw_free_xmitframe_ex(pxmitpriv, pxmitframe); continue; } #endif // always return ndis_packet after rtw_xmitframe_coalesce rtw_os_xmit_complete(padapter, pxmitframe); // (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz _update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz); // don't need xmitframe any more rtw_free_xmitframe_ex(pxmitpriv, pxmitframe); // handle pointer and stop condition pbuf_tail = pbuf + len; pbuf = _RND8(pbuf_tail); pfirstframe->agg_num++; if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num) break; if (pbuf < bulkPtr) { descCount++; if (descCount == pHalData->UsbTxAggDescNum) break; } else { descCount = 0; bulkPtr = ((pbuf / bulkSize) + 1) * bulkSize; } } if (_rtw_queue_empty(&ptxservq->sta_pending) == _TRUE) rtw_list_delete(&ptxservq->tx_pending); _exit_critical_bh(&pxmitpriv->lock, &irqL); if ((pfirstframe->attrib.ether_type != 0x0806) && (pfirstframe->attrib.ether_type != 0x888e) && (pfirstframe->attrib.dhcp_pkt != 1)) { rtw_issue_addbareq_cmd(padapter, pfirstframe); } #ifndef CONFIG_USE_USB_BUFFER_ALLOC_TX //3 3. update first frame txdesc if ((pbuf_tail % bulkSize) == 0) { // remove pkt_offset pbuf_tail -= PACKET_OFFSET_SZ; pfirstframe->buf_addr += PACKET_OFFSET_SZ; pfirstframe->pkt_offset = 0; } #endif // CONFIG_USE_USB_BUFFER_ALLOC_TX _update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz); //3 4. 
write xmit buffer to USB FIFO ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe); // xmit address == ((xmit_frame*)pxmitbuf->priv_data)->buf_addr rtw_write_port(padapter, ff_hwaddr, pbuf_tail, (u8*)pxmitbuf); //3 5. update statisitc pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE); if (pfirstframe->pkt_offset == 1) pbuf_tail -= PACKET_OFFSET_SZ; rtw_count_tx_stats(padapter, pfirstframe, pbuf_tail); rtw_free_xmitframe_ex(pxmitpriv, pfirstframe); return _TRUE; } #else s32 rtl8192cu_xmitframe_complete(_adapter *padapter, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf) { struct hw_xmit *phwxmits; sint hwentry; struct xmit_frame *pxmitframe=NULL; int res=_SUCCESS, xcnt = 0; phwxmits = pxmitpriv->hwxmits; hwentry = pxmitpriv->hwxmit_entry; RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("xmitframe_complete()\n")); if(pxmitbuf==NULL) { pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv); if(!pxmitbuf) { return _FALSE; } } do { pxmitframe = rtw_dequeue_xframe(pxmitpriv, phwxmits, hwentry); if(pxmitframe) { pxmitframe->pxmitbuf = pxmitbuf; pxmitframe->buf_addr = pxmitbuf->pbuf; pxmitbuf->priv_data = pxmitframe; if((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) { if(pxmitframe->attrib.priority<=15)//TID0~15 { res = rtw_xmitframe_coalesce(padapter, pxmitframe->pkt, pxmitframe); } rtw_os_xmit_complete(padapter, pxmitframe);//always return ndis_packet after rtw_xmitframe_coalesce } RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("xmitframe_complete(): rtw_dump_xframe\n")); if(res == _SUCCESS) { rtw_dump_xframe(padapter, pxmitframe); } else { rtw_free_xmitbuf(pxmitpriv, pxmitbuf); rtw_free_xmitframe_ex(pxmitpriv, pxmitframe); } xcnt++; } else { rtw_free_xmitbuf(pxmitpriv, pxmitbuf); return _FALSE; } break; }while(0/*xcnt < (NR_XMITFRAME >> 3)*/); return _TRUE; } #endif static s32 xmitframe_direct(_adapter *padapter, struct xmit_frame *pxmitframe) { s32 res = _SUCCESS; res = rtw_xmitframe_coalesce(padapter, pxmitframe->pkt, pxmitframe); if (res == _SUCCESS) { rtw_dump_xframe(padapter, 
pxmitframe); } return res; } /* * Return * _TRUE dump packet directly * _FALSE enqueue packet */ static s32 pre_xmitframe(_adapter *padapter, struct xmit_frame *pxmitframe) { _irqL irqL; s32 res; struct xmit_buf *pxmitbuf = NULL; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct pkt_attrib *pattrib = &pxmitframe->attrib; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; _enter_critical_bh(&pxmitpriv->lock, &irqL); if (rtw_txframes_sta_ac_pending(padapter, pattrib) > 0) goto enqueue; if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == _TRUE) goto enqueue; pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv); if (pxmitbuf == NULL) goto enqueue; _exit_critical_bh(&pxmitpriv->lock, &irqL); pxmitframe->pxmitbuf = pxmitbuf; pxmitframe->buf_addr = pxmitbuf->pbuf; pxmitbuf->priv_data = pxmitframe; if (xmitframe_direct(padapter, pxmitframe) != _SUCCESS) { rtw_free_xmitbuf(pxmitpriv, pxmitbuf); rtw_free_xmitframe_ex(pxmitpriv, pxmitframe); } return _TRUE; enqueue: res = rtw_xmitframe_enqueue(padapter, pxmitframe); _exit_critical_bh(&pxmitpriv->lock, &irqL); if (res != _SUCCESS) { RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("pre_xmitframe: enqueue xmitframe fail\n")); rtw_free_xmitframe_ex(pxmitpriv, pxmitframe); // Trick, make the statistics correct pxmitpriv->tx_pkts--; pxmitpriv->tx_drop++; return _TRUE; } return _FALSE; } void rtl8192cu_mgnt_xmit(_adapter *padapter, struct xmit_frame *pmgntframe) { rtw_dump_xframe(padapter, pmgntframe); } /* * Return * _TRUE dump packet directly ok * _FALSE temporary can't transmit packets to hardware */ s32 rtl8192cu_hal_xmit(_adapter *padapter, struct xmit_frame *pxmitframe) { return pre_xmitframe(padapter, pxmitframe); } #ifdef CONFIG_HOSTAPD_MLME static void rtl8192cu_hostap_mgnt_xmit_cb(struct urb *urb) { #ifdef PLATFORM_LINUX struct sk_buff *skb = (struct sk_buff *)urb->context; //DBG_8192C("%s\n", __FUNCTION__); dev_kfree_skb_any(skb); #endif } s32 rtl8192cu_hostap_mgnt_xmit_entry(_adapter *padapter, _pkt *pkt) { #ifdef PLATFORM_LINUX 
u16 fc; int rc, len, pipe; unsigned int bmcst, tid, qsel; struct sk_buff *skb, *pxmit_skb; struct urb *urb; unsigned char *pxmitbuf; struct tx_desc *ptxdesc; struct rtw_ieee80211_hdr *tx_hdr; struct hostapd_priv *phostapdpriv = padapter->phostapdpriv; struct net_device *pnetdev = padapter->pnetdev; HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter); struct dvobj_priv *pdvobj = &padapter->dvobjpriv; //DBG_8192C("%s\n", __FUNCTION__); skb = pkt; len = skb->len; tx_hdr = (struct rtw_ieee80211_hdr *)(skb->data); fc = le16_to_cpu(tx_hdr->frame_ctl); bmcst = IS_MCAST(tx_hdr->addr1); if ((fc & RTW_IEEE80211_FCTL_FTYPE) != RTW_IEEE80211_FTYPE_MGMT) goto _exit; #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) // http://www.mail-archive.com/netdev@vger.kernel.org/msg17214.html pxmit_skb = dev_alloc_skb(len + TXDESC_SIZE); #else pxmit_skb = netdev_alloc_skb(pnetdev, len + TXDESC_SIZE); #endif if(!pxmit_skb) goto _exit; pxmitbuf = pxmit_skb->data; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { goto _exit; } // ----- fill tx desc ----- ptxdesc = (struct tx_desc *)pxmitbuf; _rtw_memset(ptxdesc, 0, sizeof(*ptxdesc)); //offset 0 ptxdesc->txdw0 |= cpu_to_le32(len&0x0000ffff); ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000);//default = 32 bytes for TX Desc ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); if(bmcst) { ptxdesc->txdw0 |= cpu_to_le32(BIT(24)); } //offset 4 ptxdesc->txdw1 |= cpu_to_le32(0x00);//MAC_ID ptxdesc->txdw1 |= cpu_to_le32((0x12<<QSEL_SHT)&0x00001f00); ptxdesc->txdw1 |= cpu_to_le32((0x06<< 16) & 0x000f0000);//b mode //offset 8 //offset 12 ptxdesc->txdw3 |= cpu_to_le32((le16_to_cpu(tx_hdr->seq_ctl)<<16)&0xffff0000); //offset 16 ptxdesc->txdw4 |= cpu_to_le32(BIT(8));//driver uses rate //offset 20 //HW append seq ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); // Hw set sequence number ptxdesc->txdw3 |= cpu_to_le32((8 <<28)); //set bit3 to 1. Suugested by TimChen. 2009.12.29. 
rtl8192cu_cal_txdesc_chksum(ptxdesc); // ----- end of fill tx desc ----- // skb_put(pxmit_skb, len + TXDESC_SIZE); pxmitbuf = pxmitbuf + TXDESC_SIZE; _rtw_memcpy(pxmitbuf, skb->data, len); //DBG_8192C("mgnt_xmit, len=%x\n", pxmit_skb->len); // ----- prepare urb for submit ----- //translate DMA FIFO addr to pipehandle //pipe = ffaddr2pipehdl(pdvobj, MGT_QUEUE_INX); pipe = usb_sndbulkpipe(pdvobj->pusbdev, pHalData->Queue2EPNum[(u8)MGT_QUEUE_INX]&0x0f); usb_fill_bulk_urb(urb, pdvobj->pusbdev, pipe, pxmit_skb->data, pxmit_skb->len, rtl8192cu_hostap_mgnt_xmit_cb, pxmit_skb); urb->transfer_flags |= URB_ZERO_PACKET; usb_anchor_urb(urb, &phostapdpriv->anchored); rc = usb_submit_urb(urb, GFP_ATOMIC); if (rc < 0) { usb_unanchor_urb(urb); kfree_skb(skb); } usb_free_urb(urb); _exit: dev_kfree_skb_any(skb); #endif return 0; } #endif
gpl-2.0
s2plus/android_kernel_samsung_galaxys2plus
drivers/input/touchscreen/cyttsp4_btn.c
79
13020
/*
 * cyttsp4_btn.c
 * Cypress TrueTouch(TM) Standard Product V4 CapSense touch reports module.
 * For use with Cypress Txx4xx parts.
 * Supported parts include:
 * TMA4XX
 * TMA1036
 *
 * Copyright (C) 2012 Cypress Semiconductor
 * Copyright (C) 2011 Sony Ericsson Mobile Communications AB.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2, and only version 2, as published by the
 * Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contact Cypress Semiconductor at www.cypress.com <ttdrivers@cypress.com>
 *
 */

#include <linux/cyttsp4_bus.h>
#include <linux/delay.h>
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/cyttsp4_btn.h>
#include <linux/cyttsp4_core.h>
#include "cyttsp4_regs.h"

/* Per-device driver state, stored in the ttsp device's drvdata. */
struct cyttsp4_btn_data {
	struct cyttsp4_device *ttsp;
	struct cyttsp4_btn_platform_data *pdata;
	struct cyttsp4_sysinfo *si;	/* chip sysinfo; NULL until available */
	struct input_dev *input;
#ifdef CONFIG_HAS_EARLYSUSPEND
	struct early_suspend es;
	bool is_suspended;	/* tracks early-suspend state for release() */
#endif
	bool input_device_registered;
	char phys[NAME_MAX];	/* input device "phys" path string */
	u8 pr_buf[CY_MAX_PRBUF_SIZE];	/* scratch buffer for debug dumps */
};

/*
 * Report press/release transitions for up to @num_btns buttons whose
 * packed per-button states are held in @cur_btn_mask (CY_BITS_PER_BTN
 * bits each, starting at button index @cur_btn).  A key event is only
 * emitted when the hardware state matches @new_btn_state AND the cached
 * state is the opposite, so repeated reports of the same state are
 * suppressed.
 */
static void cyttsp4_btn_key_action(struct cyttsp4_btn_data *bd,
	int cur_btn, u8 cur_btn_mask, int num_btns, int new_btn_state)
{
	struct device *dev = &bd->ttsp->dev;
	struct cyttsp4_sysinfo *si = bd->si;
	int btn;
	int cur_btn_state;

	/* the cached state we expect to transition away from */
	cur_btn_state = new_btn_state == CY_BTN_PRESSED ?
		CY_BTN_RELEASED : CY_BTN_PRESSED;

	for (btn = 0; btn < num_btns; btn++) {
		if ((si->btn[cur_btn + btn].enabled) &&
			(((cur_btn_mask >> (btn * CY_BITS_PER_BTN)) &
			(CY_NUM_BTN_EVENT_ID - 1)) == new_btn_state) &&
			(si->btn[cur_btn + btn].state == cur_btn_state)) {
			input_report_key(bd->input,
				si->btn[cur_btn + btn].key_code,
				new_btn_state);
			si->btn[cur_btn + btn].state = new_btn_state;
			input_sync(bd->input);
			/*
			 * Was an unleveled printk(); use dev_dbg() so the
			 * message carries a device prefix and honors the
			 * kernel debug configuration (also fixes the
			 * previously unused 'dev' local).
			 */
			dev_dbg(dev, "%s: btn=%d key_code=%d %s\n", __func__,
				cur_btn + btn,
				si->btn[cur_btn + btn].key_code,
				new_btn_state == CY_BTN_PRESSED ?
				"PRESSED" : "RELEASED");
		}
	}
}

/*
 * Walk the button status registers in xy_mode and dispatch key actions,
 * first for releases then for presses, covering CY_NUM_BTN_PER_REG
 * buttons per register until all num_btns are handled.
 */
static void cyttsp4_get_btn_touches(struct cyttsp4_btn_data *bd)
{
	enum cyttsp4_btn_state btn_state = CY_BTN_RELEASED;
	struct cyttsp4_sysinfo *si = bd->si;
	int num_btn;
	int num_cur_btn;
	int cur_reg;
	int cur_btn;
	u8 cur_btn_mask;

	for (btn_state = CY_BTN_RELEASED; btn_state < CY_BTN_NUM_STATE;
		btn_state++) {
		for (cur_reg = cur_btn = 0, num_cur_btn = si->si_ofs.num_btns;
			cur_reg < si->si_ofs.num_btn_regs;
			cur_reg++, cur_btn += CY_NUM_BTN_PER_REG,
			num_cur_btn -= CY_NUM_BTN_PER_REG) {
			if (num_cur_btn > 0) {
				cur_btn_mask = si->xy_mode[
					si->si_ofs.rep_ofs + 2 + cur_reg];
				/* last register may hold fewer buttons */
				num_btn = num_cur_btn / CY_NUM_BTN_PER_REG ?
					CY_NUM_BTN_PER_REG : num_cur_btn;
				cyttsp4_btn_key_action(bd, cur_btn,
					cur_btn_mask, num_btn, btn_state);
			}
		}
	}
}

/*
 * Force all buttons released: zero the button status registers in the
 * cached xy_mode image and re-run touch extraction so release events
 * are reported for any button still marked pressed.
 */
static void cyttsp4_btn_lift_all(struct cyttsp4_btn_data *bd)
{
	struct cyttsp4_sysinfo *si = bd->si;
	int btn_reg;
	int num_regs;

	if (si->si_ofs.num_btns == 0)
		return;

	num_regs = (si->si_ofs.num_btns + CY_NUM_BTN_PER_REG - 1)
			/ CY_NUM_BTN_PER_REG;

	for (btn_reg = 0; btn_reg < num_regs; btn_reg++)
		si->xy_mode[si->si_ofs.rep_ofs + 2 + btn_reg] = 0;

	cyttsp4_get_btn_touches(bd);
}

#ifdef VERBOSE_DEBUG
/* Dump each button's raw record bytes as a hex string via dev_vdbg(). */
static void cyttsp4_log_btn_data(struct cyttsp4_btn_data *bd)
{
	struct device *dev = &bd->ttsp->dev;
	struct cyttsp4_sysinfo *si = bd->si;
	int cur;
	int t;
	int len;

	for (cur = 0; cur < si->si_ofs.num_btns; cur++) {
		/*
		 * Build the hex dump with an explicit write offset.  The
		 * previous code passed pr_buf as both the destination and
		 * a "%s" source of the same snprintf() call, which is
		 * undefined behavior (overlapping objects).
		 */
		len = scnprintf(bd->pr_buf, CY_MAX_PRBUF_SIZE,
			"btn_rec[%d]=0x", cur);
		for (t = 0; t < si->si_ofs.btn_rec_size; t++)
			len += scnprintf(bd->pr_buf + len,
				CY_MAX_PRBUF_SIZE - len, "%02X",
				si->btn_rec_data
				[(cur * si->si_ofs.btn_rec_size) + t]);
		dev_vdbg(dev, "%s: %s\n", __func__, bd->pr_buf);
	}
}
#endif

/*
 * Read xy_data for all current CapSense button touches.
 * Returns 0 in all cases (bad packets and read failures are logged and
 * treated as handled so the IRQ attention chain continues).
 */
static int cyttsp4_xy_worker(struct cyttsp4_btn_data *bd)
{
	struct device *dev = &bd->ttsp->dev;
	struct cyttsp4_sysinfo *si = bd->si;
	u8 rep_stat = si->xy_mode[si->si_ofs.rep_ofs + 1];
#ifdef VERBOSE_DEBUG
	int rc;
#endif

	/* rep_data for bad packet check */
	if (IS_BAD_PKT(rep_stat)) {
		dev_dbg(dev, "%s: Invalid buffer detected\n", __func__);
		return 0;
	}

	/* extract button press/release touch information */
	if (si->si_ofs.num_btns > 0) {
		cyttsp4_get_btn_touches(bd);
#ifdef VERBOSE_DEBUG
		/* read button diff data */
		rc = cyttsp4_read(bd->ttsp, CY_MODE_OPERATIONAL,
				si->si_ofs.tt_stat_ofs + 1 +
				si->si_ofs.max_tchs *
				si->si_ofs.tch_rec_size,
				si->btn_rec_data,
				si->si_ofs.num_btns *
				si->si_ofs.btn_rec_size);
		if (rc < 0) {
			dev_err(dev, "%s: read fail on button regs r=%d\n",
					__func__, rc);
			return 0;
		}
		/* log button press/release touch information */
		cyttsp4_log_btn_data(bd);
#endif
	}

	dev_vdbg(dev, "%s: done\n", __func__);
	return 0;
}

/* IRQ attention callback: process one button report (core handshakes). */
static int cyttsp4_btn_attention(struct cyttsp4_device *ttsp)
{
	struct device *dev = &ttsp->dev;
	struct cyttsp4_btn_data *bd = dev_get_drvdata(dev);
	int rc;

	dev_vdbg(dev, "%s\n", __func__);

	/* core handles handshake */
	rc = cyttsp4_xy_worker(bd);
	if (rc < 0)
		dev_err(dev, "%s: xy_worker error r=%d\n", __func__, rc);
	return rc;
}

/* Startup attention callback: reset all cached button states. */
static int cyttsp4_startup_attention(struct cyttsp4_device *ttsp)
{
	struct device *dev = &ttsp->dev;
	struct cyttsp4_btn_data *bd = dev_get_drvdata(dev);
	struct cyttsp4_sysinfo *si = bd->si;
	int btn;

	dev_vdbg(dev, "%s\n", __func__);

	for (btn = 0; btn < si->si_ofs.num_btns; btn++)
		bd->si->btn[btn].state = CY_BTN_RELEASED;

	return 0;
}

/*
 * input_dev open(): take a runtime PM reference and subscribe to the
 * core's IRQ and startup attention chains.
 */
int cyttsp4_btn_open(struct input_dev *input)
{
	struct device *dev = input->dev.parent;
	struct cyttsp4_device *ttsp =
		container_of(dev, struct cyttsp4_device, dev);

	dev_dbg(dev, "%s\n", __func__);

	pm_runtime_get(dev);

	dev_vdbg(dev, "%s: setup subscriptions\n", __func__);

	/* set up touch call back */
	cyttsp4_subscribe_attention(ttsp, CY_ATTEN_IRQ,
		cyttsp4_btn_attention, CY_MODE_OPERATIONAL);

	/* set up startup call back */
	cyttsp4_subscribe_attention(ttsp, CY_ATTEN_STARTUP,
		cyttsp4_startup_attention, 0);

	return 0;
}

/*
 * input_dev close(): release all buttons, unsubscribe from attention
 * chains and drop the runtime PM reference taken in open().
 */
void cyttsp4_btn_close(struct input_dev *input)
{
	struct device *dev = input->dev.parent;
	struct cyttsp4_btn_data *bd = dev_get_drvdata(dev);
	struct cyttsp4_device *ttsp =
		container_of(dev, struct cyttsp4_device, dev);

	dev_dbg(dev, "%s\n", __func__);

	cyttsp4_btn_lift_all(bd);

	cyttsp4_unsubscribe_attention(ttsp, CY_ATTEN_IRQ,
		cyttsp4_btn_attention, CY_MODE_OPERATIONAL);

	cyttsp4_unsubscribe_attention(ttsp, CY_ATTEN_STARTUP,
		cyttsp4_startup_attention, 0);

	pm_runtime_put(dev);
}

#ifdef CONFIG_HAS_EARLYSUSPEND
/* Early suspend: release buttons and drop the runtime PM reference. */
static void cyttsp4_btn_early_suspend(struct early_suspend *h)
{
	struct cyttsp4_btn_data *bd =
		container_of(h, struct cyttsp4_btn_data, es);
	struct device *dev = &bd->ttsp->dev;

	dev_dbg(dev, "%s\n", __func__);

	if (bd->si)
		cyttsp4_btn_lift_all(bd);
	pm_runtime_put(dev);
	bd->is_suspended = true;
}

/* Late resume: retake the runtime PM reference dropped at suspend. */
static void cyttsp4_btn_late_resume(struct early_suspend *h)
{
	struct cyttsp4_btn_data *bd =
		container_of(h, struct cyttsp4_btn_data, es);
	struct device *dev = &bd->ttsp->dev;

	dev_dbg(dev, "%s\n", __func__);

	pm_runtime_get(dev);
	bd->is_suspended = false;
}
#endif

#ifdef CONFIG_PM_RUNTIME
/* Runtime suspend: report all buttons released before going idle. */
static int cyttsp4_btn_suspend(struct device *dev)
{
	struct cyttsp4_btn_data *bd = dev_get_drvdata(dev);

	dev_dbg(dev, "%s\n", __func__);

	if (bd->si)
		cyttsp4_btn_lift_all(bd);

	return 0;
}

/* Runtime resume: nothing to restore; state comes from startup attention. */
static int cyttsp4_btn_resume(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);

	return 0;
}
#endif

static const struct dev_pm_ops cyttsp4_btn_pm_ops = {
	SET_RUNTIME_PM_OPS(cyttsp4_btn_suspend, cyttsp4_btn_resume, NULL)
};

/*
 * Configure event capabilities from sysinfo key codes and register the
 * input device.  Requires bd->si to be valid.
 */
static int cyttsp4_setup_input_device(struct cyttsp4_device *ttsp)
{
	struct device *dev = &ttsp->dev;
	struct cyttsp4_btn_data *bd = dev_get_drvdata(dev);
	int i;
	int rc;

	dev_vdbg(dev, "%s: Initialize event signals\n", __func__);
	__set_bit(EV_KEY, bd->input->evbit);
	__set_bit(EV_LED, bd->input->evbit);
	__set_bit(LED_MISC, bd->input->ledbit);
	for (i = 0; i < bd->si->si_ofs.num_btns; i++)
		__set_bit(bd->si->btn[i].key_code, bd->input->keybit);

	rc = input_register_device(bd->input);
	if (rc < 0)
		dev_err(dev, "%s: Error, failed register input device r=%d\n",
			__func__, rc);
	else
		bd->input_device_registered = true;

	return rc;
}

/*
 * Deferred input setup: called via the startup attention chain when
 * sysinfo was not yet available at probe time.  Unsubscribes itself
 * after running once.
 */
static int cyttsp4_setup_input_attention(struct cyttsp4_device *ttsp)
{
	struct device *dev = &ttsp->dev;
	struct cyttsp4_btn_data *bd = dev_get_drvdata(dev);
	int rc;

	dev_vdbg(dev, "%s\n", __func__);

	bd->si = cyttsp4_request_sysinfo(ttsp);
	if (!bd->si)
		return -1;

	rc = cyttsp4_setup_input_device(ttsp);

	cyttsp4_unsubscribe_attention(ttsp, CY_ATTEN_STARTUP,
		cyttsp4_setup_input_attention, 0);

	return rc;
}

/*
 * Probe: allocate driver state and the input device, then either set up
 * the input device immediately (sysinfo ready) or defer setup to the
 * startup attention chain.
 */
static int cyttsp4_btn_probe(struct cyttsp4_device *ttsp)
{
	struct cyttsp4_btn_data *bd;
	struct device *dev = &ttsp->dev;
	struct cyttsp4_btn_platform_data *pdata = dev_get_platdata(dev);
	int rc = 0;

	dev_info(dev, "%s\n", __func__);
	dev_dbg(dev, "%s: debug on\n", __func__);
	dev_vdbg(dev, "%s: verbose debug on\n", __func__);

	bd = kzalloc(sizeof(*bd), GFP_KERNEL);
	if (bd == NULL) {
		dev_err(dev, "%s: Error, kzalloc\n", __func__);
		rc = -ENOMEM;
		goto error_alloc_data_failed;
	}

	bd->ttsp = ttsp;
	bd->pdata = pdata;
	dev_set_drvdata(dev, bd);

	/* Create the input device and register it. */
	dev_vdbg(dev, "%s: Create the input device and register it\n",
		__func__);
	bd->input = input_allocate_device();
	if (bd->input == NULL) {
		dev_err(dev, "%s: Error, failed to allocate input device\n",
			__func__);
		/* was -ENOSYS; allocation failure is -ENOMEM */
		rc = -ENOMEM;
		goto error_alloc_failed;
	}

	bd->input->name = ttsp->name;
	/* scnprintf() NUL-terminates within size; no need for size - 1 */
	scnprintf(bd->phys, sizeof(bd->phys), "%s", dev_name(dev));
	bd->input->phys = bd->phys;
	bd->input->dev.parent = &bd->ttsp->dev;
	bd->input->open = cyttsp4_btn_open;
	bd->input->close = cyttsp4_btn_close;
	input_set_drvdata(bd->input, bd);

	pm_runtime_enable(dev);

	/* get sysinfo */
	bd->si = cyttsp4_request_sysinfo(ttsp);
	if (bd->si) {
		rc = cyttsp4_setup_input_device(ttsp);
		if (rc)
			goto error_init_input;
	} else {
		dev_err(dev, "%s: Fail get sysinfo pointer from core p=%p\n",
			__func__, bd->si);
		cyttsp4_subscribe_attention(ttsp, CY_ATTEN_STARTUP,
			cyttsp4_setup_input_attention, 0);
	}

#ifdef CONFIG_HAS_EARLYSUSPEND
	bd->es.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
	bd->es.suspend = cyttsp4_btn_early_suspend;
	bd->es.resume = cyttsp4_btn_late_resume;
	register_early_suspend(&bd->es);
#endif

	dev_dbg(dev, "%s: ok\n", __func__);
	return 0;

error_init_input:
	pm_runtime_disable(dev);
	input_free_device(bd->input);
error_alloc_failed:
	dev_set_drvdata(dev, NULL);
	kfree(bd);
error_alloc_data_failed:
	dev_err(dev, "%s failed.\n", __func__);
	return rc;
}

/* Remove: undo everything probe set up, in reverse order. */
static int cyttsp4_btn_release(struct cyttsp4_device *ttsp)
{
	struct device *dev = &ttsp->dev;
	struct cyttsp4_btn_data *bd = dev_get_drvdata(dev);

	dev_dbg(dev, "%s\n", __func__);

#ifdef CONFIG_HAS_EARLYSUSPEND
	/*
	 * This check is to prevent pm_runtime usage_count drop below zero
	 * because of removing the module while in suspended state
	 */
	if (bd->is_suspended)
		pm_runtime_get_noresume(dev);

	unregister_early_suspend(&bd->es);
#endif

	if (bd->input_device_registered) {
		input_unregister_device(bd->input);
	} else {
		input_free_device(bd->input);
		cyttsp4_unsubscribe_attention(ttsp, CY_ATTEN_STARTUP,
			cyttsp4_setup_input_attention, 0);
	}

	pm_runtime_suspend(dev);
	pm_runtime_disable(dev);

	dev_set_drvdata(dev, NULL);
	kfree(bd);
	return 0;
}

struct cyttsp4_driver cyttsp4_btn_driver = {
	.probe = cyttsp4_btn_probe,
	.remove = cyttsp4_btn_release,
	.driver = {
		.name = CYTTSP4_BTN_NAME,
		.bus = &cyttsp4_bus_type,
		.owner = THIS_MODULE,
		.pm = &cyttsp4_btn_pm_ops,
	},
};

static int __init cyttsp4_btn_init(void)
{
	int rc = 0;

	rc = cyttsp4_register_driver(&cyttsp4_btn_driver);
	pr_info("%s: Cypress TTSP MT v4 CapSense BTN (Built %s), rc=%d\n",
		 __func__, CY_DRIVER_DATE, rc);
	return rc;
}
module_init(cyttsp4_btn_init);

static void __exit cyttsp4_btn_exit(void)
{
	cyttsp4_unregister_driver(&cyttsp4_btn_driver);
	pr_info("%s: module exit\n", __func__);
}
module_exit(cyttsp4_btn_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cypress TTSP 2D multi-touch CapSense BTN driver");
MODULE_AUTHOR("Cypress Semiconductor");
gpl-2.0
KylinUI/android_kernel_htc_msm8960
drivers/staging/prima/CORE/BAP/src/bapApiLinkCntl.c
79
84075
/* * Copyright (c) 2012, Code Aurora Forum. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /*=========================================================================== b a p A p i L i n k C n t l . C OVERVIEW: This software unit holds the implementation of the WLAN BAP modules Link Control functions. The functions externalized by this module are to be called ONLY by other WLAN modules (HDD) that properly register with the BAP Layer initially. DEPENDENCIES: Are listed for each API below. Copyright (c) 2008 QUALCOMM Incorporated. All Rights Reserved. Qualcomm Confidential and Proprietary ===========================================================================*/ /*=========================================================================== EDIT HISTORY FOR FILE This section contains comments describing changes made to the module. Notice that changes are listed in reverse chronological order. 
$Header: /home/labuser/ampBlueZ_2/CORE/BAP/src/bapApiLinkCntl.c,v 1.1 2010/10/23 23:40:28 labuser Exp labuser $$DateTime$$Author: labuser $ when who what, where, why ---------- --- -------------------------------------------------------- 2008-09-15 jez Created module ===========================================================================*/ /*---------------------------------------------------------------------------- * Include Files * -------------------------------------------------------------------------*/ //#include "wlan_qct_tl.h" #include "vos_trace.h" // Pick up the CSR callback definition #include "csrApi.h" /* BT-AMP PAL API header file */ #include "bapApi.h" #include "bapInternal.h" #include "btampFsm.h" //#define BAP_DEBUG /*---------------------------------------------------------------------------- * Preprocessor Definitions and Constants * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Type Declarations * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Global Data Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Static Variable Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Static Function Declarations and Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Externalized Function Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Function Declarations and 
Documentation * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- FUNCTION WLANBAP_RoamCallback() DESCRIPTION Callback for Roam (connection status) Events DEPENDENCIES NA. PARAMETERS IN pContext: is the pContext passed in with the roam request pCsrRoamInfo: is a pointer to a tCsrRoamInfo, see definition of eRoamCmdStatus and eRoamCmdResult: for detail valid members. It may be NULL roamId: is to identify the callback related roam request. 0 means unsolicited roamStatus: is a flag indicating the status of the callback roamResult: is the result RETURN VALUE The eHalStatus code associated with performing the operation eHAL_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ #if 0 eCSR_ROAM_RESULT_WDS_STARTED #define eWLAN_BAP_MAC_START_BSS_SUCCESS /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_STARTED */ eCSR_ROAM_RESULT_FAILURE eCSR_ROAM_RESULT_NOT_ASSOCIATED #define eWLAN_BAP_MAC_START_FAILS /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_FAILURE or eCSR_ROAM_RESULT_NOT_ASSOCIATED */ eCSR_ROAM_RESULT_WDS_ASSOCIATED #define eWLAN_BAP_MAC_CONNECT_COMPLETED /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_ASSOCIATED */ eCSR_ROAM_RESULT_FAILURE eCSR_ROAM_RESULT_NOT_ASSOCIATED #define eWLAN_BAP_MAC_CONNECT_FAILED /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_FAILURE or eCSR_ROAM_RESULT_NOT_ASSOCIATED */ eCSR_ROAM_RESULT_WDS_ASSOCIATION_IND #define eWLAN_BAP_MAC_CONNECT_INDICATION /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_ASSOCIATION_IND */ eCSR_ROAM_RESULT_KEY_SET #define eWLAN_BAP_MAC_KEY_SET_SUCCESS /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_KEY_SET */ eCSR_ROAM_RESULT_WDS_DISASSOC_IND #define eWLAN_BAP_MAC_INDICATES_MEDIA_DISCONNECTION /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_DISASSOC_IND */ eCSR_ROAM_RESULT_WDS_STOPPED #define 
eWLAN_BAP_MAC_READY_FOR_CONNECTIONS /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_STOPPED */ #endif //0 eHalStatus WLANBAP_RoamCallback ( void *pContext, tCsrRoamInfo *pCsrRoamInfo, tANI_U32 roamId, eRoamCmdStatus roamStatus, eCsrRoamResult roamResult ) { eHalStatus halStatus = eHAL_STATUS_SUCCESS; /* btampContext value */ ptBtampContext btampContext = (ptBtampContext) pContext; tWLAN_BAPEvent bapEvent; /* State machine event */ VOS_STATUS vosStatus = VOS_STATUS_SUCCESS; v_U8_t status; /* return the BT-AMP status here */ /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, before switch on roamStatus = %d", __FUNCTION__, roamStatus); switch (roamStatus) { //JEZ081110: For testing purposes, with Infra STA as BT STA, this //actually takes care of the "eCSR_ROAM_RESULT_WDS_STARTED" case, //below, better than "eCSR_ROAM_RESULT_IBSS_STARTED". //case eCSR_ROAM_ROAMING_START: case eCSR_ROAM_ASSOCIATION_START: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_STARTED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_ROAMING_START", roamResult); // This only gets called when CSR decides to roam on its own - due to lostlink. 
#if 0 if ((pCsrRoamInfo) && (pCsrRoamInfo->pConnectedProfile) && (pCsrRoamInfo->pConnectedProfile->pBssDesc)) { memcpy(bssid.ether_addr_octet, pCsrRoamInfo->pConnectedProfile->pBssDesc->bssId, sizeof(tSirMacAddr)); apple80211Interface->willRoam(&bssid); // Return result isn't significant VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: willRoam returns\n", __FUNCTION__); } #endif //0 /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_START_BSS_SUCCESS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; case eCSR_ROAM_SET_KEY_COMPLETE: /* bapRoamCompleteCallback with eCSR_ROAM_SET_KEY_COMPLETE */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamStatus = %s (%d)", __FUNCTION__, "eCSR_ROAM_SET_KEY_COMPLETE", roamStatus); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_KEY_SET_SUCCESS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; case eCSR_ROAM_DISASSOCIATED: /* bapRoamCompleteCallback with eCSR_ROAM_DISASSOCIATED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamStatus = %s (%d)", __FUNCTION__, "eCSR_ROAM_DISASSOCIATED", roamStatus); case eCSR_ROAM_LOSTLINK: /* bapRoamCompleteCallback with eCSR_ROAM_LOSTLINK */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamStatus = %s (%d)", __FUNCTION__, "eCSR_ROAM_LOSTLINK", roamStatus); if (roamResult != eCSR_ROAM_RESULT_NONE) { /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_READY_FOR_CONNECTIONS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); } break; default: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, unsupported CSR roamStatus = 
%d", __FUNCTION__, roamStatus); break; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, before switch on roamResult = %d", __FUNCTION__, roamResult); switch (roamResult) { //JEZ081110: Commented out for testing. Test relies upon IBSS. case eCSR_ROAM_RESULT_IBSS_STARTED: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_IBSS_STARTED", roamResult); case eCSR_ROAM_RESULT_WDS_STARTED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_STARTED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_WDS_STARTED", roamResult); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_START_BSS_SUCCESS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; //JEZ081110: Commented out for testing. Test relies upon IBSS. //JEZ081110: But I cannot rely upon IBSS for the initial testing. 
case eCSR_ROAM_RESULT_FAILURE: //case eCSR_ROAM_RESULT_NOT_ASSOCIATED: //case eCSR_ROAM_RESULT_IBSS_START_FAILED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_FAILURE or eCSR_ROAM_RESULT_NOT_ASSOCIATED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_FAILURE", roamResult); #ifdef FEATURE_WLAN_BTAMP_UT_RF break; #endif case eCSR_ROAM_RESULT_WDS_START_FAILED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_START_FAILED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_WDS_START_FAILED", roamResult); /* Fill in the event structure */ /* I don't think I should signal a eCSR_ROAM_RESULT_FAILURE * as a eWLAN_BAP_MAC_START_FAILS */ bapEvent.event = eWLAN_BAP_MAC_START_FAILS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; //JEZ081110: Commented out for testing. This handles both Infra STA and IBSS STA. 
case eCSR_ROAM_RESULT_IBSS_CONNECT: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_IBSS_CONNECT", roamResult); case eCSR_ROAM_RESULT_ASSOCIATED: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_ASSOCIATED", roamResult); case eCSR_ROAM_RESULT_WDS_ASSOCIATED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_ASSOCIATED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_WDS_ASSOCIATED", roamResult); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_CONNECT_COMPLETED; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; //JEZ081110: Commented out for testing. Test relies upon IBSS. //JEZ081110: But I cannot rely upon IBSS for the initial testing. 
//case eCSR_ROAM_RESULT_FAILURE: case eCSR_ROAM_RESULT_IBSS_START_FAILED: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_IBSS_START_FAILED", roamResult); case eCSR_ROAM_RESULT_NOT_ASSOCIATED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_FAILURE or eCSR_ROAM_RESULT_NOT_ASSOCIATED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_NOT_ASSOCIATED", roamResult); #ifdef FEATURE_WLAN_BTAMP_UT_RF break; #endif case eCSR_ROAM_RESULT_WDS_NOT_ASSOCIATED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_NOT_ASSOCIATED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_WDS_NOT_ASSOCIATED", roamResult); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_CONNECT_FAILED; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; //JEZ081110: I think I have to check for the bssType to //differentiate between IBSS Start and IBSS Join success. //case eCSR_ROAM_RESULT_IBSS_CONNECT: //VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_IBSS_CONNECT", roamResult); //JEZ081110: Commented out for testing. Test relies upon IBSS. // No longer commented out. 
case eCSR_ROAM_RESULT_WDS_ASSOCIATION_IND: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_ASSOCIATION_IND */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_WDS_ASSOCIATION_IND", roamResult); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_CONNECT_INDICATION; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); /* If BAP doesn't like the incoming association, signal SME/CSR */ if ( status != WLANBAP_STATUS_SUCCESS) halStatus = eHAL_STATUS_FAILURE; break; //JEZ081110: Not supported in SME and CSR, yet. #if 0 case eCSR_ROAM_RESULT_KEY_SET: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_KEY_SET */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_KEY_SET", roamResult); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_KEY_SET_SUCCESS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; #endif //0 case eCSR_ROAM_RESULT_DISASSOC_IND: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_DISASSOC_IND", roamResult); case eCSR_ROAM_RESULT_WDS_DISASSOCIATED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_DISASSOCIATED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_WDS_DISASSOCIATED", roamResult); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_INDICATES_MEDIA_DISCONNECTION; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); /* Fill in the event structure */ bapEvent.event = 
eWLAN_BAP_MAC_READY_FOR_CONNECTIONS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; //JEZ081110: Commented out for testing. Test relies upon IBSS. case eCSR_ROAM_RESULT_IBSS_INACTIVE: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_IBSS_INACTIVE", roamResult); case eCSR_ROAM_RESULT_WDS_STOPPED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_STOPPED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __FUNCTION__, "eCSR_ROAM_RESULT_WDS_STOPPED", roamResult); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_READY_FOR_CONNECTIONS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; default: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, unsupported CSR roamResult = %d", __FUNCTION__, roamResult); break; } #if 0 switch (roamResult) { case eCSR_ROAM_RESULT_IBSS_CONNECT: // we have an IBSS connection... // update our state btampContext->mAssociatedStatus = WLANBAP_STATUS_SUCCESS; btampContext->mAssociated = VOS_TRUE; // update "assocBssid" with the BSSID of the IBSS if (pCsrRoamInfo) memcpy(btampContext->assocBssid, pCsrRoamInfo->peerMacOrBssidForIBSS, 6); // We must update the system role to match that of the // lower layers in case the upper layers decided to try // joining the network in infrastructure mode if the // initial join in IBSS mode fails. Andreas Wolf // (awolf@apple.com) explains the behavior as follows: // "If the client attempts to join an open network and it fails // on the first attempt, it reverts back to b-only mode. This // workaround was specifically put in place to allow the client // to associate to some third party b-only infrastructure APs. 
// It did not take IBSS into account, it seems that the fallback // always forces infrastructure." btampContext->systemRole = eSYSTEM_STA_IN_IBSS_ROLE; if (mLinkStatus == 0) { // enable the flow of data DBGLOG("%s: marking link as up in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_IBSS_CONNECT"); mLinkStatus = 1; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkUp); outputQueue->setCapacity(TRANSMIT_QUEUE_SIZE); outputQueue->start(); // Let them know we are ready ((IO80211Interface*) mNetworkIF)->postMessage(APPLE80211_M_ASSOC_DONE); } else { DBGLOG("%s: link is already up in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_IBSS_CONNECT"); } break; case eCSR_ROAM_RESULT_IBSS_INACTIVE: // we have no more IBSS peers, so disable the flow of data if (mLinkStatus != 0) { DBGLOG("%s: marking link as down in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_IBSS_INACTIVE"); mLinkStatus = (tANI_U8) 0; // JEZ070627: Revisit ? ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkDown); outputQueue->stop(); outputQueue->setCapacity(0); // update our state btampContext->mAssociated = false; } else { DBGLOG("%s: link already down in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_IBSS_INACTIVE"); } break; case eCSR_ROAM_RESULT_ASSOCIATED: btampContext->mAssociatedStatus = APPLE80211_STATUS_SUCCESS; btampContext->mAssociated = true; if ((pCsrRoamInfo) && (pCsrRoamInfo->pBssDesc)) { ccpCsrToAppleScanResult(mPMacObject, pCsrRoamInfo->pBssDesc, &scanResult); /* Save away the IEs used by the AP */ ccpCsrToAssocApiedata( mPMacObject, pCsrRoamInfo->pBssDesc, &(btampContext->apiedata)); if (BssidChanged((tCsrBssid*) btampContext->assocBssid, (ether_addr*) scanResult.asr_bssid)) { memcpy(btampContext->assocBssid, scanResult.asr_bssid, 6); ((IO80211Interface*) mNetworkIF)->postMessage(APPLE80211_M_BSSID_CHANGED ); } } ((IO80211Interface*) mNetworkIF)->postMessage(APPLE80211_M_ASSOC_DONE); if (mLinkStatus == 0) { mLinkStatus = (tANI_U8) 1; ((IO80211Interface*) 
mNetworkIF)->setLinkState(kIO80211NetworkLinkUp); DBGLOG("%s: marking link as up in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_ASSOCIATED"); outputQueue->setCapacity(TRANSMIT_QUEUE_SIZE); outputQueue->start(); } else { DBGLOG("%s: link is already up in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_ASSOCIATED"); } break; case eCSR_ROAM_RESULT_NOT_ASSOCIATED: btampContext->mAssociatedStatus = APPLE80211_STATUS_UNAVAILABLE; btampContext->mAssociated = false; if (mLinkStatus != 0) { DBGLOG("%s: marking link as down in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_NOT_ASSOCIATED"); mLinkStatus = (tANI_U8) 0; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkDown); } else { DBGLOG("%s: link already down in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_NOT_ASSOCIATED"); } break; case eCSR_ROAM_RESULT_FAILURE: btampContext->mAssociatedStatus = APPLE80211_STATUS_UNSPECIFIED_FAILURE; btampContext->mAssociated = false; if (mLinkStatus != 0) { DBGLOG("%s: marking link as down in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_FAILURE"); mLinkStatus = (tANI_U8) 0; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkDown); } else { DBGLOG("%s: link already down in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_FAILURE"); } break; case eCSR_ROAM_RESULT_DISASSOC_IND: { btampContext->mAssociated = false; if (mLinkStatus != 0) { DBGLOG("%s: marking link as down in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_DISASSOC_IND"); mLinkStatus = (tANI_U8) 0; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkDown); } else { DBGLOG("%s: link already down in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_DISASSOC_IND"); } //if (pCsrRoamInfo) // For now, leave this commented out. Until CSR changes integrated. { // Now set the reason and status codes. // Actually, the "result code" field in the tSirSmeDisassocInd should be named reasonCode and NOT statusCode. // "Reason Codes" are found in DisAssoc or DeAuth Ind. "Status Code" fields are found in Rsp Mgmt Frame. 
// For now, we are going to have to (painfully) map the only "result code" type information we have // available at ALL from LIM/CSR. And that is the statusCode field of type tSirResultCodes // BTW, tSirResultCodes is the COMPLETELY WRONG TYPE for this "result code" field. It SHOULD be // of type tSirMacReasonCodes. // Right now, we don't even have that. So, I have to just make up some "reason code" that I will // pretend I found in the incoming DisAssoc Indication. //btampContext->statusCode = ((tpSirSmeDisassocInd) pCallbackInfo)->statusCode; // tSirResultCodes //btampContext->reasonCode = ((tpSirSmeDisassocInd) pCallbackInfo)->statusCode; // tSirResultCodes btampContext->reasonCode = (tANI_U16) eSIR_MAC_UNSPEC_FAILURE_REASON; //tANI_U16 // tSirMacReasonCodes btampContext->deAuthReasonCode = 0; // tANI_U16 // eSIR_SME_DEAUTH_FROM_PEER // Shouldn't the next line really use a tANI_U16? //0; // tANI_U16 // eSIR_SME_DISASSOC_FROM_PEER btampContext->disassocReasonCode = btampContext->reasonCode; // tSirMacReasonCodes // Let's remember the peer who just disassoc'd us //memcpy(btampContext->peerMacAddr, pCsrRoamInfo->peerMacOrBssidForIBSS, 6); } } break; case eCSR_ROAM_RESULT_DEAUTH_IND: { btampContext->mAssociated = false; if (mLinkStatus != 0) { DBGLOG("%s: marking link as down in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_DEAUTH_IND"); mLinkStatus = (tANI_U8) 0; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkDown); } else { DBGLOG("%s: link already down in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_DEAUTH_IND"); } //if (pCsrRoamInfo) // For now, leave this commented out. Until CSR changes integrated. { // Now set the reason and status codes. // Actually, the "result code" field in the tSirSmeDeauthInd should be named reasonCode and NOT statusCode. // "Reason Codes" are found in DisAssoc or DeAuth Ind. "Status Code" fields are found in Rsp Mgmt Frame. 
// For now, we are going to have to (painfully) map the only "result code" type information we have // available at ALL from LIM/CSR. And that is the statusCode field of type tSirResultCodes // BTW, tSirResultCodes is the COMPLETELY WRONG TYPE for this "result code" field. It SHOULD be // of type tSirMacReasonCodes. // Right now, we don't even have that. So, I have to just make up some "reason code" that I will // pretend I found in the incoming DeAuth Indication. //btampContext->statusCode = ((tpSirSmeDeauthInd) pCallbackInfo)->statusCode; // tSirResultCodes //btampContext->reasonCode = ((tpSirSmeDeauthInd) pCallbackInfo)->statusCode; // tSirResultCodes btampContext->reasonCode = (tANI_U16) eSIR_MAC_UNSPEC_FAILURE_REASON; //tANI_U16 // tSirMacReasonCodes btampContext->disassocReasonCode = 0; // tANI_U16 // eSIR_SME_DISASSOC_FROM_PEER // Shouldn't the next line really use a tANI_U16? //0; // tANI_U16 // eSIR_SME_DEAUTH_FROM_PEER btampContext->deAuthReasonCode = btampContext->reasonCode; // tSirMacReasonCodes // Let's remember the peer who just de-auth'd us //memcpy(btampContext->peerMacAddr, ((tpSirSmeDeauthInd) pCallbackInfo)->peerMacAddr, 6); } } break; case eCSR_ROAM_RESULT_MIC_ERROR_UNICAST: //if (eCSR_ROAM_MIC_ERROR_IND == roamStatus) // Make sure { if (btampContext->mTKIPCounterMeasures) { ((IO80211Interface*) mNetworkIF)->postMessage(APPLE80211_M_MIC_ERROR_UCAST); DBGLOG("%s: TKIP Countermeasures in effect in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_MIC_ERROR_UNICAST"); } else { DBGLOG("%s: TKIP Countermeasures disabled in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_MIC_ERROR_UNICAST"); } } break; case eCSR_ROAM_RESULT_MIC_ERROR_GROUP: //if (eCSR_ROAM_MIC_ERROR_IND == roamStatus) // Make sure { if (btampContext->mTKIPCounterMeasures) { ((IO80211Interface*) mNetworkIF)->postMessage(APPLE80211_M_MIC_ERROR_MCAST); DBGLOG("%s: TKIP Countermeasures in effect in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_MIC_ERROR_GROUP"); } else { DBGLOG("%s: TKIP Countermeasures disabled 
in %s\n", __FUNCTION__, "eCSR_ROAM_RESULT_MIC_ERROR_GROUP"); } } break; default: break; } switch (roamStatus) { case eCSR_ROAM_ROAMING_START: DBGLOG("%s: In %s\n", __FUNCTION__, "eCSR_ROAM_ROAMING_START"); // This only gets called when CSR decides to roam on its own - due to lostlink. // Apple still needs to be told. if ((pCsrRoamInfo) && (pCsrRoamInfo->pConnectedProfile) && (pCsrRoamInfo->pConnectedProfile->pBssDesc)) { memcpy(bssid.ether_addr_octet, pCsrRoamInfo->pConnectedProfile->pBssDesc->bssId, sizeof(tSirMacAddr)); apple80211Interface->willRoam(&bssid); // Return result isn't significant DBGLOG("%s: willRoam returns\n", __FUNCTION__); } break; case eCSR_ROAM_SHOULD_ROAM: if ((pCsrRoamInfo) && (pCsrRoamInfo->pBssDesc)) { // pCallbackInfo points to the BSS desc. Convert to Apple Scan Result. halStatus = ccpCsrToAppleScanResult( mPMacObject, pCsrRoamInfo->pBssDesc, &scanResult); if ( halStatus != 0 ) return eHAL_STATUS_FAILURE; roamAccepted = apple80211Interface->shouldRoam(&scanResult); // Return result is crucial if (roamAccepted == true) { // If the roam is acceptable, return SUCCESS DBGLOG("%s: shouldRoam returns \"acceptable\"\n", __FUNCTION__); //#if 0 // Actually, before returning, immediately signal willRoam // This is a workaround for a CSR bug. Eventually, when // eCSR_ROAM_ASSOCIATION_START gets called WITH callback param p1 // pointing to a tBssDescription, this work-around can be removed. 
memcpy(bssid.ether_addr_octet, pCsrRoamInfo->pBssDesc->bssId, sizeof(tSirMacAddr)); apple80211Interface->willRoam(&bssid); // Return result isn't significant DBGLOG("%s: willRoam (called out of order) returns\n", __FUNCTION__); DBGLOG(" with BSSID = " MAC_ADDR_STRING(bssid.ether_addr_octet)); //#endif return eHAL_STATUS_SUCCESS; } else { // If the roam is NOT acceptable, return FAILURE DBGLOG("%s: shouldRoam returns \"NOT acceptable\"\n", __FUNCTION__); return eHAL_STATUS_FAILURE; } } break; case eCSR_ROAM_DISASSOCIATED: //if (eCSR_ROAM_RESULT_FORCED == roamResult || eCSR_ROAM_RESULT_MIC_ERROR == roamResult) { btampContext->mAssociated = false; if (mLinkStatus != 0) { DBGLOG("%s: marking link as down in %s\n", __FUNCTION__, "eCSR_ROAM_DISASSOCIATED"); mLinkStatus = (tANI_U8) 0; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkDown); } else { DBGLOG("%s: link already down in %s\n", __FUNCTION__, "eCSR_ROAM_DISASSOCIATED"); } } break; case eCSR_ROAM_LOSTLINK: btampContext->mAssociatedStatus = APPLE80211_STATUS_UNSPECIFIED_FAILURE; btampContext->mAssociated = false; if (mLinkStatus != 0) { DBGLOG("%s: marking link as down in %s\n", __FUNCTION__, "eCSR_ROAM_LOSTLINK"); mLinkStatus = (tANI_U8) 0; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkDown); } else { DBGLOG("%s: link already down in %s\n", __FUNCTION__, "eCSR_ROAM_LOSTLINK"); } break; case eCSR_ROAM_ASSOCIATION_START: DBGLOG("%s: In %s\n", __FUNCTION__, "eCSR_ROAM_ASSOCIATION_START"); #if 0 // This is the right place to call willRoam - for an "initial" association. // But, unfortunately, when eCSR_ROAM_ASSOCIATION_START gets called, // it doesn't have a pointer to the tBssDescription in the roaming callback // routines parameter p1 (pCallbackInfo in SetWextState). So, don't use this code, yet. 
if ((pCsrRoamInfo) && (pCsrRoamInfo->pBssDesc) { memcpy(bssid.ether_addr_octet, pCsrRoamInfo->pBssDesc->bssId, 6); apple80211Interface->willRoam(&bssid); // Return result isn't significant DBGLOG("%s: willRoam returns\n", __FUNCTION__); DBGLOG(" with BSSID = " MAC_ADDR_STRING(bssid.ether_addr_octet)); } #endif //0 break; case eCSR_ROAM_ASSOCIATION_COMPLETION: DBGLOG("%s: In %s\n", __FUNCTION__, "eCSR_ROAM_ASSOCIATION_COMPLETION"); break; case eCSR_ROAM_MIC_ERROR_IND: // Handled in eCSR_ROAM_RESULT_MIC_ERROR_UNICAST and GROUP, above case eCSR_ROAM_CANCELLED: case eCSR_ROAM_ROAMING_COMPLETION: case eCSR_ROAM_SCAN_FOUND_NEW_BSS: default: break; } #endif //0 return halStatus; } /*---------------------------------------------------------------------------- Host Controller Interface Procedural API ---------------------------------------------------------------------------*/ /** BT v3.0 Link Control commands */ /*---------------------------------------------------------------------------- Each of the next eight command result in asynchronous events (e.g., HCI_PHYSICAL_LINK_COMPLETE_EVENT, HCI_LOGICAL_LINK_COMPLETE_EVENT, etc...) These are signalled thru the event callback. (I.E., (*tpWLAN_BAPEventCB).) ---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPPhysicalLinkCreate() DESCRIPTION Implements the actual HCI Create Physical Link command DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. WLANBAP_GetNewHndl has to be called before every call to WLAN_BAPPhysicalLinkCreate. Since the context is per physical link. pBapHCIPhysLinkCreate: pointer to the "HCI Create Physical Link" Structure. pHddHdl: The context passed in by the caller. (e.g., BSL specific context) IN/OUT pBapHCIEvent: Return event value for the command status event. 
(The caller of this routine is responsible for sending the Command Status event up the HCI interface.) RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIPhysLinkCreate is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPPhysicalLinkCreate ( ptBtampHandle btampHandle, tBtampTLVHCI_Create_Physical_Link_Cmd *pBapHCIPhysLinkCreate, v_PVOID_t pHddHdl, /* BSL passes in its specific context */ /* And I get phy_link_handle from the Command */ tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { tWLAN_BAPEvent bapEvent; /* State machine event */ VOS_STATUS vosStatus; /* I am using btampContext, instead of pBapPhysLinkMachine */ //tWLAN_BAPbapPhysLinkMachine *pBapPhysLinkMachine; ptBtampContext btampContext = (ptBtampContext) btampHandle; /* btampContext value */ v_U8_t status; /* return the BT-AMP status here */ BTAMPFSM_INSTANCEDATA_T *instanceVar = &(btampContext->bapPhysLinkMachine); /* Validate params */ if ((pBapHCIPhysLinkCreate == NULL) || (NULL == btampContext)) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "%s: btampHandle value: %x, pBapHCIPhysLinkCreate is %x", __FUNCTION__, btampHandle, pBapHCIPhysLinkCreate); return VOS_STATUS_E_FAULT; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampHandle value: %x", __FUNCTION__, btampHandle); if(DISCONNECTED != instanceVar->stateVar) { /* Create/Accept Phy link request in invalid state */ status = WLANBAP_ERROR_MAX_NUM_CNCTS; } else { /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_HCI_PHYSICAL_LINK_CREATE; bapEvent.params = pBapHCIPhysLinkCreate; //bapEvent.callback = pBapHCIPhysLinkCreateCB; /* Allocate a new state machine instance */ /* There will only ever be one of these (NB: Don't assume this.) 
*/ /* So for now this returns a pointer to a static structure */ /* (With all state set to initial values) */ vosStatus = WLANBAP_CreateNewPhyLinkCtx ( btampHandle, pBapHCIPhysLinkCreate->phy_link_handle, /* I get phy_link_handle from the Command */ pHddHdl, /* BSL passes in its specific context */ &btampContext, /* Handle to return per assoc btampContext value in */ BT_INITIATOR); /* BT_INITIATOR */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampContext value: %x", __FUNCTION__, btampContext); /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); } /* Format the command status event to return... */ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT; pBapHCIEvent->u.btampCommandStatusEvent.present = 1; pBapHCIEvent->u.btampCommandStatusEvent.status = status; pBapHCIEvent->u.btampCommandStatusEvent.num_hci_command_packets = 1; pBapHCIEvent->u.btampCommandStatusEvent.command_opcode = BTAMP_TLV_HCI_CREATE_PHYSICAL_LINK_CMD; /* ... */ return VOS_STATUS_SUCCESS; } /* WLAN_BAPPhysicalLinkCreate */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPPhysicalLinkAccept() DESCRIPTION Implements the actual HCI Accept Physical Link command DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCIPhysLinkAccept: pointer to the "HCI Accept Physical Link" Structure. pHddHdl: The context passed in by the caller. (e.g., BSL specific context) IN/OUT pBapHCIEvent: Return event value for the command status event. (The caller of this routine is responsible for sending the Command Status event up the HCI interface.) 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIPhysLinkAccept is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPPhysicalLinkAccept ( ptBtampHandle btampHandle, tBtampTLVHCI_Accept_Physical_Link_Cmd *pBapHCIPhysLinkAccept, v_PVOID_t pHddHdl, /* BSL passes in its specific context */ /* And I get phy_link_handle from the Command */ tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { tWLAN_BAPEvent bapEvent; /* State machine event */ VOS_STATUS vosStatus; /* I am using btampContext, instead of pBapPhysLinkMachine */ //tWLAN_BAPbapPhysLinkMachine *pBapPhysLinkMachine; ptBtampContext btampContext = (ptBtampContext) btampHandle; /* btampContext value */ v_U8_t status; /* return the BT-AMP status here */ BTAMPFSM_INSTANCEDATA_T *instanceVar; /* Validate params */ if ((pBapHCIPhysLinkAccept == NULL) || (NULL == btampContext)) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "%s: btampHandle value: %x, pBapHCIPhysLinkAccept is %x", __FUNCTION__, btampHandle, pBapHCIPhysLinkAccept); return VOS_STATUS_E_FAULT; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampHandle value: %x", __FUNCTION__, btampHandle); instanceVar = &(btampContext->bapPhysLinkMachine); if(DISCONNECTED != instanceVar->stateVar) { /* Create/Accept Phy link request in invalid state */ status = WLANBAP_ERROR_MAX_NUM_CNCTS; } else { /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_HCI_PHYSICAL_LINK_ACCEPT; bapEvent.params = pBapHCIPhysLinkAccept; //bapEvent.callback = pBapHCIPhysLinkAcceptCB; /* Allocate a new state machine instance */ /* There will only ever be one of these (NB: Don't assume this.) 
*/ /* So for now this returns a pointer to a static structure */ /* (With all state set to initial values) */ vosStatus = WLANBAP_CreateNewPhyLinkCtx ( btampHandle, pBapHCIPhysLinkAccept->phy_link_handle, /* I get phy_link_handle from the Command */ pHddHdl, /* BSL passes in its specific context */ &btampContext, /* Handle to return per assoc btampContext value in */ BT_RESPONDER); /* BT_RESPONDER */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampContext value: %x", __FUNCTION__, btampContext); /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); } /* Format the command status event to return... */ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT; pBapHCIEvent->u.btampCommandStatusEvent.present = 1; pBapHCIEvent->u.btampCommandStatusEvent.status = status; pBapHCIEvent->u.btampCommandStatusEvent.num_hci_command_packets = 1; pBapHCIEvent->u.btampCommandStatusEvent.command_opcode = BTAMP_TLV_HCI_ACCEPT_PHYSICAL_LINK_CMD; /* ... */ return VOS_STATUS_SUCCESS; } /* WLAN_BAPPhysicalLinkAccept */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPPhysicalLinkDisconnect() DESCRIPTION Implements the actual HCI Disconnect Physical Link command DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCIPhysLinkDisconnect: pointer to the "HCI Disconnect Physical Link" Structure. IN/OUT pBapHCIEvent: Return event value for the command status event. (The caller of this routine is responsible for sending the Command Status event up the HCI interface.) 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIPhysLinkDisconnect is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPPhysicalLinkDisconnect ( ptBtampHandle btampHandle, tBtampTLVHCI_Disconnect_Physical_Link_Cmd *pBapHCIPhysLinkDisconnect, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { tWLAN_BAPEvent bapEvent; /* State machine event */ VOS_STATUS vosStatus; /* I am using btampContext, instead of pBapPhysLinkMachine */ //tWLAN_BAPbapPhysLinkMachine *pBapPhysLinkMachine; ptBtampContext btampContext = (ptBtampContext) btampHandle; /* btampContext value */ v_U8_t status; /* return the BT-AMP status here */ /* Validate params */ if (pBapHCIPhysLinkDisconnect == NULL) { return VOS_STATUS_E_FAULT; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampHandle value: %x", __FUNCTION__, btampHandle); /* Validate the Physical link handle */ if (pBapHCIPhysLinkDisconnect->phy_link_handle != btampContext->phy_link_handle) { /* Format the command status event to return... 
*/ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT; pBapHCIEvent->u.btampCommandStatusEvent.present = 1; pBapHCIEvent->u.btampCommandStatusEvent.status = WLANBAP_ERROR_NO_CNCT; pBapHCIEvent->u.btampCommandStatusEvent.num_hci_command_packets = 1; pBapHCIEvent->u.btampCommandStatusEvent.command_opcode = BTAMP_TLV_HCI_DISCONNECT_PHYSICAL_LINK_CMD; return VOS_STATUS_SUCCESS; } /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_HCI_PHYSICAL_LINK_DISCONNECT; bapEvent.params = pBapHCIPhysLinkDisconnect; VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampContext value: %x", __FUNCTION__, btampContext); /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_READY_FOR_CONNECTIONS; bapEvent.params = pBapHCIPhysLinkDisconnect; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); /* Format the command status event to return... */ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT; pBapHCIEvent->u.btampCommandStatusEvent.present = 1; pBapHCIEvent->u.btampCommandStatusEvent.status = status; pBapHCIEvent->u.btampCommandStatusEvent.num_hci_command_packets = 1; pBapHCIEvent->u.btampCommandStatusEvent.command_opcode = BTAMP_TLV_HCI_DISCONNECT_PHYSICAL_LINK_CMD; /* ... */ return VOS_STATUS_SUCCESS; } /* WLAN_BAPPhysicalLinkDisconnect */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPLogicalLinkCreate() DESCRIPTION Implements the actual HCI Create Logical Link command DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCILogLinkCreate: pointer to the "HCI Create Logical Link" Structure. IN/OUT pBapHCIEvent: Return event value for the command status event. (The caller of this routine is responsible for sending the Command Status event up the HCI interface.) 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCILogLinkCreate is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPLogicalLinkCreate ( ptBtampHandle btampHandle, tBtampTLVHCI_Create_Logical_Link_Cmd *pBapHCILogLinkCreate, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { tBtampHCI_Event bapHCIEvent; /* This now encodes ALL event types */ VOS_STATUS vosStatus; ptBtampContext btampContext = (ptBtampContext) btampHandle; v_U16_t log_link_index = 0; BTAMPFSM_INSTANCEDATA_T *instanceVar = &(btampContext->bapPhysLinkMachine); VOS_STATUS retval; v_U16_t index_for_logLinkCtx = 0; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /* Validate params */ if (btampHandle == NULL) { return VOS_STATUS_E_FAULT; } /* Validate params */ if (pBapHCILogLinkCreate == NULL) { return VOS_STATUS_E_FAULT; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampHandle value: %x", __FUNCTION__, btampHandle); /* Validate the BAP state to accept the logical link request Logical Link create/accept requests are allowed only in CONNECTED state */ /* Form and immediately return the command status event... 
*/ bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT; bapHCIEvent.u.btampCommandStatusEvent.present = 1; bapHCIEvent.u.btampCommandStatusEvent.num_hci_command_packets = 1; bapHCIEvent.u.btampCommandStatusEvent.command_opcode = BTAMP_TLV_HCI_CREATE_LOGICAL_LINK_CMD; retval = VOS_STATUS_E_FAILURE; if(DISCONNECTED == instanceVar->stateVar) { /* Create Logical link request in invalid state */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_CMND_DISALLOWED; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_NO_CNCT; } else if (CONNECTED != instanceVar->stateVar) { /* Create Logical link request in invalid state */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_CMND_DISALLOWED; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_CMND_DISALLOWED; } else if (pBapHCILogLinkCreate->phy_link_handle != btampContext->phy_link_handle) { /* Invalid Physical link handle */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; } else { btampContext->btamp_logical_link_state = WLAN_BAPLogLinkInProgress; if( TRUE == btampContext->btamp_logical_link_cancel_pending ) { pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_NO_CNCT; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_NO_CNCT; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; btampContext->btamp_logical_link_cancel_pending = FALSE; } else { /* If btamp_async_logical_link_create is set, we will seralize the req on MC thread & handle it there after; If the above flag is not set respond to HCI the sync way as before */ if(FALSE == btampContext->btamp_async_logical_link_create) { /* Allocate a logical link index for these flow specs */ vosStatus = WLANBAP_CreateNewLogLinkCtx( btampContext, /* per assoc btampContext value */ pBapHCILogLinkCreate->phy_link_handle, /* I get phy_link_handle from 
the Command */ pBapHCILogLinkCreate->tx_flow_spec, /* I get tx_flow_spec from the Command */ pBapHCILogLinkCreate->rx_flow_spec, /* I get rx_flow_spec from the Command */ &log_link_index /* Return the logical link index here */ ); if (VOS_STATUS_SUCCESS != vosStatus) { /* Invalid flow spec format */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; } else { retval = VOS_STATUS_SUCCESS; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_STATUS_SUCCESS; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_STATUS_SUCCESS; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkOpen; } } else { btampContext->btamp_logical_link_req_info.phyLinkHandle = pBapHCILogLinkCreate->phy_link_handle; vos_mem_copy(btampContext->btamp_logical_link_req_info.txFlowSpec, pBapHCILogLinkCreate->tx_flow_spec, 18); vos_mem_copy(btampContext->btamp_logical_link_req_info.rxFlowSpec, pBapHCILogLinkCreate->rx_flow_spec, 18); btampContext->btamp_async_logical_link_create = FALSE; vosStatus = btampEstablishLogLink(btampContext); if(VOS_STATUS_SUCCESS == vosStatus) { retval = VOS_STATUS_E_BUSY;//this will make sure event complete is not sent to HCI } else { pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; } } } } vosStatus = (*btampContext->pBapHCIEventCB) ( btampContext->pHddHdl, /* this refers to the BSL per connection context */ &bapHCIEvent, /* This now encodes ALL event types */ VOS_TRUE /* Flag to indicate assoc-specific event */ ); index_for_logLinkCtx = log_link_index >> 8; /* Format the Logical Link Complete event to return... 
*/ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_LOGICAL_LINK_COMPLETE_EVENT; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.present = 1; /* Return the logical link index here */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.log_link_handle = log_link_index; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.phy_link_handle = pBapHCILogLinkCreate->phy_link_handle; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.flow_spec_id = btampContext->btampLogLinkCtx[index_for_logLinkCtx].btampFlowSpec.flow_spec_id; /* ... */ return retval; } /* WLAN_BAPLogicalLinkCreate */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPLogicalLinkAccept() DESCRIPTION Implements the actual HCI Accept Logical Link command DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCILogLinkAccept: pointer to the "HCI Accept Logical Link" Structure. IN/OUT pBapHCIEvent: Return event value for the command status event. (The caller of this routine is responsible for sending the Command Status event up the HCI interface.) 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCILogLinkAccept is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPLogicalLinkAccept ( ptBtampHandle btampHandle, tBtampTLVHCI_Accept_Logical_Link_Cmd *pBapHCILogLinkAccept, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { tBtampHCI_Event bapHCIEvent; /* This now encodes ALL event types */ VOS_STATUS vosStatus; ptBtampContext btampContext = (ptBtampContext) btampHandle; v_U16_t log_link_index = 0; BTAMPFSM_INSTANCEDATA_T *instanceVar = &(btampContext->bapPhysLinkMachine); VOS_STATUS retval; v_U16_t index_for_logLinkCtx; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /* Validate params */ if (btampHandle == NULL) { return VOS_STATUS_E_FAULT; } /* Validate params */ if (pBapHCILogLinkAccept == NULL) { return VOS_STATUS_E_FAULT; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampHandle value: %x", __FUNCTION__, btampHandle); /* Validate the BAP state to accept the logical link request Logical Link create/accept requests are allowed only in CONNECTED state */ /* Form and immediately return the command status event... 
*/ bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT; bapHCIEvent.u.btampCommandStatusEvent.present = 1; bapHCIEvent.u.btampCommandStatusEvent.num_hci_command_packets = 1; bapHCIEvent.u.btampCommandStatusEvent.command_opcode = BTAMP_TLV_HCI_ACCEPT_LOGICAL_LINK_CMD; retval = VOS_STATUS_E_FAILURE; if(DISCONNECTED == instanceVar->stateVar) { /* Create Logical link request in invalid state */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_CMND_DISALLOWED; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_NO_CNCT; } else if (CONNECTED != instanceVar->stateVar) { /* Create Logical link request in invalid state */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_CMND_DISALLOWED; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_CMND_DISALLOWED; } else if (pBapHCILogLinkAccept->phy_link_handle != btampContext->phy_link_handle) { /* Invalid Physical link handle */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; } else { btampContext->btamp_logical_link_state = WLAN_BAPLogLinkInProgress; if( TRUE == btampContext->btamp_logical_link_cancel_pending ) { pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_NO_CNCT; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_NO_CNCT; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; btampContext->btamp_logical_link_cancel_pending = FALSE; } else { /* If btamp_async_logical_link_create is set, we will seralize the req on MC thread & handle it there after; If the above flag is not set respond to HCI the sync way as before */ if(FALSE == btampContext->btamp_async_logical_link_create) { /* Allocate a logical link index for these flow specs */ vosStatus = WLANBAP_CreateNewLogLinkCtx( btampContext, /* per assoc btampContext value */ pBapHCILogLinkAccept->phy_link_handle, /* I get phy_link_handle from 
the Command */ pBapHCILogLinkAccept->tx_flow_spec, /* I get tx_flow_spec from the Command */ pBapHCILogLinkAccept->rx_flow_spec, /* I get rx_flow_spec from the Command */ &log_link_index /* Return the logical link index here */ ); if (VOS_STATUS_SUCCESS != vosStatus) { /* Invalid flow spec format */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; } else { retval = VOS_STATUS_SUCCESS; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_STATUS_SUCCESS; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_STATUS_SUCCESS; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkOpen; } } else { btampContext->btamp_logical_link_req_info.phyLinkHandle = pBapHCILogLinkAccept->phy_link_handle; vos_mem_copy(btampContext->btamp_logical_link_req_info.txFlowSpec, pBapHCILogLinkAccept->tx_flow_spec, 18); vos_mem_copy(btampContext->btamp_logical_link_req_info.rxFlowSpec, pBapHCILogLinkAccept->rx_flow_spec, 18); btampContext->btamp_async_logical_link_create = FALSE; vosStatus = btampEstablishLogLink(btampContext); if(VOS_STATUS_SUCCESS == vosStatus) { retval = VOS_STATUS_E_BUSY;//this will make sure event complete is not sent to HCI } else { pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; } } } } vosStatus = (*btampContext->pBapHCIEventCB) ( btampContext->pHddHdl, /* this refers to the BSL per connection context */ &bapHCIEvent, /* This now encodes ALL event types */ VOS_TRUE /* Flag to indicate assoc-specific event */ ); index_for_logLinkCtx = log_link_index >> 8; /* Format the Logical Link Complete event to return... 
*/ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_LOGICAL_LINK_COMPLETE_EVENT; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.present = 1; /* Return the logical link index here */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.log_link_handle = log_link_index; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.phy_link_handle = pBapHCILogLinkAccept->phy_link_handle; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.flow_spec_id = btampContext->btampLogLinkCtx[index_for_logLinkCtx].btampFlowSpec.flow_spec_id; /* ... */ return retval; } /* WLAN_BAPLogicalLinkAccept */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPLogicalLinkDisconnect() DESCRIPTION Implements the actual HCI Disconnect Logical Link command DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCILogLinkDisconnect: pointer to the "HCI Disconnect Logical Link" Structure. IN/OUT pBapHCIEvent: Return event value for the command status event. (The caller of this routine is responsible for sending the Command Status event up the HCI interface.) 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCILogLinkDisconnect is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPLogicalLinkDisconnect ( ptBtampHandle btampHandle, tBtampTLVHCI_Disconnect_Logical_Link_Cmd *pBapHCILogLinkDisconnect, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { tBtampHCI_Event bapHCIEvent; /* This now encodes ALL event types */ ptBtampContext btampContext = (ptBtampContext) btampHandle; tpBtampLogLinkCtx pLogLinkContext; VOS_STATUS retval = VOS_STATUS_SUCCESS; v_U8_t log_link_index; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if (( NULL == pBapHCILogLinkDisconnect ) || ( NULL == btampContext)) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Critical error: Invalid input parameter on %s", __FUNCTION__); return VOS_STATUS_E_FAULT; } /* Derive logical link index from handle */ log_link_index = ((pBapHCILogLinkDisconnect->log_link_handle) >> 8); if( log_link_index > WLANBAP_MAX_LOG_LINKS ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Critical error: Invalid input parameter on %s", __FUNCTION__); /* Fill in the event code to propagate the event notification to BRM BRM generates the Command status Event based on this.*/ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_DISCONNECT_LOGICAL_LINK_COMPLETE_EVENT; pBapHCIEvent->u.btampDisconnectLogicalLinkCompleteEvent.present = 1; pBapHCIEvent->u.btampDisconnectLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; return VOS_STATUS_E_INVAL; } #ifdef BAP_DEBUG /* Trace the tBtampCtx being passed in. 
*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN BAP Context Monitor: btampContext value = %x in %s:%d", btampContext, __FUNCTION__, __LINE__ ); #endif //BAP_DEBUG bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT; bapHCIEvent.u.btampCommandStatusEvent.present = 1; bapHCIEvent.u.btampCommandStatusEvent.num_hci_command_packets = 1; bapHCIEvent.u.btampCommandStatusEvent.command_opcode = BTAMP_TLV_HCI_DISCONNECT_LOGICAL_LINK_CMD; /*------------------------------------------------------------------------ FIXME: Validate the Logical Link handle, Generation and freeing... Here the Logical link is not validated and assumed that it is correct to. get the Logical link context. . ------------------------------------------------------------------------*/ pLogLinkContext = &(btampContext->btampLogLinkCtx[log_link_index]); // Validate whether the context is active. if ((VOS_FALSE == pLogLinkContext->present) || (pBapHCILogLinkDisconnect->log_link_handle != pLogLinkContext->log_link_handle)) { /* If status is failed, the platform specific layer generates the command status event with proper status */ pBapHCIEvent->u.btampDisconnectLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_NO_CNCT; retval = VOS_STATUS_E_FAILURE; #ifdef BAP_DEBUG /* Log the error. */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "%s:%d Invalid Logical Link handle(should be) = %d(%d)", __FUNCTION__, __LINE__, pBapHCILogLinkDisconnect->log_link_handle, pLogLinkContext->log_link_handle); VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, " Logical Link index = %d", log_link_index); #endif //BAP_DEBUG } else { /* Form and return the command status event... 
*/ bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_STATUS_SUCCESS; pBapHCIEvent->u.btampDisconnectLogicalLinkCompleteEvent.status = WLANBAP_STATUS_SUCCESS; pLogLinkContext->present = VOS_FALSE; pLogLinkContext->uTxPktCompleted = 0; pLogLinkContext->log_link_handle = 0; /* Decrement the total logical link count */ btampContext->total_log_link_index--; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; } /* Notify the Command status Event */ (*btampContext->pBapHCIEventCB) ( btampContext->pHddHdl, /* this refers to the BSL per connection context */ &bapHCIEvent, /* This now encodes ALL event types */ VOS_TRUE /* Flag to indicate assoc-specific event */ ); /* Format the Logical Link Complete event to return... */ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_DISCONNECT_LOGICAL_LINK_COMPLETE_EVENT; pBapHCIEvent->u.btampDisconnectLogicalLinkCompleteEvent.present = 1; /* Return the logical link index here */ pBapHCIEvent->u.btampDisconnectLogicalLinkCompleteEvent.log_link_handle = pBapHCILogLinkDisconnect->log_link_handle; pBapHCIEvent->u.btampDisconnectLogicalLinkCompleteEvent.reason = WLANBAP_ERROR_TERM_BY_LOCAL_HOST; return retval; } /* WLAN_BAPLogicalLinkDisconnect */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPLogicalLinkCancel() DESCRIPTION Implements the actual HCI Cancel Logical Link command DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCILogLinkCancel: pointer to the "HCI Cancel Logical Link" Structure. IN/OUT pBapHCIEvent: Return event value for the command complete event. (The caller of this routine is responsible for sending the Command Complete event up the HCI interface.) (BTW, the required "HCI Logical Link Complete Event" will be generated by the BAP state machine and sent up via the (*tpWLAN_BAPEventCB).) 
RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:  pointer to pBapHCILogLinkCancel is NULL
    VOS_STATUS_SUCCESS:  Success

  SIDE EFFECTS

----------------------------------------------------------------------------*/
VOS_STATUS
WLAN_BAPLogicalLinkCancel
(
  ptBtampHandle btampHandle,
  tBtampTLVHCI_Logical_Link_Cancel_Cmd *pBapHCILogLinkCancel,
  tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */
                                /* Including Command Complete and Command Status*/
)
{
    ptBtampContext pBtampCtx;
    BTAMPFSM_INSTANCEDATA_T *pFsmInstance;

    /* Reject NULL inputs up front */
    if ((btampHandle == NULL) || (pBapHCILogLinkCancel == NULL) ||
        (pBapHCIEvent == NULL))
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "%s: Null Parameters Not allowed", __FUNCTION__);
        return VOS_STATUS_E_FAULT;
    }

    pBtampCtx = (ptBtampContext) btampHandle;
    pFsmInstance = &(pBtampCtx->bapPhysLinkMachine);

    /* Build the Command Complete event that is handed straight back to
       the caller; only the per-branch status field varies below. */
    pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_COMPLETE_EVENT;
    pBapHCIEvent->u.btampCommandCompleteEvent.present = 1;
    pBapHCIEvent->u.btampCommandCompleteEvent.command_opcode =
        BTAMP_TLV_HCI_LOGICAL_LINK_CANCEL_CMD;
    pBapHCIEvent->u.btampCommandCompleteEvent.num_hci_command_packets = 1;

    if (pBapHCILogLinkCancel->phy_link_handle == pBtampCtx->phy_link_handle)
    {
        /* Logical link creation is acknowledged immediately, so by the time
           a cancel arrives the link is usually already created; cancel can
           only succeed while creation is still in progress. */
        switch (pBtampCtx->btamp_logical_link_state)
        {
            case WLAN_BAPLogLinkOpen:
                /* Already connected: cancel cannot succeed */
                pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Logical_Link_Cancel.status =
                    WLANBAP_ERROR_MAX_NUM_ACL_CNCTS;
                break;

            case WLAN_BAPLogLinkInProgress:
                /* Creation still pending: flag it so the in-flight create
                   is failed, and report success for the cancel itself */
                pBtampCtx->btamp_logical_link_cancel_pending = TRUE;
                pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Logical_Link_Cancel.status =
                    WLANBAP_STATUS_SUCCESS;
                break;

            case WLAN_BAPLogLinkClosed:
            default:
                /* No logical link to cancel */
                pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Logical_Link_Cancel.status =
                    WLANBAP_ERROR_NO_CNCT;
                break;
        }
    }
    else
    {
        /* Unknown physical link handle */
        pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Logical_Link_Cancel.status =
            WLANBAP_ERROR_NO_CNCT;
    }

    /* Echo the handles from the command back into the event */
    pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Logical_Link_Cancel.phy_link_handle =
        pBapHCILogLinkCancel->phy_link_handle;
    /* Tx flow spec id is echoed from the command as well */
    pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Logical_Link_Cancel.tx_flow_spec_id =
        pBapHCILogLinkCancel->tx_flow_spec_id;

    return VOS_STATUS_SUCCESS;
} /* WLAN_BAPLogicalLinkCancel */

/*----------------------------------------------------------------------------
 FUNCTION WLAN_BAPFlowSpecModify()

 DESCRIPTION
    Implements the actual HCI Modify Logical Link command
    Produces an asynchronous flow spec modify complete event.
    Through the event callback.

 DEPENDENCIES
    NA.

 PARAMETERS
    IN
    btampHandle: pointer to the BAP handle.  Returned from WLANBAP_GetNewHndl.
    pBapHCIFlowSpecModify: pointer to the "HCI Flow Spec Modify" Structure.

    IN/OUT
    pBapHCIEvent:  Return event value for the command status event.
(The caller of this routine is responsible for sending the Command Status event up the HCI interface.) RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIFlowSpecModify is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPFlowSpecModify ( ptBtampHandle btampHandle, tBtampTLVHCI_Flow_Spec_Modify_Cmd *pBapHCIFlowSpecModify, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { v_U16_t index_for_logLinkHandle = 0; ptBtampContext btampContext; tpBtampLogLinkCtx pLogLinkContext; v_U32_t retval; VOS_STATUS vosStatus = VOS_STATUS_SUCCESS; tBtampHCI_Event bapHCIEvent; /* This now encodes ALL event types */ /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /* Validate params */ if ((btampHandle == NULL) || (pBapHCIFlowSpecModify == NULL) || (pBapHCIEvent == NULL)) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "%s: Null Parameters Not allowed", __FUNCTION__); return VOS_STATUS_E_FAULT; } btampContext = (ptBtampContext) btampHandle; index_for_logLinkHandle = pBapHCIFlowSpecModify->log_link_handle >> 8; /* Return the logical link index here */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO, " %s:index_for_logLinkHandle=%d", __FUNCTION__,index_for_logLinkHandle); bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT; bapHCIEvent.u.btampCommandStatusEvent.present = 1; bapHCIEvent.u.btampCommandStatusEvent.num_hci_command_packets = 1; bapHCIEvent.u.btampCommandStatusEvent.command_opcode = BTAMP_TLV_HCI_FLOW_SPEC_MODIFY_CMD; /*------------------------------------------------------------------------ Evaluate the Tx and Rx Flow specification for this logical link. 
------------------------------------------------------------------------*/ // Currently we only support flow specs with service types of BE (0x01) /*------------------------------------------------------------------------ Now configure the Logical Link context. ------------------------------------------------------------------------*/ pLogLinkContext = &(btampContext->btampLogLinkCtx[index_for_logLinkHandle]); /* Extract Tx flow spec into the context structure */ retval = btampUnpackTlvFlow_Spec((void *)btampContext, pBapHCIFlowSpecModify->tx_flow_spec, WLAN_BAP_PAL_FLOW_SPEC_TLV_LEN, &pLogLinkContext->btampFlowSpec); if (retval != BTAMP_PARSE_SUCCESS) { /* Flow spec parsing failed, return failure */ vosStatus = VOS_STATUS_E_FAILURE; pBapHCIEvent->u.btampFlowSpecModifyCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; } else { bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_STATUS_SUCCESS; pBapHCIEvent->u.btampFlowSpecModifyCompleteEvent.status = WLANBAP_STATUS_SUCCESS; } /* Notify the Command status Event */ vosStatus = (*btampContext->pBapHCIEventCB) ( btampContext->pHddHdl, /* this refers to the BSL per connection context */ &bapHCIEvent, /* This now encodes ALL event types */ VOS_TRUE /* Flag to indicate assoc-specific event */ ); /* Form and immediately return the command status event... 
*/ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_FLOW_SPEC_MODIFY_COMPLETE_EVENT; pBapHCIEvent->u.btampFlowSpecModifyCompleteEvent.present = 1; pBapHCIEvent->u.btampFlowSpecModifyCompleteEvent.log_link_handle = pBapHCIFlowSpecModify->log_link_handle; return vosStatus; } /* WLAN_BAPFlowSpecModify */ void WLAN_BAPEstablishLogicalLink(ptBtampContext btampContext) { tBtampHCI_Event bapHCIEvent; /* This now encodes ALL event types */ v_U16_t log_link_index = 0; v_U16_t index_for_logLinkCtx = 0; VOS_STATUS vosStatus = VOS_STATUS_SUCCESS; if (btampContext == NULL) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "%s: Null Parameters Not allowed", __FUNCTION__); return; } if( TRUE == btampContext->btamp_logical_link_cancel_pending ) { bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_NO_CNCT; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; btampContext->btamp_logical_link_cancel_pending = FALSE; } else { /* Allocate a logical link index for these flow specs */ vosStatus = WLANBAP_CreateNewLogLinkCtx( btampContext, /* per assoc btampContext value */ btampContext->btamp_logical_link_req_info.phyLinkHandle, /* I get phy_link_handle from the Command */ btampContext->btamp_logical_link_req_info.txFlowSpec, /* I get tx_flow_spec from the Command */ btampContext->btamp_logical_link_req_info.rxFlowSpec, /* I get rx_flow_spec from the Command */ &log_link_index /* Return the logical link index here */ ); if (VOS_STATUS_SUCCESS != vosStatus) { /* Invalid flow spec format */ bapHCIEvent.u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; } else { bapHCIEvent.u.btampLogicalLinkCompleteEvent.status = WLANBAP_STATUS_SUCCESS; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkOpen; } } index_for_logLinkCtx = log_link_index >> 8; /* Format the Logical Link Complete event to return... 
*/ bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_LOGICAL_LINK_COMPLETE_EVENT; bapHCIEvent.u.btampLogicalLinkCompleteEvent.present = 1; /* Return the logical link index here */ bapHCIEvent.u.btampLogicalLinkCompleteEvent.log_link_handle = log_link_index; bapHCIEvent.u.btampLogicalLinkCompleteEvent.phy_link_handle = btampContext->btamp_logical_link_req_info.phyLinkHandle; bapHCIEvent.u.btampLogicalLinkCompleteEvent.flow_spec_id = btampContext->btampLogLinkCtx[index_for_logLinkCtx].btampFlowSpec.flow_spec_id; vosStatus = (*btampContext->pBapHCIEventCB) ( btampContext->pHddHdl, /* this refers to the BSL per connection context */ &bapHCIEvent, /* This now encodes ALL event types */ VOS_TRUE /* Flag to indicate assoc-specific event */ ); return; }
gpl-2.0
dwindsor/linux-stable
drivers/scsi/mac53c94.c
591
15375
/*
 * SCSI low-level driver for the 53c94 SCSI bus adaptor found
 * on Power Macintosh computers, controlling the external SCSI chain.
 * We assume the 53c94 is connected to a DBDMA (descriptor-based DMA)
 * controller.
 *
 * Paul Mackerras, August 1996.
 * Copyright (C) 1996 Paul Mackerras.
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/macio.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "mac53c94.h"

/* Driver-level bus phases: one SCSI command walks through
 * selecting -> dataing (optional) -> completing -> busfreeing -> idle. */
enum fsc_phase {
	idle,
	selecting,
	dataing,
	completing,
	busfreeing,
};

/* Per-host-adapter state, stored in Scsi_Host::hostdata. */
struct fsc_state {
	struct	mac53c94_regs __iomem *regs;	/* mapped 53c94 chip registers */
	int	intr;				/* chip interrupt line */
	struct	dbdma_regs __iomem *dma;	/* mapped DBDMA controller registers */
	int	dmaintr;			/* DMA interrupt line (mapped but unused here) */
	int	clk_freq;			/* chip clock, Hz (from device tree) */
	struct	Scsi_Host *host;
	struct	scsi_cmnd *request_q;		/* singly-linked pending queue (via host_scribble) */
	struct	scsi_cmnd *request_qtail;
	struct	scsi_cmnd *current_req;		/* req we're currently working on */
	enum fsc_phase phase;		/* what we're currently trying to do */
	struct dbdma_cmd *dma_cmds;	/* space for dbdma commands, aligned */
	void	*dma_cmd_space;		/* raw kmalloc'd buffer that dma_cmds points into */
	struct	pci_dev *pdev;
	dma_addr_t dma_addr;
	struct macio_dev *mdev;
};

static void mac53c94_init(struct fsc_state *);
static void mac53c94_start(struct fsc_state *);
static void mac53c94_interrupt(int, void *);
static irqreturn_t do_mac53c94_interrupt(int, void *);
static void cmd_done(struct fsc_state *, int result);
static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *);

/*
 * Queue a SCSI command: append it to the host's pending list and kick
 * the state machine if the bus is idle.  Called under the host lock
 * (via DEF_SCSI_QCMD below).  Always accepts the command (returns 0).
 */
static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct fsc_state *state;

#if 0
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		int i;
		printk(KERN_DEBUG "mac53c94_queue %p: command is", cmd);
		for (i = 0; i < cmd->cmd_len; ++i)
			printk(KERN_CONT " %.2x", cmd->cmnd[i]);
		printk(KERN_CONT "\n");
		printk(KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n",
		       scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd));
	}
#endif

	cmd->scsi_done = done;
	cmd->host_scribble = NULL;	/* host_scribble doubles as the queue's next pointer */

	state = (struct fsc_state *) cmd->device->host->hostdata;

	/* append to the tail of the pending queue */
	if (state->request_q == NULL)
		state->request_q = cmd;
	else
		state->request_qtail->host_scribble = (void *) cmd;
	state->request_qtail = cmd;

	/* only start immediately if nothing is in flight */
	if (state->phase == idle)
		mac53c94_start(state);

	return 0;
}

static DEF_SCSI_QCMD(mac53c94_queue)

/*
 * error-handling host reset: pulse SCSI RST, reset the chip, and
 * reprogram it via mac53c94_init().  Runs under the host lock.
 */
static int mac53c94_host_reset(struct scsi_cmnd *cmd)
{
	struct fsc_state *state = (struct fsc_state *) cmd->device->host->hostdata;
	struct mac53c94_regs __iomem *regs = state->regs;
	struct dbdma_regs __iomem *dma = state->dma;
	unsigned long flags;

	spin_lock_irqsave(cmd->device->host->host_lock, flags);

	/* writing bits into the high half of dma->control clears them: stop DMA */
	writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control);
	writeb(CMD_SCSI_RESET, &regs->command);	/* assert RST */
	udelay(100);			/* leave it on for a while (>= 25us) */
	writeb(CMD_RESET, &regs->command);
	udelay(20);
	mac53c94_init(state);
	writeb(CMD_NOP, &regs->command);

	spin_unlock_irqrestore(cmd->device->host->host_lock, flags);
	return SUCCESS;
}

/*
 * Program the 53c94's configuration registers (host ID, parity,
 * selection timeout, clock factor, async transfers) and quiesce the
 * DBDMA engine.  Called from probe and from host reset.
 */
static void mac53c94_init(struct fsc_state *state)
{
	struct mac53c94_regs __iomem *regs = state->regs;
	struct dbdma_regs __iomem *dma = state->dma;
	int x;

	writeb(state->host->this_id | CF1_PAR_ENABLE, &regs->config1);
	writeb(TIMO_VAL(250), &regs->sel_timeout);	/* 250ms */
	writeb(CLKF_VAL(state->clk_freq), &regs->clk_factor);
	writeb(CF2_FEATURE_EN, &regs->config2);
	writeb(0, &regs->config3);
	writeb(0, &regs->sync_period);	/* async transfers only */
	writeb(0, &regs->sync_offset);
	x = readb(&regs->interrupt);	/* read clears any latched interrupt */
	writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control);
}

/*
 * Start the next command for a 53C94.
 * Should be called with interrupts disabled.
 */
static void mac53c94_start(struct fsc_state *state)
{
	struct scsi_cmnd *cmd;
	struct mac53c94_regs __iomem *regs = state->regs;
	int i;

	/* must only be called when nothing is in flight */
	if (state->phase != idle || state->current_req != NULL)
		panic("inappropriate mac53c94_start (state=%p)", state);
	if (state->request_q == NULL)
		return;
	/* dequeue the head of the pending list (linked via host_scribble) */
	state->current_req = cmd = state->request_q;
	state->request_q = (struct scsi_cmnd *) cmd->host_scribble;

	/* Off we go */
	writeb(0, &regs->count_lo);
	writeb(0, &regs->count_mid);
	writeb(0, &regs->count_hi);
	writeb(CMD_NOP + CMD_DMA_MODE, &regs->command);
	udelay(1);
	writeb(CMD_FLUSH, &regs->command);
	udelay(1);
	writeb(cmd->device->id, &regs->dest_id);
	writeb(0, &regs->sync_period);
	writeb(0, &regs->sync_offset);

	/* load the command into the FIFO */
	for (i = 0; i < cmd->cmd_len; ++i)
		writeb(cmd->cmnd[i], &regs->fifo);

	/* do select without ATN XXX */
	writeb(CMD_SELECT, &regs->command);
	state->phase = selecting;

	/* build the DBDMA descriptor list now; it is started from the
	 * interrupt handler once the target enters a data phase */
	set_dma_cmds(state, cmd);
}

/*
 * Interrupt entry point: take the host lock, then run the real handler.
 * NOTE(review): this dereferences state->current_req to reach the host;
 * if an interrupt arrives with no command active (current_req == NULL)
 * this is a NULL dereference — confirm against the interrupt sources.
 */
static irqreturn_t do_mac53c94_interrupt(int irq, void *dev_id)
{
	unsigned long flags;
	struct Scsi_Host *dev = ((struct fsc_state *) dev_id)->current_req->device->host;
	
	spin_lock_irqsave(dev->host_lock, flags);
	mac53c94_interrupt(irq, dev_id);
	spin_unlock_irqrestore(dev->host_lock, flags);
	return IRQ_HANDLED;
}

/*
 * Main interrupt state machine.  Reads the chip's sequence-step, status
 * and interrupt registers, handles bus reset / illegal command / parity
 * errors, then advances the current command through the fsc_phase states.
 */
static void mac53c94_interrupt(int irq, void *dev_id)
{
	struct fsc_state *state = (struct fsc_state *) dev_id;
	struct mac53c94_regs __iomem *regs = state->regs;
	struct dbdma_regs __iomem *dma = state->dma;
	struct scsi_cmnd *cmd = state->current_req;
	int nb, stat, seq, intr;
	static int mac53c94_errors;	/* global error counter, never reported */

	/*
	 * Apparently, reading the interrupt register unlatches
	 * the status and sequence step registers.
	 */
	seq = readb(&regs->seqstep);
	stat = readb(&regs->status);
	intr = readb(&regs->interrupt);

#if 0
	printk(KERN_DEBUG "mac53c94_intr, intr=%x stat=%x seq=%x phase=%d\n",
	       intr, stat, seq, state->phase);
#endif

	if (intr & INTR_RESET) {
		/* SCSI bus was reset */
		printk(KERN_INFO "external SCSI bus reset detected\n");
		writeb(CMD_NOP, &regs->command);
		writel(RUN << 16, &dma->control);	/* stop dma */
		cmd_done(state, DID_RESET << 16);
		return;
	}
	if (intr & INTR_ILL_CMD) {
		printk(KERN_ERR "53c94: invalid cmd, intr=%x stat=%x seq=%x phase=%d\n",
		       intr, stat, seq, state->phase);
		cmd_done(state, DID_ERROR << 16);
		return;
	}
	if (stat & STAT_ERROR) {
#if 0
		/* XXX these seem to be harmless? */
		printk("53c94: bad error, intr=%x stat=%x seq=%x phase=%d\n",
		       intr, stat, seq, state->phase);
#endif
		++mac53c94_errors;
		writeb(CMD_NOP + CMD_DMA_MODE, &regs->command);
	}
	if (cmd == 0) {
		printk(KERN_DEBUG "53c94: interrupt with no command active?\n");
		return;
	}
	if (stat & STAT_PARITY) {
		printk(KERN_ERR "mac53c94: parity error\n");
		cmd_done(state, DID_PARITY << 16);
		return;
	}
	switch (state->phase) {
	case selecting:
		if (intr & INTR_DISCONNECT) {
			/* selection timed out */
			cmd_done(state, DID_BAD_TARGET << 16);
			return;
		}
		if (intr != INTR_BUS_SERV + INTR_DONE) {
			printk(KERN_DEBUG "got intr %x during selection\n", intr);
			cmd_done(state, DID_ERROR << 16);
			return;
		}
		if ((seq & SS_MASK) != SS_DONE) {
			printk(KERN_DEBUG "seq step %x after command\n", seq);
			cmd_done(state, DID_ERROR << 16);
			return;
		}
		writeb(CMD_NOP, &regs->command);
		/* set DMA controller going if any data to transfer */
		if ((stat & (STAT_MSG|STAT_CD)) == 0
		    && (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) {
			/* chip counter is 16-bit; transfer in <= 0xfff0 chunks */
			nb = cmd->SCp.this_residual;
			if (nb > 0xfff0)
				nb = 0xfff0;
			cmd->SCp.this_residual -= nb;
			writeb(nb, &regs->count_lo);
			writeb(nb >> 8, &regs->count_mid);
			writeb(CMD_DMA_MODE + CMD_NOP, &regs->command);
			writel(virt_to_phys(state->dma_cmds), &dma->cmdptr);
			writel((RUN << 16) | RUN, &dma->control);
			writeb(CMD_DMA_MODE + CMD_XFER_DATA,
			       &regs->command);
			state->phase = dataing;
			break;
		} else if ((stat & STAT_PHASE) == STAT_CD + STAT_IO) {
			/* up to status phase already */
			writeb(CMD_I_COMPLETE, &regs->command);
			state->phase = completing;
		} else {
			printk(KERN_DEBUG "in unexpected phase %x after cmd\n",
			       stat & STAT_PHASE);
			cmd_done(state, DID_ERROR << 16);
			return;
		}
		break;

	case dataing:
		if (intr != INTR_BUS_SERV) {
			printk(KERN_DEBUG "got intr %x before status\n", intr);
			cmd_done(state, DID_ERROR << 16);
			return;
		}
		if (cmd->SCp.this_residual != 0
		    && (stat & (STAT_MSG|STAT_CD)) == 0) {
			/* Set up the count regs to transfer more */
			nb = cmd->SCp.this_residual;
			if (nb > 0xfff0)
				nb = 0xfff0;
			cmd->SCp.this_residual -= nb;
			writeb(nb, &regs->count_lo);
			writeb(nb >> 8, &regs->count_mid);
			writeb(CMD_DMA_MODE + CMD_NOP, &regs->command);
			writeb(CMD_DMA_MODE + CMD_XFER_DATA, &regs->command);
			break;
		}
		if ((stat & STAT_PHASE) != STAT_CD + STAT_IO) {
			printk(KERN_DEBUG "intr %x before data xfer complete\n", intr);
		}
		writel(RUN << 16, &dma->control);	/* stop dma */
		scsi_dma_unmap(cmd);
		/* should check dma status */
		writeb(CMD_I_COMPLETE, &regs->command);
		state->phase = completing;
		break;
	case completing:
		if (intr != INTR_DONE) {
			printk(KERN_DEBUG "got intr %x on completion\n", intr);
			cmd_done(state, DID_ERROR << 16);
			return;
		}
		/* status + message bytes arrive in the FIFO */
		cmd->SCp.Status = readb(&regs->fifo);
		cmd->SCp.Message = readb(&regs->fifo);
		/* NOTE(review): storing the CMD_ACCEPT_MSG opcode in
		 * cmd->result looks suspicious; the value is overwritten in
		 * cmd_done() when the command finishes — confirm intent. */
		cmd->result = CMD_ACCEPT_MSG;
		writeb(CMD_ACCEPT_MSG, &regs->command);
		state->phase = busfreeing;
		break;
	case busfreeing:
		if (intr != INTR_DISCONNECT) {
			printk(KERN_DEBUG "got intr %x when expected disconnect\n",
			       intr);
		}
		/* fold SCSI status and message bytes into the result code */
		cmd_done(state, (DID_OK << 16) + (cmd->SCp.Message << 8)
			 + cmd->SCp.Status);
		break;
	default:
		printk(KERN_DEBUG "don't know about phase %d\n", state->phase);
	}
}

/*
 * Complete the current command (if any) with the given result code,
 * return to the idle phase and start the next queued command.
 */
static void cmd_done(struct fsc_state *state, int result)
{
	struct scsi_cmnd *cmd;

	cmd = state->current_req;
	if (cmd != 0) {
		cmd->result = result;
		(*cmd->scsi_done)(cmd);
		state->current_req = NULL;
	}
	state->phase = idle;
	mac53c94_start(state);
}

/*
 * Set up DMA commands for transferring data.
 */
static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
{
	int i, dma_cmd, total, nseg;
	struct scatterlist *scl;
	struct dbdma_cmd *dcmds;
	dma_addr_t dma_addr;
	u32 dma_len;

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (!nseg)
		return;		/* no data phase for this command */

	/* descriptor opcode depends on transfer direction */
	dma_cmd = cmd->sc_data_direction == DMA_TO_DEVICE ?
			OUTPUT_MORE : INPUT_MORE;
	dcmds = state->dma_cmds;
	total = 0;

	/* one DBDMA descriptor per scatterlist element */
	scsi_for_each_sg(cmd, scl, nseg, i) {
		dma_addr = sg_dma_address(scl);
		dma_len = sg_dma_len(scl);
		if (dma_len > 0xffff)
			panic("mac53c94: scatterlist element >= 64k");
		total += dma_len;
		dcmds->req_count = cpu_to_le16(dma_len);
		dcmds->command = cpu_to_le16(dma_cmd);
		dcmds->phy_addr = cpu_to_le32(dma_addr);
		dcmds->xfer_status = 0;
		++dcmds;
	}

	/* mark the last data descriptor (OUTPUT_LAST/INPUT_LAST) and
	 * terminate the program with a STOP descriptor */
	dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
	dcmds[-1].command = cpu_to_le16(dma_cmd);
	dcmds->command = cpu_to_le16(DBDMA_STOP);
	cmd->SCp.this_residual = total;	/* bytes left; consumed by the irq handler */
}

static struct scsi_host_template mac53c94_template = {
	.proc_name	= "53c94",
	.name		= "53C94",
	.queuecommand	= mac53c94_queue,
	.eh_host_reset_handler = mac53c94_host_reset,
	.can_queue	= 1,	/* one command in flight at a time */
	.this_id	= 7,
	.sg_tablesize	= SG_ALL,
	.use_clustering	= DISABLE_CLUSTERING,
};

/*
 * Probe a macio 53c94 node: map the chip and DBDMA register windows,
 * read the clock frequency from the device tree, allocate the DBDMA
 * command list, initialize the chip and register the SCSI host.
 * Errors unwind via the goto cleanup chain at the bottom.
 */
static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	struct device_node *node = macio_get_of_node(mdev);
	struct pci_dev *pdev = macio_get_pci_dev(mdev);
	struct fsc_state *state;
	struct Scsi_Host *host;
	void *dma_cmd_space;
	const unsigned char *clkprop;
	int proplen, rc = -ENODEV;

	if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
		printk(KERN_ERR "mac53c94: expected 2 addrs and intrs"
		       " (got %d/%d)\n",
		       macio_resource_count(mdev), macio_irq_count(mdev));
		return -ENODEV;
	}

	if (macio_request_resources(mdev, "mac53c94") != 0) {
		printk(KERN_ERR "mac53c94: unable to request memory resources");
		return -EBUSY;
	}

	host = scsi_host_alloc(&mac53c94_template, sizeof(struct fsc_state));
	if (host == NULL) {
		printk(KERN_ERR "mac53c94: couldn't register host");
		rc = -ENOMEM;
		goto out_release;
	}

	state = (struct fsc_state *) host->hostdata;
	macio_set_drvdata(mdev, state);
	state->host = host;
	state->pdev = pdev;
	state->mdev = mdev;

	/* resource 0 = 53c94 registers, resource 1 = DBDMA registers */
	state->regs = (struct mac53c94_regs __iomem *)
		ioremap(macio_resource_start(mdev, 0), 0x1000);
	state->intr = macio_irq(mdev, 0);
	state->dma = (struct dbdma_regs __iomem *)
		ioremap(macio_resource_start(mdev, 1), 0x1000);
	state->dmaintr = macio_irq(mdev, 1);
	if (state->regs == NULL || state->dma == NULL) {
		printk(KERN_ERR "mac53c94: ioremap failed for %s\n",
		       node->full_name);
		goto out_free;
	}

	clkprop = of_get_property(node, "clock-frequency", &proplen);
	if (clkprop == NULL || proplen != sizeof(int)) {
		printk(KERN_ERR "%s: can't get clock frequency, "
		       "assuming 25MHz\n", node->full_name);
		state->clk_freq = 25000000;
	} else
		state->clk_freq = *(int *)clkprop;

	/* Space for dma command list: +1 for stop command,
	 * +1 to allow for aligning.
	 * XXX FIXME: Use DMA consistent routines
	 */
	dma_cmd_space = kmalloc((host->sg_tablesize + 2) *
				sizeof(struct dbdma_cmd), GFP_KERNEL);
	if (dma_cmd_space == 0) {
		printk(KERN_ERR "mac53c94: couldn't allocate dma "
		       "command space for %s\n", node->full_name);
		rc = -ENOMEM;
		goto out_free;
	}
	/* keep the raw pointer for kfree; use the aligned one for DMA */
	state->dma_cmds = (struct dbdma_cmd *)DBDMA_ALIGN(dma_cmd_space);
	memset(state->dma_cmds, 0, (host->sg_tablesize + 1)
	       * sizeof(struct dbdma_cmd));
	state->dma_cmd_space = dma_cmd_space;

	mac53c94_init(state);

	if (request_irq(state->intr, do_mac53c94_interrupt, 0, "53C94",state)) {
		printk(KERN_ERR "mac53C94: can't get irq %d for %s\n",
		       state->intr, node->full_name);
		goto out_free_dma;
	}

	rc = scsi_add_host(host, &mdev->ofdev.dev);
	if (rc != 0)
		goto out_release_irq;

	scsi_scan_host(host);
	return 0;

 out_release_irq:
	free_irq(state->intr, state);
 out_free_dma:
	kfree(state->dma_cmd_space);
 out_free:
	if (state->dma != NULL)
		iounmap(state->dma);
	if (state->regs != NULL)
		iounmap(state->regs);
	scsi_host_put(host);
 out_release:
	macio_release_resources(mdev);
	return rc;
}

/*
 * Teardown: mirror of probe — unregister the host, release the irq,
 * unmap registers, free the DMA command buffer and the resources.
 */
static int mac53c94_remove(struct macio_dev *mdev)
{
	struct fsc_state *fp = (struct fsc_state *)macio_get_drvdata(mdev);
	struct Scsi_Host *host = fp->host;

	scsi_remove_host(host);

	free_irq(fp->intr, fp);

	if (fp->regs)
		iounmap(fp->regs);
	if (fp->dma)
		iounmap(fp->dma);
	kfree(fp->dma_cmd_space);

	scsi_host_put(host);

	macio_release_resources(mdev);

	return 0;
}

/* match any macio node named "53c94" */
static struct of_device_id mac53c94_match[] =
{
	{
	.name 		= "53c94",
	},
	{},
};
MODULE_DEVICE_TABLE (of, mac53c94_match);

static struct macio_driver mac53c94_driver =
{
	.driver = {
		.name 		= "mac53c94",
		.owner		= THIS_MODULE,
		.of_match_table	= mac53c94_match,
	},
	.probe		= mac53c94_probe,
	.remove		= mac53c94_remove,
};

static int __init init_mac53c94(void)
{
	return macio_register_driver(&mac53c94_driver);
}

static void __exit exit_mac53c94(void)
{
	return macio_unregister_driver(&mac53c94_driver);
}

module_init(init_mac53c94);
module_exit(exit_mac53c94);

MODULE_DESCRIPTION("PowerMac 53c94 SCSI driver");
MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
MODULE_LICENSE("GPL");
gpl-2.0
Kernel-Saram/ef30s-ics-kernel
drivers/net/tokenring/smctr.c
847
189711
/* * smctr.c: A network driver for the SMC Token Ring Adapters. * * Written by Jay Schulist <jschlst@samba.org> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * This device driver works with the following SMC adapters: * - SMC TokenCard Elite (8115T, chips 825/584) * - SMC TokenCard Elite/A MCA (8115T/A, chips 825/594) * * Source(s): * - SMC TokenCard SDK. * * Maintainer(s): * JS Jay Schulist <jschlst@samba.org> * * Changes: * 07102000 JS Fixed a timing problem in smctr_wait_cmd(); * Also added a bit more discriptive error msgs. * 07122000 JS Fixed problem with detecting a card with * module io/irq/mem specified. * * To do: * 1. Multicast support. * * Initial 2.5 cleanup Alan Cox <alan@lxorguk.ukuu.org.uk> 2002/10/28 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/time.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/mca-legacy.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/trdevice.h> #include <linux/bitops.h> #include <linux/firmware.h> #include <asm/system.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/irq.h> #if BITS_PER_LONG == 64 #error FIXME: driver does not support 64-bit platforms #endif #include "smctr.h" /* Our Stuff */ static const char version[] __initdata = KERN_INFO "smctr.c: v1.4 7/12/00 by jschlst@samba.org\n"; static const char cardname[] = "smctr"; #define SMCTR_IO_EXTENT 20 #ifdef CONFIG_MCA_LEGACY static unsigned int smctr_posid = 0x6ec6; #endif static int ringspeed; /* SMC Name of the Adapter. 
*/ static char smctr_name[] = "SMC TokenCard"; static char *smctr_model = "Unknown"; /* Use 0 for production, 1 for verification, 2 for debug, and * 3 for very verbose debug. */ #ifndef SMCTR_DEBUG #define SMCTR_DEBUG 1 #endif static unsigned int smctr_debug = SMCTR_DEBUG; /* smctr.c prototypes and functions are arranged alphabeticly * for clearity, maintainability and pure old fashion fun. */ /* A */ static int smctr_alloc_shared_memory(struct net_device *dev); /* B */ static int smctr_bypass_state(struct net_device *dev); /* C */ static int smctr_checksum_firmware(struct net_device *dev); static int __init smctr_chk_isa(struct net_device *dev); static int smctr_chg_rx_mask(struct net_device *dev); static int smctr_clear_int(struct net_device *dev); static int smctr_clear_trc_reset(int ioaddr); static int smctr_close(struct net_device *dev); /* D */ static int smctr_decode_firmware(struct net_device *dev, const struct firmware *fw); static int smctr_disable_16bit(struct net_device *dev); static int smctr_disable_adapter_ctrl_store(struct net_device *dev); static int smctr_disable_bic_int(struct net_device *dev); /* E */ static int smctr_enable_16bit(struct net_device *dev); static int smctr_enable_adapter_ctrl_store(struct net_device *dev); static int smctr_enable_adapter_ram(struct net_device *dev); static int smctr_enable_bic_int(struct net_device *dev); /* G */ static int __init smctr_get_boardid(struct net_device *dev, int mca); static int smctr_get_group_address(struct net_device *dev); static int smctr_get_functional_address(struct net_device *dev); static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev); static int smctr_get_physical_drop_number(struct net_device *dev); static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue); static int smctr_get_station_id(struct net_device *dev); static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue, __u16 bytes_count); static int smctr_get_upstream_neighbor_addr(struct net_device 
*dev); /* H */ static int smctr_hardware_send_packet(struct net_device *dev, struct net_local *tp); /* I */ static int smctr_init_acbs(struct net_device *dev); static int smctr_init_adapter(struct net_device *dev); static int smctr_init_card_real(struct net_device *dev); static int smctr_init_rx_bdbs(struct net_device *dev); static int smctr_init_rx_fcbs(struct net_device *dev); static int smctr_init_shared_memory(struct net_device *dev); static int smctr_init_tx_bdbs(struct net_device *dev); static int smctr_init_tx_fcbs(struct net_device *dev); static int smctr_internal_self_test(struct net_device *dev); static irqreturn_t smctr_interrupt(int irq, void *dev_id); static int smctr_issue_enable_int_cmd(struct net_device *dev, __u16 interrupt_enable_mask); static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ibits); static int smctr_issue_init_timers_cmd(struct net_device *dev); static int smctr_issue_init_txrx_cmd(struct net_device *dev); static int smctr_issue_insert_cmd(struct net_device *dev); static int smctr_issue_read_ring_status_cmd(struct net_device *dev); static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt); static int smctr_issue_remove_cmd(struct net_device *dev); static int smctr_issue_resume_acb_cmd(struct net_device *dev); static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue); static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue); static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue); static int smctr_issue_test_internal_rom_cmd(struct net_device *dev); static int smctr_issue_test_hic_cmd(struct net_device *dev); static int smctr_issue_test_mac_reg_cmd(struct net_device *dev); static int smctr_issue_trc_loopback_cmd(struct net_device *dev); static int smctr_issue_tri_loopback_cmd(struct net_device *dev); static int smctr_issue_write_byte_cmd(struct net_device *dev, short aword_cnt, void *byte); static int 
smctr_issue_write_word_cmd(struct net_device *dev, short aword_cnt, void *word); /* J */ static int smctr_join_complete_state(struct net_device *dev); /* L */ static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev); static int smctr_load_firmware(struct net_device *dev); static int smctr_load_node_addr(struct net_device *dev); static int smctr_lobe_media_test(struct net_device *dev); static int smctr_lobe_media_test_cmd(struct net_device *dev); static int smctr_lobe_media_test_state(struct net_device *dev); /* M */ static int smctr_make_8025_hdr(struct net_device *dev, MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc); static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv); static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv); static int smctr_make_auth_funct_class(struct net_device *dev, MAC_SUB_VECTOR *tsv); static int smctr_make_corr(struct net_device *dev, MAC_SUB_VECTOR *tsv, __u16 correlator); static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv); static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv); static int smctr_make_phy_drop_num(struct net_device *dev, MAC_SUB_VECTOR *tsv); static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv); static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv); static int smctr_make_ring_station_status(struct net_device *dev, MAC_SUB_VECTOR *tsv); static int smctr_make_ring_station_version(struct net_device *dev, MAC_SUB_VECTOR *tsv); static int smctr_make_tx_status_code(struct net_device *dev, MAC_SUB_VECTOR *tsv, __u16 tx_fstatus); static int smctr_make_upstream_neighbor_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv); static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv); /* O */ static int smctr_open(struct net_device *dev); static int smctr_open_tr(struct net_device *dev); /* P */ struct net_device *smctr_probe(int unit); static int __init smctr_probe1(struct 
net_device *dev, int ioaddr); static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size, struct net_device *dev, __u16 rx_status); /* R */ static int smctr_ram_memory_test(struct net_device *dev); static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf, __u16 *correlator); static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf, __u16 *correlator); static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf); static int smctr_rcv_rq_addr_state_attch(struct net_device *dev, MAC_HEADER *rmf, __u16 *correlator); static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf, __u16 *correlator); static int smctr_reset_adapter(struct net_device *dev); static int smctr_restart_tx_chain(struct net_device *dev, short queue); static int smctr_ring_status_chg(struct net_device *dev); static int smctr_rx_frame(struct net_device *dev); /* S */ static int smctr_send_dat(struct net_device *dev); static netdev_tx_t smctr_send_packet(struct sk_buff *skb, struct net_device *dev); static int smctr_send_lobe_media_test(struct net_device *dev); static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf, __u16 correlator); static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf, __u16 correlator); static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf, __u16 correlator); static int smctr_send_rpt_tx_forward(struct net_device *dev, MAC_HEADER *rmf, __u16 tx_fstatus); static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf, __u16 rcode, __u16 correlator); static int smctr_send_rq_init(struct net_device *dev); static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf, __u16 *tx_fstatus); static int smctr_set_auth_access_pri(struct net_device *dev, MAC_SUB_VECTOR *rsv); static int smctr_set_auth_funct_class(struct net_device *dev, MAC_SUB_VECTOR *rsv); static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv, __u16 *correlator); static int 
smctr_set_error_timer_value(struct net_device *dev, MAC_SUB_VECTOR *rsv); static int smctr_set_frame_forward(struct net_device *dev, MAC_SUB_VECTOR *rsv, __u8 dc_sc); static int smctr_set_local_ring_num(struct net_device *dev, MAC_SUB_VECTOR *rsv); static unsigned short smctr_set_ctrl_attention(struct net_device *dev); static void smctr_set_multicast_list(struct net_device *dev); static int smctr_set_page(struct net_device *dev, __u8 *buf); static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv); static int smctr_set_ring_speed(struct net_device *dev); static int smctr_set_rx_look_ahead(struct net_device *dev); static int smctr_set_trc_reset(int ioaddr); static int smctr_setup_single_cmd(struct net_device *dev, __u16 command, __u16 subcommand); static int smctr_setup_single_cmd_w_data(struct net_device *dev, __u16 command, __u16 subcommand); static char *smctr_malloc(struct net_device *dev, __u16 size); static int smctr_status_chg(struct net_device *dev); /* T */ static void smctr_timeout(struct net_device *dev); static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb, __u16 queue); static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue); static unsigned short smctr_tx_move_frame(struct net_device *dev, struct sk_buff *skb, __u8 *pbuff, unsigned int bytes); /* U */ static int smctr_update_err_stats(struct net_device *dev); static int smctr_update_rx_chain(struct net_device *dev, __u16 queue); static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb, __u16 queue); /* W */ static int smctr_wait_cmd(struct net_device *dev); static int smctr_wait_while_cbusy(struct net_device *dev); #define TO_256_BYTE_BOUNDRY(X) (((X + 0xff) & 0xff00) - X) #define TO_PARAGRAPH_BOUNDRY(X) (((X + 0x0f) & 0xfff0) - X) #define PARAGRAPH_BOUNDRY(X) smctr_malloc(dev, TO_PARAGRAPH_BOUNDRY(X)) /* Allocate Adapter Shared Memory. * IMPORTANT NOTE: Any changes to this function MUST be mirrored in the * function "get_num_rx_bdbs" below!!! 
* * Order of memory allocation: * * 0. Initial System Configuration Block Pointer * 1. System Configuration Block * 2. System Control Block * 3. Action Command Block * 4. Interrupt Status Block * * 5. MAC TX FCB'S * 6. NON-MAC TX FCB'S * 7. MAC TX BDB'S * 8. NON-MAC TX BDB'S * 9. MAC RX FCB'S * 10. NON-MAC RX FCB'S * 11. MAC RX BDB'S * 12. NON-MAC RX BDB'S * 13. MAC TX Data Buffer( 1, 256 byte buffer) * 14. MAC RX Data Buffer( 1, 256 byte buffer) * * 15. NON-MAC TX Data Buffer * 16. NON-MAC RX Data Buffer */ static int smctr_alloc_shared_memory(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_alloc_shared_memory\n", dev->name); /* Allocate initial System Control Block pointer. * This pointer is located in the last page, last offset - 4. */ tp->iscpb_ptr = (ISCPBlock *)(tp->ram_access + ((__u32)64 * 0x400) - (long)ISCP_BLOCK_SIZE); /* Allocate System Control Blocks. */ tp->scgb_ptr = (SCGBlock *)smctr_malloc(dev, sizeof(SCGBlock)); PARAGRAPH_BOUNDRY(tp->sh_mem_used); tp->sclb_ptr = (SCLBlock *)smctr_malloc(dev, sizeof(SCLBlock)); PARAGRAPH_BOUNDRY(tp->sh_mem_used); tp->acb_head = (ACBlock *)smctr_malloc(dev, sizeof(ACBlock)*tp->num_acbs); PARAGRAPH_BOUNDRY(tp->sh_mem_used); tp->isb_ptr = (ISBlock *)smctr_malloc(dev, sizeof(ISBlock)); PARAGRAPH_BOUNDRY(tp->sh_mem_used); tp->misc_command_data = (__u16 *)smctr_malloc(dev, MISC_DATA_SIZE); PARAGRAPH_BOUNDRY(tp->sh_mem_used); /* Allocate transmit FCBs. */ tp->tx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev, sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE]); tp->tx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev, sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE]); tp->tx_fcb_head[BUG_QUEUE] = (FCBlock *)smctr_malloc(dev, sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE]); /* Allocate transmit BDBs. 
*/ tp->tx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE]); tp->tx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE]); tp->tx_bdb_head[BUG_QUEUE] = (BDBlock *)smctr_malloc(dev, sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE]); /* Allocate receive FCBs. */ tp->rx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev, sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE]); tp->rx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev, sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE]); /* Allocate receive BDBs. */ tp->rx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE]); tp->rx_bdb_end[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0); tp->rx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, sizeof(BDBlock) * tp->num_rx_bdbs[NON_MAC_QUEUE]); tp->rx_bdb_end[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0); /* Allocate MAC transmit buffers. * MAC Tx Buffers doen't have to be on an ODD Boundry. */ tp->tx_buff_head[MAC_QUEUE] = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[MAC_QUEUE]); tp->tx_buff_curr[MAC_QUEUE] = tp->tx_buff_head[MAC_QUEUE]; tp->tx_buff_end [MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0); /* Allocate BUG transmit buffers. */ tp->tx_buff_head[BUG_QUEUE] = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[BUG_QUEUE]); tp->tx_buff_curr[BUG_QUEUE] = tp->tx_buff_head[BUG_QUEUE]; tp->tx_buff_end[BUG_QUEUE] = (__u16 *)smctr_malloc(dev, 0); /* Allocate MAC receive data buffers. * MAC Rx buffer doesn't have to be on a 256 byte boundary. */ tp->rx_buff_head[MAC_QUEUE] = (__u16 *)smctr_malloc(dev, RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE]); tp->rx_buff_end[MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0); /* Allocate Non-MAC transmit buffers. * ?? For maximum Netware performance, put Tx Buffers on * ODD Boundry and then restore malloc to Even Boundrys. 
*/ smctr_malloc(dev, 1L); tp->tx_buff_head[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[NON_MAC_QUEUE]); tp->tx_buff_curr[NON_MAC_QUEUE] = tp->tx_buff_head[NON_MAC_QUEUE]; tp->tx_buff_end [NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0); smctr_malloc(dev, 1L); /* Allocate Non-MAC receive data buffers. * To guarantee a minimum of 256 contiguous memory to * UM_Receive_Packet's lookahead pointer, before a page * change or ring end is encountered, place each rx buffer on * a 256 byte boundary. */ smctr_malloc(dev, TO_256_BYTE_BOUNDRY(tp->sh_mem_used)); tp->rx_buff_head[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[NON_MAC_QUEUE]); tp->rx_buff_end[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0); return (0); } /* Enter Bypass state. */ static int smctr_bypass_state(struct net_device *dev) { int err; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_bypass_state\n", dev->name); err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, JS_BYPASS_STATE); return (err); } static int smctr_checksum_firmware(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); __u16 i, checksum = 0; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_checksum_firmware\n", dev->name); smctr_enable_adapter_ctrl_store(dev); for(i = 0; i < CS_RAM_SIZE; i += 2) checksum += *((__u16 *)(tp->ram_access + i)); tp->microcode_version = *(__u16 *)(tp->ram_access + CS_RAM_VERSION_OFFSET); tp->microcode_version >>= 8; smctr_disable_adapter_ctrl_store(dev); if(checksum) return (checksum); return (0); } static int __init smctr_chk_mca(struct net_device *dev) { #ifdef CONFIG_MCA_LEGACY struct net_local *tp = netdev_priv(dev); int current_slot; __u8 r1, r2, r3, r4, r5; current_slot = mca_find_unused_adapter(smctr_posid, 0); if(current_slot == MCA_NOTFOUND) return (-ENODEV); mca_set_adapter_name(current_slot, smctr_name); mca_mark_as_used(current_slot); tp->slot_num = current_slot; r1 = mca_read_stored_pos(tp->slot_num, 2); r2 = 
mca_read_stored_pos(tp->slot_num, 3); if(tp->slot_num) outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num - 1) | CNFG_SLOT_ENABLE_BIT)); else outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num) | CNFG_SLOT_ENABLE_BIT)); r1 = inb(CNFG_POS_REG1); r2 = inb(CNFG_POS_REG0); tp->bic_type = BIC_594_CHIP; /* IO */ r2 = mca_read_stored_pos(tp->slot_num, 2); r2 &= 0xF0; dev->base_addr = ((__u16)r2 << 8) + (__u16)0x800; request_region(dev->base_addr, SMCTR_IO_EXTENT, smctr_name); /* IRQ */ r5 = mca_read_stored_pos(tp->slot_num, 5); r5 &= 0xC; switch(r5) { case 0: dev->irq = 3; break; case 0x4: dev->irq = 4; break; case 0x8: dev->irq = 10; break; default: dev->irq = 15; break; } if (request_irq(dev->irq, smctr_interrupt, IRQF_SHARED, smctr_name, dev)) { release_region(dev->base_addr, SMCTR_IO_EXTENT); return -ENODEV; } /* Get RAM base */ r3 = mca_read_stored_pos(tp->slot_num, 3); tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0x0C0000; if (r3 & 0x8) tp->ram_base += 0x010000; if (r3 & 0x80) tp->ram_base += 0xF00000; /* Get Ram Size */ r3 &= 0x30; r3 >>= 4; tp->ram_usable = (__u16)CNFG_SIZE_8KB << r3; tp->ram_size = (__u16)CNFG_SIZE_64KB; tp->board_id |= TOKEN_MEDIA; r4 = mca_read_stored_pos(tp->slot_num, 4); tp->rom_base = ((__u32)(r4 & 0x7) << 13) + 0x0C0000; if (r4 & 0x8) tp->rom_base += 0x010000; /* Get ROM size. */ r4 >>= 4; switch (r4) { case 0: tp->rom_size = CNFG_SIZE_8KB; break; case 1: tp->rom_size = CNFG_SIZE_16KB; break; case 2: tp->rom_size = CNFG_SIZE_32KB; break; default: tp->rom_size = ROM_DISABLE; } /* Get Media Type. 
*/ r5 = mca_read_stored_pos(tp->slot_num, 5); r5 &= CNFG_MEDIA_TYPE_MASK; switch(r5) { case (0): tp->media_type = MEDIA_STP_4; break; case (1): tp->media_type = MEDIA_STP_16; break; case (3): tp->media_type = MEDIA_UTP_16; break; default: tp->media_type = MEDIA_UTP_4; break; } tp->media_menu = 14; r2 = mca_read_stored_pos(tp->slot_num, 2); if(!(r2 & 0x02)) tp->mode_bits |= EARLY_TOKEN_REL; /* Disable slot */ outb(CNFG_POS_CONTROL_REG, 0); tp->board_id = smctr_get_boardid(dev, 1); switch(tp->board_id & 0xffff) { case WD8115TA: smctr_model = "8115T/A"; break; case WD8115T: if(tp->extra_info & CHIP_REV_MASK) smctr_model = "8115T rev XE"; else smctr_model = "8115T rev XD"; break; default: smctr_model = "Unknown"; break; } return (0); #else return (-1); #endif /* CONFIG_MCA_LEGACY */ } static int smctr_chg_rx_mask(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); int err = 0; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_chg_rx_mask\n", dev->name); smctr_enable_16bit(dev); smctr_set_page(dev, (__u8 *)tp->ram_access); if(tp->mode_bits & LOOPING_MODE_MASK) tp->config_word0 |= RX_OWN_BIT; else tp->config_word0 &= ~RX_OWN_BIT; if(tp->receive_mask & PROMISCUOUS_MODE) tp->config_word0 |= PROMISCUOUS_BIT; else tp->config_word0 &= ~PROMISCUOUS_BIT; if(tp->receive_mask & ACCEPT_ERR_PACKETS) tp->config_word0 |= SAVBAD_BIT; else tp->config_word0 &= ~SAVBAD_BIT; if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES) tp->config_word0 |= RXATMAC; else tp->config_word0 &= ~RXATMAC; if(tp->receive_mask & ACCEPT_MULTI_PROM) tp->config_word1 |= MULTICAST_ADDRESS_BIT; else tp->config_word1 &= ~MULTICAST_ADDRESS_BIT; if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING) tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS; else { if(tp->receive_mask & ACCEPT_SOURCE_ROUTING) tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT; else tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS; } if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_0, &tp->config_word0))) { return (err); } 
if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_1, &tp->config_word1))) { return (err); } smctr_disable_16bit(dev); return (0); } static int smctr_clear_int(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); outb((tp->trc_mask | CSR_CLRTINT), dev->base_addr + CSR); return (0); } static int smctr_clear_trc_reset(int ioaddr) { __u8 r; r = inb(ioaddr + MSR); outb(~MSR_RST & r, ioaddr + MSR); return (0); } /* * The inverse routine to smctr_open(). */ static int smctr_close(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); struct sk_buff *skb; int err; netif_stop_queue(dev); tp->cleanup = 1; /* Check to see if adapter is already in a closed state. */ if(tp->status != OPEN) return (0); smctr_enable_16bit(dev); smctr_set_page(dev, (__u8 *)tp->ram_access); if((err = smctr_issue_remove_cmd(dev))) { smctr_disable_16bit(dev); return (err); } for(;;) { skb = skb_dequeue(&tp->SendSkbQueue); if(skb == NULL) break; tp->QueueSkb++; dev_kfree_skb(skb); } return (0); } static int smctr_decode_firmware(struct net_device *dev, const struct firmware *fw) { struct net_local *tp = netdev_priv(dev); short bit = 0x80, shift = 12; DECODE_TREE_NODE *tree; short branch, tsize; __u16 buff = 0; long weight; __u8 *ucode; __u16 *mem; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_decode_firmware\n", dev->name); weight = *(long *)(fw->data + WEIGHT_OFFSET); tsize = *(__u8 *)(fw->data + TREE_SIZE_OFFSET); tree = (DECODE_TREE_NODE *)(fw->data + TREE_OFFSET); ucode = (__u8 *)(fw->data + TREE_OFFSET + (tsize * sizeof(DECODE_TREE_NODE))); mem = (__u16 *)(tp->ram_access); while(weight) { branch = ROOT; while((tree + branch)->tag != LEAF && weight) { branch = *ucode & bit ? 
(tree + branch)->llink : (tree + branch)->rlink; bit >>= 1; weight--; if(bit == 0) { bit = 0x80; ucode++; } } buff |= (tree + branch)->info << shift; shift -= 4; if(shift < 0) { *(mem++) = SWAP_BYTES(buff); buff = 0; shift = 12; } } /* The following assumes the Control Store Memory has * been initialized to zero. If the last partial word * is zero, it will not be written. */ if(buff) *(mem++) = SWAP_BYTES(buff); return (0); } static int smctr_disable_16bit(struct net_device *dev) { return (0); } /* * On Exit, Adapter is: * 1. TRC is in a reset state and un-initialized. * 2. Adapter memory is enabled. * 3. Control Store memory is out of context (-WCSS is 1). */ static int smctr_disable_adapter_ctrl_store(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); int ioaddr = dev->base_addr; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_disable_adapter_ctrl_store\n", dev->name); tp->trc_mask |= CSR_WCSS; outb(tp->trc_mask, ioaddr + CSR); return (0); } static int smctr_disable_bic_int(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); int ioaddr = dev->base_addr; tp->trc_mask = CSR_MSK_ALL | CSR_MSKCBUSY | CSR_MSKTINT | CSR_WCSS; outb(tp->trc_mask, ioaddr + CSR); return (0); } static int smctr_enable_16bit(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); __u8 r; if(tp->adapter_bus == BUS_ISA16_TYPE) { r = inb(dev->base_addr + LAAR); outb((r | LAAR_MEM16ENB), dev->base_addr + LAAR); } return (0); } /* * To enable the adapter control store memory: * 1. Adapter must be in a RESET state. * 2. Adapter memory must be enabled. * 3. Control Store Memory is in context (-WCSS is 0). 
 */
static int smctr_enable_adapter_ctrl_store(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        int ioaddr = dev->base_addr;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_enable_adapter_ctrl_store\n", dev->name);

        /* Reset the TRC and enable shared RAM before mapping the
         * control store into context (clear -WCSS). */
        smctr_set_trc_reset(ioaddr);
        smctr_enable_adapter_ram(dev);

        tp->trc_mask &= ~CSR_WCSS;
        outb(tp->trc_mask, ioaddr + CSR);

        return (0);
}

/* Turn on the adapter's shared-memory window (set MSR_MEMB). */
static int smctr_enable_adapter_ram(struct net_device *dev)
{
        int ioaddr = dev->base_addr;
        __u8 r;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_enable_adapter_ram\n", dev->name);

        r = inb(ioaddr + MSR);
        outb(MSR_MEMB | r, ioaddr + MSR);

        return (0);
}

/* Unmask BIC interrupts; the enable register differs per BIC chip
 * (584 uses IRR, 594 uses IMCCR). */
static int smctr_enable_bic_int(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        int ioaddr = dev->base_addr;
        __u8 r;

        switch(tp->bic_type)
        {
                case (BIC_584_CHIP):
                        tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS;
                        outb(tp->trc_mask, ioaddr + CSR);
                        r = inb(ioaddr + IRR);
                        outb(r | IRR_IEN, ioaddr + IRR);
                        break;

                case (BIC_594_CHIP):
                        tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS;
                        outb(tp->trc_mask, ioaddr + CSR);
                        r = inb(ioaddr + IMCCR);
                        outb(r | IMCCR_EIL, ioaddr + IMCCR);
                        break;
        }

        return (0);
}

/* Probe a candidate ISA io base for an 8115T: verify the node-address
 * checksum and board ID, then read io/irq/ram/rom/media configuration
 * from the 583/584 config registers into *tp.  Returns 0 on success,
 * -ENODEV/-EBUSY on failure (region and irq released on error paths).
 */
static int __init smctr_chk_isa(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        int ioaddr = dev->base_addr;
        __u8 r1, r2, b, chksum = 0;
        __u16 r;
        int i;
        int err = -ENODEV;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_chk_isa %#4x\n", dev->name, ioaddr);

        /* The 8115T decodes 32-byte aligned io bases only. */
        if((ioaddr & 0x1F) != 0)
                goto out;

        /* Grab the region so that no one else tries to
         * probe our ioports. */
        if (!request_region(ioaddr, SMCTR_IO_EXTENT, smctr_name)) {
                err = -EBUSY;
                goto out;
        }

        /* Checksum SMC node address */
        for(i = 0; i < 8; i++)
        {
                b = inb(ioaddr + LAR0 + i);
                chksum += b;
        }

        if (chksum != NODE_ADDR_CKSUM)
                goto out2;

        b = inb(ioaddr + BDID);
        if(b != BRD_ID_8115T)
        {
                printk(KERN_ERR "%s: The adapter found is not supported\n", dev->name);
                goto out2;
        }

        /* Check for 8115T Board ID */
        r2 = 0;
        for(r = 0; r < 8; r++)
        {
                r1 = inb(ioaddr + 0x8 + r);
                r2 += r1;
        }

        /* value of RegF adds up the sum to 0xFF */
        if((r2 != 0xFF) && (r2 != 0xEE))
                goto out2;

        /* Get adapter ID */
        tp->board_id = smctr_get_boardid(dev, 0);
        switch(tp->board_id & 0xffff)
        {
                case WD8115TA:
                        smctr_model = "8115T/A";
                        break;

                case WD8115T:
                        if(tp->extra_info & CHIP_REV_MASK)
                                smctr_model = "8115T rev XE";
                        else
                                smctr_model = "8115T rev XD";
                        break;

                default:
                        smctr_model = "Unknown";
                        break;
        }

        /* Store BIC type. */
        tp->bic_type = BIC_584_CHIP;
        tp->nic_type = NIC_825_CHIP;

        /* Copy Ram Size */
        tp->ram_usable  = CNFG_SIZE_16KB;
        tp->ram_size    = CNFG_SIZE_64KB;

        /* Get 58x Ram Base */
        r1 = inb(ioaddr);
        r1 &= 0x3F;

        r2 = inb(ioaddr + CNFG_LAAR_584);
        r2 &= CNFG_LAAR_MASK;
        r2 <<= 3;
        r2 |= ((r1 & 0x38) >> 3);

        tp->ram_base = ((__u32)r2 << 16) + (((__u32)(r1 & 0x7)) << 13);

        /* Get 584 Irq */
        r1 = 0;
        r1 = inb(ioaddr + CNFG_ICR_583);
        r1 &= CNFG_ICR_IR2_584;

        r2 = inb(ioaddr + CNFG_IRR_583);
        r2 &= CNFG_IRR_IRQS;    /* 0x60 */
        r2 >>= 5;

        /* Decode the 2-bit IRQ index plus the IR2 bit into an IRQ line. */
        switch(r2)
        {
                case 0:
                        if(r1 == 0)
                                dev->irq = 2;
                        else
                                dev->irq = 10;
                        break;

                case 1:
                        if(r1 == 0)
                                dev->irq = 3;
                        else
                                dev->irq = 11;
                        break;

                case 2:
                        if(r1 == 0)
                        {
                                if(tp->extra_info & ALTERNATE_IRQ_BIT)
                                        dev->irq = 5;
                                else
                                        dev->irq = 4;
                        }
                        else
                                dev->irq = 15;
                        break;

                case 3:
                        if(r1 == 0)
                                dev->irq = 7;
                        else
                                dev->irq = 4;
                        break;

                default:
                        printk(KERN_ERR "%s: No IRQ found aborting\n", dev->name);
                        goto out2;
        }

        if (request_irq(dev->irq, smctr_interrupt, IRQF_SHARED, smctr_name, dev))
                goto out2;

        /* Get 58x Rom Base */
        r1 = inb(ioaddr + CNFG_BIO_583);
        r1 &= 0x3E;
        r1 |= 0x40;

        tp->rom_base = (__u32)r1 << 13;

        /* Get 58x Rom Size */
        r1 = inb(ioaddr + CNFG_BIO_583);
        r1 &= 0xC0;
        if(r1 == 0)
                tp->rom_size = ROM_DISABLE;
        else
        {
                r1 >>= 6;
                tp->rom_size = (__u16)CNFG_SIZE_8KB << r1;
        }

        /* Get 58x Boot Status */
        r1 = inb(ioaddr + CNFG_GP2);

        tp->mode_bits &= (~BOOT_STATUS_MASK);
        if(r1 & CNFG_GP2_BOOT_NIBBLE)
                tp->mode_bits |= BOOT_TYPE_1;

        /* Get 58x Zero Wait State */
        tp->mode_bits &= (~ZERO_WAIT_STATE_MASK);

        r1 = inb(ioaddr + CNFG_IRR_583);
        if(r1 & CNFG_IRR_ZWS)
                tp->mode_bits |= ZERO_WAIT_STATE_8_BIT;

        if(tp->board_id & BOARD_16BIT)
        {
                r1 = inb(ioaddr + CNFG_LAAR_584);
                if(r1 & CNFG_LAAR_ZWS)
                        tp->mode_bits |= ZERO_WAIT_STATE_16_BIT;
        }

        /* Get 584 Media Menu */
        tp->media_menu = 14;
        r1 = inb(ioaddr + CNFG_IRR_583);

        tp->mode_bits &= 0xf8ff;        /* (~CNFG_INTERFACE_TYPE_MASK) */
        if((tp->board_id & TOKEN_MEDIA) == TOKEN_MEDIA)
        {
                /* Get Advanced Features */
                if(((r1 & 0x6) >> 1) == 0x3)
                        tp->media_type |= MEDIA_UTP_16;
                else
                {
                        if(((r1 & 0x6) >> 1) == 0x2)
                                tp->media_type |= MEDIA_STP_16;
                        else
                        {
                                if(((r1 & 0x6) >> 1) == 0x1)
                                        tp->media_type |= MEDIA_UTP_4;
                                else
                                        tp->media_type |= MEDIA_STP_4;
                        }
                }

                r1 = inb(ioaddr + CNFG_GP2);
                if(!(r1 & 0x2) )                /* GP2_ETRD */
                        tp->mode_bits |= EARLY_TOKEN_REL;

                /* see if the chip is corrupted
                if(smctr_read_584_chksum(ioaddr))
                {
                        printk(KERN_ERR "%s: EEPROM Checksum Failure\n", dev->name);
                        free_irq(dev->irq, dev);
                        goto out2;
                }
                */
        }

        return (0);

out2:
        release_region(ioaddr, SMCTR_IO_EXTENT);
out:
        return err;
}

/* Identify the board (ISA 584 vs MCA 594 BIC), latch the chip revision
 * from the ID EEPROM via the BID register recall sequence, and return
 * the board-id capability mask (or -1 if the major version is > 1.0).
 * NOTE(review): the EEPROM recall busy-waits with no timeout — hangs if
 * BID_RECALL_DONE_MASK never clears; confirm acceptable for __init.
 */
static int __init smctr_get_boardid(struct net_device *dev, int mca)
{
        struct net_local *tp = netdev_priv(dev);
        int ioaddr = dev->base_addr;
        __u8 r, r1, IdByte;
        __u16 BoardIdMask;

        tp->board_id = BoardIdMask = 0;

        if(mca)
        {
                BoardIdMask |= (MICROCHANNEL+INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT);
                tp->extra_info |= (INTERFACE_594_CHIP+RAM_SIZE_64K+NIC_825_BIT+ALTERNATE_IRQ_BIT+SLOT_16BIT);
        }
        else
        {
                BoardIdMask|=(INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT);
                tp->extra_info |= (INTERFACE_584_CHIP + RAM_SIZE_64K
                        + NIC_825_BIT + ALTERNATE_IRQ_BIT);
        }

        if(!mca)
        {
                r = inb(ioaddr + BID_REG_1);
                r &= 0x0c;
                outb(r, ioaddr + BID_REG_1);
                r = inb(ioaddr + BID_REG_1);

                if(r & BID_SIXTEEN_BIT_BIT)
                {
                        tp->extra_info |= SLOT_16BIT;
                        tp->adapter_bus = BUS_ISA16_TYPE;
                }
                else
                        tp->adapter_bus = BUS_ISA8_TYPE;
        }
        else
                tp->adapter_bus = BUS_MCA_TYPE;

        /* Get Board Id Byte */
        IdByte = inb(ioaddr + BID_BOARD_ID_BYTE);

        /* if Major version > 1.0 then
         *      return;
         */
        if(IdByte & 0xF8)
                return (-1);

        /* First EEPROM recall: fetch the chip-revision byte. */
        r1 = inb(ioaddr + BID_REG_1);
        r1 &= BID_ICR_MASK;
        r1 |= BID_OTHER_BIT;

        outb(r1, ioaddr + BID_REG_1);
        r1 = inb(ioaddr + BID_REG_3);

        r1 &= BID_EAR_MASK;
        r1 |= BID_ENGR_PAGE;

        outb(r1, ioaddr + BID_REG_3);
        r1 = inb(ioaddr + BID_REG_1);
        r1 &= BID_ICR_MASK;
        r1 |= (BID_RLA | BID_OTHER_BIT);

        outb(r1, ioaddr + BID_REG_1);

        r1 = inb(ioaddr + BID_REG_1);
        while(r1 & BID_RECALL_DONE_MASK)
                r1 = inb(ioaddr + BID_REG_1);

        r = inb(ioaddr + BID_LAR_0 + BID_REG_6);

        /* clear chip rev bits */
        tp->extra_info &= ~CHIP_REV_MASK;
        tp->extra_info |= ((r & BID_EEPROM_CHIP_REV_MASK) << 6);

        /* Second recall: restore the normal EEPROM page. */
        r1 = inb(ioaddr + BID_REG_1);
        r1 &= BID_ICR_MASK;
        r1 |= BID_OTHER_BIT;

        outb(r1, ioaddr + BID_REG_1);
        r1 = inb(ioaddr + BID_REG_3);

        r1 &= BID_EAR_MASK;
        r1 |= BID_EA6;

        outb(r1, ioaddr + BID_REG_3);
        r1 = inb(ioaddr + BID_REG_1);
        r1 &= BID_ICR_MASK;
        r1 |= BID_RLA;

        outb(r1, ioaddr + BID_REG_1);
        r1 = inb(ioaddr + BID_REG_1);
        while(r1 & BID_RECALL_DONE_MASK)
                r1 = inb(ioaddr + BID_REG_1);

        return (BoardIdMask);
}

/* Read the individual/group address registers from the TRC. */
static int smctr_get_group_address(struct net_device *dev)
{
        smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR);

        return(smctr_wait_cmd(dev));
}

/* Read the functional address registers from the TRC. */
static int smctr_get_functional_address(struct net_device *dev)
{
        smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR);

        return(smctr_wait_cmd(dev));
}

/* Calculate number of Non-MAC receive BDB's and data buffers.
 * This function must simulate allocating shared memory exactly
 * as the allocate_shared_memory function above.
 */
static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        unsigned int mem_used = 0;

        /* Each step below mirrors one allocation in
         * smctr_alloc_shared_memory(); keep the two in lock step. */

        /* Allocate System Control Blocks. */
        mem_used += sizeof(SCGBlock);

        mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
        mem_used += sizeof(SCLBlock);

        mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
        mem_used += sizeof(ACBlock) * tp->num_acbs;

        mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
        mem_used += sizeof(ISBlock);

        mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
        mem_used += MISC_DATA_SIZE;

        /* Allocate transmit FCB's. */
        mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);

        mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE];
        mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE];
        mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE];

        /* Allocate transmit BDBs. */
        mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE];
        mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE];
        mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE];

        /* Allocate receive FCBs. */
        mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE];
        mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE];

        /* Allocate receive BDBs. */
        mem_used += sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE];

        /* Allocate MAC transmit buffers.
         * MAC transmit buffers don't have to be on an ODD Boundry.
         */
        mem_used += tp->tx_buff_size[MAC_QUEUE];

        /* Allocate BUG transmit buffers. */
        mem_used += tp->tx_buff_size[BUG_QUEUE];

        /* Allocate MAC receive data buffers.
         * MAC receive buffers don't have to be on a 256 byte boundary.
         */
        mem_used += RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE];

        /* Allocate Non-MAC transmit buffers.
         * For maximum Netware performance, put Tx Buffers on
         * ODD Boundry,and then restore malloc to Even Boundrys.
         */
        mem_used += 1L;
        mem_used += tp->tx_buff_size[NON_MAC_QUEUE];
        mem_used += 1L;

        /* CALCULATE NUMBER OF NON-MAC RX BDB'S
         * AND NON-MAC RX DATA BUFFERS
         *
         * Make sure the mem_used offset at this point is the
         * same as in allocate_shared memory or the following
         * boundary adjustment will be incorrect (i.e. not allocating
         * the non-mac receive buffers above cannot change the 256
         * byte offset).
         *
         * Since this cannot be guaranteed, adding the full 256 bytes
         * to the amount of shared memory used at this point will guaranteed
         * that the rx data buffers do not overflow shared memory.
         */
        mem_used += 0x100;

        return((0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock)));
}

/* Read the physical drop number registers from the TRC. */
static int smctr_get_physical_drop_number(struct net_device *dev)
{
        smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER);

        return(smctr_wait_cmd(dev));
}

/* Resolve the current rx FCB's TRC-relative BDB pointer into a host
 * pointer, cache it in the FCB, and return the data buffer address.
 */
static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue)
{
        struct net_local *tp = netdev_priv(dev);
        BDBlock *bdb;

        bdb = (BDBlock *)((__u32)tp->ram_access
                + (__u32)(tp->rx_fcb_curr[queue]->trc_bdb_ptr));

        tp->rx_fcb_curr[queue]->bdb_ptr = bdb;

        return ((__u8 *)bdb->data_block_ptr);
}

/* Read the individual MAC address registers from the TRC. */
static int smctr_get_station_id(struct net_device *dev)
{
        smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS);

        return(smctr_wait_cmd(dev));
}

/*
 * Get the current statistics. This may be called with the card open
 * or closed.
*/ static struct net_device_stats *smctr_get_stats(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); return ((struct net_device_stats *)&tp->MacStat); } static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue, __u16 bytes_count) { struct net_local *tp = netdev_priv(dev); FCBlock *pFCB; BDBlock *pbdb; unsigned short alloc_size; unsigned short *temp; if(smctr_debug > 20) printk(KERN_DEBUG "smctr_get_tx_fcb\n"); /* check if there is enough FCB blocks */ if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue]) return ((FCBlock *)(-1L)); /* round off the input pkt size to the nearest even number */ alloc_size = (bytes_count + 1) & 0xfffe; /* check if enough mem */ if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue]) return ((FCBlock *)(-1L)); /* check if past the end ; * if exactly enough mem to end of ring, alloc from front. * this avoids update of curr when curr = end */ if(((unsigned long)(tp->tx_buff_curr[queue]) + alloc_size) >= (unsigned long)(tp->tx_buff_end[queue])) { /* check if enough memory from ring head */ alloc_size = alloc_size + (__u16)((__u32)tp->tx_buff_end[queue] - (__u32)tp->tx_buff_curr[queue]); if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue]) { return ((FCBlock *)(-1L)); } /* ring wrap */ tp->tx_buff_curr[queue] = tp->tx_buff_head[queue]; } tp->tx_buff_used[queue] += alloc_size; tp->num_tx_fcbs_used[queue]++; tp->tx_fcb_curr[queue]->frame_length = bytes_count; tp->tx_fcb_curr[queue]->memory_alloc = alloc_size; temp = tp->tx_buff_curr[queue]; tp->tx_buff_curr[queue] = (__u16 *)((__u32)temp + (__u32)((bytes_count + 1) & 0xfffe)); pbdb = tp->tx_fcb_curr[queue]->bdb_ptr; pbdb->buffer_length = bytes_count; pbdb->data_block_ptr = temp; pbdb->trc_data_block_ptr = TRC_POINTER(temp); pFCB = tp->tx_fcb_curr[queue]; tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr; return (pFCB); } static int smctr_get_upstream_neighbor_addr(struct net_device *dev) { smctr_issue_read_word_cmd(dev, 
RW_UPSTREAM_NEIGHBOR_ADDRESS);

    return(smctr_wait_cmd(dev));
}

/* Drain tp->SendSkbQueue, copying each frame into adapter shared RAM
 * and handing it to the TRC on the NON-MAC queue.  Returns -1 when the
 * queue is empty, the adapter is not open/ready, a frame has a bad
 * length, or no tx FCB is available; 0 is unreachable in practice.
 */
static int smctr_hardware_send_packet(struct net_device *dev,
    struct net_local *tp)
{
    struct tr_statistics *tstat = &tp->MacStat;
    struct sk_buff *skb;
    FCBlock *fcb;

    if(smctr_debug > 10)
        printk(KERN_DEBUG"%s: smctr_hardware_send_packet\n", dev->name);

    if(tp->status != OPEN)
        return (-1);

    if(tp->monitor_state_ready != 1)
        return (-1);

    for(;;)
    {
        /* Send first buffer from queue */
        skb = skb_dequeue(&tp->SendSkbQueue);
        if(skb == NULL)
            return (-1);

        tp->QueueSkb++;

        if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size)
            return (-1);

        smctr_enable_16bit(dev);
        smctr_set_page(dev, (__u8 *)tp->ram_access);

        if((fcb = smctr_get_tx_fcb(dev, NON_MAC_QUEUE, skb->len))
            == (FCBlock *)(-1L))
        {
            smctr_disable_16bit(dev);
            return (-1);
        }

        smctr_tx_move_frame(dev, skb,
            (__u8 *)fcb->bdb_ptr->data_block_ptr, skb->len);

        smctr_set_page(dev, (__u8 *)fcb);

        smctr_trc_send_packet(dev, fcb, NON_MAC_QUEUE);
        dev_kfree_skb(skb);

        tstat->tx_packets++;

        smctr_disable_16bit(dev);
    }

    return (0);
}

/* Initialize the Action Command Block (ACB) chain in shared memory as
 * a circular list and reset the driver's ACB bookkeeping.
 */
static int smctr_init_acbs(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    unsigned int i;
    ACBlock *acb;

    if(smctr_debug > 10)
        printk(KERN_DEBUG "%s: smctr_init_acbs\n", dev->name);

    acb = tp->acb_head;
    acb->cmd_done_status = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
    acb->cmd_info = ACB_CHAIN_END;
    acb->cmd = 0;
    acb->subcmd = 0;
    acb->data_offset_lo = 0;
    acb->data_offset_hi = 0;
    acb->next_ptr = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
    acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);

    for(i = 1; i < tp->num_acbs; i++)
    {
        acb = acb->next_ptr;
        acb->cmd_done_status
            = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
        acb->cmd_info = ACB_CHAIN_END;
        acb->cmd = 0;
        acb->subcmd = 0;
        acb->data_offset_lo = 0;
        acb->data_offset_hi = 0;
        acb->next_ptr
            = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
        acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);
    }

    /* Close the ring: the last ACB points back at the head. */
    acb->next_ptr = tp->acb_head;
    acb->trc_next_ptr = TRC_POINTER(tp->acb_head);

    tp->acb_next = tp->acb_head->next_ptr;
    tp->acb_curr = tp->acb_head->next_ptr;
    tp->num_acbs_used = 0;

    return (0);
}

/* Bring the adapter from reset to an initialized state: verify the
 * resident firmware checksum, test RAM, load the node address, run the
 * internal self test, and enable adapter interrupts.
 */
static int smctr_init_adapter(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    int err;

    if(smctr_debug > 10)
        printk(KERN_DEBUG "%s: smctr_init_adapter\n", dev->name);

    tp->status = CLOSED;
    tp->page_offset_mask = (tp->ram_usable * 1024) - 1;
    skb_queue_head_init(&tp->SendSkbQueue);
    tp->QueueSkb = MAX_TX_QUEUE;

    if(!(tp->group_address_0 & 0x0080))
        tp->group_address_0 |= 0x00C0;

    if(!(tp->functional_address_0 & 0x00C0))
        tp->functional_address_0 |= 0x00C0;

    tp->functional_address[0] &= 0xFF7F;

    if(tp->authorized_function_classes == 0)
        tp->authorized_function_classes = 0x7FFF;

    if(tp->authorized_access_priority == 0)
        tp->authorized_access_priority = 0x06;

    smctr_disable_bic_int(dev);
    smctr_set_trc_reset(dev->base_addr);

    smctr_enable_16bit(dev);
    smctr_set_page(dev, (__u8 *)tp->ram_access);

    if(smctr_checksum_firmware(dev))
    {
        printk(KERN_ERR "%s: Previously loaded firmware is missing\n",dev->name);
        return (-ENOENT);
    }

    if((err = smctr_ram_memory_test(dev)))
    {
        printk(KERN_ERR "%s: RAM memory test failed.\n", dev->name);
        return (-EIO);
    }

    smctr_set_rx_look_ahead(dev);
    smctr_load_node_addr(dev);

    /* Initialize adapter for Internal Self Test. */
    smctr_reset_adapter(dev);
    if((err = smctr_init_card_real(dev)))
    {
        printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
            dev->name, err);
        return (-EINVAL);
    }

    /* This routine clobbers the TRC's internal registers.
*/
    if((err = smctr_internal_self_test(dev)))
    {
        printk(KERN_ERR "%s: Card failed internal self test (%d)\n",
            dev->name, err);
        return (-EINVAL);
    }

    /* Re-Initialize adapter's internal registers */
    smctr_reset_adapter(dev);
    if((err = smctr_init_card_real(dev)))
    {
        printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
            dev->name, err);
        return (-EINVAL);
    }

    smctr_enable_bic_int(dev);

    if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
        return (err);

    smctr_disable_16bit(dev);

    return (0);
}

/* Program the card's queue/buffer geometry (tx and rx FCB/BDB counts,
 * buffer sizes), lay out and initialize shared memory, then issue the
 * timer and TX/RX initialization commands to the TRC.
 */
static int smctr_init_card_real(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    int err = 0;

    if(smctr_debug > 10)
        printk(KERN_DEBUG "%s: smctr_init_card_real\n", dev->name);

    tp->sh_mem_used = 0;
    tp->num_acbs = NUM_OF_ACBS;

    /* Range Check Max Packet Size */
    if(tp->max_packet_size < 256)
        tp->max_packet_size = 256;
    else
    {
        if(tp->max_packet_size > NON_MAC_TX_BUFFER_MEMORY)
            tp->max_packet_size = NON_MAC_TX_BUFFER_MEMORY;
    }

    tp->num_of_tx_buffs
        = (NON_MAC_TX_BUFFER_MEMORY / tp->max_packet_size) - 1;

    if(tp->num_of_tx_buffs > NUM_NON_MAC_TX_FCBS)
        tp->num_of_tx_buffs = NUM_NON_MAC_TX_FCBS;
    else
    {
        if(tp->num_of_tx_buffs == 0)
            tp->num_of_tx_buffs = 1;
    }

    /* Tx queue constants */
    tp->num_tx_fcbs     [BUG_QUEUE]     = NUM_BUG_TX_FCBS;
    tp->num_tx_bdbs     [BUG_QUEUE]     = NUM_BUG_TX_BDBS;
    tp->tx_buff_size    [BUG_QUEUE]     = BUG_TX_BUFFER_MEMORY;
    tp->tx_buff_used    [BUG_QUEUE]     = 0;
    tp->tx_queue_status [BUG_QUEUE]     = NOT_TRANSMITING;

    tp->num_tx_fcbs     [MAC_QUEUE]     = NUM_MAC_TX_FCBS;
    tp->num_tx_bdbs     [MAC_QUEUE]     = NUM_MAC_TX_BDBS;
    tp->tx_buff_size    [MAC_QUEUE]     = MAC_TX_BUFFER_MEMORY;
    tp->tx_buff_used    [MAC_QUEUE]     = 0;
    tp->tx_queue_status [MAC_QUEUE]     = NOT_TRANSMITING;

    tp->num_tx_fcbs     [NON_MAC_QUEUE] = NUM_NON_MAC_TX_FCBS;
    tp->num_tx_bdbs     [NON_MAC_QUEUE] = NUM_NON_MAC_TX_BDBS;
    tp->tx_buff_size    [NON_MAC_QUEUE] = NON_MAC_TX_BUFFER_MEMORY;
    tp->tx_buff_used    [NON_MAC_QUEUE] = 0;
    tp->tx_queue_status [NON_MAC_QUEUE] = NOT_TRANSMITING;

    /* Receive Queue Constants */
    tp->num_rx_fcbs[MAC_QUEUE] = NUM_MAC_RX_FCBS;
    tp->num_rx_bdbs[MAC_QUEUE] = NUM_MAC_RX_BDBS;

    if(tp->extra_info & CHIP_REV_MASK)
        tp->num_rx_fcbs[NON_MAC_QUEUE] = 78;    /* 825 Rev. XE */
    else
        tp->num_rx_fcbs[NON_MAC_QUEUE] = 7;     /* 825 Rev. XD */

    tp->num_rx_bdbs[NON_MAC_QUEUE] = smctr_get_num_rx_bdbs(dev);

    smctr_alloc_shared_memory(dev);
    smctr_init_shared_memory(dev);

    if((err = smctr_issue_init_timers_cmd(dev)))
        return (err);

    if((err = smctr_issue_init_txrx_cmd(dev)))
    {
        printk(KERN_ERR "%s: Hardware failure\n", dev->name);
        return (err);
    }

    return (0);
}

/* Build each receive queue's BDB chain as a circular, doubly linked
 * list and attach a data buffer to every BDB.  Non-MAC queue buffers
 * get their TRC-relative addresses via RX_BUFF_TRC_POINTER.
 */
static int smctr_init_rx_bdbs(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    unsigned int i, j;
    BDBlock *bdb;
    __u16 *buf;

    if(smctr_debug > 10)
        printk(KERN_DEBUG "%s: smctr_init_rx_bdbs\n", dev->name);

    for(i = 0; i < NUM_RX_QS_USED; i++)
    {
        bdb = tp->rx_bdb_head[i];
        buf = tp->rx_buff_head[i];
        bdb->info = (BDB_CHAIN_END | BDB_NO_WARNING);
        bdb->buffer_length = RX_DATA_BUFFER_SIZE;
        bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
        bdb->data_block_ptr = buf;
        bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);

        if(i == NON_MAC_QUEUE)
            bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf);
        else
            bdb->trc_data_block_ptr = TRC_POINTER(buf);

        for(j = 1; j < tp->num_rx_bdbs[i]; j++)
        {
            bdb->next_ptr->back_ptr = bdb;
            bdb = bdb->next_ptr;
            buf = (__u16 *)((char *)buf + RX_DATA_BUFFER_SIZE);
            bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
            bdb->buffer_length = RX_DATA_BUFFER_SIZE;
            bdb->next_ptr
                = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
            bdb->data_block_ptr = buf;
            bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);

            if(i == NON_MAC_QUEUE)
                bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf);
            else
                bdb->trc_data_block_ptr = TRC_POINTER(buf);
        }

        bdb->next_ptr = tp->rx_bdb_head[i];
        bdb->trc_next_ptr = TRC_POINTER(tp->rx_bdb_head[i]);

        tp->rx_bdb_head[i]->back_ptr = bdb;
        tp->rx_bdb_curr[i] = tp->rx_bdb_head[i]->next_ptr;
    }

    return (0);
}

/* Build each receive queue's FCB chain as a circular, doubly linked
 * list.  Non-MAC queue FCBs use RX_FCB_TRC_POINTER for TRC addresses.
 */
static int smctr_init_rx_fcbs(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    unsigned int i, j;
FCBlock *fcb;

    for(i = 0; i < NUM_RX_QS_USED; i++)
    {
        fcb = tp->rx_fcb_head[i];
        fcb->frame_status = 0;
        fcb->frame_length = 0;
        fcb->info = FCB_CHAIN_END;
        fcb->next_ptr = (FCBlock *)(((char*)fcb) + sizeof(FCBlock));
        if(i == NON_MAC_QUEUE)
            fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr);
        else
            fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);

        for(j = 1; j < tp->num_rx_fcbs[i]; j++)
        {
            fcb->next_ptr->back_ptr = fcb;
            fcb = fcb->next_ptr;
            fcb->frame_status = 0;
            fcb->frame_length = 0;
            fcb->info = FCB_WARNING;
            fcb->next_ptr
                = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));

            if(i == NON_MAC_QUEUE)
                fcb->trc_next_ptr
                    = RX_FCB_TRC_POINTER(fcb->next_ptr);
            else
                fcb->trc_next_ptr
                    = TRC_POINTER(fcb->next_ptr);
        }

        /* Close the ring back to the queue head. */
        fcb->next_ptr = tp->rx_fcb_head[i];

        if(i == NON_MAC_QUEUE)
            fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr);
        else
            fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);

        tp->rx_fcb_head[i]->back_ptr = fcb;
        tp->rx_fcb_curr[i] = tp->rx_fcb_head[i]->next_ptr;
    }

    return(0);
}

/* Initialize every control structure in adapter shared memory: the
 * ISCP, the SCGB pointers, the SCLB, the ISB, the ACB chain, and all
 * tx/rx FCB and BDB chains.
 */
static int smctr_init_shared_memory(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    unsigned int i;
    __u32 *iscpb;

    if(smctr_debug > 10)
        printk(KERN_DEBUG "%s: smctr_init_shared_memory\n", dev->name);

    smctr_set_page(dev, (__u8 *)(unsigned int)tp->iscpb_ptr);

    /* Initialize Initial System Configuration Point. (ISCP) */
    iscpb = (__u32 *)PAGE_POINTER(&tp->iscpb_ptr->trc_scgb_ptr);
    *iscpb = (__u32)(SWAP_WORDS(TRC_POINTER(tp->scgb_ptr)));

    smctr_set_page(dev, (__u8 *)tp->ram_access);

    /* Initialize System Configuration Pointers. (SCP) */
    tp->scgb_ptr->config = (SCGB_ADDRESS_POINTER_FORMAT
        | SCGB_MULTI_WORD_CONTROL | SCGB_DATA_FORMAT
        | SCGB_BURST_LENGTH);

    tp->scgb_ptr->trc_sclb_ptr  = TRC_POINTER(tp->sclb_ptr);
    tp->scgb_ptr->trc_acb_ptr   = TRC_POINTER(tp->acb_head);
    tp->scgb_ptr->trc_isb_ptr   = TRC_POINTER(tp->isb_ptr);
    tp->scgb_ptr->isbsiz        = (sizeof(ISBlock)) - 2;

    /* Initialize System Control Block. (SCB) */
    tp->sclb_ptr->valid_command    = SCLB_VALID | SCLB_CMD_NOP;
    tp->sclb_ptr->iack_code        = 0;
    tp->sclb_ptr->resume_control   = 0;
    tp->sclb_ptr->int_mask_control = 0;
    tp->sclb_ptr->int_mask_state   = 0;

    /* Initialize Interrupt Status Block. (ISB) */
    for(i = 0; i < NUM_OF_INTERRUPTS; i++)
    {
        tp->isb_ptr->IStatus[i].IType = 0xf0;
        tp->isb_ptr->IStatus[i].ISubtype = 0;
    }

    tp->current_isb_index = 0;

    /* Initialize Action Command Block. (ACB) */
    smctr_init_acbs(dev);

    /* Initialize transmit FCB's and BDB's. */
    smctr_link_tx_fcbs_to_bdbs(dev);
    smctr_init_tx_bdbs(dev);
    smctr_init_tx_fcbs(dev);

    /* Initialize receive FCB's and BDB's. */
    smctr_init_rx_bdbs(dev);
    smctr_init_rx_fcbs(dev);

    return (0);
}

/* Build each transmit queue's BDB chain as a circular, doubly linked
 * list.
 */
static int smctr_init_tx_bdbs(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    unsigned int i, j;
    BDBlock *bdb;

    for(i = 0; i < NUM_TX_QS_USED; i++)
    {
        bdb = tp->tx_bdb_head[i];
        bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
        bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
        bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);

        for(j = 1; j < tp->num_tx_bdbs[i]; j++)
        {
            bdb->next_ptr->back_ptr = bdb;
            bdb = bdb->next_ptr;
            bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
            bdb->next_ptr
                = (BDBlock *)(((char *)bdb) + sizeof( BDBlock));
            bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
        }

        bdb->next_ptr = tp->tx_bdb_head[i];
        bdb->trc_next_ptr = TRC_POINTER(tp->tx_bdb_head[i]);

        tp->tx_bdb_head[i]->back_ptr = bdb;
    }

    return (0);
}

/* Build each transmit queue's FCB chain as a circular, doubly linked
 * list and reset the queue's curr/end pointers and usage count.
 */
static int smctr_init_tx_fcbs(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    unsigned int i, j;
    FCBlock *fcb;

    for(i = 0; i < NUM_TX_QS_USED; i++)
    {
        fcb = tp->tx_fcb_head[i];
        fcb->frame_status = 0;
        fcb->frame_length = 0;
        fcb->info = FCB_CHAIN_END;
        fcb->next_ptr = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
        fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);

        for(j = 1; j < tp->num_tx_fcbs[i]; j++)
        {
            fcb->next_ptr->back_ptr = fcb;
            fcb = fcb->next_ptr;
            fcb->frame_status = 0;
            fcb->frame_length = 0;
            fcb->info = FCB_CHAIN_END;
            fcb->next_ptr
                = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
            fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
        }

        fcb->next_ptr = tp->tx_fcb_head[i];
        fcb->trc_next_ptr = TRC_POINTER(tp->tx_fcb_head[i]);

        tp->tx_fcb_head[i]->back_ptr = fcb;
        tp->tx_fcb_end[i]            = tp->tx_fcb_head[i]->next_ptr;
        tp->tx_fcb_curr[i]           = tp->tx_fcb_head[i]->next_ptr;
        tp->num_tx_fcbs_used[i]      = 0;
    }

    return (0);
}

/* Run the adapter's internal ROM, host interface, and MAC register
 * self tests; fail if any command completes with a non-zero status.
 */
static int smctr_internal_self_test(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    int err;

    if((err = smctr_issue_test_internal_rom_cmd(dev)))
        return (err);

    if((err = smctr_wait_cmd(dev)))
        return (err);

    if(tp->acb_head->cmd_done_status & 0xff)
        return (-1);

    if((err = smctr_issue_test_hic_cmd(dev)))
        return (err);

    if((err = smctr_wait_cmd(dev)))
        return (err);

    if(tp->acb_head->cmd_done_status & 0xff)
        return (-1);

    if((err = smctr_issue_test_mac_reg_cmd(dev)))
        return (err);

    if((err = smctr_wait_cmd(dev)))
        return (err);

    if(tp->acb_head->cmd_done_status & 0xff)
        return (-1);

    return (0);
}

/*
 * The typical workload of the driver: Handle the network interface interrupts.
*/
static irqreturn_t smctr_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct net_local *tp;
    int ioaddr;
    __u16 interrupt_unmask_bits = 0, interrupt_ack_code = 0xff00;
    __u16 err1, err = NOT_MY_INTERRUPT;
    __u8 isb_type, isb_subtype;
    __u16 isb_index;

    ioaddr = dev->base_addr;
    tp = netdev_priv(dev);

    if(tp->status == NOT_INITIALIZED)
        return IRQ_NONE;

    spin_lock(&tp->lock);

    smctr_disable_bic_int(dev);
    smctr_enable_16bit(dev);

    smctr_clear_int(dev);

    /* First read the LSB */
    while((tp->isb_ptr->IStatus[tp->current_isb_index].IType & 0xf0) == 0)
    {
        isb_index = tp->current_isb_index;
        isb_type = tp->isb_ptr->IStatus[isb_index].IType;
        isb_subtype = tp->isb_ptr->IStatus[isb_index].ISubtype;

        /* Advance the circular ISB cursor. */
        (tp->current_isb_index)++;
        if(tp->current_isb_index == NUM_OF_INTERRUPTS)
            tp->current_isb_index = 0;

        if(isb_type >= 0x10)
        {
            smctr_disable_16bit(dev);
            spin_unlock(&tp->lock);
            return IRQ_HANDLED;
        }

        err = HARDWARE_FAILED;
        interrupt_ack_code = isb_index;
        /* Mark the ISB entry consumed. */
        tp->isb_ptr->IStatus[isb_index].IType |= 0xf0;

        interrupt_unmask_bits |= (1 << (__u16)isb_type);

        switch(isb_type)
        {
            case ISB_IMC_MAC_TYPE_3:
                smctr_disable_16bit(dev);

                switch(isb_subtype)
                {
                    case 0:
                        tp->monitor_state = MS_MONITOR_FSM_INACTIVE;
                        break;

                    case 1:
                        tp->monitor_state = MS_REPEAT_BEACON_STATE;
                        break;

                    case 2:
                        tp->monitor_state = MS_REPEAT_CLAIM_TOKEN_STATE;
                        break;

                    case 3:
                        tp->monitor_state = MS_TRANSMIT_CLAIM_TOKEN_STATE;
                        break;

                    case 4:
                        tp->monitor_state = MS_STANDBY_MONITOR_STATE;
                        break;

                    case 5:
                        tp->monitor_state = MS_TRANSMIT_BEACON_STATE;
                        break;

                    case 6:
                        tp->monitor_state = MS_ACTIVE_MONITOR_STATE;
                        break;

                    case 7:
                        tp->monitor_state = MS_TRANSMIT_RING_PURGE_STATE;
                        break;

                    case 8:     /* diagnostic state */
                        break;

                    case 9:
                        tp->monitor_state = MS_BEACON_TEST_STATE;
                        if(smctr_lobe_media_test(dev))
                        {
                            tp->ring_status_flags = RING_STATUS_CHANGED;
                            tp->ring_status = AUTO_REMOVAL_ERROR;
                            smctr_ring_status_chg(dev);
                            smctr_bypass_state(dev);
                        }
                        else
                            smctr_issue_insert_cmd(dev);
                        break;

                    /* case 0x0a-0xff, illegal states */
                    default:
                        break;
                }

                tp->ring_status_flags = MONITOR_STATE_CHANGED;
                err = smctr_ring_status_chg(dev);
                smctr_enable_16bit(dev);
                break;

            /* Type 0x02 - MAC Error Counters Interrupt
             * One or more MAC Error Counter is half full
             *    MAC Error Counters
             *    Lost_FR_Error_Counter
             *    RCV_Congestion_Counter
             *    FR_copied_Error_Counter
             *    FREQ_Error_Counter
             *    Token_Error_Counter
             *    Line_Error_Counter
             *    Internal_Error_Count
             */
            case ISB_IMC_MAC_ERROR_COUNTERS:
                /* Read 802.5 Error Counters */
                err = smctr_issue_read_ring_status_cmd(dev);
                break;

            /* Type 0x04 - MAC Type 2 Interrupt
             * HOST needs to enqueue MAC Frame for transmission
             * SubType Bit 15 - RQ_INIT_PDU( Request Initialization)
             * Changed from RQ_INIT_PDU to
             * TRC_Status_Changed_Indicate
             */
            case ISB_IMC_MAC_TYPE_2:
                err = smctr_issue_read_ring_status_cmd(dev);
                break;

            /* Type 0x05 - TX Frame Interrupt (FI). */
            case ISB_IMC_TX_FRAME:
                /* BUG QUEUE for TRC stuck receive BUG */
                if(isb_subtype & TX_PENDING_PRIORITY_2)
                {
                    if((err = smctr_tx_complete(dev, BUG_QUEUE)) != SUCCESS)
                        break;
                }

                /* NON-MAC frames only */
                if(isb_subtype & TX_PENDING_PRIORITY_1)
                {
                    if((err = smctr_tx_complete(dev, NON_MAC_QUEUE)) != SUCCESS)
                        break;
                }

                /* MAC frames only */
                if(isb_subtype & TX_PENDING_PRIORITY_0)
                    err = smctr_tx_complete(dev, MAC_QUEUE);
                break;

            /* Type 0x06 - TX END OF QUEUE (FE) */
            case ISB_IMC_END_OF_TX_QUEUE:
                /* BUG queue */
                if(isb_subtype & TX_PENDING_PRIORITY_2)
                {
                    /* ok to clear Receive FIFO overrun
                     * imask send_BUG now completes.
*/
                    interrupt_unmask_bits |= 0x800;

                    tp->tx_queue_status[BUG_QUEUE] = NOT_TRANSMITING;
                    if((err = smctr_tx_complete(dev, BUG_QUEUE)) != SUCCESS)
                        break;
                    if((err = smctr_restart_tx_chain(dev, BUG_QUEUE)) != SUCCESS)
                        break;
                }

                /* NON-MAC queue only */
                if(isb_subtype & TX_PENDING_PRIORITY_1)
                {
                    tp->tx_queue_status[NON_MAC_QUEUE] = NOT_TRANSMITING;
                    if((err = smctr_tx_complete(dev, NON_MAC_QUEUE)) != SUCCESS)
                        break;
                    if((err = smctr_restart_tx_chain(dev, NON_MAC_QUEUE)) != SUCCESS)
                        break;
                }

                /* MAC queue only */
                if(isb_subtype & TX_PENDING_PRIORITY_0)
                {
                    tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
                    if((err = smctr_tx_complete(dev, MAC_QUEUE)) != SUCCESS)
                        break;

                    err = smctr_restart_tx_chain(dev, MAC_QUEUE);
                }
                break;

            /* Type 0x07 - NON-MAC RX Resource Interrupt
             * Subtype bit 12 - (BW) BDB warning
             * Subtype bit 13 - (FW) FCB warning
             * Subtype bit 14 - (BE) BDB End of chain
             * Subtype bit 15 - (FE) FCB End of chain
             */
            case ISB_IMC_NON_MAC_RX_RESOURCE:
                tp->rx_fifo_overrun_count = 0;
                tp->receive_queue_number = NON_MAC_QUEUE;
                err1 = smctr_rx_frame(dev);

                if(isb_subtype & NON_MAC_RX_RESOURCE_FE)
                {
                    if((err = smctr_issue_resume_rx_fcb_cmd( dev,
                        NON_MAC_QUEUE)) != SUCCESS)
                        break;

                    if(tp->ptr_rx_fcb_overruns)
                        (*tp->ptr_rx_fcb_overruns)++;
                }

                if(isb_subtype & NON_MAC_RX_RESOURCE_BE)
                {
                    if((err = smctr_issue_resume_rx_bdb_cmd( dev,
                        NON_MAC_QUEUE)) != SUCCESS)
                        break;

                    if(tp->ptr_rx_bdb_overruns)
                        (*tp->ptr_rx_bdb_overruns)++;
                }
                err = err1;
                break;

            /* Type 0x08 - MAC RX Resource Interrupt
             * Subtype bit 12 - (BW) BDB warning
             * Subtype bit 13 - (FW) FCB warning
             * Subtype bit 14 - (BE) BDB End of chain
             * Subtype bit 15 - (FE) FCB End of chain
             */
            case ISB_IMC_MAC_RX_RESOURCE:
                tp->receive_queue_number = MAC_QUEUE;
                err1 = smctr_rx_frame(dev);

                if(isb_subtype & MAC_RX_RESOURCE_FE)
                {
                    if((err = smctr_issue_resume_rx_fcb_cmd( dev,
                        MAC_QUEUE)) != SUCCESS)
                        break;

                    if(tp->ptr_rx_fcb_overruns)
                        (*tp->ptr_rx_fcb_overruns)++;
                }

                if(isb_subtype & MAC_RX_RESOURCE_BE)
                {
                    if((err = smctr_issue_resume_rx_bdb_cmd( dev,
                        MAC_QUEUE)) != SUCCESS)
                        break;

                    if(tp->ptr_rx_bdb_overruns)
                        (*tp->ptr_rx_bdb_overruns)++;
                }
                err = err1;
                break;

            /* Type 0x09 - NON_MAC RX Frame Interrupt */
            case ISB_IMC_NON_MAC_RX_FRAME:
                tp->rx_fifo_overrun_count = 0;
                tp->receive_queue_number = NON_MAC_QUEUE;
                err = smctr_rx_frame(dev);
                break;

            /* Type 0x0A - MAC RX Frame Interrupt */
            case ISB_IMC_MAC_RX_FRAME:
                tp->receive_queue_number = MAC_QUEUE;
                err = smctr_rx_frame(dev);
                break;

            /* Type 0x0B - TRC status
             * TRC has encountered an error condition
             * subtype bit 14 - transmit FIFO underrun
             * subtype bit 15 - receive FIFO overrun
             */
            case ISB_IMC_TRC_FIFO_STATUS:
                if(isb_subtype & TRC_FIFO_STATUS_TX_UNDERRUN)
                {
                    if(tp->ptr_tx_fifo_underruns)
                        (*tp->ptr_tx_fifo_underruns)++;
                }

                if(isb_subtype & TRC_FIFO_STATUS_RX_OVERRUN)
                {
                    /* update overrun stuck receive counter
                     * if >= 3, has to clear it by sending
                     * back to back frames. We pick
                     * DAT(duplicate address MAC frame)
                     */
                    tp->rx_fifo_overrun_count++;

                    if(tp->rx_fifo_overrun_count >= 3)
                    {
                        tp->rx_fifo_overrun_count = 0;

                        /* delay clearing fifo overrun
                         * imask till send_BUG tx
                         * complete posted
                         */
                        interrupt_unmask_bits &= (~0x800);
                        printk(KERN_CRIT "Jay please send bug\n");// smctr_send_bug(dev);
                    }

                    if(tp->ptr_rx_fifo_overruns)
                        (*tp->ptr_rx_fifo_overruns)++;
                }

                err = SUCCESS;
                break;

            /* Type 0x0C - Action Command Status Interrupt
             * Subtype bit 14 - CB end of command chain (CE)
             * Subtype bit 15 - CB command interrupt (CI)
             */
            case ISB_IMC_COMMAND_STATUS:
                err = SUCCESS;
                if(tp->acb_head->cmd == ACB_CMD_HIC_NOP)
                {
                    printk(KERN_ERR "i1\n");
                    smctr_disable_16bit(dev);

                    /* XXXXXXXXXXXXXXXXX */
                    /* err = UM_Interrupt(dev); */

                    smctr_enable_16bit(dev);
                }
                else
                {
                    if((tp->acb_head->cmd == ACB_CMD_READ_TRC_STATUS)
                        && (tp->acb_head->subcmd == RW_TRC_STATUS_BLOCK))
                    {
                        if(tp->ptr_bcn_type)
                        {
                            *(tp->ptr_bcn_type)
                                = (__u32)((SBlock *)tp->misc_command_data)->BCN_Type;
                        }

                        if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate
                            & ERROR_COUNTERS_CHANGED)
                        {
                            smctr_update_err_stats(dev);
                        }

                        if(((SBlock
*)tp->misc_command_data)->Status_CHG_Indicate
                            & TI_NDIS_RING_STATUS_CHANGED)
                        {
                            tp->ring_status
                                = ((SBlock*)tp->misc_command_data)->TI_NDIS_Ring_Status;
                            smctr_disable_16bit(dev);
                            err = smctr_ring_status_chg(dev);
                            smctr_enable_16bit(dev);

                            if((tp->ring_status & REMOVE_RECEIVED)
                                && (tp->config_word0 & NO_AUTOREMOVE))
                            {
                                smctr_issue_remove_cmd(dev);
                            }

                            if(err != SUCCESS)
                            {
                                tp->acb_pending = 0;
                                break;
                            }
                        }

                        if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate
                            & UNA_CHANGED)
                        {
                            if(tp->ptr_una)
                            {
                                tp->ptr_una[0] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[0]);
                                tp->ptr_una[1] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[1]);
                                tp->ptr_una[2] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[2]);
                            }
                        }

                        if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate
                            & READY_TO_SEND_RQ_INIT)
                        {
                            err = smctr_send_rq_init(dev);
                        }
                    }
                }

                tp->acb_pending = 0;
                break;

            /* Type 0x0D - MAC Type 1 interrupt
             * Subtype -- 00 FR_BCN received at S12
             *            01 FR_BCN received at S21
             *            02 FR_DAT(DA=MA, A<>0) received at S21
             *            03 TSM_EXP at S21
             *            04 FR_REMOVE received at S42
             *            05 TBR_EXP, BR_FLAG_SET at S42
             *            06 TBT_EXP at S53
             */
            case ISB_IMC_MAC_TYPE_1:
                if(isb_subtype > 8)
                {
                    err = HARDWARE_FAILED;
                    break;
                }

                err = SUCCESS;
                switch(isb_subtype)
                {
                    case 0:
                        tp->join_state = JS_BYPASS_STATE;
                        if(tp->status != CLOSED)
                        {
                            tp->status = CLOSED;
                            err = smctr_status_chg(dev);
                        }
                        break;

                    case 1:
                        tp->join_state = JS_LOBE_TEST_STATE;
                        break;

                    case 2:
                        tp->join_state = JS_DETECT_MONITOR_PRESENT_STATE;
                        break;

                    case 3:
                        tp->join_state = JS_AWAIT_NEW_MONITOR_STATE;
                        break;

                    case 4:
                        tp->join_state = JS_DUPLICATE_ADDRESS_TEST_STATE;
                        break;

                    case 5:
                        tp->join_state = JS_NEIGHBOR_NOTIFICATION_STATE;
                        break;

                    case 6:
                        tp->join_state = JS_REQUEST_INITIALIZATION_STATE;
                        break;

                    case 7:
                        tp->join_state = JS_JOIN_COMPLETE_STATE;
                        tp->status = OPEN;
                        err = smctr_status_chg(dev);
                        break;

                    case 8:
                        tp->join_state = JS_BYPASS_WAIT_STATE;
                        break;
                }
                break ;

            /* Type 0x0E - TRC Initialization Sequence Interrupt
             * Subtype -- 00-FF Initialization sequence complete
             */
            case ISB_IMC_TRC_INTRNL_TST_STATUS:
                tp->status = INITIALIZED;
                smctr_disable_16bit(dev);
                err = smctr_status_chg(dev);
                smctr_enable_16bit(dev);
                break;

            /* other interrupt types, illegal */
            default:
                break;
        }

        if(err != SUCCESS)
            break;
    }

    /* Checking the ack code instead of the unmask bits here is because :
     * while fixing the stuck receive, DAT frame are sent and mask off
     * FIFO overrun interrupt temporarily (interrupt_unmask_bits = 0)
     * but we still want to issue ack to ISB
     */
    if(!(interrupt_ack_code & 0xff00))
        smctr_issue_int_ack(dev, interrupt_ack_code, interrupt_unmask_bits);

    smctr_disable_16bit(dev);
    smctr_enable_bic_int(dev);
    spin_unlock(&tp->lock);

    return IRQ_HANDLED;
}

/* Ask the TRC (via the SCLB) to clear the given interrupt mask bits,
 * i.e. enable those interrupts.
 */
static int smctr_issue_enable_int_cmd(struct net_device *dev,
    __u16 interrupt_enable_mask)
{
    struct net_local *tp = netdev_priv(dev);
    int err;

    if((err = smctr_wait_while_cbusy(dev)))
        return (err);

    tp->sclb_ptr->int_mask_control = interrupt_enable_mask;
    tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK;

    smctr_set_ctrl_attention(dev);

    return (0);
}

/* Acknowledge an ISB entry to the TRC and restore the given interrupt
 * mask bits.
 */
static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ibits)
{
    struct net_local *tp = netdev_priv(dev);

    if(smctr_wait_while_cbusy(dev))
        return (-1);

    tp->sclb_ptr->int_mask_control = ibits;
    tp->sclb_ptr->iack_code = iack_code << 1; /* use the offset from base */
    tp->sclb_ptr->resume_control = 0;
    tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_IACK_CODE_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK;

    smctr_set_ctrl_attention(dev);

    return (0);
}

/* Build the TRC timer/configuration parameter block in
 * tp->misc_command_data (config words, protocol timer limits, node,
 * group, functional and ring addresses) and issue the
 * ACB_CMD_INIT_TRC_TIMERS command.
 */
static int smctr_issue_init_timers_cmd(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    unsigned int i;
    int err;
    __u16 *pTimer_Struc = (__u16 *)tp->misc_command_data;

    if((err = smctr_wait_while_cbusy(dev)))
        return (err);

    if((err = smctr_wait_cmd(dev)))
        return (err);

    tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE;
    tp->config_word1 = 0;

    if((tp->media_type == MEDIA_STP_16)
        || (tp->media_type == MEDIA_UTP_16)
        || (tp->media_type ==
MEDIA_STP_16_UTP_16))
    {
        tp->config_word0 |= FREQ_16MB_BIT;
    }

    if(tp->mode_bits & EARLY_TOKEN_REL)
        tp->config_word0 |= ETREN;

    if(tp->mode_bits & LOOPING_MODE_MASK)
        tp->config_word0 |= RX_OWN_BIT;
    else
        tp->config_word0 &= ~RX_OWN_BIT;

    if(tp->receive_mask & PROMISCUOUS_MODE)
        tp->config_word0 |= PROMISCUOUS_BIT;
    else
        tp->config_word0 &= ~PROMISCUOUS_BIT;

    if(tp->receive_mask & ACCEPT_ERR_PACKETS)
        tp->config_word0 |= SAVBAD_BIT;
    else
        tp->config_word0 &= ~SAVBAD_BIT;

    if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
        tp->config_word0 |= RXATMAC;
    else
        tp->config_word0 &= ~RXATMAC;

    if(tp->receive_mask & ACCEPT_MULTI_PROM)
        tp->config_word1 |= MULTICAST_ADDRESS_BIT;
    else
        tp->config_word1 &= ~MULTICAST_ADDRESS_BIT;

    if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING)
        tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS;
    else
    {
        if(tp->receive_mask & ACCEPT_SOURCE_ROUTING)
            tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT;
        else
            tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS;
    }

    if((tp->media_type == MEDIA_STP_16)
        || (tp->media_type == MEDIA_UTP_16)
        || (tp->media_type == MEDIA_STP_16_UTP_16))
    {
        tp->config_word1 |= INTERFRAME_SPACING_16;
    }
    else
        tp->config_word1 |= INTERFRAME_SPACING_4;

    *pTimer_Struc++ = tp->config_word0;
    *pTimer_Struc++ = tp->config_word1;

    /* Protocol timer limit table: one set for 4 Mbps media, another
     * for 16 Mbps.
     */
    if((tp->media_type == MEDIA_STP_4)
        || (tp->media_type == MEDIA_UTP_4)
        || (tp->media_type == MEDIA_STP_4_UTP_4))
    {
        *pTimer_Struc++ = 0x00FA;       /* prescale */
        *pTimer_Struc++ = 0x2710;       /* TPT_limit */
        *pTimer_Struc++ = 0x2710;       /* TQP_limit */
        *pTimer_Struc++ = 0x0A28;       /* TNT_limit */
        *pTimer_Struc++ = 0x3E80;       /* TBT_limit */
        *pTimer_Struc++ = 0x3A98;       /* TSM_limit */
        *pTimer_Struc++ = 0x1B58;       /* TAM_limit */
        *pTimer_Struc++ = 0x00C8;       /* TBR_limit */
        *pTimer_Struc++ = 0x07D0;       /* TER_limit */
        *pTimer_Struc++ = 0x000A;       /* TGT_limit */
        *pTimer_Struc++ = 0x1162;       /* THT_limit */
        *pTimer_Struc++ = 0x07D0;       /* TRR_limit */
        *pTimer_Struc++ = 0x1388;       /* TVX_limit */
        *pTimer_Struc++ = 0x0000;       /* reserved */
    }
    else
    {
        *pTimer_Struc++ = 0x03E8;       /* prescale */
        *pTimer_Struc++ = 0x9C40;       /* TPT_limit */
        *pTimer_Struc++ = 0x9C40;       /* TQP_limit */
        *pTimer_Struc++ = 0x0A28;       /* TNT_limit */
        *pTimer_Struc++ = 0x3E80;       /* TBT_limit */
        *pTimer_Struc++ = 0x3A98;       /* TSM_limit */
        *pTimer_Struc++ = 0x1B58;       /* TAM_limit */
        *pTimer_Struc++ = 0x00C8;       /* TBR_limit */
        *pTimer_Struc++ = 0x07D0;       /* TER_limit */
        *pTimer_Struc++ = 0x000A;       /* TGT_limit */
        *pTimer_Struc++ = 0x4588;       /* THT_limit */
        *pTimer_Struc++ = 0x1F40;       /* TRR_limit */
        *pTimer_Struc++ = 0x4E20;       /* TVX_limit */
        *pTimer_Struc++ = 0x0000;       /* reserved */
    }

    /* Set node address. */
    *pTimer_Struc++ = dev->dev_addr[0] << 8
        | (dev->dev_addr[1] & 0xFF);
    *pTimer_Struc++ = dev->dev_addr[2] << 8
        | (dev->dev_addr[3] & 0xFF);
    *pTimer_Struc++ = dev->dev_addr[4] << 8
        | (dev->dev_addr[5] & 0xFF);

    /* Set group address. */
    *pTimer_Struc++ = tp->group_address_0 << 8
        | tp->group_address_0 >> 8;
    *pTimer_Struc++ = tp->group_address[0] << 8
        | tp->group_address[0] >> 8;
    *pTimer_Struc++ = tp->group_address[1] << 8
        | tp->group_address[1] >> 8;

    /* Set functional address. */
    *pTimer_Struc++ = tp->functional_address_0 << 8
        | tp->functional_address_0 >> 8;
    *pTimer_Struc++ = tp->functional_address[0] << 8
        | tp->functional_address[0] >> 8;
    *pTimer_Struc++ = tp->functional_address[1] << 8
        | tp->functional_address[1] >> 8;

    /* Set Bit-Wise group address. */
    *pTimer_Struc++ = tp->bitwise_group_address[0] << 8
        | tp->bitwise_group_address[0] >> 8;
    *pTimer_Struc++ = tp->bitwise_group_address[1] << 8
        | tp->bitwise_group_address[1] >> 8;

    /* Set ring number address. */
    *pTimer_Struc++ = tp->source_ring_number;
    *pTimer_Struc++ = tp->target_ring_number;

    /* Physical drop number. */
    *pTimer_Struc++ = (unsigned short)0;
    *pTimer_Struc++ = (unsigned short)0;

    /* Product instance ID.
*/
    for(i = 0; i < 9; i++)
        *pTimer_Struc++ = (unsigned short)0;

    err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TRC_TIMERS, 0);

    return (err);
}

/* Fill tp->misc_command_data with the TRC pointers of every tx FCB
 * head and rx FCB/BDB head (zeroing the unused queue slots) and issue
 * the ACB_CMD_INIT_TX_RX command.
 */
static int smctr_issue_init_txrx_cmd(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    unsigned int i;
    int err;
    void **txrx_ptrs = (void *)tp->misc_command_data;

    if((err = smctr_wait_while_cbusy(dev)))
        return (err);

    if((err = smctr_wait_cmd(dev)))
    {
        printk(KERN_ERR "%s: Hardware failure\n", dev->name);
        return (err);
    }

    /* Initialize Transmit Queue Pointers that are used, to point to
     * a single FCB.
     */
    for(i = 0; i < NUM_TX_QS_USED; i++)
        *txrx_ptrs++ = (void *)TRC_POINTER(tp->tx_fcb_head[i]);

    /* Initialize Transmit Queue Pointers that are NOT used to ZERO. */
    for(; i < MAX_TX_QS; i++)
        *txrx_ptrs++ = (void *)0;

    /* Initialize Receive Queue Pointers (MAC and Non-MAC) that are
     * used, to point to a single FCB and a BDB chain of buffers.
     */
    for(i = 0; i < NUM_RX_QS_USED; i++)
    {
        *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_fcb_head[i]);
        *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_bdb_head[i]);
    }

    /* Initialize Receive Queue Pointers that are NOT used to ZERO. */
    for(; i < MAX_RX_QS; i++)
    {
        *txrx_ptrs++ = (void *)0;
        *txrx_ptrs++ = (void *)0;
    }

    err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TX_RX, 0);

    return (err);
}

/* Request insertion into the ring. */
static int smctr_issue_insert_cmd(struct net_device *dev)
{
    int err;

    err = smctr_setup_single_cmd(dev, ACB_CMD_INSERT, ACB_SUB_CMD_NOP);

    return (err);
}

/* Read the TRC status block (802.5 ring status / error counters). */
static int smctr_issue_read_ring_status_cmd(struct net_device *dev)
{
    int err;

    if((err = smctr_wait_while_cbusy(dev)))
        return (err);

    if((err = smctr_wait_cmd(dev)))
        return (err);

    err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_READ_TRC_STATUS,
        RW_TRC_STATUS_BLOCK);

    return (err);
}

/* Issue an MCT read-value command for aword_cnt words of state. */
static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt)
{
    int err;

    if((err = smctr_wait_while_cbusy(dev)))
        return (err);

    if((err = smctr_wait_cmd(dev)))
        return (err);

    err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_READ_VALUE,
        aword_cnt);

    return (err);
}

/* Request removal from the ring via the SCLB. */
static int smctr_issue_remove_cmd(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    int err;

    if((err = smctr_wait_while_cbusy(dev)))
        return (err);

    tp->sclb_ptr->resume_control = 0;
    tp->sclb_ptr->valid_command  = SCLB_VALID | SCLB_CMD_REMOVE;

    smctr_set_ctrl_attention(dev);

    return (0);
}

/* Tell the TRC to resume ACB processing; marks an ACB as pending. */
static int smctr_issue_resume_acb_cmd(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    int err;

    if((err = smctr_wait_while_cbusy(dev)))
        return (err);

    tp->sclb_ptr->resume_control = SCLB_RC_ACB;
    tp->sclb_ptr->valid_command  = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;

    tp->acb_pending = 1;

    smctr_set_ctrl_attention(dev);

    return (0);
}

/* Resume receive BDB processing on the MAC or non-MAC queue. */
static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
{
    struct net_local *tp = netdev_priv(dev);
    int err;

    if((err = smctr_wait_while_cbusy(dev)))
        return (err);

    if(queue == MAC_QUEUE)
        tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_BDB;
    else
        tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_BDB;

    tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;

    smctr_set_ctrl_attention(dev);

    return (0);
}

/* Resume receive FCB processing on the MAC or non-MAC queue. */
static int
smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
{
    struct net_local *tp = netdev_priv(dev);

    if(smctr_debug > 10)
        printk(KERN_DEBUG "%s: smctr_issue_resume_rx_fcb_cmd\n", dev->name);

    if(smctr_wait_while_cbusy(dev))
        return (-1);

    if(queue == MAC_QUEUE)
        tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_FCB;
    else
        tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_FCB;

    tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;

    smctr_set_ctrl_attention(dev);

    return (0);
}

/* Resume transmit FCB processing on the given tx queue. */
static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue)
{
    struct net_local *tp = netdev_priv(dev);

    if(smctr_debug > 10)
        printk(KERN_DEBUG "%s: smctr_issue_resume_tx_fcb_cmd\n", dev->name);

    if(smctr_wait_while_cbusy(dev))
        return (-1);

    tp->sclb_ptr->resume_control = (SCLB_RC_TFCB0 << queue);
    tp->sclb_ptr->valid_command = SCLB_RESUME_CONTROL_VALID | SCLB_VALID;

    smctr_set_ctrl_attention(dev);

    return (0);
}

/* Start the adapter's internal ROM self test. */
static int smctr_issue_test_internal_rom_cmd(struct net_device *dev)
{
    int err;

    err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
        TRC_INTERNAL_ROM_TEST);

    return (err);
}

/* Start the host interface register self test. */
static int smctr_issue_test_hic_cmd(struct net_device *dev)
{
    int err;

    err = smctr_setup_single_cmd(dev, ACB_CMD_HIC_TEST,
        TRC_HOST_INTERFACE_REG_TEST);

    return (err);
}

/* Start the MAC register self test. */
static int smctr_issue_test_mac_reg_cmd(struct net_device *dev)
{
    int err;

    err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
        TRC_MAC_REGISTERS_TEST);

    return (err);
}

/* Put the TRC into internal loopback mode. */
static int smctr_issue_trc_loopback_cmd(struct net_device *dev)
{
    int err;

    err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
        TRC_INTERNAL_LOOPBACK);

    return (err);
}

/* Put the TRC into TRI loopback mode. */
static int smctr_issue_tri_loopback_cmd(struct net_device *dev)
{
    int err;

    err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
        TRC_TRI_LOOPBACK);

    return (err);
}

/* Pack (aword_cnt & 0xff) words of byte data, big-endian, into
 * tp->misc_command_data and issue an MCT write-value command.
 */
static int smctr_issue_write_byte_cmd(struct net_device *dev,
    short aword_cnt, void *byte)
{
    struct net_local *tp = netdev_priv(dev);
    unsigned int iword, ibyte;
    int err;

    if((err = smctr_wait_while_cbusy(dev)))
        return (err);

    if((err = smctr_wait_cmd(dev)))
        return (err);

    for(iword = 0, ibyte = 0; iword < (unsigned int)(aword_cnt & 0xff);
        iword++, ibyte += 2)
    {
        tp->misc_command_data[iword] = (*((__u8 *)byte + ibyte) << 8)
            | (*((__u8 *)byte + ibyte + 1));
    }

    return (smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
        aword_cnt));
}

/* Copy (aword_cnt & 0xff) words into tp->misc_command_data and issue
 * an MCT write-value command.
 */
static int smctr_issue_write_word_cmd(struct net_device *dev,
    short aword_cnt, void *word)
{
    struct net_local *tp = netdev_priv(dev);
    unsigned int i, err;

    if((err = smctr_wait_while_cbusy(dev)))
        return (err);

    if((err = smctr_wait_cmd(dev)))
        return (err);

    for(i = 0; i < (unsigned int)(aword_cnt & 0xff); i++)
        tp->misc_command_data[i] = *((__u16 *)word + i);

    err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
        aword_cnt);

    return (err);
}

/* Move the adapter's join FSM to the join-complete state. */
static int smctr_join_complete_state(struct net_device *dev)
{
    int err;

    err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
        JS_JOIN_COMPLETE_STATE);

    return (err);
}

/* Pair every transmit FCB with its BDB (host and TRC pointers). */
static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    unsigned int i, j;
    FCBlock *fcb;
    BDBlock *bdb;

    for(i = 0; i < NUM_TX_QS_USED; i++)
    {
        fcb = tp->tx_fcb_head[i];
        bdb = tp->tx_bdb_head[i];

        for(j = 0; j < tp->num_tx_fcbs[i]; j++)
        {
            fcb->bdb_ptr     = bdb;
            fcb->trc_bdb_ptr = TRC_POINTER(bdb);
            fcb = (FCBlock *)((char *)fcb + sizeof(FCBlock));
            bdb = (BDBlock *)((char *)bdb + sizeof(BDBlock));
        }
    }

    return (0);
}

/* Fetch "tr_smctr.bin" via request_firmware, verify its version and
 * checksum, and (when newer than the resident image) decode it into
 * the adapter's control store.  Returns a UCODE_* code or
 * HARDWARE_FAILED.
 */
static int smctr_load_firmware(struct net_device *dev)
{
    struct net_local *tp = netdev_priv(dev);
    const struct firmware *fw;
    __u16 i, checksum = 0;
    int err = 0;

    if(smctr_debug > 10)
        printk(KERN_DEBUG "%s: smctr_load_firmware\n", dev->name);

    if (request_firmware(&fw, "tr_smctr.bin", &dev->dev))
    {
        printk(KERN_ERR "%s: firmware not found\n", dev->name);
        return (UCODE_NOT_PRESENT);
    }

    tp->num_of_tx_buffs = 4;
    tp->mode_bits      |= UMAC;
    tp->receive_mask    = 0;
    tp->max_packet_size = 4177;

    /* Can only upload the firmware once per adapter reset.
*/ if (tp->microcode_version != 0) { err = (UCODE_PRESENT); goto out; } /* Verify the firmware exists and is there in the right amount. */ if (!fw->data || (*(fw->data + UCODE_VERSION_OFFSET) < UCODE_VERSION)) { err = (UCODE_NOT_PRESENT); goto out; } /* UCODE_SIZE is not included in Checksum. */ for(i = 0; i < *((__u16 *)(fw->data + UCODE_SIZE_OFFSET)); i += 2) checksum += *((__u16 *)(fw->data + 2 + i)); if (checksum) { err = (UCODE_NOT_PRESENT); goto out; } /* At this point we have a valid firmware image, lets kick it on up. */ smctr_enable_adapter_ram(dev); smctr_enable_16bit(dev); smctr_set_page(dev, (__u8 *)tp->ram_access); if((smctr_checksum_firmware(dev)) || (*(fw->data + UCODE_VERSION_OFFSET) > tp->microcode_version)) { smctr_enable_adapter_ctrl_store(dev); /* Zero out ram space for firmware. */ for(i = 0; i < CS_RAM_SIZE; i += 2) *((__u16 *)(tp->ram_access + i)) = 0; smctr_decode_firmware(dev, fw); tp->microcode_version = *(fw->data + UCODE_VERSION_OFFSET); *((__u16 *)(tp->ram_access + CS_RAM_VERSION_OFFSET)) = (tp->microcode_version << 8); *((__u16 *)(tp->ram_access + CS_RAM_CHECKSUM_OFFSET)) = ~(tp->microcode_version << 8) + 1; smctr_disable_adapter_ctrl_store(dev); if(smctr_checksum_firmware(dev)) err = HARDWARE_FAILED; } else err = UCODE_PRESENT; smctr_disable_16bit(dev); out: release_firmware(fw); return (err); } static int smctr_load_node_addr(struct net_device *dev) { int ioaddr = dev->base_addr; unsigned int i; __u8 r; for(i = 0; i < 6; i++) { r = inb(ioaddr + LAR0 + i); dev->dev_addr[i] = (char)r; } dev->addr_len = 6; return (0); } /* Lobe Media Test. * During the transmission of the initial 1500 lobe media MAC frames, * the phase lock loop in the 805 chip may lock, and then un-lock, causing * the 825 to go into a PURGE state. When performing a PURGE, the MCT * microcode will not transmit any frames given to it by the host, and * will consequently cause a timeout. 
* * NOTE 1: If the monitor_state is MS_BEACON_TEST_STATE, all transmit * queues other than the one used for the lobe_media_test should be * disabled.!? * * NOTE 2: If the monitor_state is MS_BEACON_TEST_STATE and the receive_mask * has any multi-cast or promiscous bits set, the receive_mask needs to * be changed to clear the multi-cast or promiscous mode bits, the lobe_test * run, and then the receive mask set back to its original value if the test * is successful. */ static int smctr_lobe_media_test(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); unsigned int i, perror = 0; unsigned short saved_rcv_mask; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_lobe_media_test\n", dev->name); /* Clear receive mask for lobe test. */ saved_rcv_mask = tp->receive_mask; tp->receive_mask = 0; smctr_chg_rx_mask(dev); /* Setup the lobe media test. */ smctr_lobe_media_test_cmd(dev); if(smctr_wait_cmd(dev)) goto err; /* Tx lobe media test frames. */ for(i = 0; i < 1500; ++i) { if(smctr_send_lobe_media_test(dev)) { if(perror) goto err; else { perror = 1; if(smctr_lobe_media_test_cmd(dev)) goto err; } } } if(smctr_send_dat(dev)) { if(smctr_send_dat(dev)) goto err; } /* Check if any frames received during test. */ if((tp->rx_fcb_curr[MAC_QUEUE]->frame_status) || (tp->rx_fcb_curr[NON_MAC_QUEUE]->frame_status)) goto err; /* Set receive mask to "Promisc" mode. */ tp->receive_mask = saved_rcv_mask; smctr_chg_rx_mask(dev); return 0; err: smctr_reset_adapter(dev); tp->status = CLOSED; return LOBE_MEDIA_TEST_FAILED; } static int smctr_lobe_media_test_cmd(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); int err; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_lobe_media_test_cmd\n", dev->name); /* Change to lobe media test state. 
*/ if(tp->monitor_state != MS_BEACON_TEST_STATE) { smctr_lobe_media_test_state(dev); if(smctr_wait_cmd(dev)) { printk(KERN_ERR "Lobe Failed test state\n"); return (LOBE_MEDIA_TEST_FAILED); } } err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, TRC_LOBE_MEDIA_TEST); return (err); } static int smctr_lobe_media_test_state(struct net_device *dev) { int err; err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, JS_LOBE_TEST_STATE); return (err); } static int smctr_make_8025_hdr(struct net_device *dev, MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc) { tmf->ac = MSB(ac_fc); /* msb is access control */ tmf->fc = LSB(ac_fc); /* lsb is frame control */ tmf->sa[0] = dev->dev_addr[0]; tmf->sa[1] = dev->dev_addr[1]; tmf->sa[2] = dev->dev_addr[2]; tmf->sa[3] = dev->dev_addr[3]; tmf->sa[4] = dev->dev_addr[4]; tmf->sa[5] = dev->dev_addr[5]; switch(tmf->vc) { /* Send RQ_INIT to RPS */ case RQ_INIT: tmf->da[0] = 0xc0; tmf->da[1] = 0x00; tmf->da[2] = 0x00; tmf->da[3] = 0x00; tmf->da[4] = 0x00; tmf->da[5] = 0x02; break; /* Send RPT_TX_FORWARD to CRS */ case RPT_TX_FORWARD: tmf->da[0] = 0xc0; tmf->da[1] = 0x00; tmf->da[2] = 0x00; tmf->da[3] = 0x00; tmf->da[4] = 0x00; tmf->da[5] = 0x10; break; /* Everything else goes to sender */ default: tmf->da[0] = rmf->sa[0]; tmf->da[1] = rmf->sa[1]; tmf->da[2] = rmf->sa[2]; tmf->da[3] = rmf->sa[3]; tmf->da[4] = rmf->sa[4]; tmf->da[5] = rmf->sa[5]; break; } return (0); } static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv) { struct net_local *tp = netdev_priv(dev); tsv->svi = AUTHORIZED_ACCESS_PRIORITY; tsv->svl = S_AUTHORIZED_ACCESS_PRIORITY; tsv->svv[0] = MSB(tp->authorized_access_priority); tsv->svv[1] = LSB(tp->authorized_access_priority); return (0); } static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv) { tsv->svi = ADDRESS_MODIFER; tsv->svl = S_ADDRESS_MODIFER; tsv->svv[0] = 0; tsv->svv[1] = 0; return (0); } static int smctr_make_auth_funct_class(struct net_device *dev, MAC_SUB_VECTOR 
*tsv) { struct net_local *tp = netdev_priv(dev); tsv->svi = AUTHORIZED_FUNCTION_CLASS; tsv->svl = S_AUTHORIZED_FUNCTION_CLASS; tsv->svv[0] = MSB(tp->authorized_function_classes); tsv->svv[1] = LSB(tp->authorized_function_classes); return (0); } static int smctr_make_corr(struct net_device *dev, MAC_SUB_VECTOR *tsv, __u16 correlator) { tsv->svi = CORRELATOR; tsv->svl = S_CORRELATOR; tsv->svv[0] = MSB(correlator); tsv->svv[1] = LSB(correlator); return (0); } static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv) { struct net_local *tp = netdev_priv(dev); smctr_get_functional_address(dev); tsv->svi = FUNCTIONAL_ADDRESS; tsv->svl = S_FUNCTIONAL_ADDRESS; tsv->svv[0] = MSB(tp->misc_command_data[0]); tsv->svv[1] = LSB(tp->misc_command_data[0]); tsv->svv[2] = MSB(tp->misc_command_data[1]); tsv->svv[3] = LSB(tp->misc_command_data[1]); return (0); } static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv) { struct net_local *tp = netdev_priv(dev); smctr_get_group_address(dev); tsv->svi = GROUP_ADDRESS; tsv->svl = S_GROUP_ADDRESS; tsv->svv[0] = MSB(tp->misc_command_data[0]); tsv->svv[1] = LSB(tp->misc_command_data[0]); tsv->svv[2] = MSB(tp->misc_command_data[1]); tsv->svv[3] = LSB(tp->misc_command_data[1]); /* Set Group Address Sub-vector to all zeros if only the * Group Address/Functional Address Indicator is set. 
*/ if(tsv->svv[0] == 0x80 && tsv->svv[1] == 0x00 && tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00) tsv->svv[0] = 0x00; return (0); } static int smctr_make_phy_drop_num(struct net_device *dev, MAC_SUB_VECTOR *tsv) { struct net_local *tp = netdev_priv(dev); smctr_get_physical_drop_number(dev); tsv->svi = PHYSICAL_DROP; tsv->svl = S_PHYSICAL_DROP; tsv->svv[0] = MSB(tp->misc_command_data[0]); tsv->svv[1] = LSB(tp->misc_command_data[0]); tsv->svv[2] = MSB(tp->misc_command_data[1]); tsv->svv[3] = LSB(tp->misc_command_data[1]); return (0); } static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv) { int i; tsv->svi = PRODUCT_INSTANCE_ID; tsv->svl = S_PRODUCT_INSTANCE_ID; for(i = 0; i < 18; i++) tsv->svv[i] = 0xF0; return (0); } static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv) { struct net_local *tp = netdev_priv(dev); smctr_get_station_id(dev); tsv->svi = STATION_IDENTIFER; tsv->svl = S_STATION_IDENTIFER; tsv->svv[0] = MSB(tp->misc_command_data[0]); tsv->svv[1] = LSB(tp->misc_command_data[0]); tsv->svv[2] = MSB(tp->misc_command_data[1]); tsv->svv[3] = LSB(tp->misc_command_data[1]); tsv->svv[4] = MSB(tp->misc_command_data[2]); tsv->svv[5] = LSB(tp->misc_command_data[2]); return (0); } static int smctr_make_ring_station_status(struct net_device *dev, MAC_SUB_VECTOR * tsv) { tsv->svi = RING_STATION_STATUS; tsv->svl = S_RING_STATION_STATUS; tsv->svv[0] = 0; tsv->svv[1] = 0; tsv->svv[2] = 0; tsv->svv[3] = 0; tsv->svv[4] = 0; tsv->svv[5] = 0; return (0); } static int smctr_make_ring_station_version(struct net_device *dev, MAC_SUB_VECTOR *tsv) { struct net_local *tp = netdev_priv(dev); tsv->svi = RING_STATION_VERSION_NUMBER; tsv->svl = S_RING_STATION_VERSION_NUMBER; tsv->svv[0] = 0xe2; /* EBCDIC - S */ tsv->svv[1] = 0xd4; /* EBCDIC - M */ tsv->svv[2] = 0xc3; /* EBCDIC - C */ tsv->svv[3] = 0x40; /* EBCDIC - */ tsv->svv[4] = 0xe5; /* EBCDIC - V */ tsv->svv[5] = 0xF0 + (tp->microcode_version >> 4); tsv->svv[6] = 0xF0 + 
(tp->microcode_version & 0x0f); tsv->svv[7] = 0x40; /* EBCDIC - */ tsv->svv[8] = 0xe7; /* EBCDIC - X */ if(tp->extra_info & CHIP_REV_MASK) tsv->svv[9] = 0xc5; /* EBCDIC - E */ else tsv->svv[9] = 0xc4; /* EBCDIC - D */ return (0); } static int smctr_make_tx_status_code(struct net_device *dev, MAC_SUB_VECTOR *tsv, __u16 tx_fstatus) { tsv->svi = TRANSMIT_STATUS_CODE; tsv->svl = S_TRANSMIT_STATUS_CODE; tsv->svv[0] = ((tx_fstatus & 0x0100 >> 6) | IBM_PASS_SOURCE_ADDR); /* Stripped frame status of Transmitted Frame */ tsv->svv[1] = tx_fstatus & 0xff; return (0); } static int smctr_make_upstream_neighbor_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv) { struct net_local *tp = netdev_priv(dev); smctr_get_upstream_neighbor_addr(dev); tsv->svi = UPSTREAM_NEIGHBOR_ADDRESS; tsv->svl = S_UPSTREAM_NEIGHBOR_ADDRESS; tsv->svv[0] = MSB(tp->misc_command_data[0]); tsv->svv[1] = LSB(tp->misc_command_data[0]); tsv->svv[2] = MSB(tp->misc_command_data[1]); tsv->svv[3] = LSB(tp->misc_command_data[1]); tsv->svv[4] = MSB(tp->misc_command_data[2]); tsv->svv[5] = LSB(tp->misc_command_data[2]); return (0); } static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv) { tsv->svi = WRAP_DATA; tsv->svl = S_WRAP_DATA; return (0); } /* * Open/initialize the board. This is called sometime after * booting when the 'ifconfig' program is run. * * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is non-reboot way to recover if something goes wrong. */ static int smctr_open(struct net_device *dev) { int err; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_open\n", dev->name); err = smctr_init_adapter(dev); if(err < 0) return (err); return (err); } /* Interrupt driven open of Token card. 
*/ static int smctr_open_tr(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); unsigned long flags; int err; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_open_tr\n", dev->name); /* Now we can actually open the adapter. */ if(tp->status == OPEN) return (0); if(tp->status != INITIALIZED) return (-1); /* FIXME: it would work a lot better if we masked the irq sources on the card here, then we could skip the locking and poll nicely */ spin_lock_irqsave(&tp->lock, flags); smctr_set_page(dev, (__u8 *)tp->ram_access); if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)MAC_QUEUE))) goto out; if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)MAC_QUEUE))) goto out; if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)NON_MAC_QUEUE))) goto out; if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)NON_MAC_QUEUE))) goto out; tp->status = CLOSED; /* Insert into the Ring or Enter Loopback Mode. */ if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_1) { tp->status = CLOSED; if(!(err = smctr_issue_trc_loopback_cmd(dev))) { if(!(err = smctr_wait_cmd(dev))) tp->status = OPEN; } smctr_status_chg(dev); } else { if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_2) { tp->status = CLOSED; if(!(err = smctr_issue_tri_loopback_cmd(dev))) { if(!(err = smctr_wait_cmd(dev))) tp->status = OPEN; } smctr_status_chg(dev); } else { if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_3) { tp->status = CLOSED; if(!(err = smctr_lobe_media_test_cmd(dev))) { if(!(err = smctr_wait_cmd(dev))) tp->status = OPEN; } smctr_status_chg(dev); } else { if(!(err = smctr_lobe_media_test(dev))) err = smctr_issue_insert_cmd(dev); else { if(err == LOBE_MEDIA_TEST_FAILED) printk(KERN_WARNING "%s: Lobe Media Test Failure - Check cable?\n", dev->name); } } } } out: spin_unlock_irqrestore(&tp->lock, flags); return (err); } /* Check for a network adapter of this type, * and return device structure if one exists. 
*/ struct net_device __init *smctr_probe(int unit) { struct net_device *dev = alloc_trdev(sizeof(struct net_local)); static const unsigned ports[] = { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0, 0x300, 0x320, 0x340, 0x360, 0x380, 0 }; const unsigned *port; int err = 0; if (!dev) return ERR_PTR(-ENOMEM); if (unit >= 0) { sprintf(dev->name, "tr%d", unit); netdev_boot_setup_check(dev); } if (dev->base_addr > 0x1ff) /* Check a single specified location. */ err = smctr_probe1(dev, dev->base_addr); else if(dev->base_addr != 0) /* Don't probe at all. */ err =-ENXIO; else { for (port = ports; *port; port++) { err = smctr_probe1(dev, *port); if (!err) break; } } if (err) goto out; err = register_netdev(dev); if (err) goto out1; return dev; out1: #ifdef CONFIG_MCA_LEGACY { struct net_local *tp = netdev_priv(dev); if (tp->slot_num) mca_mark_as_unused(tp->slot_num); } #endif release_region(dev->base_addr, SMCTR_IO_EXTENT); free_irq(dev->irq, dev); out: free_netdev(dev); return ERR_PTR(err); } static const struct net_device_ops smctr_netdev_ops = { .ndo_open = smctr_open, .ndo_stop = smctr_close, .ndo_start_xmit = smctr_send_packet, .ndo_tx_timeout = smctr_timeout, .ndo_get_stats = smctr_get_stats, .ndo_set_multicast_list = smctr_set_multicast_list, }; static int __init smctr_probe1(struct net_device *dev, int ioaddr) { static unsigned version_printed; struct net_local *tp = netdev_priv(dev); int err; __u32 *ram; if(smctr_debug && version_printed++ == 0) printk(version); spin_lock_init(&tp->lock); dev->base_addr = ioaddr; /* Actually detect an adapter now. 
*/ err = smctr_chk_isa(dev); if(err < 0) { if ((err = smctr_chk_mca(dev)) < 0) { err = -ENODEV; goto out; } } tp = netdev_priv(dev); dev->mem_start = tp->ram_base; dev->mem_end = dev->mem_start + 0x10000; ram = (__u32 *)phys_to_virt(dev->mem_start); tp->ram_access = *(__u32 *)&ram; tp->status = NOT_INITIALIZED; err = smctr_load_firmware(dev); if(err != UCODE_PRESENT && err != SUCCESS) { printk(KERN_ERR "%s: Firmware load failed (%d)\n", dev->name, err); err = -EIO; goto out; } /* Allow user to specify ring speed on module insert. */ if(ringspeed == 4) tp->media_type = MEDIA_UTP_4; else tp->media_type = MEDIA_UTP_16; printk(KERN_INFO "%s: %s %s at Io %#4x, Irq %d, Rom %#4x, Ram %#4x.\n", dev->name, smctr_name, smctr_model, (unsigned int)dev->base_addr, dev->irq, tp->rom_base, tp->ram_base); dev->netdev_ops = &smctr_netdev_ops; dev->watchdog_timeo = HZ; return (0); out: return err; } static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size, struct net_device *dev, __u16 rx_status) { struct net_local *tp = netdev_priv(dev); struct sk_buff *skb; __u16 rcode, correlator; int err = 0; __u8 xframe = 1; rmf->vl = SWAP_BYTES(rmf->vl); if(rx_status & FCB_RX_STATUS_DA_MATCHED) { switch(rmf->vc) { /* Received MAC Frames Processed by RS. 
*/ case INIT: if((rcode = smctr_rcv_init(dev, rmf, &correlator)) == HARDWARE_FAILED) { return (rcode); } if((err = smctr_send_rsp(dev, rmf, rcode, correlator))) { return (err); } break; case CHG_PARM: if((rcode = smctr_rcv_chg_param(dev, rmf, &correlator)) ==HARDWARE_FAILED) { return (rcode); } if((err = smctr_send_rsp(dev, rmf, rcode, correlator))) { return (err); } break; case RQ_ADDR: if((rcode = smctr_rcv_rq_addr_state_attch(dev, rmf, &correlator)) != POSITIVE_ACK) { if(rcode == HARDWARE_FAILED) return (rcode); else return (smctr_send_rsp(dev, rmf, rcode, correlator)); } if((err = smctr_send_rpt_addr(dev, rmf, correlator))) { return (err); } break; case RQ_ATTCH: if((rcode = smctr_rcv_rq_addr_state_attch(dev, rmf, &correlator)) != POSITIVE_ACK) { if(rcode == HARDWARE_FAILED) return (rcode); else return (smctr_send_rsp(dev, rmf, rcode, correlator)); } if((err = smctr_send_rpt_attch(dev, rmf, correlator))) { return (err); } break; case RQ_STATE: if((rcode = smctr_rcv_rq_addr_state_attch(dev, rmf, &correlator)) != POSITIVE_ACK) { if(rcode == HARDWARE_FAILED) return (rcode); else return (smctr_send_rsp(dev, rmf, rcode, correlator)); } if((err = smctr_send_rpt_state(dev, rmf, correlator))) { return (err); } break; case TX_FORWARD: { __u16 uninitialized_var(tx_fstatus); if((rcode = smctr_rcv_tx_forward(dev, rmf)) != POSITIVE_ACK) { if(rcode == HARDWARE_FAILED) return (rcode); else return (smctr_send_rsp(dev, rmf, rcode, correlator)); } if((err = smctr_send_tx_forward(dev, rmf, &tx_fstatus)) == HARDWARE_FAILED) { return (err); } if(err == A_FRAME_WAS_FORWARDED) { if((err = smctr_send_rpt_tx_forward(dev, rmf, tx_fstatus)) == HARDWARE_FAILED) { return (err); } } break; } /* Received MAC Frames Processed by CRS/REM/RPS. */ case RSP: case RQ_INIT: case RPT_NEW_MON: case RPT_SUA_CHG: case RPT_ACTIVE_ERR: case RPT_NN_INCMP: case RPT_ERROR: case RPT_ATTCH: case RPT_STATE: case RPT_ADDR: break; /* Rcvd Att. 
MAC Frame (if RXATMAC set) or UNKNOWN */ default: xframe = 0; if(!(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)) { rcode = smctr_rcv_unknown(dev, rmf, &correlator); if((err = smctr_send_rsp(dev, rmf,rcode, correlator))) { return (err); } } break; } } else { /* 1. DA doesn't match (Promiscuous Mode). * 2. Parse for Extended MAC Frame Type. */ switch(rmf->vc) { case RSP: case INIT: case RQ_INIT: case RQ_ADDR: case RQ_ATTCH: case RQ_STATE: case CHG_PARM: case RPT_ADDR: case RPT_ERROR: case RPT_ATTCH: case RPT_STATE: case RPT_NEW_MON: case RPT_SUA_CHG: case RPT_NN_INCMP: case RPT_ACTIVE_ERR: break; default: xframe = 0; break; } } /* NOTE: UNKNOWN MAC frames will NOT be passed up unless * ACCEPT_ATT_MAC_FRAMES is set. */ if(((tp->receive_mask & ACCEPT_ATT_MAC_FRAMES) && (xframe == (__u8)0)) || ((tp->receive_mask & ACCEPT_EXT_MAC_FRAMES) && (xframe == (__u8)1))) { rmf->vl = SWAP_BYTES(rmf->vl); if (!(skb = dev_alloc_skb(size))) return -ENOMEM; skb->len = size; /* Slide data into a sleek skb. */ skb_put(skb, skb->len); skb_copy_to_linear_data(skb, rmf, skb->len); /* Update Counters */ tp->MacStat.rx_packets++; tp->MacStat.rx_bytes += skb->len; /* Kick the packet on up. */ skb->protocol = tr_type_trans(skb, dev); netif_rx(skb); err = 0; } return (err); } /* Adapter RAM test. Incremental word ODD boundary data test. */ static int smctr_ram_memory_test(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); __u16 page, pages_of_ram, start_pattern = 0, word_pattern = 0, word_read = 0, err_word = 0, err_pattern = 0; unsigned int err_offset; __u32 j, pword; __u8 err = 0; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_ram_memory_test\n", dev->name); start_pattern = 0x0001; pages_of_ram = tp->ram_size / tp->ram_usable; pword = tp->ram_access; /* Incremental word ODD boundary test. 
*/ for(page = 0; (page < pages_of_ram) && (~err); page++, start_pattern += 0x8000) { smctr_set_page(dev, (__u8 *)(tp->ram_access + (page * tp->ram_usable * 1024) + 1)); word_pattern = start_pattern; for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1; j += 2) *(__u16 *)(pword + j) = word_pattern++; word_pattern = start_pattern; for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1 && (~err); j += 2, word_pattern++) { word_read = *(__u16 *)(pword + j); if(word_read != word_pattern) { err = (__u8)1; err_offset = j; err_word = word_read; err_pattern = word_pattern; return (RAM_TEST_FAILED); } } } /* Zero out memory. */ for(page = 0; page < pages_of_ram && (~err); page++) { smctr_set_page(dev, (__u8 *)(tp->ram_access + (page * tp->ram_usable * 1024))); word_pattern = 0; for(j = 0; j < (__u32)tp->ram_usable * 1024; j +=2) *(__u16 *)(pword + j) = word_pattern; for(j =0; j < (__u32)tp->ram_usable * 1024 && (~err); j += 2) { word_read = *(__u16 *)(pword + j); if(word_read != word_pattern) { err = (__u8)1; err_offset = j; err_word = word_read; err_pattern = word_pattern; return (RAM_TEST_FAILED); } } } smctr_set_page(dev, (__u8 *)tp->ram_access); return (0); } static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf, __u16 *correlator) { MAC_SUB_VECTOR *rsv; signed short vlen; __u16 rcode = POSITIVE_ACK; unsigned int svectors = F_NO_SUB_VECTORS_FOUND; /* This Frame can only come from a CRS */ if((rmf->dc_sc & SC_MASK) != SC_CRS) return(E_INAPPROPRIATE_SOURCE_CLASS); /* Remove MVID Length from total length. */ vlen = (signed short)rmf->vl - 4; /* Point to First SVID */ rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); /* Search for Appropriate SVID's. 
*/ while((vlen > 0) && (rcode == POSITIVE_ACK)) { switch(rsv->svi) { case CORRELATOR: svectors |= F_CORRELATOR; rcode = smctr_set_corr(dev, rsv, correlator); break; case LOCAL_RING_NUMBER: svectors |= F_LOCAL_RING_NUMBER; rcode = smctr_set_local_ring_num(dev, rsv); break; case ASSIGN_PHYSICAL_DROP: svectors |= F_ASSIGN_PHYSICAL_DROP; rcode = smctr_set_phy_drop(dev, rsv); break; case ERROR_TIMER_VALUE: svectors |= F_ERROR_TIMER_VALUE; rcode = smctr_set_error_timer_value(dev, rsv); break; case AUTHORIZED_FUNCTION_CLASS: svectors |= F_AUTHORIZED_FUNCTION_CLASS; rcode = smctr_set_auth_funct_class(dev, rsv); break; case AUTHORIZED_ACCESS_PRIORITY: svectors |= F_AUTHORIZED_ACCESS_PRIORITY; rcode = smctr_set_auth_access_pri(dev, rsv); break; default: rcode = E_SUB_VECTOR_UNKNOWN; break; } /* Let Sender Know if SUM of SV length's is * larger then length in MVID length field */ if((vlen -= rsv->svl) < 0) rcode = E_VECTOR_LENGTH_ERROR; rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); } if(rcode == POSITIVE_ACK) { /* Let Sender Know if MVID length field * is larger then SUM of SV length's */ if(vlen != 0) rcode = E_VECTOR_LENGTH_ERROR; else { /* Let Sender Know if Expected SVID Missing */ if((svectors & R_CHG_PARM) ^ R_CHG_PARM) rcode = E_MISSING_SUB_VECTOR; } } return (rcode); } static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf, __u16 *correlator) { MAC_SUB_VECTOR *rsv; signed short vlen; __u16 rcode = POSITIVE_ACK; unsigned int svectors = F_NO_SUB_VECTORS_FOUND; /* This Frame can only come from a RPS */ if((rmf->dc_sc & SC_MASK) != SC_RPS) return (E_INAPPROPRIATE_SOURCE_CLASS); /* Remove MVID Length from total length. 
*/ vlen = (signed short)rmf->vl - 4; /* Point to First SVID */ rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); /* Search for Appropriate SVID's */ while((vlen > 0) && (rcode == POSITIVE_ACK)) { switch(rsv->svi) { case CORRELATOR: svectors |= F_CORRELATOR; rcode = smctr_set_corr(dev, rsv, correlator); break; case LOCAL_RING_NUMBER: svectors |= F_LOCAL_RING_NUMBER; rcode = smctr_set_local_ring_num(dev, rsv); break; case ASSIGN_PHYSICAL_DROP: svectors |= F_ASSIGN_PHYSICAL_DROP; rcode = smctr_set_phy_drop(dev, rsv); break; case ERROR_TIMER_VALUE: svectors |= F_ERROR_TIMER_VALUE; rcode = smctr_set_error_timer_value(dev, rsv); break; default: rcode = E_SUB_VECTOR_UNKNOWN; break; } /* Let Sender Know if SUM of SV length's is * larger then length in MVID length field */ if((vlen -= rsv->svl) < 0) rcode = E_VECTOR_LENGTH_ERROR; rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); } if(rcode == POSITIVE_ACK) { /* Let Sender Know if MVID length field * is larger then SUM of SV length's */ if(vlen != 0) rcode = E_VECTOR_LENGTH_ERROR; else { /* Let Sender Know if Expected SV Missing */ if((svectors & R_INIT) ^ R_INIT) rcode = E_MISSING_SUB_VECTOR; } } return (rcode); } static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf) { MAC_SUB_VECTOR *rsv; signed short vlen; __u16 rcode = POSITIVE_ACK; unsigned int svectors = F_NO_SUB_VECTORS_FOUND; /* This Frame can only come from a CRS */ if((rmf->dc_sc & SC_MASK) != SC_CRS) return (E_INAPPROPRIATE_SOURCE_CLASS); /* Remove MVID Length from total length */ vlen = (signed short)rmf->vl - 4; /* Point to First SVID */ rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); /* Search for Appropriate SVID's */ while((vlen > 0) && (rcode == POSITIVE_ACK)) { switch(rsv->svi) { case FRAME_FORWARD: svectors |= F_FRAME_FORWARD; rcode = smctr_set_frame_forward(dev, rsv, rmf->dc_sc); break; default: rcode = E_SUB_VECTOR_UNKNOWN; break; } /* Let Sender Know if SUM of SV length's is * larger then length in MVID length field 
*/ if((vlen -= rsv->svl) < 0) rcode = E_VECTOR_LENGTH_ERROR; rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); } if(rcode == POSITIVE_ACK) { /* Let Sender Know if MVID length field * is larger then SUM of SV length's */ if(vlen != 0) rcode = E_VECTOR_LENGTH_ERROR; else { /* Let Sender Know if Expected SV Missing */ if((svectors & R_TX_FORWARD) ^ R_TX_FORWARD) rcode = E_MISSING_SUB_VECTOR; } } return (rcode); } static int smctr_rcv_rq_addr_state_attch(struct net_device *dev, MAC_HEADER *rmf, __u16 *correlator) { MAC_SUB_VECTOR *rsv; signed short vlen; __u16 rcode = POSITIVE_ACK; unsigned int svectors = F_NO_SUB_VECTORS_FOUND; /* Remove MVID Length from total length */ vlen = (signed short)rmf->vl - 4; /* Point to First SVID */ rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); /* Search for Appropriate SVID's */ while((vlen > 0) && (rcode == POSITIVE_ACK)) { switch(rsv->svi) { case CORRELATOR: svectors |= F_CORRELATOR; rcode = smctr_set_corr(dev, rsv, correlator); break; default: rcode = E_SUB_VECTOR_UNKNOWN; break; } /* Let Sender Know if SUM of SV length's is * larger then length in MVID length field */ if((vlen -= rsv->svl) < 0) rcode = E_VECTOR_LENGTH_ERROR; rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); } if(rcode == POSITIVE_ACK) { /* Let Sender Know if MVID length field * is larger then SUM of SV length's */ if(vlen != 0) rcode = E_VECTOR_LENGTH_ERROR; else { /* Let Sender Know if Expected SVID Missing */ if((svectors & R_RQ_ATTCH_STATE_ADDR) ^ R_RQ_ATTCH_STATE_ADDR) rcode = E_MISSING_SUB_VECTOR; } } return (rcode); } static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf, __u16 *correlator) { MAC_SUB_VECTOR *rsv; signed short vlen; *correlator = 0; /* Remove MVID Length from total length */ vlen = (signed short)rmf->vl - 4; /* Point to First SVID */ rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); /* Search for CORRELATOR for RSP to UNKNOWN */ while((vlen > 0) && (*correlator == 0)) { switch(rsv->svi) { case CORRELATOR: 
smctr_set_corr(dev, rsv, correlator); break; default: break; } vlen -= rsv->svl; rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); } return (E_UNRECOGNIZED_VECTOR_ID); } /* * Reset the 825 NIC and exit w: * 1. The NIC reset cleared (non-reset state), halted and un-initialized. * 2. TINT masked. * 3. CBUSY masked. * 4. TINT clear. * 5. CBUSY clear. */ static int smctr_reset_adapter(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); int ioaddr = dev->base_addr; /* Reseting the NIC will put it in a halted and un-initialized state. */ smctr_set_trc_reset(ioaddr); mdelay(200); /* ~2 ms */ smctr_clear_trc_reset(ioaddr); mdelay(200); /* ~2 ms */ /* Remove any latched interrupts that occurred prior to reseting the * adapter or possibily caused by line glitches due to the reset. */ outb(tp->trc_mask | CSR_CLRTINT | CSR_CLRCBUSY, ioaddr + CSR); return (0); } static int smctr_restart_tx_chain(struct net_device *dev, short queue) { struct net_local *tp = netdev_priv(dev); int err = 0; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_restart_tx_chain\n", dev->name); if(tp->num_tx_fcbs_used[queue] != 0 && tp->tx_queue_status[queue] == NOT_TRANSMITING) { tp->tx_queue_status[queue] = TRANSMITING; err = smctr_issue_resume_tx_fcb_cmd(dev, queue); } return (err); } static int smctr_ring_status_chg(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_ring_status_chg\n", dev->name); /* Check for ring_status_flag: whenever MONITOR_STATE_BIT * Bit is set, check value of monitor_state, only then we * enable and start transmit/receive timeout (if and only * if it is MS_ACTIVE_MONITOR_STATE or MS_STANDBY_MONITOR_STATE) */ if(tp->ring_status_flags == MONITOR_STATE_CHANGED) { if((tp->monitor_state == MS_ACTIVE_MONITOR_STATE) || (tp->monitor_state == MS_STANDBY_MONITOR_STATE)) { tp->monitor_state_ready = 1; } else { /* if adapter is NOT in either active monitor * or standby monitor state => Disable * 
transmit/receive timeout. */ tp->monitor_state_ready = 0; /* Ring speed problem, switching to auto mode. */ if(tp->monitor_state == MS_MONITOR_FSM_INACTIVE && !tp->cleanup) { printk(KERN_INFO "%s: Incorrect ring speed switching.\n", dev->name); smctr_set_ring_speed(dev); } } } if(!(tp->ring_status_flags & RING_STATUS_CHANGED)) return (0); switch(tp->ring_status) { case RING_RECOVERY: printk(KERN_INFO "%s: Ring Recovery\n", dev->name); break; case SINGLE_STATION: printk(KERN_INFO "%s: Single Statinon\n", dev->name); break; case COUNTER_OVERFLOW: printk(KERN_INFO "%s: Counter Overflow\n", dev->name); break; case REMOVE_RECEIVED: printk(KERN_INFO "%s: Remove Received\n", dev->name); break; case AUTO_REMOVAL_ERROR: printk(KERN_INFO "%s: Auto Remove Error\n", dev->name); break; case LOBE_WIRE_FAULT: printk(KERN_INFO "%s: Lobe Wire Fault\n", dev->name); break; case TRANSMIT_BEACON: printk(KERN_INFO "%s: Transmit Beacon\n", dev->name); break; case SOFT_ERROR: printk(KERN_INFO "%s: Soft Error\n", dev->name); break; case HARD_ERROR: printk(KERN_INFO "%s: Hard Error\n", dev->name); break; case SIGNAL_LOSS: printk(KERN_INFO "%s: Signal Loss\n", dev->name); break; default: printk(KERN_INFO "%s: Unknown ring status change\n", dev->name); break; } return (0); } static int smctr_rx_frame(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); __u16 queue, status, rx_size, err = 0; __u8 *pbuff; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_rx_frame\n", dev->name); queue = tp->receive_queue_number; while((status = tp->rx_fcb_curr[queue]->frame_status) != SUCCESS) { err = HARDWARE_FAILED; if(((status & 0x007f) == 0) || ((tp->receive_mask & ACCEPT_ERR_PACKETS) != 0)) { /* frame length less the CRC (4 bytes) + FS (1 byte) */ rx_size = tp->rx_fcb_curr[queue]->frame_length - 5; pbuff = smctr_get_rx_pointer(dev, queue); smctr_set_page(dev, pbuff); smctr_disable_16bit(dev); /* pbuff points to addr within one page */ pbuff = (__u8 *)PAGE_POINTER(pbuff); if(queue == 
NON_MAC_QUEUE) { struct sk_buff *skb; skb = dev_alloc_skb(rx_size); if (skb) { skb_put(skb, rx_size); skb_copy_to_linear_data(skb, pbuff, rx_size); /* Update Counters */ tp->MacStat.rx_packets++; tp->MacStat.rx_bytes += skb->len; /* Kick the packet on up. */ skb->protocol = tr_type_trans(skb, dev); netif_rx(skb); } else { } } else smctr_process_rx_packet((MAC_HEADER *)pbuff, rx_size, dev, status); } smctr_enable_16bit(dev); smctr_set_page(dev, (__u8 *)tp->ram_access); smctr_update_rx_chain(dev, queue); if(err != SUCCESS) break; } return (err); } static int smctr_send_dat(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); unsigned int i, err; MAC_HEADER *tmf; FCBlock *fcb; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_send_dat\n", dev->name); if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER))) == (FCBlock *)(-1L)) { return (OUT_OF_RESOURCES); } /* Initialize DAT Data Fields. */ tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; tmf->ac = MSB(AC_FC_DAT); tmf->fc = LSB(AC_FC_DAT); for(i = 0; i < 6; i++) { tmf->sa[i] = dev->dev_addr[i]; tmf->da[i] = dev->dev_addr[i]; } tmf->vc = DAT; tmf->dc_sc = DC_RS | SC_RS; tmf->vl = 4; tmf->vl = SWAP_BYTES(tmf->vl); /* Start Transmit. */ if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) return (err); /* Wait for Transmit to Complete */ for(i = 0; i < 10000; i++) { if(fcb->frame_status & FCB_COMMAND_DONE) break; mdelay(1); } /* Check if GOOD frame Tx'ed. */ if(!(fcb->frame_status & FCB_COMMAND_DONE) || fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS)) { return (INITIALIZE_FAILED); } /* De-allocated Tx FCB and Frame Buffer * The FCB must be de-allocated manually if executing with * interrupts disabled, other wise the ISR (LM_Service_Events) * will de-allocate it when the interrupt occurs. 
*/ tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING; smctr_update_tx_chain(dev, fcb, MAC_QUEUE); return (0); } static void smctr_timeout(struct net_device *dev) { /* * If we get here, some higher level has decided we are broken. * There should really be a "kick me" function call instead. * * Resetting the token ring adapter takes a long time so just * fake transmission time and go on trying. Our own timeout * routine is in sktr_timer_chk() */ dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue(dev); } /* * Gets skb from system, queues it and checks if it can be sent */ static netdev_tx_t smctr_send_packet(struct sk_buff *skb, struct net_device *dev) { struct net_local *tp = netdev_priv(dev); if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_send_packet\n", dev->name); /* * Block a transmit overlap */ netif_stop_queue(dev); if(tp->QueueSkb == 0) return NETDEV_TX_BUSY; /* Return with tbusy set: queue full */ tp->QueueSkb--; skb_queue_tail(&tp->SendSkbQueue, skb); smctr_hardware_send_packet(dev, tp); if(tp->QueueSkb > 0) netif_wake_queue(dev); return NETDEV_TX_OK; } static int smctr_send_lobe_media_test(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); MAC_SUB_VECTOR *tsv; MAC_HEADER *tmf; FCBlock *fcb; __u32 i; int err; if(smctr_debug > 15) printk(KERN_DEBUG "%s: smctr_send_lobe_media_test\n", dev->name); if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(struct trh_hdr) + S_WRAP_DATA + S_WRAP_DATA)) == (FCBlock *)(-1L)) { return (OUT_OF_RESOURCES); } /* Initialize DAT Data Fields. 
*/ tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; tmf->ac = MSB(AC_FC_LOBE_MEDIA_TEST); tmf->fc = LSB(AC_FC_LOBE_MEDIA_TEST); for(i = 0; i < 6; i++) { tmf->da[i] = 0; tmf->sa[i] = dev->dev_addr[i]; } tmf->vc = LOBE_MEDIA_TEST; tmf->dc_sc = DC_RS | SC_RS; tmf->vl = 4; tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); smctr_make_wrap_data(dev, tsv); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_wrap_data(dev, tsv); tmf->vl += tsv->svl; /* Start Transmit. */ tmf->vl = SWAP_BYTES(tmf->vl); if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) return (err); /* Wait for Transmit to Complete. (10 ms). */ for(i=0; i < 10000; i++) { if(fcb->frame_status & FCB_COMMAND_DONE) break; mdelay(1); } /* Check if GOOD frame Tx'ed */ if(!(fcb->frame_status & FCB_COMMAND_DONE) || fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS)) { return (LOBE_MEDIA_TEST_FAILED); } /* De-allocated Tx FCB and Frame Buffer * The FCB must be de-allocated manually if executing with * interrupts disabled, other wise the ISR (LM_Service_Events) * will de-allocate it when the interrupt occurs. 
*/ tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING; smctr_update_tx_chain(dev, fcb, MAC_QUEUE); return (0); } static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf, __u16 correlator) { MAC_HEADER *tmf; MAC_SUB_VECTOR *tsv; FCBlock *fcb; if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) + S_CORRELATOR + S_PHYSICAL_DROP + S_UPSTREAM_NEIGHBOR_ADDRESS + S_ADDRESS_MODIFER + S_GROUP_ADDRESS + S_FUNCTIONAL_ADDRESS)) == (FCBlock *)(-1L)) { return (0); } tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; tmf->vc = RPT_ADDR; tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; tmf->vl = 4; smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ADDR); tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); smctr_make_corr(dev, tsv, correlator); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_phy_drop_num(dev, tsv); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_upstream_neighbor_addr(dev, tsv); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_addr_mod(dev, tsv); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_group_addr(dev, tsv); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_funct_addr(dev, tsv); tmf->vl += tsv->svl; /* Subtract out MVID and MVL which is * include in both vl and MAC_HEADER */ /* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; */ tmf->vl = SWAP_BYTES(tmf->vl); return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE)); } static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf, __u16 correlator) { MAC_HEADER *tmf; MAC_SUB_VECTOR *tsv; FCBlock *fcb; if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) + S_CORRELATOR + S_PRODUCT_INSTANCE_ID + S_FUNCTIONAL_ADDRESS + S_AUTHORIZED_FUNCTION_CLASS + S_AUTHORIZED_ACCESS_PRIORITY)) == (FCBlock *)(-1L)) { return (0); } tmf = (MAC_HEADER 
*)fcb->bdb_ptr->data_block_ptr; tmf->vc = RPT_ATTCH; tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; tmf->vl = 4; smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ATTCH); tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); smctr_make_corr(dev, tsv, correlator); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_product_id(dev, tsv); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_funct_addr(dev, tsv); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_auth_funct_class(dev, tsv); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_access_pri(dev, tsv); tmf->vl += tsv->svl; /* Subtract out MVID and MVL which is * include in both vl and MAC_HEADER */ /* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; */ tmf->vl = SWAP_BYTES(tmf->vl); return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE)); } static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf, __u16 correlator) { MAC_HEADER *tmf; MAC_SUB_VECTOR *tsv; FCBlock *fcb; if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) + S_CORRELATOR + S_RING_STATION_VERSION_NUMBER + S_RING_STATION_STATUS + S_STATION_IDENTIFER)) == (FCBlock *)(-1L)) { return (0); } tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; tmf->vc = RPT_STATE; tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; tmf->vl = 4; smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_STATE); tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); smctr_make_corr(dev, tsv, correlator); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_ring_station_version(dev, tsv); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_ring_station_status(dev, tsv); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_station_id(dev, tsv); tmf->vl += tsv->svl; /* Subtract out MVID and MVL which is * include 
in both vl and MAC_HEADER */ /* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; */ tmf->vl = SWAP_BYTES(tmf->vl); return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE)); } static int smctr_send_rpt_tx_forward(struct net_device *dev, MAC_HEADER *rmf, __u16 tx_fstatus) { MAC_HEADER *tmf; MAC_SUB_VECTOR *tsv; FCBlock *fcb; if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) + S_TRANSMIT_STATUS_CODE)) == (FCBlock *)(-1L)) { return (0); } tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; tmf->vc = RPT_TX_FORWARD; tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; tmf->vl = 4; smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_TX_FORWARD); tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); smctr_make_tx_status_code(dev, tsv, tx_fstatus); tmf->vl += tsv->svl; /* Subtract out MVID and MVL which is * include in both vl and MAC_HEADER */ /* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; */ tmf->vl = SWAP_BYTES(tmf->vl); return(smctr_trc_send_packet(dev, fcb, MAC_QUEUE)); } static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf, __u16 rcode, __u16 correlator) { MAC_HEADER *tmf; MAC_SUB_VECTOR *tsv; FCBlock *fcb; if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) + S_CORRELATOR + S_RESPONSE_CODE)) == (FCBlock *)(-1L)) { return (0); } tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; tmf->vc = RSP; tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; tmf->vl = 4; smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RSP); tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); smctr_make_corr(dev, tsv, correlator); return (0); } static int smctr_send_rq_init(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); MAC_HEADER *tmf; MAC_SUB_VECTOR *tsv; FCBlock *fcb; unsigned int i, count = 0; __u16 fstatus; int err; do { if(((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) + S_PRODUCT_INSTANCE_ID + 
S_UPSTREAM_NEIGHBOR_ADDRESS + S_RING_STATION_VERSION_NUMBER + S_ADDRESS_MODIFER)) == (FCBlock *)(-1L))) { return (0); } tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; tmf->vc = RQ_INIT; tmf->dc_sc = DC_RPS | SC_RS; tmf->vl = 4; smctr_make_8025_hdr(dev, NULL, tmf, AC_FC_RQ_INIT); tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); smctr_make_product_id(dev, tsv); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_upstream_neighbor_addr(dev, tsv); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_ring_station_version(dev, tsv); tmf->vl += tsv->svl; tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); smctr_make_addr_mod(dev, tsv); tmf->vl += tsv->svl; /* Subtract out MVID and MVL which is * include in both vl and MAC_HEADER */ /* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; */ tmf->vl = SWAP_BYTES(tmf->vl); if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) return (err); /* Wait for Transmit to Complete */ for(i = 0; i < 10000; i++) { if(fcb->frame_status & FCB_COMMAND_DONE) break; mdelay(1); } /* Check if GOOD frame Tx'ed */ fstatus = fcb->frame_status; if(!(fstatus & FCB_COMMAND_DONE)) return (HARDWARE_FAILED); if(!(fstatus & FCB_TX_STATUS_E)) count++; /* De-allocated Tx FCB and Frame Buffer * The FCB must be de-allocated manually if executing with * interrupts disabled, other wise the ISR (LM_Service_Events) * will de-allocate it when the interrupt occurs. */ tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING; smctr_update_tx_chain(dev, fcb, MAC_QUEUE); } while(count < 4 && ((fstatus & FCB_TX_AC_BITS) ^ FCB_TX_AC_BITS)); return (smctr_join_complete_state(dev)); } static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf, __u16 *tx_fstatus) { struct net_local *tp = netdev_priv(dev); FCBlock *fcb; unsigned int i; int err; /* Check if this is the END POINT of the Transmit Forward Chain. 
*/ if(rmf->vl <= 18) return (0); /* Allocate Transmit FCB only by requesting 0 bytes * of data buffer. */ if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, 0)) == (FCBlock *)(-1L)) return (0); /* Set pointer to Transmit Frame Buffer to the data * portion of the received TX Forward frame, making * sure to skip over the Vector Code (vc) and Vector * length (vl). */ fcb->bdb_ptr->trc_data_block_ptr = TRC_POINTER((__u32)rmf + sizeof(MAC_HEADER) + 2); fcb->bdb_ptr->data_block_ptr = (__u16 *)((__u32)rmf + sizeof(MAC_HEADER) + 2); fcb->frame_length = rmf->vl - 4 - 2; fcb->bdb_ptr->buffer_length = rmf->vl - 4 - 2; if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) return (err); /* Wait for Transmit to Complete */ for(i = 0; i < 10000; i++) { if(fcb->frame_status & FCB_COMMAND_DONE) break; mdelay(1); } /* Check if GOOD frame Tx'ed */ if(!(fcb->frame_status & FCB_COMMAND_DONE)) { if((err = smctr_issue_resume_tx_fcb_cmd(dev, MAC_QUEUE))) return (err); for(i = 0; i < 10000; i++) { if(fcb->frame_status & FCB_COMMAND_DONE) break; mdelay(1); } if(!(fcb->frame_status & FCB_COMMAND_DONE)) return (HARDWARE_FAILED); } *tx_fstatus = fcb->frame_status; return (A_FRAME_WAS_FORWARDED); } static int smctr_set_auth_access_pri(struct net_device *dev, MAC_SUB_VECTOR *rsv) { struct net_local *tp = netdev_priv(dev); if(rsv->svl != S_AUTHORIZED_ACCESS_PRIORITY) return (E_SUB_VECTOR_LENGTH_ERROR); tp->authorized_access_priority = (rsv->svv[0] << 8 | rsv->svv[1]); return (POSITIVE_ACK); } static int smctr_set_auth_funct_class(struct net_device *dev, MAC_SUB_VECTOR *rsv) { struct net_local *tp = netdev_priv(dev); if(rsv->svl != S_AUTHORIZED_FUNCTION_CLASS) return (E_SUB_VECTOR_LENGTH_ERROR); tp->authorized_function_classes = (rsv->svv[0] << 8 | rsv->svv[1]); return (POSITIVE_ACK); } static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv, __u16 *correlator) { if(rsv->svl != S_CORRELATOR) return (E_SUB_VECTOR_LENGTH_ERROR); *correlator = (rsv->svv[0] << 8 | rsv->svv[1]); return 
(POSITIVE_ACK); } static int smctr_set_error_timer_value(struct net_device *dev, MAC_SUB_VECTOR *rsv) { __u16 err_tval; int err; if(rsv->svl != S_ERROR_TIMER_VALUE) return (E_SUB_VECTOR_LENGTH_ERROR); err_tval = (rsv->svv[0] << 8 | rsv->svv[1])*10; smctr_issue_write_word_cmd(dev, RW_TER_THRESHOLD, &err_tval); if((err = smctr_wait_cmd(dev))) return (err); return (POSITIVE_ACK); } static int smctr_set_frame_forward(struct net_device *dev, MAC_SUB_VECTOR *rsv, __u8 dc_sc) { if((rsv->svl < 2) || (rsv->svl > S_FRAME_FORWARD)) return (E_SUB_VECTOR_LENGTH_ERROR); if((dc_sc & DC_MASK) != DC_CRS) { if(rsv->svl >= 2 && rsv->svl < 20) return (E_TRANSMIT_FORWARD_INVALID); if((rsv->svv[0] != 0) || (rsv->svv[1] != 0)) return (E_TRANSMIT_FORWARD_INVALID); } return (POSITIVE_ACK); } static int smctr_set_local_ring_num(struct net_device *dev, MAC_SUB_VECTOR *rsv) { struct net_local *tp = netdev_priv(dev); if(rsv->svl != S_LOCAL_RING_NUMBER) return (E_SUB_VECTOR_LENGTH_ERROR); if(tp->ptr_local_ring_num) *(__u16 *)(tp->ptr_local_ring_num) = (rsv->svv[0] << 8 | rsv->svv[1]); return (POSITIVE_ACK); } static unsigned short smctr_set_ctrl_attention(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); int ioaddr = dev->base_addr; if(tp->bic_type == BIC_585_CHIP) outb((tp->trc_mask | HWR_CA), ioaddr + HWR); else { outb((tp->trc_mask | CSR_CA), ioaddr + CSR); outb(tp->trc_mask, ioaddr + CSR); } return (0); } static void smctr_set_multicast_list(struct net_device *dev) { if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_set_multicast_list\n", dev->name); } static int smctr_set_page(struct net_device *dev, __u8 *buf) { struct net_local *tp = netdev_priv(dev); __u8 amask; __u32 tptr; tptr = (__u32)buf - (__u32)tp->ram_access; amask = (__u8)((tptr & PR_PAGE_MASK) >> 8); outb(amask, dev->base_addr + PR); return (0); } static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv) { int err; if(rsv->svl != S_PHYSICAL_DROP) return (E_SUB_VECTOR_LENGTH_ERROR); 
smctr_issue_write_byte_cmd(dev, RW_PHYSICAL_DROP_NUMBER, &rsv->svv[0]); if((err = smctr_wait_cmd(dev))) return (err); return (POSITIVE_ACK); } /* Reset the ring speed to the opposite of what it was. This auto-pilot * mode requires a complete reset and re-init of the adapter. */ static int smctr_set_ring_speed(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); int err; if(tp->media_type == MEDIA_UTP_16) tp->media_type = MEDIA_UTP_4; else tp->media_type = MEDIA_UTP_16; smctr_enable_16bit(dev); /* Re-Initialize adapter's internal registers */ smctr_reset_adapter(dev); if((err = smctr_init_card_real(dev))) return (err); smctr_enable_bic_int(dev); if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK))) return (err); smctr_disable_16bit(dev); return (0); } static int smctr_set_rx_look_ahead(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); __u16 sword, rword; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_set_rx_look_ahead_flag\n", dev->name); tp->adapter_flags &= ~(FORCED_16BIT_MODE); tp->adapter_flags |= RX_VALID_LOOKAHEAD; if(tp->adapter_bus == BUS_ISA16_TYPE) { sword = *((__u16 *)(tp->ram_access)); *((__u16 *)(tp->ram_access)) = 0x1234; smctr_disable_16bit(dev); rword = *((__u16 *)(tp->ram_access)); smctr_enable_16bit(dev); if(rword != 0x1234) tp->adapter_flags |= FORCED_16BIT_MODE; *((__u16 *)(tp->ram_access)) = sword; } return (0); } static int smctr_set_trc_reset(int ioaddr) { __u8 r; r = inb(ioaddr + MSR); outb(MSR_RST | r, ioaddr + MSR); return (0); } /* * This function can be called if the adapter is busy or not. 
*/ static int smctr_setup_single_cmd(struct net_device *dev, __u16 command, __u16 subcommand) { struct net_local *tp = netdev_priv(dev); unsigned int err; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_setup_single_cmd\n", dev->name); if((err = smctr_wait_while_cbusy(dev))) return (err); if((err = (unsigned int)smctr_wait_cmd(dev))) return (err); tp->acb_head->cmd_done_status = 0; tp->acb_head->cmd = command; tp->acb_head->subcmd = subcommand; err = smctr_issue_resume_acb_cmd(dev); return (err); } /* * This function can not be called with the adapter busy. */ static int smctr_setup_single_cmd_w_data(struct net_device *dev, __u16 command, __u16 subcommand) { struct net_local *tp = netdev_priv(dev); tp->acb_head->cmd_done_status = ACB_COMMAND_NOT_DONE; tp->acb_head->cmd = command; tp->acb_head->subcmd = subcommand; tp->acb_head->data_offset_lo = (__u16)TRC_POINTER(tp->misc_command_data); return(smctr_issue_resume_acb_cmd(dev)); } static char *smctr_malloc(struct net_device *dev, __u16 size) { struct net_local *tp = netdev_priv(dev); char *m; m = (char *)(tp->ram_access + tp->sh_mem_used); tp->sh_mem_used += (__u32)size; return (m); } static int smctr_status_chg(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_status_chg\n", dev->name); switch(tp->status) { case OPEN: break; case CLOSED: break; /* Interrupt driven open() completion. 
XXX */ case INITIALIZED: tp->group_address_0 = 0; tp->group_address[0] = 0; tp->group_address[1] = 0; tp->functional_address_0 = 0; tp->functional_address[0] = 0; tp->functional_address[1] = 0; smctr_open_tr(dev); break; default: printk(KERN_INFO "%s: status change unknown %x\n", dev->name, tp->status); break; } return (0); } static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb, __u16 queue) { struct net_local *tp = netdev_priv(dev); int err = 0; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_trc_send_packet\n", dev->name); fcb->info = FCB_CHAIN_END | FCB_ENABLE_TFS; if(tp->num_tx_fcbs[queue] != 1) fcb->back_ptr->info = FCB_INTERRUPT_ENABLE | FCB_ENABLE_TFS; if(tp->tx_queue_status[queue] == NOT_TRANSMITING) { tp->tx_queue_status[queue] = TRANSMITING; err = smctr_issue_resume_tx_fcb_cmd(dev, queue); } return (err); } static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue) { struct net_local *tp = netdev_priv(dev); __u16 status, err = 0; int cstatus; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_tx_complete\n", dev->name); while((status = tp->tx_fcb_end[queue]->frame_status) != SUCCESS) { if(status & 0x7e00 ) { err = HARDWARE_FAILED; break; } if((err = smctr_update_tx_chain(dev, tp->tx_fcb_end[queue], queue)) != SUCCESS) break; smctr_disable_16bit(dev); if(tp->mode_bits & UMAC) { if(!(status & (FCB_TX_STATUS_AR1 | FCB_TX_STATUS_AR2))) cstatus = NO_SUCH_DESTINATION; else { if(!(status & (FCB_TX_STATUS_CR1 | FCB_TX_STATUS_CR2))) cstatus = DEST_OUT_OF_RESOURCES; else { if(status & FCB_TX_STATUS_E) cstatus = MAX_COLLISIONS; else cstatus = SUCCESS; } } } else cstatus = SUCCESS; if(queue == BUG_QUEUE) err = SUCCESS; smctr_enable_16bit(dev); if(err != SUCCESS) break; } return (err); } static unsigned short smctr_tx_move_frame(struct net_device *dev, struct sk_buff *skb, __u8 *pbuff, unsigned int bytes) { struct net_local *tp = netdev_priv(dev); unsigned int ram_usable; __u32 flen, len, offset = 0; __u8 *frag, *page; if(smctr_debug > 10) 
printk(KERN_DEBUG "%s: smctr_tx_move_frame\n", dev->name); ram_usable = ((unsigned int)tp->ram_usable) << 10; frag = skb->data; flen = skb->len; while(flen > 0 && bytes > 0) { smctr_set_page(dev, pbuff); offset = SMC_PAGE_OFFSET(pbuff); if(offset + flen > ram_usable) len = ram_usable - offset; else len = flen; if(len > bytes) len = bytes; page = (char *) (offset + tp->ram_access); memcpy(page, frag, len); flen -=len; bytes -= len; frag += len; pbuff += len; } return (0); } /* Update the error statistic counters for this adapter. */ static int smctr_update_err_stats(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); struct tr_statistics *tstat = &tp->MacStat; if(tstat->internal_errors) tstat->internal_errors += *(tp->misc_command_data + 0) & 0x00ff; if(tstat->line_errors) tstat->line_errors += *(tp->misc_command_data + 0) >> 8; if(tstat->A_C_errors) tstat->A_C_errors += *(tp->misc_command_data + 1) & 0x00ff; if(tstat->burst_errors) tstat->burst_errors += *(tp->misc_command_data + 1) >> 8; if(tstat->abort_delimiters) tstat->abort_delimiters += *(tp->misc_command_data + 2) >> 8; if(tstat->recv_congest_count) tstat->recv_congest_count += *(tp->misc_command_data + 3) & 0x00ff; if(tstat->lost_frames) tstat->lost_frames += *(tp->misc_command_data + 3) >> 8; if(tstat->frequency_errors) tstat->frequency_errors += *(tp->misc_command_data + 4) & 0x00ff; if(tstat->frame_copied_errors) tstat->frame_copied_errors += *(tp->misc_command_data + 4) >> 8; if(tstat->token_errors) tstat->token_errors += *(tp->misc_command_data + 5) >> 8; return (0); } static int smctr_update_rx_chain(struct net_device *dev, __u16 queue) { struct net_local *tp = netdev_priv(dev); FCBlock *fcb; BDBlock *bdb; __u16 size, len; fcb = tp->rx_fcb_curr[queue]; len = fcb->frame_length; fcb->frame_status = 0; fcb->info = FCB_CHAIN_END; fcb->back_ptr->info = FCB_WARNING; tp->rx_fcb_curr[queue] = tp->rx_fcb_curr[queue]->next_ptr; /* update RX BDBs */ size = (len >> RX_BDB_SIZE_SHIFT); if(len & 
RX_DATA_BUFFER_SIZE_MASK) size += sizeof(BDBlock); size &= (~RX_BDB_SIZE_MASK); /* check if wrap around */ bdb = (BDBlock *)((__u32)(tp->rx_bdb_curr[queue]) + (__u32)(size)); if((__u32)bdb >= (__u32)tp->rx_bdb_end[queue]) { bdb = (BDBlock *)((__u32)(tp->rx_bdb_head[queue]) + (__u32)(bdb) - (__u32)(tp->rx_bdb_end[queue])); } bdb->back_ptr->info = BDB_CHAIN_END; tp->rx_bdb_curr[queue]->back_ptr->info = BDB_NOT_CHAIN_END; tp->rx_bdb_curr[queue] = bdb; return (0); } static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb, __u16 queue) { struct net_local *tp = netdev_priv(dev); if(smctr_debug > 20) printk(KERN_DEBUG "smctr_update_tx_chain\n"); if(tp->num_tx_fcbs_used[queue] <= 0) return (HARDWARE_FAILED); else { if(tp->tx_buff_used[queue] < fcb->memory_alloc) { tp->tx_buff_used[queue] = 0; return (HARDWARE_FAILED); } tp->tx_buff_used[queue] -= fcb->memory_alloc; /* if all transmit buffer are cleared * need to set the tx_buff_curr[] to tx_buff_head[] * otherwise, tx buffer will be segregate and cannot * accommodate and buffer greater than (curr - head) and * (end - curr) since we do not allow wrap around allocation. 
*/ if(tp->tx_buff_used[queue] == 0) tp->tx_buff_curr[queue] = tp->tx_buff_head[queue]; tp->num_tx_fcbs_used[queue]--; fcb->frame_status = 0; tp->tx_fcb_end[queue] = fcb->next_ptr; netif_wake_queue(dev); return (0); } } static int smctr_wait_cmd(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); unsigned int loop_count = 0x20000; if(smctr_debug > 10) printk(KERN_DEBUG "%s: smctr_wait_cmd\n", dev->name); while(loop_count) { if(tp->acb_head->cmd_done_status & ACB_COMMAND_DONE) break; udelay(1); loop_count--; } if(loop_count == 0) return(HARDWARE_FAILED); if(tp->acb_head->cmd_done_status & 0xff) return(HARDWARE_FAILED); return (0); } static int smctr_wait_while_cbusy(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); unsigned int timeout = 0x20000; int ioaddr = dev->base_addr; __u8 r; if(tp->bic_type == BIC_585_CHIP) { while(timeout) { r = inb(ioaddr + HWR); if((r & HWR_CBUSY) == 0) break; timeout--; } } else { while(timeout) { r = inb(ioaddr + CSR); if((r & CSR_CBUSY) == 0) break; timeout--; } } if(timeout) return (0); else return (HARDWARE_FAILED); } #ifdef MODULE static struct net_device* dev_smctr[SMCTR_MAX_ADAPTERS]; static int io[SMCTR_MAX_ADAPTERS]; static int irq[SMCTR_MAX_ADAPTERS]; MODULE_LICENSE("GPL"); MODULE_FIRMWARE("tr_smctr.bin"); module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param(ringspeed, int, 0); static struct net_device * __init setup_card(int n) { struct net_device *dev = alloc_trdev(sizeof(struct net_local)); int err; if (!dev) return ERR_PTR(-ENOMEM); dev->irq = irq[n]; err = smctr_probe1(dev, io[n]); if (err) goto out; err = register_netdev(dev); if (err) goto out1; return dev; out1: #ifdef CONFIG_MCA_LEGACY { struct net_local *tp = netdev_priv(dev); if (tp->slot_num) mca_mark_as_unused(tp->slot_num); } #endif release_region(dev->base_addr, SMCTR_IO_EXTENT); free_irq(dev->irq, dev); out: free_netdev(dev); return ERR_PTR(err); } int __init init_module(void) { int i, found = 
0; struct net_device *dev; for(i = 0; i < SMCTR_MAX_ADAPTERS; i++) { dev = io[0]? setup_card(i) : smctr_probe(-1); if (!IS_ERR(dev)) { ++found; dev_smctr[i] = dev; } } return found ? 0 : -ENODEV; } void __exit cleanup_module(void) { int i; for(i = 0; i < SMCTR_MAX_ADAPTERS; i++) { struct net_device *dev = dev_smctr[i]; if (dev) { unregister_netdev(dev); #ifdef CONFIG_MCA_LEGACY { struct net_local *tp = netdev_priv(dev); if (tp->slot_num) mca_mark_as_unused(tp->slot_num); } #endif release_region(dev->base_addr, SMCTR_IO_EXTENT); if (dev->irq) free_irq(dev->irq, dev); free_netdev(dev); } } } #endif /* MODULE */
gpl-2.0
y10g/lge-kernel-startablet-l06c
drivers/net/tokenring/tms380tr.c
847
63934
/* * tms380tr.c: A network driver library for Texas Instruments TMS380-based * Token Ring Adapters. * * Originally sktr.c: Written 1997 by Christoph Goos * * A fine result of the Linux Systems Network Architecture Project. * http://www.linux-sna.org * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * The following modules are currently available for card support: * - tmspci (Generic PCI card support) * - abyss (Madge PCI support) * - tmsisa (SysKonnect TR4/16 ISA) * * Sources: * - The hardware related parts of this driver are take from * the SysKonnect Token Ring driver for Windows NT. * - I used the IBM Token Ring driver 'ibmtr.c' as a base for this * driver, as well as the 'skeleton.c' driver by Donald Becker. * - Also various other drivers in the linux source tree were taken * as samples for some tasks. * - TI TMS380 Second-Generation Token Ring User's Guide * - TI datasheets for respective chips * - David Hein at Texas Instruments * - Various Madge employees * * Maintainer(s): * JS Jay Schulist jschlst@samba.org * CG Christoph Goos cgoos@syskonnect.de * AF Adam Fritzler * MLP Mike Phillips phillim@amtrak.com * JF Jochen Friedrich jochen@scram.de * * Modification History: * 29-Aug-97 CG Created * 04-Apr-98 CG Fixed problems caused by tok_timer_check * 10-Apr-98 CG Fixed lockups at cable disconnection * 27-May-98 JS Formated to Linux Kernel Format * 31-May-98 JS Hacked in PCI support * 16-Jun-98 JS Modulized for multiple cards with one driver * Sep-99 AF Renamed to tms380tr (supports more than SK's) * 23-Sep-99 AF Added Compaq and Thomas-Conrad PCI support * Fixed a bug causing double copies on PCI * Fixed for new multicast stuff (2.2/2.3) * 25-Sep-99 AF Uped TPL_NUM from 3 to 9 * Removed extraneous 'No free TPL' * 22-Dec-99 AF Added Madge PCI Mk2 support and generalized * parts of the initilization procedure. * 30-Dec-99 AF Turned tms380tr into a library ala 8390. 
* Madge support is provided in the abyss module * Generic PCI support is in the tmspci module. * 30-Nov-00 JF Updated PCI code to support IO MMU via * pci_map_static(). Alpha uses this MMU for ISA * as well. * 14-Jan-01 JF Fix DMA on ifdown/ifup sequences. Some * cleanup. * 13-Jan-02 JF Add spinlock to fix race condition. * 09-Nov-02 JF Fixed printks to not SPAM the console during * normal operation. * 30-Dec-02 JF Removed incorrect __init from * tms380tr_init_card. * 22-Jul-05 JF Converted to dma-mapping. * * To do: * 1. Multi/Broadcast packet handling (this may have fixed itself) * 2. Write a sktrisa module that includes the old ISA support (done) * 3. Allow modules to load their own microcode * 4. Speed up the BUD process -- freezing the kernel for 3+sec is * quite unacceptable. * 5. Still a few remaining stalls when the cable is unplugged. */ #ifdef MODULE static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, Adam Fritzler\n"; #endif #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/time.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/trdevice.h> #include <linux/firmware.h> #include <linux/bitops.h> #include <asm/system.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/uaccess.h> #include "tms380tr.h" /* Our Stuff */ /* Use 0 for production, 1 for verification, 2 for debug, and * 3 for very verbose debug. */ #ifndef TMS380TR_DEBUG #define TMS380TR_DEBUG 0 #endif static unsigned int tms380tr_debug = TMS380TR_DEBUG; /* Index to functions, as function prototypes. * Alphabetical by function name. 
*/ /* "A" */ /* "B" */ static int tms380tr_bringup_diags(struct net_device *dev); /* "C" */ static void tms380tr_cancel_tx_queue(struct net_local* tp); static int tms380tr_chipset_init(struct net_device *dev); static void tms380tr_chk_irq(struct net_device *dev); static void tms380tr_chk_outstanding_cmds(struct net_device *dev); static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr); static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqType); int tms380tr_close(struct net_device *dev); static void tms380tr_cmd_status_irq(struct net_device *dev); /* "D" */ static void tms380tr_disable_interrupts(struct net_device *dev); #if TMS380TR_DEBUG > 0 static void tms380tr_dump(unsigned char *Data, int length); #endif /* "E" */ static void tms380tr_enable_interrupts(struct net_device *dev); static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command); static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue); /* "F" */ /* "G" */ static struct net_device_stats *tms380tr_get_stats(struct net_device *dev); /* "H" */ static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device *dev); /* "I" */ static int tms380tr_init_adapter(struct net_device *dev); static void tms380tr_init_ipb(struct net_local *tp); static void tms380tr_init_net_local(struct net_device *dev); static void tms380tr_init_opb(struct net_device *dev); /* "M" */ /* "O" */ int tms380tr_open(struct net_device *dev); static void tms380tr_open_adapter(struct net_device *dev); /* "P" */ /* "R" */ static void tms380tr_rcv_status_irq(struct net_device *dev); static int tms380tr_read_ptr(struct net_device *dev); static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data, unsigned short Address, int Length); static int tms380tr_reset_adapter(struct net_device *dev); static void tms380tr_reset_interrupt(struct net_device *dev); static void tms380tr_ring_status_irq(struct net_device *dev); /* "S" 
*/ static netdev_tx_t tms380tr_send_packet(struct sk_buff *skb, struct net_device *dev); static void tms380tr_set_multicast_list(struct net_device *dev); static int tms380tr_set_mac_address(struct net_device *dev, void *addr); /* "T" */ static void tms380tr_timer_chk(unsigned long data); static void tms380tr_timer_end_wait(unsigned long data); static void tms380tr_tx_status_irq(struct net_device *dev); /* "U" */ static void tms380tr_update_rcv_stats(struct net_local *tp, unsigned char DataPtr[], unsigned int Length); /* "W" */ void tms380tr_wait(unsigned long time); static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status); static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status); #define SIFREADB(reg) \ (((struct net_local *)netdev_priv(dev))->sifreadb(dev, reg)) #define SIFWRITEB(val, reg) \ (((struct net_local *)netdev_priv(dev))->sifwriteb(dev, val, reg)) #define SIFREADW(reg) \ (((struct net_local *)netdev_priv(dev))->sifreadw(dev, reg)) #define SIFWRITEW(val, reg) \ (((struct net_local *)netdev_priv(dev))->sifwritew(dev, val, reg)) #if 0 /* TMS380TR_DEBUG > 0 */ static int madgemc_sifprobe(struct net_device *dev) { unsigned char old, chk1, chk2; old = SIFREADB(SIFADR); /* Get the old SIFADR value */ chk1 = 0; /* Begin with check value 0 */ do { madgemc_setregpage(dev, 0); /* Write new SIFADR value */ SIFWRITEB(chk1, SIFADR); chk2 = SIFREADB(SIFADR); if (chk2 != chk1) return -1; madgemc_setregpage(dev, 1); /* Read, invert and write */ chk2 = SIFREADB(SIFADD); if (chk2 != chk1) return -1; madgemc_setregpage(dev, 0); chk2 ^= 0x0FE; SIFWRITEB(chk2, SIFADR); /* Read, invert and compare */ madgemc_setregpage(dev, 1); chk2 = SIFREADB(SIFADD); madgemc_setregpage(dev, 0); chk2 ^= 0x0FE; if(chk1 != chk2) return (-1); /* No adapter */ chk1 -= 2; } while(chk1 != 0); /* Repeat 128 times (all byte values) */ madgemc_setregpage(dev, 0); /* sanity */ /* Restore the SIFADR value */ SIFWRITEB(old, SIFADR); return (0); } #endif /* * Open/initialize the 
board. This is called sometime after * booting when the 'ifconfig' program is run. * * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is non-reboot way to recover if something goes wrong. */ int tms380tr_open(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); int err; /* init the spinlock */ spin_lock_init(&tp->lock); init_timer(&tp->timer); /* Reset the hardware here. Don't forget to set the station address. */ #ifdef CONFIG_ISA if(dev->dma > 0) { unsigned long flags=claim_dma_lock(); disable_dma(dev->dma); set_dma_mode(dev->dma, DMA_MODE_CASCADE); enable_dma(dev->dma); release_dma_lock(flags); } #endif err = tms380tr_chipset_init(dev); if(err) { printk(KERN_INFO "%s: Chipset initialization error\n", dev->name); return (-1); } tp->timer.expires = jiffies + 30*HZ; tp->timer.function = tms380tr_timer_end_wait; tp->timer.data = (unsigned long)dev; add_timer(&tp->timer); printk(KERN_DEBUG "%s: Adapter RAM size: %dK\n", dev->name, tms380tr_read_ptr(dev)); tms380tr_enable_interrupts(dev); tms380tr_open_adapter(dev); netif_start_queue(dev); /* Wait for interrupt from hardware. If interrupt does not come, * there will be a timeout from the timer. 
 */
	/* Sleep until the adapter's open-complete interrupt arrives.
	 * tms380tr_timer_end_wait() (armed by the caller) also wakes us on
	 * timeout and clears Sleeping, so this cannot sleep forever.
	 */
	tp->Sleeping = 1;
	interruptible_sleep_on(&tp->wait_for_tok_int);
	del_timer(&tp->timer);

	/* If AdapterVirtOpenFlag is 1, the adapter is now open for use */
	if(tp->AdapterVirtOpenFlag == 0)
	{
		tms380tr_disable_interrupts(dev);
		return (-1);
	}

	tp->StartTime = jiffies;

	/* Start function control timer */
	tp->timer.expires	= jiffies + 2*HZ;
	tp->timer.function	= tms380tr_timer_chk;
	tp->timer.data		= (unsigned long)dev;
	add_timer(&tp->timer);

	return (0);
}

/*
 * Timeout function while waiting for event.
 * Timer callback: 'data' is the struct net_device the timer belongs to.
 * Wakes up anyone sleeping on wait_for_tok_int and clears the flag.
 */
static void tms380tr_timer_end_wait(unsigned long data)
{
	struct net_device *dev = (struct net_device*)data;
	struct net_local *tp = netdev_priv(dev);

	if(tp->Sleeping)
	{
		tp->Sleeping = 0;
		wake_up_interruptible(&tp->wait_for_tok_int);
	}
}

/*
 * Initialize the chipset: set up the initialisation/open parameter blocks
 * and the driver state, then reset the adapter, run its bring-up
 * diagnostics and transfer the initialisation data.
 * Returns 0 on success, -1 if any stage fails.
 */
static int tms380tr_chipset_init(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	int err;

	tms380tr_init_ipb(tp);
	tms380tr_init_opb(dev);
	tms380tr_init_net_local(dev);

	if(tms380tr_debug > 3)
		printk(KERN_DEBUG "%s: Resetting adapter...\n", dev->name);
	err = tms380tr_reset_adapter(dev);
	if(err < 0)
		return (-1);

	if(tms380tr_debug > 3)
		printk(KERN_DEBUG "%s: Bringup diags...\n", dev->name);
	err = tms380tr_bringup_diags(dev);
	if(err < 0)
		return (-1);

	if(tms380tr_debug > 3)
		printk(KERN_DEBUG "%s: Init adapter...\n", dev->name);
	err = tms380tr_init_adapter(dev);
	if(err < 0)
		return (-1);

	if(tms380tr_debug > 3)
		printk(KERN_DEBUG "%s: Done!\n", dev->name);
	return (0);
}

/*
 * Initializes the net_local structure.
*/ static void tms380tr_init_net_local(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); int i; dma_addr_t dmabuf; tp->scb.CMD = 0; tp->scb.Parm[0] = 0; tp->scb.Parm[1] = 0; tp->ssb.STS = 0; tp->ssb.Parm[0] = 0; tp->ssb.Parm[1] = 0; tp->ssb.Parm[2] = 0; tp->CMDqueue = 0; tp->AdapterOpenFlag = 0; tp->AdapterVirtOpenFlag = 0; tp->ScbInUse = 0; tp->OpenCommandIssued = 0; tp->ReOpenInProgress = 0; tp->HaltInProgress = 0; tp->TransmitHaltScheduled = 0; tp->LobeWireFaultLogged = 0; tp->LastOpenStatus = 0; tp->MaxPacketSize = DEFAULT_PACKET_SIZE; /* Create circular chain of transmit lists */ for (i = 0; i < TPL_NUM; i++) { tp->Tpl[i].NextTPLAddr = htonl(((char *)(&tp->Tpl[(i+1) % TPL_NUM]) - (char *)tp) + tp->dmabuffer); /* DMA buffer may be MMU driven */ tp->Tpl[i].Status = 0; tp->Tpl[i].FrameSize = 0; tp->Tpl[i].FragList[0].DataCount = 0; tp->Tpl[i].FragList[0].DataAddr = 0; tp->Tpl[i].NextTPLPtr = &tp->Tpl[(i+1) % TPL_NUM]; tp->Tpl[i].MData = NULL; tp->Tpl[i].TPLIndex = i; tp->Tpl[i].DMABuff = 0; tp->Tpl[i].BusyFlag = 0; } tp->TplFree = tp->TplBusy = &tp->Tpl[0]; /* Create circular chain of receive lists */ for (i = 0; i < RPL_NUM; i++) { tp->Rpl[i].NextRPLAddr = htonl(((char *)(&tp->Rpl[(i+1) % RPL_NUM]) - (char *)tp) + tp->dmabuffer); /* DMA buffer may be MMU driven */ tp->Rpl[i].Status = (RX_VALID | RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ); tp->Rpl[i].FrameSize = 0; tp->Rpl[i].FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize); /* Alloc skb and point adapter to data area */ tp->Rpl[i].Skb = dev_alloc_skb(tp->MaxPacketSize); tp->Rpl[i].DMABuff = 0; /* skb == NULL ? then use local buffer */ if(tp->Rpl[i].Skb == NULL) { tp->Rpl[i].SkbStat = SKB_UNAVAILABLE; tp->Rpl[i].FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[i] - (char *)tp) + tp->dmabuffer); tp->Rpl[i].MData = tp->LocalRxBuffers[i]; } else /* SKB != NULL */ { tp->Rpl[i].Skb->dev = dev; skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize); /* data unreachable for DMA ? 
then use local buffer */ dmabuf = dma_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE); if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit)) { tp->Rpl[i].SkbStat = SKB_DATA_COPY; tp->Rpl[i].FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[i] - (char *)tp) + tp->dmabuffer); tp->Rpl[i].MData = tp->LocalRxBuffers[i]; } else /* DMA directly in skb->data */ { tp->Rpl[i].SkbStat = SKB_DMA_DIRECT; tp->Rpl[i].FragList[0].DataAddr = htonl(dmabuf); tp->Rpl[i].MData = tp->Rpl[i].Skb->data; tp->Rpl[i].DMABuff = dmabuf; } } tp->Rpl[i].NextRPLPtr = &tp->Rpl[(i+1) % RPL_NUM]; tp->Rpl[i].RPLIndex = i; } tp->RplHead = &tp->Rpl[0]; tp->RplTail = &tp->Rpl[RPL_NUM-1]; tp->RplTail->Status = (RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ); } /* * Initializes the initialisation parameter block. */ static void tms380tr_init_ipb(struct net_local *tp) { tp->ipb.Init_Options = BURST_MODE; tp->ipb.CMD_Status_IV = 0; tp->ipb.TX_IV = 0; tp->ipb.RX_IV = 0; tp->ipb.Ring_Status_IV = 0; tp->ipb.SCB_Clear_IV = 0; tp->ipb.Adapter_CHK_IV = 0; tp->ipb.RX_Burst_Size = BURST_SIZE; tp->ipb.TX_Burst_Size = BURST_SIZE; tp->ipb.DMA_Abort_Thrhld = DMA_RETRIES; tp->ipb.SCB_Addr = 0; tp->ipb.SSB_Addr = 0; } /* * Initializes the open parameter block. */ static void tms380tr_init_opb(struct net_device *dev) { struct net_local *tp; unsigned long Addr; unsigned short RplSize = RPL_SIZE; unsigned short TplSize = TPL_SIZE; unsigned short BufferSize = BUFFER_SIZE; int i; tp = netdev_priv(dev); tp->ocpl.OPENOptions = 0; tp->ocpl.OPENOptions |= ENABLE_FULL_DUPLEX_SELECTION; tp->ocpl.FullDuplex = 0; tp->ocpl.FullDuplex |= OPEN_FULL_DUPLEX_OFF; /* * Set node address * * We go ahead and put it in the OPB even though on * most of the generic adapters this isn't required. * Its simpler this way. 
-- ASF */ for (i=0;i<6;i++) tp->ocpl.NodeAddr[i] = ((unsigned char *)dev->dev_addr)[i]; tp->ocpl.GroupAddr = 0; tp->ocpl.FunctAddr = 0; tp->ocpl.RxListSize = cpu_to_be16((unsigned short)RplSize); tp->ocpl.TxListSize = cpu_to_be16((unsigned short)TplSize); tp->ocpl.BufSize = cpu_to_be16((unsigned short)BufferSize); tp->ocpl.Reserved = 0; tp->ocpl.TXBufMin = TX_BUF_MIN; tp->ocpl.TXBufMax = TX_BUF_MAX; Addr = htonl(((char *)tp->ProductID - (char *)tp) + tp->dmabuffer); tp->ocpl.ProdIDAddr[0] = LOWORD(Addr); tp->ocpl.ProdIDAddr[1] = HIWORD(Addr); } /* * Send OPEN command to adapter */ static void tms380tr_open_adapter(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); if(tp->OpenCommandIssued) return; tp->OpenCommandIssued = 1; tms380tr_exec_cmd(dev, OC_OPEN); } /* * Clear the adapter's interrupt flag. Clear system interrupt enable * (SINTEN): disable adapter to system interrupts. */ static void tms380tr_disable_interrupts(struct net_device *dev) { SIFWRITEB(0, SIFACL); } /* * Set the adapter's interrupt flag. Set system interrupt enable * (SINTEN): enable adapter to system interrupts. */ static void tms380tr_enable_interrupts(struct net_device *dev) { SIFWRITEB(ACL_SINTEN, SIFACL); } /* * Put command in command queue, try to execute it. */ static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command) { struct net_local *tp = netdev_priv(dev); tp->CMDqueue |= Command; tms380tr_chk_outstanding_cmds(dev); } static void tms380tr_timeout(struct net_device *dev) { /* * If we get here, some higher level has decided we are broken. * There should really be a "kick me" function call instead. * * Resetting the token ring adapter takes a long time so just * fake transmission time and go on trying. 
Our own timeout * routine is in tms380tr_timer_chk() */ dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue(dev); } /* * Gets skb from system, queues it and checks if it can be sent */ static netdev_tx_t tms380tr_send_packet(struct sk_buff *skb, struct net_device *dev) { struct net_local *tp = netdev_priv(dev); netdev_tx_t rc; rc = tms380tr_hardware_send_packet(skb, dev); if(tp->TplFree->NextTPLPtr->BusyFlag) netif_stop_queue(dev); return rc; } /* * Move frames into adapter tx queue */ static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device *dev) { TPL *tpl; short length; unsigned char *buf; unsigned long flags; int i; dma_addr_t dmabuf, newbuf; struct net_local *tp = netdev_priv(dev); /* Try to get a free TPL from the chain. * * NOTE: We *must* always leave one unused TPL in the chain, * because otherwise the adapter might send frames twice. */ spin_lock_irqsave(&tp->lock, flags); if(tp->TplFree->NextTPLPtr->BusyFlag) { /* No free TPL */ if (tms380tr_debug > 0) printk(KERN_DEBUG "%s: No free TPL\n", dev->name); spin_unlock_irqrestore(&tp->lock, flags); return NETDEV_TX_BUSY; } dmabuf = 0; /* Is buffer reachable for Busmaster-DMA? */ length = skb->len; dmabuf = dma_map_single(tp->pdev, skb->data, length, DMA_TO_DEVICE); if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) { /* Copy frame to local buffer */ dma_unmap_single(tp->pdev, dmabuf, length, DMA_TO_DEVICE); dmabuf = 0; i = tp->TplFree->TPLIndex; buf = tp->LocalTxBuffers[i]; skb_copy_from_linear_data(skb, buf, length); newbuf = ((char *)buf - (char *)tp) + tp->dmabuffer; } else { /* Send direct from skb->data */ newbuf = dmabuf; buf = skb->data; } /* Source address in packet? 
 */
	/* Substitute our station address into the frame's source-address
	 * field if it is still unset (preserving the source-routing bit).
	 */
	tms380tr_chk_src_addr(buf, dev->dev_addr);
	tp->LastSendTime	= jiffies;

	/* Claim the free TPL and advance the free pointer. */
	tpl = tp->TplFree;	/* Get the "free" TPL */
	tpl->BusyFlag = 1;	/* Mark TPL as busy */
	tp->TplFree = tpl->NextTPLPtr;

	/* Save the skb for delayed return of skb to system */
	tpl->Skb = skb;
	tpl->DMABuff = dmabuf;
	tpl->FragList[0].DataCount = cpu_to_be16((unsigned short)length);
	tpl->FragList[0].DataAddr = htonl(newbuf);

	/* Write the data length in the transmit list. */
	tpl->FrameSize	= cpu_to_be16((unsigned short)length);
	tpl->MData	= buf;

	/* Transmit the frame and set the status values. */
	tms380tr_write_tpl_status(tpl, TX_VALID | TX_START_FRAME
				| TX_END_FRAME | TX_PASS_SRC_ADDR
				| TX_FRAME_IRQ);

	/* Let adapter send the frame. */
	tms380tr_exec_sifcmd(dev, CMD_TX_VALID);
	spin_unlock_irqrestore(&tp->lock, flags);

	return NETDEV_TX_OK;
}

/*
 * Write the given value to the 'Status' field of the specified TPL.
 * NOTE: This function should be used whenever the status of any TPL must be
 * modified by the driver, because the compiler may otherwise change the
 * order of instructions such that writing the TPL status may be executed at
 * an undesirable time. When this function is used, the status is always
 * written when the function is called.
 */
static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status)
{
	tpl->Status = Status;
}

/*
 * If the frame's source-address field looks unset, copy the given
 * hardware address into it, keeping the source-routing indicator bit.
 */
static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr)
{
	unsigned char SRBit;

	/* NOTE(review): each cast below widens a single byte, so despite the
	 * original trailing comments only frame[8] and frame[12] are really
	 * tested, not 4 resp. 2 bytes - confirm whether that is intended.
	 */
	if((((unsigned long)frame[8]) & ~0x80) != 0)	/* Compare 4 bytes */
		return;
	if((unsigned short)frame[12] != 0)	/* Compare 2 bytes */
		return;

	SRBit = frame[8] & 0x80;	/* keep the source-routing bit */
	memcpy(&frame[8], hw_addr, 6);
	frame[8] |= SRBit;
}

/*
 * The timer routine: Check if adapter still open and working, reopen if not.
*/ static void tms380tr_timer_chk(unsigned long data) { struct net_device *dev = (struct net_device*)data; struct net_local *tp = netdev_priv(dev); if(tp->HaltInProgress) return; tms380tr_chk_outstanding_cmds(dev); if(time_before(tp->LastSendTime + SEND_TIMEOUT, jiffies) && (tp->TplFree != tp->TplBusy)) { /* Anything to send, but stalled too long */ tp->LastSendTime = jiffies; tms380tr_exec_cmd(dev, OC_CLOSE); /* Does reopen automatically */ } tp->timer.expires = jiffies + 2*HZ; add_timer(&tp->timer); if(tp->AdapterOpenFlag || tp->ReOpenInProgress) return; tp->ReOpenInProgress = 1; tms380tr_open_adapter(dev); } /* * The typical workload of the driver: Handle the network interface interrupts. */ irqreturn_t tms380tr_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct net_local *tp; unsigned short irq_type; int handled = 0; tp = netdev_priv(dev); irq_type = SIFREADW(SIFSTS); while(irq_type & STS_SYSTEM_IRQ) { handled = 1; irq_type &= STS_IRQ_MASK; if(!tms380tr_chk_ssb(tp, irq_type)) { printk(KERN_DEBUG "%s: DATA LATE occurred\n", dev->name); break; } switch(irq_type) { case STS_IRQ_RECEIVE_STATUS: tms380tr_reset_interrupt(dev); tms380tr_rcv_status_irq(dev); break; case STS_IRQ_TRANSMIT_STATUS: /* Check if TRANSMIT.HALT command is complete */ if(tp->ssb.Parm[0] & COMMAND_COMPLETE) { tp->TransmitCommandActive = 0; tp->TransmitHaltScheduled = 0; /* Issue a new transmit command. */ tms380tr_exec_cmd(dev, OC_TRANSMIT); } tms380tr_reset_interrupt(dev); tms380tr_tx_status_irq(dev); break; case STS_IRQ_COMMAND_STATUS: /* The SSB contains status of last command * other than receive/transmit. */ tms380tr_cmd_status_irq(dev); break; case STS_IRQ_SCB_CLEAR: /* The SCB is free for another command. 
*/ tp->ScbInUse = 0; tms380tr_chk_outstanding_cmds(dev); break; case STS_IRQ_RING_STATUS: tms380tr_ring_status_irq(dev); break; case STS_IRQ_ADAPTER_CHECK: tms380tr_chk_irq(dev); break; case STS_IRQ_LLC_STATUS: printk(KERN_DEBUG "tms380tr: unexpected LLC status IRQ\n"); break; case STS_IRQ_TIMER: printk(KERN_DEBUG "tms380tr: unexpected Timer IRQ\n"); break; case STS_IRQ_RECEIVE_PENDING: printk(KERN_DEBUG "tms380tr: unexpected Receive Pending IRQ\n"); break; default: printk(KERN_DEBUG "Unknown Token Ring IRQ (0x%04x)\n", irq_type); break; } /* Reset system interrupt if not already done. */ if(irq_type != STS_IRQ_TRANSMIT_STATUS && irq_type != STS_IRQ_RECEIVE_STATUS) { tms380tr_reset_interrupt(dev); } irq_type = SIFREADW(SIFSTS); } return IRQ_RETVAL(handled); } /* * Reset the INTERRUPT SYSTEM bit and issue SSB CLEAR command. */ static void tms380tr_reset_interrupt(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); SSB *ssb = &tp->ssb; /* * [Workaround for "Data Late"] * Set all fields of the SSB to well-defined values so we can * check if the adapter has written the SSB. */ ssb->STS = (unsigned short) -1; ssb->Parm[0] = (unsigned short) -1; ssb->Parm[1] = (unsigned short) -1; ssb->Parm[2] = (unsigned short) -1; /* Free SSB by issuing SSB_CLEAR command after reading IRQ code * and clear STS_SYSTEM_IRQ bit: enable adapter for further interrupts. */ tms380tr_exec_sifcmd(dev, CMD_SSB_CLEAR | CMD_CLEAR_SYSTEM_IRQ); } /* * Check if the SSB has actually been written by the adapter. */ static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqType) { SSB *ssb = &tp->ssb; /* The address of the SSB. */ /* C 0 1 2 INTERRUPT CODE * - - - - -------------- * 1 1 1 1 TRANSMIT STATUS * 1 1 1 1 RECEIVE STATUS * 1 ? ? 
0 COMMAND STATUS * 0 0 0 0 SCB CLEAR * 1 1 0 0 RING STATUS * 0 0 0 0 ADAPTER CHECK * * 0 = SSB field not affected by interrupt * 1 = SSB field is affected by interrupt * * C = SSB ADDRESS +0: COMMAND * 0 = SSB ADDRESS +2: STATUS 0 * 1 = SSB ADDRESS +4: STATUS 1 * 2 = SSB ADDRESS +6: STATUS 2 */ /* Check if this interrupt does use the SSB. */ if(IrqType != STS_IRQ_TRANSMIT_STATUS && IrqType != STS_IRQ_RECEIVE_STATUS && IrqType != STS_IRQ_COMMAND_STATUS && IrqType != STS_IRQ_RING_STATUS) { return (1); /* SSB not involved. */ } /* Note: All fields of the SSB have been set to all ones (-1) after it * has last been used by the software (see DriverIsr()). * * Check if the affected SSB fields are still unchanged. */ if(ssb->STS == (unsigned short) -1) return (0); /* Command field not yet available. */ if(IrqType == STS_IRQ_COMMAND_STATUS) return (1); /* Status fields not always affected. */ if(ssb->Parm[0] == (unsigned short) -1) return (0); /* Status 1 field not yet available. */ if(IrqType == STS_IRQ_RING_STATUS) return (1); /* Status 2 & 3 fields not affected. */ /* Note: At this point, the interrupt is either TRANSMIT or RECEIVE. */ if(ssb->Parm[1] == (unsigned short) -1) return (0); /* Status 2 field not yet available. */ if(ssb->Parm[2] == (unsigned short) -1) return (0); /* Status 3 field not yet available. */ return (1); /* All SSB fields have been written by the adapter. */ } /* * Evaluates the command results status in the SSB status field. 
*/ static void tms380tr_cmd_status_irq(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); unsigned short ssb_cmd, ssb_parm_0; unsigned short ssb_parm_1; char *open_err = "Open error -"; char *code_err = "Open code -"; /* Copy the ssb values to local variables */ ssb_cmd = tp->ssb.STS; ssb_parm_0 = tp->ssb.Parm[0]; ssb_parm_1 = tp->ssb.Parm[1]; if(ssb_cmd == OPEN) { tp->Sleeping = 0; if(!tp->ReOpenInProgress) wake_up_interruptible(&tp->wait_for_tok_int); tp->OpenCommandIssued = 0; tp->ScbInUse = 0; if((ssb_parm_0 & 0x00FF) == GOOD_COMPLETION) { /* Success, the adapter is open. */ tp->LobeWireFaultLogged = 0; tp->AdapterOpenFlag = 1; tp->AdapterVirtOpenFlag = 1; tp->TransmitCommandActive = 0; tms380tr_exec_cmd(dev, OC_TRANSMIT); tms380tr_exec_cmd(dev, OC_RECEIVE); if(tp->ReOpenInProgress) tp->ReOpenInProgress = 0; return; } else /* The adapter did not open. */ { if(ssb_parm_0 & NODE_ADDR_ERROR) printk(KERN_INFO "%s: Node address error\n", dev->name); if(ssb_parm_0 & LIST_SIZE_ERROR) printk(KERN_INFO "%s: List size error\n", dev->name); if(ssb_parm_0 & BUF_SIZE_ERROR) printk(KERN_INFO "%s: Buffer size error\n", dev->name); if(ssb_parm_0 & TX_BUF_COUNT_ERROR) printk(KERN_INFO "%s: Tx buffer count error\n", dev->name); if(ssb_parm_0 & INVALID_OPEN_OPTION) printk(KERN_INFO "%s: Invalid open option\n", dev->name); if(ssb_parm_0 & OPEN_ERROR) { /* Show the open phase. 
*/ switch(ssb_parm_0 & OPEN_PHASES_MASK) { case LOBE_MEDIA_TEST: if(!tp->LobeWireFaultLogged) { tp->LobeWireFaultLogged = 1; printk(KERN_INFO "%s: %s Lobe wire fault (check cable !).\n", dev->name, open_err); } tp->ReOpenInProgress = 1; tp->AdapterOpenFlag = 0; tp->AdapterVirtOpenFlag = 1; tms380tr_open_adapter(dev); return; case PHYSICAL_INSERTION: printk(KERN_INFO "%s: %s Physical insertion.\n", dev->name, open_err); break; case ADDRESS_VERIFICATION: printk(KERN_INFO "%s: %s Address verification.\n", dev->name, open_err); break; case PARTICIPATION_IN_RING_POLL: printk(KERN_INFO "%s: %s Participation in ring poll.\n", dev->name, open_err); break; case REQUEST_INITIALISATION: printk(KERN_INFO "%s: %s Request initialisation.\n", dev->name, open_err); break; case FULLDUPLEX_CHECK: printk(KERN_INFO "%s: %s Full duplex check.\n", dev->name, open_err); break; default: printk(KERN_INFO "%s: %s Unknown open phase\n", dev->name, open_err); break; } /* Show the open errors. */ switch(ssb_parm_0 & OPEN_ERROR_CODES_MASK) { case OPEN_FUNCTION_FAILURE: printk(KERN_INFO "%s: %s OPEN_FUNCTION_FAILURE", dev->name, code_err); tp->LastOpenStatus = OPEN_FUNCTION_FAILURE; break; case OPEN_SIGNAL_LOSS: printk(KERN_INFO "%s: %s OPEN_SIGNAL_LOSS\n", dev->name, code_err); tp->LastOpenStatus = OPEN_SIGNAL_LOSS; break; case OPEN_TIMEOUT: printk(KERN_INFO "%s: %s OPEN_TIMEOUT\n", dev->name, code_err); tp->LastOpenStatus = OPEN_TIMEOUT; break; case OPEN_RING_FAILURE: printk(KERN_INFO "%s: %s OPEN_RING_FAILURE\n", dev->name, code_err); tp->LastOpenStatus = OPEN_RING_FAILURE; break; case OPEN_RING_BEACONING: printk(KERN_INFO "%s: %s OPEN_RING_BEACONING\n", dev->name, code_err); tp->LastOpenStatus = OPEN_RING_BEACONING; break; case OPEN_DUPLICATE_NODEADDR: printk(KERN_INFO "%s: %s OPEN_DUPLICATE_NODEADDR\n", dev->name, code_err); tp->LastOpenStatus = OPEN_DUPLICATE_NODEADDR; break; case OPEN_REQUEST_INIT: printk(KERN_INFO "%s: %s OPEN_REQUEST_INIT\n", dev->name, code_err); tp->LastOpenStatus = 
OPEN_REQUEST_INIT; break; case OPEN_REMOVE_RECEIVED: printk(KERN_INFO "%s: %s OPEN_REMOVE_RECEIVED", dev->name, code_err); tp->LastOpenStatus = OPEN_REMOVE_RECEIVED; break; case OPEN_FULLDUPLEX_SET: printk(KERN_INFO "%s: %s OPEN_FULLDUPLEX_SET\n", dev->name, code_err); tp->LastOpenStatus = OPEN_FULLDUPLEX_SET; break; default: printk(KERN_INFO "%s: %s Unknown open err code", dev->name, code_err); tp->LastOpenStatus = OPEN_FUNCTION_FAILURE; break; } } tp->AdapterOpenFlag = 0; tp->AdapterVirtOpenFlag = 0; return; } } else { if(ssb_cmd != READ_ERROR_LOG) return; /* Add values from the error log table to the MAC * statistics counters and update the errorlogtable * memory. */ tp->MacStat.line_errors += tp->errorlogtable.Line_Error; tp->MacStat.burst_errors += tp->errorlogtable.Burst_Error; tp->MacStat.A_C_errors += tp->errorlogtable.ARI_FCI_Error; tp->MacStat.lost_frames += tp->errorlogtable.Lost_Frame_Error; tp->MacStat.recv_congest_count += tp->errorlogtable.Rx_Congest_Error; tp->MacStat.rx_errors += tp->errorlogtable.Rx_Congest_Error; tp->MacStat.frame_copied_errors += tp->errorlogtable.Frame_Copied_Error; tp->MacStat.token_errors += tp->errorlogtable.Token_Error; tp->MacStat.dummy1 += tp->errorlogtable.DMA_Bus_Error; tp->MacStat.dummy1 += tp->errorlogtable.DMA_Parity_Error; tp->MacStat.abort_delimiters += tp->errorlogtable.AbortDelimeters; tp->MacStat.frequency_errors += tp->errorlogtable.Frequency_Error; tp->MacStat.internal_errors += tp->errorlogtable.Internal_Error; } } /* * The inverse routine to tms380tr_open(). */ int tms380tr_close(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); netif_stop_queue(dev); del_timer(&tp->timer); /* Flush the Tx and disable Rx here. 
*/ tp->HaltInProgress = 1; tms380tr_exec_cmd(dev, OC_CLOSE); tp->timer.expires = jiffies + 1*HZ; tp->timer.function = tms380tr_timer_end_wait; tp->timer.data = (unsigned long)dev; add_timer(&tp->timer); tms380tr_enable_interrupts(dev); tp->Sleeping = 1; interruptible_sleep_on(&tp->wait_for_tok_int); tp->TransmitCommandActive = 0; del_timer(&tp->timer); tms380tr_disable_interrupts(dev); #ifdef CONFIG_ISA if(dev->dma > 0) { unsigned long flags=claim_dma_lock(); disable_dma(dev->dma); release_dma_lock(flags); } #endif SIFWRITEW(0xFF00, SIFCMD); #if 0 if(dev->dma > 0) /* what the? */ SIFWRITEB(0xff, POSREG); #endif tms380tr_cancel_tx_queue(tp); return (0); } /* * Get the current statistics. This may be called with the card open * or closed. */ static struct net_device_stats *tms380tr_get_stats(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); return ((struct net_device_stats *)&tp->MacStat); } /* * Set or clear the multicast filter for this adapter. */ static void tms380tr_set_multicast_list(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); unsigned int OpenOptions; OpenOptions = tp->ocpl.OPENOptions & ~(PASS_ADAPTER_MAC_FRAMES | PASS_ATTENTION_FRAMES | PASS_BEACON_MAC_FRAMES | COPY_ALL_MAC_FRAMES | COPY_ALL_NON_MAC_FRAMES); tp->ocpl.FunctAddr = 0; if(dev->flags & IFF_PROMISC) /* Enable promiscuous mode */ OpenOptions |= COPY_ALL_NON_MAC_FRAMES | COPY_ALL_MAC_FRAMES; else { if(dev->flags & IFF_ALLMULTI) { /* Disable promiscuous mode, use normal mode. 
*/ tp->ocpl.FunctAddr = 0xFFFFFFFF; } else { struct netdev_hw_addr *ha; netdev_for_each_mc_addr(ha, dev) { ((char *)(&tp->ocpl.FunctAddr))[0] |= ha->addr[2]; ((char *)(&tp->ocpl.FunctAddr))[1] |= ha->addr[3]; ((char *)(&tp->ocpl.FunctAddr))[2] |= ha->addr[4]; ((char *)(&tp->ocpl.FunctAddr))[3] |= ha->addr[5]; } } tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR); } tp->ocpl.OPENOptions = OpenOptions; tms380tr_exec_cmd(dev, OC_MODIFY_OPEN_PARMS); } /* * Wait for some time (microseconds) */ void tms380tr_wait(unsigned long time) { #if 0 long tmp; tmp = jiffies + time/(1000000/HZ); do { tmp = schedule_timeout_interruptible(tmp); } while(time_after(tmp, jiffies)); #else udelay(time); #endif } /* * Write a command value to the SIFCMD register */ static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue) { unsigned short cmd; unsigned short SifStsValue; unsigned long loop_counter; WriteValue = ((WriteValue ^ CMD_SYSTEM_IRQ) | CMD_INTERRUPT_ADAPTER); cmd = (unsigned short)WriteValue; loop_counter = 0,5 * 800000; do { SifStsValue = SIFREADW(SIFSTS); } while((SifStsValue & CMD_INTERRUPT_ADAPTER) && loop_counter--); SIFWRITEW(cmd, SIFCMD); } /* * Processes adapter hardware reset, halts adapter and downloads firmware, * clears the halt bit. 
*/ static int tms380tr_reset_adapter(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); unsigned short *fw_ptr; unsigned short count, c, count2; const struct firmware *fw_entry = NULL; if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) { printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n", dev->name, "tms380tr.bin"); return (-1); } fw_ptr = (unsigned short *)fw_entry->data; count2 = fw_entry->size / 2; /* Hardware adapter reset */ SIFWRITEW(ACL_ARESET, SIFACL); tms380tr_wait(40); c = SIFREADW(SIFACL); tms380tr_wait(20); if(dev->dma == 0) /* For PCI adapters */ { c &= ~(ACL_NSELOUT0 | ACL_NSELOUT1); /* Clear bits */ if(tp->setnselout) c |= (*tp->setnselout)(dev); } /* In case a command is pending - forget it */ tp->ScbInUse = 0; c &= ~ACL_ARESET; /* Clear adapter reset bit */ c |= ACL_CPHALT; /* Halt adapter CPU, allow download */ c |= ACL_BOOT; c |= ACL_SINTEN; c &= ~ACL_PSDMAEN; /* Clear pseudo dma bit */ SIFWRITEW(c, SIFACL); tms380tr_wait(40); count = 0; /* Download firmware via DIO interface: */ do { if (count2 < 3) continue; /* Download first address part */ SIFWRITEW(*fw_ptr, SIFADX); fw_ptr++; count2--; /* Download second address part */ SIFWRITEW(*fw_ptr, SIFADD); fw_ptr++; count2--; if((count = *fw_ptr) != 0) /* Load loop counter */ { fw_ptr++; /* Download block data */ count2--; if (count > count2) continue; for(; count > 0; count--) { SIFWRITEW(*fw_ptr, SIFINC); fw_ptr++; count2--; } } else /* Stop, if last block downloaded */ { c = SIFREADW(SIFACL); c &= (~ACL_CPHALT | ACL_SINTEN); /* Clear CPHALT and start BUD */ SIFWRITEW(c, SIFACL); if (fw_entry) release_firmware(fw_entry); return (1); } } while(count == 0); if (fw_entry) release_firmware(fw_entry); printk(KERN_INFO "%s: Adapter Download Failed\n", dev->name); return (-1); } MODULE_FIRMWARE("tms380tr.bin"); /* * Starts bring up diagnostics of token ring adapter and evaluates * diagnostic results. 
 */
static int tms380tr_bringup_diags(struct net_device *dev)
{
	/* Returns 1 when the adapter reports STS_INITIALIZE,
	 * -1 after all retries fail (a hardware error is logged).
	 */
	int loop_cnt, retry_cnt;
	unsigned short Status;

	tms380tr_wait(HALF_SECOND);
	tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET);
	tms380tr_wait(HALF_SECOND);

	retry_cnt = BUD_MAX_RETRIES;	/* maximal number of retrys */

	do {
		retry_cnt--;
		if(tms380tr_debug > 3)
			printk(KERN_DEBUG "BUD-Status: ");
		loop_cnt = BUD_MAX_LOOPCNT;	/* maximum: three seconds*/
		do {	/* Inspect BUD results */
			loop_cnt--;
			tms380tr_wait(HALF_SECOND);
			Status = SIFREADW(SIFSTS);
			Status &= STS_MASK;

			if(tms380tr_debug > 3)
				printk(KERN_DEBUG " %04X\n", Status);
			/* BUD successfully completed */
			if(Status == STS_INITIALIZE)
				return (1);
		/* Unrecoverable hardware error, BUD not completed? */
		} while((loop_cnt > 0) && ((Status & (STS_ERROR | STS_TEST))
			!= (STS_ERROR | STS_TEST)));

		/* Error preventing completion of BUD: soft-reset and retry */
		if(retry_cnt > 0)
		{
			printk(KERN_INFO "%s: Adapter Software Reset.\n",
				dev->name);
			tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET);
			tms380tr_wait(HALF_SECOND);
		}
	} while(retry_cnt > 0);

	Status = SIFREADW(SIFSTS);

	printk(KERN_INFO "%s: Hardware error\n", dev->name);
	/* Hardware error occurred!  Decode the low status bits. */
	Status &= 0x001f;
	if (Status & 0x0010)
		printk(KERN_INFO "%s: BUD Error: Timeout\n", dev->name);
	else if ((Status & 0x000f) > 6)
		printk(KERN_INFO "%s: BUD Error: Illegal Failure\n", dev->name);
	else
		printk(KERN_INFO "%s: Bring Up Diagnostics Error (%04X) occurred\n", dev->name, Status & 0x000f);
	return (-1);
}

/*
 * Copy initialisation data to adapter memory, beginning at address
 * 1:0A00; Starting DMA test and evaluating result bits.
*/ static int tms380tr_init_adapter(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); const unsigned char SCB_Test[6] = {0x00, 0x00, 0xC1, 0xE2, 0xD4, 0x8B}; const unsigned char SSB_Test[8] = {0xFF, 0xFF, 0xD1, 0xD7, 0xC5, 0xD9, 0xC3, 0xD4}; void *ptr = (void *)&tp->ipb; unsigned short *ipb_ptr = (unsigned short *)ptr; unsigned char *cb_ptr = (unsigned char *) &tp->scb; unsigned char *sb_ptr = (unsigned char *) &tp->ssb; unsigned short Status; int i, loop_cnt, retry_cnt; /* Normalize: byte order low/high, word order high/low! (only IPB!) */ tp->ipb.SCB_Addr = SWAPW(((char *)&tp->scb - (char *)tp) + tp->dmabuffer); tp->ipb.SSB_Addr = SWAPW(((char *)&tp->ssb - (char *)tp) + tp->dmabuffer); if(tms380tr_debug > 3) { printk(KERN_DEBUG "%s: buffer (real): %lx\n", dev->name, (long) &tp->scb); printk(KERN_DEBUG "%s: buffer (virt): %lx\n", dev->name, (long) ((char *)&tp->scb - (char *)tp) + (long) tp->dmabuffer); printk(KERN_DEBUG "%s: buffer (DMA) : %lx\n", dev->name, (long) tp->dmabuffer); printk(KERN_DEBUG "%s: buffer (tp) : %lx\n", dev->name, (long) tp); } /* Maximum: three initialization retries */ retry_cnt = INIT_MAX_RETRIES; do { retry_cnt--; /* Transfer initialization block */ SIFWRITEW(0x0001, SIFADX); /* To address 0001:0A00 of adapter RAM */ SIFWRITEW(0x0A00, SIFADD); /* Write 11 words to adapter RAM */ for(i = 0; i < 11; i++) SIFWRITEW(ipb_ptr[i], SIFINC); /* Execute SCB adapter command */ tms380tr_exec_sifcmd(dev, CMD_EXECUTE); loop_cnt = INIT_MAX_LOOPCNT; /* Maximum: 11 seconds */ /* While remaining retries, no error and not completed */ do { Status = 0; loop_cnt--; tms380tr_wait(HALF_SECOND); /* Mask interesting status bits */ Status = SIFREADW(SIFSTS); Status &= STS_MASK; } while(((Status &(STS_INITIALIZE | STS_ERROR | STS_TEST)) != 0) && ((Status & STS_ERROR) == 0) && (loop_cnt != 0)); if((Status & (STS_INITIALIZE | STS_ERROR | STS_TEST)) == 0) { /* Initialization completed without error */ i = 0; do { /* Test if contents of SCB is valid 
*/ if(SCB_Test[i] != *(cb_ptr + i)) { printk(KERN_INFO "%s: DMA failed\n", dev->name); /* DMA data error: wrong data in SCB */ return (-1); } i++; } while(i < 6); i = 0; do { /* Test if contents of SSB is valid */ if(SSB_Test[i] != *(sb_ptr + i)) /* DMA data error: wrong data in SSB */ return (-1); i++; } while (i < 8); return (1); /* Adapter successfully initialized */ } else { if((Status & STS_ERROR) != 0) { /* Initialization error occurred */ Status = SIFREADW(SIFSTS); Status &= STS_ERROR_MASK; /* ShowInitialisationErrorCode(Status); */ printk(KERN_INFO "%s: Status error: %d\n", dev->name, Status); return (-1); /* Unrecoverable error */ } else { if(retry_cnt > 0) { /* Reset adapter and try init again */ tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET); tms380tr_wait(HALF_SECOND); } } } } while(retry_cnt > 0); printk(KERN_INFO "%s: Retry exceeded\n", dev->name); return (-1); } /* * Check for outstanding commands in command queue and tries to execute * command immediately. Corresponding command flag in command queue is cleared. */ static void tms380tr_chk_outstanding_cmds(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); unsigned long Addr = 0; if(tp->CMDqueue == 0) return; /* No command execution */ /* If SCB in use: no command */ if(tp->ScbInUse == 1) return; /* Check if adapter is opened, avoiding COMMAND_REJECT * interrupt by the adapter! */ if(tp->AdapterOpenFlag == 0) { if(tp->CMDqueue & OC_OPEN) { /* Execute OPEN command */ tp->CMDqueue ^= OC_OPEN; Addr = htonl(((char *)&tp->ocpl - (char *)tp) + tp->dmabuffer); tp->scb.Parm[0] = LOWORD(Addr); tp->scb.Parm[1] = HIWORD(Addr); tp->scb.CMD = OPEN; } else /* No OPEN command queued, but adapter closed. Note: * We'll try to re-open the adapter in DriverPoll() */ return; /* No adapter command issued */ } else { /* Adapter is open; evaluate command queue: try to execute * outstanding commands (depending on priority!) 
CLOSE * command queued */ if(tp->CMDqueue & OC_CLOSE) { tp->CMDqueue ^= OC_CLOSE; tp->AdapterOpenFlag = 0; tp->scb.Parm[0] = 0; /* Parm[0], Parm[1] are ignored */ tp->scb.Parm[1] = 0; /* but should be set to zero! */ tp->scb.CMD = CLOSE; if(!tp->HaltInProgress) tp->CMDqueue |= OC_OPEN; /* re-open adapter */ else tp->CMDqueue = 0; /* no more commands */ } else { if(tp->CMDqueue & OC_RECEIVE) { tp->CMDqueue ^= OC_RECEIVE; Addr = htonl(((char *)tp->RplHead - (char *)tp) + tp->dmabuffer); tp->scb.Parm[0] = LOWORD(Addr); tp->scb.Parm[1] = HIWORD(Addr); tp->scb.CMD = RECEIVE; } else { if(tp->CMDqueue & OC_TRANSMIT_HALT) { /* NOTE: TRANSMIT.HALT must be checked * before TRANSMIT. */ tp->CMDqueue ^= OC_TRANSMIT_HALT; tp->scb.CMD = TRANSMIT_HALT; /* Parm[0] and Parm[1] are ignored * but should be set to zero! */ tp->scb.Parm[0] = 0; tp->scb.Parm[1] = 0; } else { if(tp->CMDqueue & OC_TRANSMIT) { /* NOTE: TRANSMIT must be * checked after TRANSMIT.HALT */ if(tp->TransmitCommandActive) { if(!tp->TransmitHaltScheduled) { tp->TransmitHaltScheduled = 1; tms380tr_exec_cmd(dev, OC_TRANSMIT_HALT) ; } tp->TransmitCommandActive = 0; return; } tp->CMDqueue ^= OC_TRANSMIT; tms380tr_cancel_tx_queue(tp); Addr = htonl(((char *)tp->TplBusy - (char *)tp) + tp->dmabuffer); tp->scb.Parm[0] = LOWORD(Addr); tp->scb.Parm[1] = HIWORD(Addr); tp->scb.CMD = TRANSMIT; tp->TransmitCommandActive = 1; } else { if(tp->CMDqueue & OC_MODIFY_OPEN_PARMS) { tp->CMDqueue ^= OC_MODIFY_OPEN_PARMS; tp->scb.Parm[0] = tp->ocpl.OPENOptions; /* new OPEN options*/ tp->scb.Parm[0] |= ENABLE_FULL_DUPLEX_SELECTION; tp->scb.Parm[1] = 0; /* is ignored but should be zero */ tp->scb.CMD = MODIFY_OPEN_PARMS; } else { if(tp->CMDqueue & OC_SET_FUNCT_ADDR) { tp->CMDqueue ^= OC_SET_FUNCT_ADDR; tp->scb.Parm[0] = LOWORD(tp->ocpl.FunctAddr); tp->scb.Parm[1] = HIWORD(tp->ocpl.FunctAddr); tp->scb.CMD = SET_FUNCT_ADDR; } else { if(tp->CMDqueue & OC_SET_GROUP_ADDR) { tp->CMDqueue ^= OC_SET_GROUP_ADDR; tp->scb.Parm[0] = 
LOWORD(tp->ocpl.GroupAddr); tp->scb.Parm[1] = HIWORD(tp->ocpl.GroupAddr); tp->scb.CMD = SET_GROUP_ADDR; } else { if(tp->CMDqueue & OC_READ_ERROR_LOG) { tp->CMDqueue ^= OC_READ_ERROR_LOG; Addr = htonl(((char *)&tp->errorlogtable - (char *)tp) + tp->dmabuffer); tp->scb.Parm[0] = LOWORD(Addr); tp->scb.Parm[1] = HIWORD(Addr); tp->scb.CMD = READ_ERROR_LOG; } else { printk(KERN_WARNING "CheckForOutstandingCommand: unknown Command\n"); tp->CMDqueue = 0; return; } } } } } } } } } tp->ScbInUse = 1; /* Set semaphore: SCB in use. */ /* Execute SCB and generate IRQ when done. */ tms380tr_exec_sifcmd(dev, CMD_EXECUTE | CMD_SCB_REQUEST); } /* * IRQ conditions: signal loss on the ring, transmit or receive of beacon * frames (disabled if bit 1 of OPEN option is set); report error MAC * frame transmit (disabled if bit 2 of OPEN option is set); open or short * circuit fault on the lobe is detected; remove MAC frame received; * error counter overflow (255); opened adapter is the only station in ring. * After some of the IRQs the adapter is closed! 
*/
/*
 * Handle a ring status interrupt: latch the new ring status word from
 * SSB Parm[0], log every asserted condition, update the error counters
 * where appropriate, and re-open the adapter when the status indicates
 * it has been closed by the hardware.
 */
static void tms380tr_ring_status_irq(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);

	/* SSB parameters arrive big-endian from the adapter. */
	tp->CurrentRingStatus = be16_to_cpu((unsigned short)tp->ssb.Parm[0]);

	/* First: fill up statistics */
	if(tp->ssb.Parm[0] & SIGNAL_LOSS)
	{
		printk(KERN_INFO "%s: Signal Loss\n", dev->name);
		tp->MacStat.line_errors++;
	}

	/* Adapter is closed, but initialized */
	if(tp->ssb.Parm[0] & LOBE_WIRE_FAULT)
	{
		printk(KERN_INFO "%s: Lobe Wire Fault, Reopen Adapter\n",
			dev->name);
		tp->MacStat.line_errors++;
	}

	if(tp->ssb.Parm[0] & RING_RECOVERY)
		printk(KERN_INFO "%s: Ring Recovery\n", dev->name);

	/* Counter overflow: read error log */
	if(tp->ssb.Parm[0] & COUNTER_OVERFLOW)
	{
		printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
		tms380tr_exec_cmd(dev, OC_READ_ERROR_LOG);
	}

	/* Adapter is closed, but initialized */
	if(tp->ssb.Parm[0] & REMOVE_RECEIVED)
		printk(KERN_INFO "%s: Remove Received, Reopen Adapter\n",
			dev->name);

	/* Adapter is closed, but initialized */
	if(tp->ssb.Parm[0] & AUTO_REMOVAL_ERROR)
		printk(KERN_INFO "%s: Auto Removal Error, Reopen Adapter\n",
			dev->name);

	if(tp->ssb.Parm[0] & HARD_ERROR)
		printk(KERN_INFO "%s: Hard Error\n", dev->name);

	if(tp->ssb.Parm[0] & SOFT_ERROR)
		printk(KERN_INFO "%s: Soft Error\n", dev->name);

	if(tp->ssb.Parm[0] & TRANSMIT_BEACON)
		printk(KERN_INFO "%s: Transmit Beacon\n", dev->name);

	if(tp->ssb.Parm[0] & SINGLE_STATION)
		printk(KERN_INFO "%s: Single Station\n", dev->name);

	/* Check if adapter has been closed */
	if(tp->ssb.Parm[0] & ADAPTER_CLOSED)
	{
		printk(KERN_INFO "%s: Adapter closed (Reopening),"
			"CurrentRingStat %x\n",
			dev->name, tp->CurrentRingStatus);
		/* Mark closed, then attempt an immediate reopen. */
		tp->AdapterOpenFlag = 0;
		tms380tr_open_adapter(dev);
	}
}

/*
 * Issued if adapter has encountered an unrecoverable hardware
 * or software error.
*/ static void tms380tr_chk_irq(struct net_device *dev) { int i; unsigned short AdapterCheckBlock[4]; struct net_local *tp = netdev_priv(dev); tp->AdapterOpenFlag = 0; /* Adapter closed now */ /* Page number of adapter memory */ SIFWRITEW(0x0001, SIFADX); /* Address offset */ SIFWRITEW(CHECKADDR, SIFADR); /* Reading 8 byte adapter check block. */ for(i = 0; i < 4; i++) AdapterCheckBlock[i] = SIFREADW(SIFINC); if(tms380tr_debug > 3) { printk(KERN_DEBUG "%s: AdapterCheckBlock: ", dev->name); for (i = 0; i < 4; i++) printk("%04X", AdapterCheckBlock[i]); printk("\n"); } switch(AdapterCheckBlock[0]) { case DIO_PARITY: printk(KERN_INFO "%s: DIO parity error\n", dev->name); break; case DMA_READ_ABORT: printk(KERN_INFO "%s DMA read operation aborted:\n", dev->name); switch (AdapterCheckBlock[1]) { case 0: printk(KERN_INFO "Timeout\n"); printk(KERN_INFO "Address: %04X %04X\n", AdapterCheckBlock[2], AdapterCheckBlock[3]); break; case 1: printk(KERN_INFO "Parity error\n"); printk(KERN_INFO "Address: %04X %04X\n", AdapterCheckBlock[2], AdapterCheckBlock[3]); break; case 2: printk(KERN_INFO "Bus error\n"); printk(KERN_INFO "Address: %04X %04X\n", AdapterCheckBlock[2], AdapterCheckBlock[3]); break; default: printk(KERN_INFO "Unknown error.\n"); break; } break; case DMA_WRITE_ABORT: printk(KERN_INFO "%s: DMA write operation aborted:\n", dev->name); switch (AdapterCheckBlock[1]) { case 0: printk(KERN_INFO "Timeout\n"); printk(KERN_INFO "Address: %04X %04X\n", AdapterCheckBlock[2], AdapterCheckBlock[3]); break; case 1: printk(KERN_INFO "Parity error\n"); printk(KERN_INFO "Address: %04X %04X\n", AdapterCheckBlock[2], AdapterCheckBlock[3]); break; case 2: printk(KERN_INFO "Bus error\n"); printk(KERN_INFO "Address: %04X %04X\n", AdapterCheckBlock[2], AdapterCheckBlock[3]); break; default: printk(KERN_INFO "Unknown error.\n"); break; } break; case ILLEGAL_OP_CODE: printk(KERN_INFO "%s: Illegal operation code in firmware\n", dev->name); /* Parm[0-3]: adapter internal register R13-R15 */ 
break; case PARITY_ERRORS: printk(KERN_INFO "%s: Adapter internal bus parity error\n", dev->name); /* Parm[0-3]: adapter internal register R13-R15 */ break; case RAM_DATA_ERROR: printk(KERN_INFO "%s: RAM data error\n", dev->name); /* Parm[0-1]: MSW/LSW address of RAM location. */ break; case RAM_PARITY_ERROR: printk(KERN_INFO "%s: RAM parity error\n", dev->name); /* Parm[0-1]: MSW/LSW address of RAM location. */ break; case RING_UNDERRUN: printk(KERN_INFO "%s: Internal DMA underrun detected\n", dev->name); break; case INVALID_IRQ: printk(KERN_INFO "%s: Unrecognized interrupt detected\n", dev->name); /* Parm[0-3]: adapter internal register R13-R15 */ break; case INVALID_ERROR_IRQ: printk(KERN_INFO "%s: Unrecognized error interrupt detected\n", dev->name); /* Parm[0-3]: adapter internal register R13-R15 */ break; case INVALID_XOP: printk(KERN_INFO "%s: Unrecognized XOP request detected\n", dev->name); /* Parm[0-3]: adapter internal register R13-R15 */ break; default: printk(KERN_INFO "%s: Unknown status", dev->name); break; } if(tms380tr_chipset_init(dev) == 1) { /* Restart of firmware successful */ tp->AdapterOpenFlag = 1; } } /* * Internal adapter pointer to RAM data are copied from adapter into * host system. */ static int tms380tr_read_ptr(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); unsigned short adapterram; tms380tr_read_ram(dev, (unsigned char *)&tp->intptrs.BurnedInAddrPtr, ADAPTER_INT_PTRS, 16); tms380tr_read_ram(dev, (unsigned char *)&adapterram, cpu_to_be16((unsigned short)tp->intptrs.AdapterRAMPtr), 2); return be16_to_cpu(adapterram); } /* * Reads a number of bytes from adapter to system memory. 
*/
/*
 * Read Length bytes from adapter RAM at the given 16-bit Address
 * (memory page 1) into Data, one 16-bit word at a time through the
 * auto-incrementing SIF data register.  The SIF address registers are
 * saved on entry and restored on exit so the caller's context is
 * preserved.
 */
static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data,
			unsigned short Address, int Length)
{
	int i;
	unsigned short old_sifadx, old_sifadr, InWord;

	/* Save the current values */
	old_sifadx = SIFREADW(SIFADX);
	old_sifadr = SIFREADW(SIFADR);

	/* Page number of adapter memory */
	SIFWRITEW(0x0001, SIFADX);
	/* Address offset in adapter RAM */
	SIFWRITEW(Address, SIFADR);

	/* Copy len byte from adapter memory to system data area.
	 * Each SIFINC read yields two bytes (high byte first); the loop
	 * breaks mid-word when Length is odd.
	 */
	i = 0;
	for(;;)
	{
		InWord = SIFREADW(SIFINC);

		*(Data + i) = HIBYTE(InWord);	/* Write first byte */
		if(++i == Length)		/* All is done break */
			break;

		*(Data + i) = LOBYTE(InWord);	/* Write second byte */
		if (++i == Length)		/* All is done break */
			break;
	}

	/* Restore original values */
	SIFWRITEW(old_sifadx, SIFADX);
	SIFWRITEW(old_sifadr, SIFADR);
}

/*
 * Cancel all queued packets in the transmission queue: unlink every
 * busy TPL, clear its VALID bit, unmap its DMA buffer and free the skb.
 */
static void tms380tr_cancel_tx_queue(struct net_local* tp)
{
	TPL *tpl;

	/*
	 * NOTE: There must not be an active TRANSMIT command pending, when
	 * this function is called.
	 */
	if(tp->TransmitCommandActive)
		return;

	for(;;)
	{
		tpl = tp->TplBusy;
		if(!tpl->BusyFlag)
			break;	/* no more busy TPLs in the list */

		/* "Remove" TPL from busy list. */
		tp->TplBusy = tpl->NextTPLPtr;
		tms380tr_write_tpl_status(tpl, 0);	/* Clear VALID bit */
		tpl->BusyFlag = 0;		/* "free" TPL */

		printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl);
		if (tpl->DMABuff)
			dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(tpl->Skb);
	}
}

/*
 * This function is called whenever a transmit interrupt is generated by the
 * adapter. For a command complete interrupt, it is checked if we have to
 * issue a new transmit command or not.
 */
static void tms380tr_tx_status_irq(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned char HighByte, HighAc, LowAc;
	TPL *tpl;

	/* NOTE: At this point the SSB from TRANSMIT STATUS is no longer
	 * available, because the CLEAR SSB command has already been issued.
	 *
	 * Process all complete transmissions.
*/ for(;;) { tpl = tp->TplBusy; if(!tpl->BusyFlag || (tpl->Status & (TX_VALID | TX_FRAME_COMPLETE)) != TX_FRAME_COMPLETE) { break; } /* "Remove" TPL from busy list. */ tp->TplBusy = tpl->NextTPLPtr ; /* Check the transmit status field only for directed frames*/ if(DIRECTED_FRAME(tpl) && (tpl->Status & TX_ERROR) == 0) { HighByte = GET_TRANSMIT_STATUS_HIGH_BYTE(tpl->Status); HighAc = GET_FRAME_STATUS_HIGH_AC(HighByte); LowAc = GET_FRAME_STATUS_LOW_AC(HighByte); if((HighAc != LowAc) || (HighAc == AC_NOT_RECOGNIZED)) { printk(KERN_DEBUG "%s: (DA=%08lX not recognized)\n", dev->name, *(unsigned long *)&tpl->MData[2+2]); } else { if(tms380tr_debug > 3) printk(KERN_DEBUG "%s: Directed frame tx'd\n", dev->name); } } else { if(!DIRECTED_FRAME(tpl)) { if(tms380tr_debug > 3) printk(KERN_DEBUG "%s: Broadcast frame tx'd\n", dev->name); } } tp->MacStat.tx_packets++; if (tpl->DMABuff) dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE); dev_kfree_skb_irq(tpl->Skb); tpl->BusyFlag = 0; /* "free" TPL */ } if(!tp->TplFree->NextTPLPtr->BusyFlag) netif_wake_queue(dev); } /* * Called if a frame receive interrupt is generated by the adapter. * Check if the frame is valid and indicate it to system. */ static void tms380tr_rcv_status_irq(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); unsigned char *ReceiveDataPtr; struct sk_buff *skb; unsigned int Length, Length2; RPL *rpl; RPL *SaveHead; dma_addr_t dmabuf; /* NOTE: At this point the SSB from RECEIVE STATUS is no longer * available, because the CLEAR SSB command has already been issued. * * Process all complete receives. */ for(;;) { rpl = tp->RplHead; if(rpl->Status & RX_VALID) break; /* RPL still in use by adapter */ /* Forward RPLHead pointer to next list. */ SaveHead = tp->RplHead; tp->RplHead = rpl->NextRPLPtr; /* Get the frame size (Byte swap for Intel). 
* Do this early (see workaround comment below) */ Length = be16_to_cpu(rpl->FrameSize); /* Check if the Frame_Start, Frame_End and * Frame_Complete bits are set. */ if((rpl->Status & VALID_SINGLE_BUFFER_FRAME) == VALID_SINGLE_BUFFER_FRAME) { ReceiveDataPtr = rpl->MData; /* Workaround for delayed write of FrameSize on ISA * (FrameSize is false but valid-bit is reset) * Frame size is set to zero when the RPL is freed. * Length2 is there because there have also been * cases where the FrameSize was partially written */ Length2 = be16_to_cpu(rpl->FrameSize); if(Length == 0 || Length != Length2) { tp->RplHead = SaveHead; break; /* Return to tms380tr_interrupt */ } tms380tr_update_rcv_stats(tp,ReceiveDataPtr,Length); if(tms380tr_debug > 3) printk(KERN_DEBUG "%s: Packet Length %04X (%d)\n", dev->name, Length, Length); /* Indicate the received frame to system the * adapter does the Source-Routing padding for * us. See: OpenOptions in tms380tr_init_opb() */ skb = rpl->Skb; if(rpl->SkbStat == SKB_UNAVAILABLE) { /* Try again to allocate skb */ skb = dev_alloc_skb(tp->MaxPacketSize); if(skb == NULL) { /* Update Stats ?? */ } else { skb_put(skb, tp->MaxPacketSize); rpl->SkbStat = SKB_DATA_COPY; ReceiveDataPtr = rpl->MData; } } if(skb && (rpl->SkbStat == SKB_DATA_COPY || rpl->SkbStat == SKB_DMA_DIRECT)) { if(rpl->SkbStat == SKB_DATA_COPY) skb_copy_to_linear_data(skb, ReceiveDataPtr, Length); /* Deliver frame to system */ rpl->Skb = NULL; skb_trim(skb,Length); skb->protocol = tr_type_trans(skb,dev); netif_rx(skb); } } else /* Invalid frame */ { if(rpl->Skb != NULL) dev_kfree_skb_irq(rpl->Skb); /* Skip list. */ if(rpl->Status & RX_START_FRAME) /* Frame start bit is set -> overflow. */ tp->MacStat.rx_errors++; } if (rpl->DMABuff) dma_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, DMA_TO_DEVICE); rpl->DMABuff = 0; /* Allocate new skb for rpl */ rpl->Skb = dev_alloc_skb(tp->MaxPacketSize); /* skb == NULL ? 
then use local buffer */ if(rpl->Skb == NULL) { rpl->SkbStat = SKB_UNAVAILABLE; rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer); rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex]; } else /* skb != NULL */ { rpl->Skb->dev = dev; skb_put(rpl->Skb, tp->MaxPacketSize); /* Data unreachable for DMA ? then use local buffer */ dmabuf = dma_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE); if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit)) { rpl->SkbStat = SKB_DATA_COPY; rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer); rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex]; } else { /* DMA directly in skb->data */ rpl->SkbStat = SKB_DMA_DIRECT; rpl->FragList[0].DataAddr = htonl(dmabuf); rpl->MData = rpl->Skb->data; rpl->DMABuff = dmabuf; } } rpl->FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize); rpl->FrameSize = 0; /* Pass the last RPL back to the adapter */ tp->RplTail->FrameSize = 0; /* Reset the CSTAT field in the list. */ tms380tr_write_rpl_status(tp->RplTail, RX_VALID | RX_FRAME_IRQ); /* Current RPL becomes last one in list. */ tp->RplTail = tp->RplTail->NextRPLPtr; /* Inform adapter about RPL valid. */ tms380tr_exec_sifcmd(dev, CMD_RX_VALID); } } /* * This function should be used whenever the status of any RPL must be * modified by the driver, because the compiler may otherwise change the * order of instructions such that writing the RPL status may be executed * at an undesirable time. When this function is used, the status is * always written when the function is called. */ static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status) { rpl->Status = Status; } /* * The function updates the statistic counters in mac->MacStat. * It differtiates between directed and broadcast/multicast ( ==functional) * frames. 
*/ static void tms380tr_update_rcv_stats(struct net_local *tp, unsigned char DataPtr[], unsigned int Length) { tp->MacStat.rx_packets++; tp->MacStat.rx_bytes += Length; /* Test functional bit */ if(DataPtr[2] & GROUP_BIT) tp->MacStat.multicast++; } static int tms380tr_set_mac_address(struct net_device *dev, void *addr) { struct net_local *tp = netdev_priv(dev); struct sockaddr *saddr = addr; if (tp->AdapterOpenFlag || tp->AdapterVirtOpenFlag) { printk(KERN_WARNING "%s: Cannot set MAC/LAA address while card is open\n", dev->name); return -EIO; } memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len); return 0; } #if TMS380TR_DEBUG > 0 /* * Dump Packet (data) */ static void tms380tr_dump(unsigned char *Data, int length) { int i, j; for (i = 0, j = 0; i < length / 8; i++, j += 8) { printk(KERN_DEBUG "%02x %02x %02x %02x %02x %02x %02x %02x\n", Data[j+0],Data[j+1],Data[j+2],Data[j+3], Data[j+4],Data[j+5],Data[j+6],Data[j+7]); } } #endif void tmsdev_term(struct net_device *dev) { struct net_local *tp; tp = netdev_priv(dev); dma_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local), DMA_BIDIRECTIONAL); } const struct net_device_ops tms380tr_netdev_ops = { .ndo_open = tms380tr_open, .ndo_stop = tms380tr_close, .ndo_start_xmit = tms380tr_send_packet, .ndo_tx_timeout = tms380tr_timeout, .ndo_get_stats = tms380tr_get_stats, .ndo_set_multicast_list = tms380tr_set_multicast_list, .ndo_set_mac_address = tms380tr_set_mac_address, }; EXPORT_SYMBOL(tms380tr_netdev_ops); int tmsdev_init(struct net_device *dev, struct device *pdev) { struct net_local *tms_local; memset(netdev_priv(dev), 0, sizeof(struct net_local)); tms_local = netdev_priv(dev); init_waitqueue_head(&tms_local->wait_for_tok_int); if (pdev->dma_mask) tms_local->dmalimit = *pdev->dma_mask; else return -ENOMEM; tms_local->pdev = pdev; tms_local->dmabuffer = dma_map_single(pdev, (void *)tms_local, sizeof(struct net_local), DMA_BIDIRECTIONAL); if (tms_local->dmabuffer + sizeof(struct net_local) > 
tms_local->dmalimit) { printk(KERN_INFO "%s: Memory not accessible for DMA\n", dev->name); tmsdev_term(dev); return -ENOMEM; } dev->netdev_ops = &tms380tr_netdev_ops; dev->watchdog_timeo = HZ; return 0; } EXPORT_SYMBOL(tms380tr_open); EXPORT_SYMBOL(tms380tr_close); EXPORT_SYMBOL(tms380tr_interrupt); EXPORT_SYMBOL(tmsdev_init); EXPORT_SYMBOL(tmsdev_term); EXPORT_SYMBOL(tms380tr_wait); #ifdef MODULE static struct module *TMS380_module = NULL; int init_module(void) { printk(KERN_DEBUG "%s", version); TMS380_module = &__this_module; return 0; } void cleanup_module(void) { TMS380_module = NULL; } #endif MODULE_LICENSE("GPL");
gpl-2.0
jcadduono/android_kernel_oneplus_msm8996
fs/stack.c
1871
2584
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/fs_stack.h>

/* does _NOT_ require i_mutex to be held.
 *
 * This function cannot be inlined since i_size_{read,write} is rather
 * heavy-weight on 32-bit systems
 *
 * Copies i_size and i_blocks from the lower (src) inode to the
 * stacking (dst) inode, taking only the locking each field needs on
 * the current architecture (none at all on 64-bit).
 */
void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
{
	loff_t i_size;
	blkcnt_t i_blocks;

	/*
	 * i_size_read() includes its own seqlocking and protection from
	 * preemption (see include/linux/fs.h): we need nothing extra for
	 * that here, and prefer to avoid nesting locks than attempt to keep
	 * i_size and i_blocks in sync together.
	 */
	i_size = i_size_read(src);

	/*
	 * But if CONFIG_LBDAF (on 32-bit), we ought to make an effort to
	 * keep the two halves of i_blocks in sync despite SMP or PREEMPT -
	 * though stat's generic_fillattr() doesn't bother, and we won't be
	 * applying quotas (where i_blocks does become important) at the
	 * upper level.
	 *
	 * We don't actually know what locking is used at the lower level;
	 * but if it's a filesystem that supports quotas, it will be using
	 * i_lock as in inode_add_bytes().
	 */
	/* sizeof comparison is a compile-time test for a 64-bit field on a
	 * 32-bit long: only then is the read of i_blocks non-atomic.
	 */
	if (sizeof(i_blocks) > sizeof(long))
		spin_lock(&src->i_lock);
	i_blocks = src->i_blocks;
	if (sizeof(i_blocks) > sizeof(long))
		spin_unlock(&src->i_lock);

	/*
	 * If CONFIG_SMP or CONFIG_PREEMPT on 32-bit, it's vital for
	 * fsstack_copy_inode_size() to hold some lock around
	 * i_size_write(), otherwise i_size_read() may spin forever (see
	 * include/linux/fs.h). We don't necessarily hold i_mutex when this
	 * is called, so take i_lock for that case.
	 *
	 * And if CONFIG_LBDAF (on 32-bit), continue our effort to keep the
	 * two halves of i_blocks in sync despite SMP or PREEMPT: use i_lock
	 * for that case too, and do both at once by combining the tests.
	 *
	 * There is none of this locking overhead in the 64-bit case.
	 */
	if (sizeof(i_size) > sizeof(long) ||
	    sizeof(i_blocks) > sizeof(long))
		spin_lock(&dst->i_lock);
	i_size_write(dst, i_size);
	dst->i_blocks = i_blocks;
	if (sizeof(i_size) > sizeof(long) ||
	    sizeof(i_blocks) > sizeof(long))
		spin_unlock(&dst->i_lock);
}
EXPORT_SYMBOL_GPL(fsstack_copy_inode_size);

/* copy all attributes */
/*
 * Copy the standard inode attributes (mode, owner, device, timestamps,
 * block size, flags and link count) from src to dest.  Size and block
 * counts are handled separately by fsstack_copy_inode_size() because
 * they need locking on 32-bit systems.
 */
void fsstack_copy_attr_all(struct inode *dest, const struct inode *src)
{
	dest->i_mode = src->i_mode;
	dest->i_uid = src->i_uid;
	dest->i_gid = src->i_gid;
	dest->i_rdev = src->i_rdev;
	dest->i_atime = src->i_atime;
	dest->i_mtime = src->i_mtime;
	dest->i_ctime = src->i_ctime;
	dest->i_blkbits = src->i_blkbits;
	dest->i_flags = src->i_flags;
	set_nlink(dest, src->i_nlink);
}
EXPORT_SYMBOL_GPL(fsstack_copy_attr_all);
gpl-2.0
CaptainThrowback/kernel_android-tegra-flounder-3.10
arch/h8300/kernel/signal.c
2127
11059
/* * linux/arch/h8300/kernel/signal.c * * Copyright (C) 1991, 1992 Linus Torvalds * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ /* * uClinux H8/300 support by Yoshinori Sato <ysato@users.sourceforge.jp> * and David McCullough <davidm@snapgear.com> * * Based on * Linux/m68k by Hamish Macdonald */ /* * ++roman (07/09/96): implemented signal stacks (specially for tosemu on * Atari :-) Current limitation: Only one sigstack can be active at one time. * If a second signal with SA_ONSTACK set arrives while working on a sigstack, * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested * signal handlers! */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/syscalls.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/highuid.h> #include <linux/personality.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/tracehook.h> #include <asm/setup.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/traps.h> #include <asm/ucontext.h> /* * Do a signal return; undo the signal stack. * * Keep the return code on the stack quadword aligned! * That makes the cache flush below easier. 
*/ struct sigframe { long dummy_er0; long dummy_vector; #if defined(CONFIG_CPU_H8S) short dummy_exr; #endif long dummy_pc; char *pretcode; unsigned char retcode[8]; unsigned long extramask[_NSIG_WORDS-1]; struct sigcontext sc; int sig; } __attribute__((aligned(2),packed)); struct rt_sigframe { long dummy_er0; long dummy_vector; #if defined(CONFIG_CPU_H8S) short dummy_exr; #endif long dummy_pc; char *pretcode; struct siginfo *pinfo; void *puc; unsigned char retcode[8]; struct siginfo info; struct ucontext uc; int sig; } __attribute__((aligned(2),packed)); static inline int restore_sigcontext(struct sigcontext *usc, int *pd0) { struct pt_regs *regs = current_pt_regs(); int err = 0; unsigned int ccr; unsigned int usp; unsigned int er0; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; #define COPY(r) err |= __get_user(regs->r, &usc->sc_##r) /* restore passed registers */ COPY(er1); COPY(er2); COPY(er3); COPY(er5); COPY(pc); ccr = regs->ccr & 0x10; COPY(ccr); #undef COPY regs->ccr &= 0xef; regs->ccr |= ccr; regs->orig_er0 = -1; /* disable syscall checks */ err |= __get_user(usp, &usc->sc_usp); wrusp(usp); err |= __get_user(er0, &usc->sc_er0); *pd0 = er0; return err; } asmlinkage int sys_sigreturn(void) { unsigned long usp = rdusp(); struct sigframe *frame = (struct sigframe *)(usp - 4); sigset_t set; int er0; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.sc_mask) || (_NSIG_WORDS > 1 && __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask)))) goto badframe; set_current_blocked(&set); if (restore_sigcontext(&frame->sc, &er0)) goto badframe; return er0; badframe: force_sig(SIGSEGV, current); return 0; } asmlinkage int sys_rt_sigreturn(void) { unsigned long usp = rdusp(); struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4); sigset_t set; int er0; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto 
badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; set_current_blocked(&set); if (restore_sigcontext(&frame->uc.uc_mcontext, &er0)) goto badframe; if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return er0; badframe: force_sig(SIGSEGV, current); return 0; } static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask) { int err = 0; err |= __put_user(regs->er0, &sc->sc_er0); err |= __put_user(regs->er1, &sc->sc_er1); err |= __put_user(regs->er2, &sc->sc_er2); err |= __put_user(regs->er3, &sc->sc_er3); err |= __put_user(regs->er4, &sc->sc_er4); err |= __put_user(regs->er5, &sc->sc_er5); err |= __put_user(regs->er6, &sc->sc_er6); err |= __put_user(rdusp(), &sc->sc_usp); err |= __put_user(regs->pc, &sc->sc_pc); err |= __put_user(regs->ccr, &sc->sc_ccr); err |= __put_user(mask, &sc->sc_mask); return err; } static inline void * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) { unsigned long usp; /* Default to using normal stack. */ usp = rdusp(); /* This is the X/Open sanctioned signal stack switching. */ if (ka->sa.sa_flags & SA_ONSTACK) { if (!sas_ss_flags(usp)) usp = current->sas_ss_sp + current->sas_ss_size; } return (void *)((usp - frame_size) & -8UL); } static int setup_frame (int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) { struct sigframe *frame; int err = 0; int usig; unsigned char *ret; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; usig = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? 
current_thread_info()->exec_domain->signal_invmap[sig] : sig; err |= __put_user(usig, &frame->sig); if (err) goto give_sigsegv; err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); if (err) goto give_sigsegv; if (_NSIG_WORDS > 1) { err |= copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask)); if (err) goto give_sigsegv; } ret = frame->retcode; if (ka->sa.sa_flags & SA_RESTORER) ret = (unsigned char *)(ka->sa.sa_restorer); else { /* sub.l er0,er0; mov.b #__NR_sigreturn,r0l; trapa #0 */ err |= __put_user(0x1a80f800 + (__NR_sigreturn & 0xff), (unsigned long *)(frame->retcode + 0)); err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4)); } /* Set up to return from userspace. */ err |= __put_user(ret, &frame->pretcode); if (err) goto give_sigsegv; /* Set up registers for signal handler */ wrusp ((unsigned long) frame); regs->pc = (unsigned long) ka->sa.sa_handler; regs->er0 = (current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig); regs->er1 = (unsigned long)&(frame->sc); regs->er5 = current->mm->start_data; /* GOT base */ return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe *frame; int err = 0; int usig; unsigned char *ret; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; usig = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig; err |= __put_user(usig, &frame->sig); if (err) goto give_sigsegv; err |= __put_user(&frame->info, &frame->pinfo); err |= __put_user(&frame->uc, &frame->puc); err |= copy_siginfo_to_user(&frame->info, info); if (err) goto give_sigsegv; /* Create the ucontext. 
*/ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __save_altstack(&frame->uc.uc_stack, rdusp()); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; /* Set up to return from userspace. */ ret = frame->retcode; if (ka->sa.sa_flags & SA_RESTORER) ret = (unsigned char *)(ka->sa.sa_restorer); else { /* sub.l er0,er0; mov.b #__NR_sigreturn,r0l; trapa #0 */ err |= __put_user(0x1a80f800 + (__NR_sigreturn & 0xff), (unsigned long *)(frame->retcode + 0)); err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4)); } err |= __put_user(ret, &frame->pretcode); if (err) goto give_sigsegv; /* Set up registers for signal handler */ wrusp ((unsigned long) frame); regs->pc = (unsigned long) ka->sa.sa_handler; regs->er0 = (current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig); regs->er1 = (unsigned long)&(frame->info); regs->er2 = (unsigned long)&frame->uc; regs->er5 = current->mm->start_data; /* GOT base */ return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } /* * OK, we're invoking a handler */ static void handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs * regs) { sigset_t *oldset = sigmask_to_save(); int ret; /* are we from a system call? 
*/ if (regs->orig_er0 >= 0) { switch (regs->er0) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->er0 = -EINTR; break; case -ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { regs->er0 = -EINTR; break; } /* fallthrough */ case -ERESTARTNOINTR: regs->er0 = regs->orig_er0; regs->pc -= 2; } } /* set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(sig, ka, info, oldset, regs); else ret = setup_frame(sig, ka, oldset, regs); if (!ret) signal_delivered(sig, info, ka, regs, 0); } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ static void do_signal(struct pt_regs *regs) { siginfo_t info; int signr; struct k_sigaction ka; /* * We want the common case to go fast, which * is why we may in certain cases get here from * kernel mode. Just return without doing anything * if so. */ if ((regs->ccr & 0x10)) return; current->thread.esp0 = (unsigned long) regs; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ handle_signal(signr, &info, &ka, regs); return; } /* Did we come from a system call? */ if (regs->orig_er0 >= 0) { /* Restart the system call - no handlers present */ if (regs->er0 == -ERESTARTNOHAND || regs->er0 == -ERESTARTSYS || regs->er0 == -ERESTARTNOINTR) { regs->er0 = regs->orig_er0; regs->pc -= 2; } if (regs->er0 == -ERESTART_RESTARTBLOCK){ regs->er0 = __NR_restart_syscall; regs->pc -= 2; } } /* If there's no signal to deliver, we just restore the saved mask. */ restore_saved_sigmask(); } asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags) { if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); } }
gpl-2.0
DroidTh3ory-xx/kernel_samsung_tuna_killr-air
drivers/media/video/cx18/cx18-ioctl.c
2383
33429
/* * cx18 ioctl system call * * Derived from ivtv-ioctl.c * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307 USA */ #include "cx18-driver.h" #include "cx18-io.h" #include "cx18-version.h" #include "cx18-mailbox.h" #include "cx18-i2c.h" #include "cx18-queue.h" #include "cx18-fileops.h" #include "cx18-vbi.h" #include "cx18-audio.h" #include "cx18-video.h" #include "cx18-streams.h" #include "cx18-ioctl.h" #include "cx18-gpio.h" #include "cx18-controls.h" #include "cx18-cards.h" #include "cx18-av-core.h" #include <media/tveeprom.h> #include <media/v4l2-chip-ident.h> u16 cx18_service2vbi(int type) { switch (type) { case V4L2_SLICED_TELETEXT_B: return CX18_SLICED_TYPE_TELETEXT_B; case V4L2_SLICED_CAPTION_525: return CX18_SLICED_TYPE_CAPTION_525; case V4L2_SLICED_WSS_625: return CX18_SLICED_TYPE_WSS_625; case V4L2_SLICED_VPS: return CX18_SLICED_TYPE_VPS; default: return 0; } } /* Check if VBI services are allowed on the (field, line) for the video std */ static int valid_service_line(int field, int line, int is_pal) { return (is_pal && line >= 6 && ((field == 0 && line <= 23) || (field == 1 && line <= 22))) || (!is_pal && line >= 10 && line < 22); } /* * For a (field, line, std) and inbound potential set of services for that 
/*
 * For a (field, line, std) and inbound potential set of services for that
 * line, return the first valid service of those passed in the incoming set
 * for that line in priority order:
 * CC, VPS, or WSS over TELETEXT for well known lines
 * TELETEXT, before VPS, before CC, before WSS, for other lines
 */
static u16 select_service_from_set(int field, int line, u16 set, int is_pal)
{
	/* Only services defined for this video standard are candidates. */
	const u16 std_services = is_pal ? V4L2_SLICED_VBI_625
					: V4L2_SLICED_VBI_525;
	u16 mask;

	set &= std_services;
	if (!set || !valid_service_line(field, line, is_pal))
		return 0;

	if (!is_pal) {
		/* NTSC: closed captions own line 21. */
		if (line == 21 && (set & V4L2_SLICED_CAPTION_525))
			return V4L2_SLICED_CAPTION_525;
	} else {
		/* PAL/SECAM: VPS and WSS have dedicated first-field lines. */
		if (field == 0) {
			if (line == 16 && (set & V4L2_SLICED_VPS))
				return V4L2_SLICED_VPS;
			if (line == 23 && (set & V4L2_SLICED_WSS_625))
				return V4L2_SLICED_WSS_625;
		}
		/* Nothing else is permitted on line 23. */
		if (line == 23)
			return 0;
	}

	/* Otherwise pick the lowest-numbered service bit still in the set. */
	for (mask = 1; mask; mask <<= 1) {
		if (set & mask)
			return mask;
	}
	return 0;
}
struct v4l2_format *fmt) { struct cx18_open_id *id = fh2id(fh); struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; pixfmt->width = cx->cxhdl.width; pixfmt->height = cx->cxhdl.height; pixfmt->colorspace = V4L2_COLORSPACE_SMPTE170M; pixfmt->field = V4L2_FIELD_INTERLACED; pixfmt->priv = 0; if (id->type == CX18_ENC_STREAM_TYPE_YUV) { pixfmt->pixelformat = s->pixelformat; /* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2))) UYUV YUV size is (Y=(h*720) + UV=(h*(720))) */ if (s->pixelformat == V4L2_PIX_FMT_HM12) pixfmt->sizeimage = pixfmt->height * 720 * 3 / 2; else pixfmt->sizeimage = pixfmt->height * 720 * 2; pixfmt->bytesperline = 720; } else { pixfmt->pixelformat = V4L2_PIX_FMT_MPEG; pixfmt->sizeimage = 128 * 1024; pixfmt->bytesperline = 0; } return 0; } static int cx18_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct cx18 *cx = fh2id(fh)->cx; struct v4l2_vbi_format *vbifmt = &fmt->fmt.vbi; vbifmt->sampling_rate = 27000000; vbifmt->offset = 248; /* FIXME - slightly wrong for both 50 & 60 Hz */ vbifmt->samples_per_line = vbi_active_samples - 4; vbifmt->sample_format = V4L2_PIX_FMT_GREY; vbifmt->start[0] = cx->vbi.start[0]; vbifmt->start[1] = cx->vbi.start[1]; vbifmt->count[0] = vbifmt->count[1] = cx->vbi.count; vbifmt->flags = 0; vbifmt->reserved[0] = 0; vbifmt->reserved[1] = 0; return 0; } static int cx18_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct cx18 *cx = fh2id(fh)->cx; struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced; /* sane, V4L2 spec compliant, defaults */ vbifmt->reserved[0] = 0; vbifmt->reserved[1] = 0; vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36; memset(vbifmt->service_lines, 0, sizeof(vbifmt->service_lines)); vbifmt->service_set = 0; /* * Fetch the configured service_lines and total service_set from the * digitizer/slicer. 
Note, cx18_av_vbi() wipes the passed in * fmt->fmt.sliced under valid calling conditions */ if (v4l2_subdev_call(cx->sd_av, vbi, g_sliced_fmt, &fmt->fmt.sliced)) return -EINVAL; /* Ensure V4L2 spec compliant output */ vbifmt->reserved[0] = 0; vbifmt->reserved[1] = 0; vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36; vbifmt->service_set = cx18_get_service_set(vbifmt); return 0; } static int cx18_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct cx18_open_id *id = fh2id(fh); struct cx18 *cx = id->cx; int w = fmt->fmt.pix.width; int h = fmt->fmt.pix.height; int min_h = 2; w = min(w, 720); w = max(w, 2); if (id->type == CX18_ENC_STREAM_TYPE_YUV) { /* YUV height must be a multiple of 32 */ h &= ~0x1f; min_h = 32; } h = min(h, cx->is_50hz ? 576 : 480); h = max(h, min_h); fmt->fmt.pix.width = w; fmt->fmt.pix.height = h; return 0; } static int cx18_try_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt) { return cx18_g_fmt_vbi_cap(file, fh, fmt); } static int cx18_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct cx18 *cx = fh2id(fh)->cx; struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced; vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36; vbifmt->reserved[0] = 0; vbifmt->reserved[1] = 0; /* If given a service set, expand it validly & clear passed in set */ if (vbifmt->service_set) cx18_expand_service_set(vbifmt, cx->is_50hz); /* Sanitize the service_lines, and compute the new set if any valid */ if (check_service_set(vbifmt, cx->is_50hz)) vbifmt->service_set = cx18_get_service_set(vbifmt); return 0; } static int cx18_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct cx18_open_id *id = fh2id(fh); struct cx18 *cx = id->cx; struct v4l2_mbus_framefmt mbus_fmt; struct cx18_stream *s = &cx->streams[id->type]; int ret; int w, h; ret = cx18_try_fmt_vid_cap(file, fh, fmt); if (ret) return ret; w = fmt->fmt.pix.width; h = fmt->fmt.pix.height; if 
(cx->cxhdl.width == w && cx->cxhdl.height == h && s->pixelformat == fmt->fmt.pix.pixelformat) return 0; if (atomic_read(&cx->ana_capturing) > 0) return -EBUSY; s->pixelformat = fmt->fmt.pix.pixelformat; mbus_fmt.width = cx->cxhdl.width = w; mbus_fmt.height = cx->cxhdl.height = h; mbus_fmt.code = V4L2_MBUS_FMT_FIXED; v4l2_subdev_call(cx->sd_av, video, s_mbus_fmt, &mbus_fmt); return cx18_g_fmt_vid_cap(file, fh, fmt); } static int cx18_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct cx18_open_id *id = fh2id(fh); struct cx18 *cx = id->cx; int ret; /* * Changing the Encoder's Raw VBI parameters won't have any effect * if any analog capture is ongoing */ if (!cx18_raw_vbi(cx) && atomic_read(&cx->ana_capturing) > 0) return -EBUSY; /* * Set the digitizer registers for raw active VBI. * Note cx18_av_vbi_wipes out a lot of the passed in fmt under valid * calling conditions */ ret = v4l2_subdev_call(cx->sd_av, vbi, s_raw_fmt, &fmt->fmt.vbi); if (ret) return ret; /* Store our new v4l2 (non-)sliced VBI state */ cx->vbi.sliced_in->service_set = 0; cx->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE; return cx18_g_fmt_vbi_cap(file, fh, fmt); } static int cx18_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct cx18_open_id *id = fh2id(fh); struct cx18 *cx = id->cx; int ret; struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced; cx18_try_fmt_sliced_vbi_cap(file, fh, fmt); /* * Changing the Encoder's Raw VBI parameters won't have any effect * if any analog capture is ongoing */ if (cx18_raw_vbi(cx) && atomic_read(&cx->ana_capturing) > 0) return -EBUSY; /* * Set the service_lines requested in the digitizer/slicer registers. 
* Note, cx18_av_vbi() wipes some "impossible" service lines in the * passed in fmt->fmt.sliced under valid calling conditions */ ret = v4l2_subdev_call(cx->sd_av, vbi, s_sliced_fmt, &fmt->fmt.sliced); if (ret) return ret; /* Store our current v4l2 sliced VBI settings */ cx->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE; memcpy(cx->vbi.sliced_in, vbifmt, sizeof(*cx->vbi.sliced_in)); return 0; } static int cx18_g_chip_ident(struct file *file, void *fh, struct v4l2_dbg_chip_ident *chip) { struct cx18 *cx = fh2id(fh)->cx; int err = 0; chip->ident = V4L2_IDENT_NONE; chip->revision = 0; switch (chip->match.type) { case V4L2_CHIP_MATCH_HOST: switch (chip->match.addr) { case 0: chip->ident = V4L2_IDENT_CX23418; chip->revision = cx18_read_reg(cx, 0xC72028); break; case 1: /* * The A/V decoder is always present, but in the rare * case that the card doesn't have analog, we don't * use it. We find it w/o using the cx->sd_av pointer */ cx18_call_hw(cx, CX18_HW_418_AV, core, g_chip_ident, chip); break; default: /* * Could return ident = V4L2_IDENT_UNKNOWN if we had * other host chips at higher addresses, but we don't */ err = -EINVAL; /* per V4L2 spec */ break; } break; case V4L2_CHIP_MATCH_I2C_DRIVER: /* If needed, returns V4L2_IDENT_AMBIGUOUS without extra work */ cx18_call_all(cx, core, g_chip_ident, chip); break; case V4L2_CHIP_MATCH_I2C_ADDR: /* * We could return V4L2_IDENT_UNKNOWN, but we don't do the work * to look if a chip is at the address with no driver. That's a * dangerous thing to do with EEPROMs anyway. 
*/ cx18_call_all(cx, core, g_chip_ident, chip); break; default: err = -EINVAL; break; } return err; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int cx18_cxc(struct cx18 *cx, unsigned int cmd, void *arg) { struct v4l2_dbg_register *regs = arg; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (regs->reg >= CX18_MEM_OFFSET + CX18_MEM_SIZE) return -EINVAL; regs->size = 4; if (cmd == VIDIOC_DBG_S_REGISTER) cx18_write_enc(cx, regs->val, regs->reg); else regs->val = cx18_read_enc(cx, regs->reg); return 0; } static int cx18_g_register(struct file *file, void *fh, struct v4l2_dbg_register *reg) { struct cx18 *cx = fh2id(fh)->cx; if (v4l2_chip_match_host(&reg->match)) return cx18_cxc(cx, VIDIOC_DBG_G_REGISTER, reg); /* FIXME - errors shouldn't be ignored */ cx18_call_all(cx, core, g_register, reg); return 0; } static int cx18_s_register(struct file *file, void *fh, struct v4l2_dbg_register *reg) { struct cx18 *cx = fh2id(fh)->cx; if (v4l2_chip_match_host(&reg->match)) return cx18_cxc(cx, VIDIOC_DBG_S_REGISTER, reg); /* FIXME - errors shouldn't be ignored */ cx18_call_all(cx, core, s_register, reg); return 0; } #endif static int cx18_querycap(struct file *file, void *fh, struct v4l2_capability *vcap) { struct cx18 *cx = fh2id(fh)->cx; strlcpy(vcap->driver, CX18_DRIVER_NAME, sizeof(vcap->driver)); strlcpy(vcap->card, cx->card_name, sizeof(vcap->card)); snprintf(vcap->bus_info, sizeof(vcap->bus_info), "PCI:%s", pci_name(cx->pci_dev)); vcap->version = CX18_DRIVER_VERSION; /* version */ vcap->capabilities = cx->v4l2_cap; /* capabilities */ return 0; } static int cx18_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin) { struct cx18 *cx = fh2id(fh)->cx; return cx18_get_audio_input(cx, vin->index, vin); } static int cx18_g_audio(struct file *file, void *fh, struct v4l2_audio *vin) { struct cx18 *cx = fh2id(fh)->cx; vin->index = cx->audio_input; return cx18_get_audio_input(cx, vin->index, vin); } static int cx18_s_audio(struct file *file, void *fh, struct v4l2_audio *vout) { 
struct cx18 *cx = fh2id(fh)->cx; if (vout->index >= cx->nof_audio_inputs) return -EINVAL; cx->audio_input = vout->index; cx18_audio_set_io(cx); return 0; } static int cx18_enum_input(struct file *file, void *fh, struct v4l2_input *vin) { struct cx18 *cx = fh2id(fh)->cx; /* set it to defaults from our table */ return cx18_get_input(cx, vin->index, vin); } static int cx18_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap) { struct cx18 *cx = fh2id(fh)->cx; if (cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; cropcap->bounds.top = cropcap->bounds.left = 0; cropcap->bounds.width = 720; cropcap->bounds.height = cx->is_50hz ? 576 : 480; cropcap->pixelaspect.numerator = cx->is_50hz ? 59 : 10; cropcap->pixelaspect.denominator = cx->is_50hz ? 54 : 11; cropcap->defrect = cropcap->bounds; return 0; } static int cx18_s_crop(struct file *file, void *fh, struct v4l2_crop *crop) { struct cx18_open_id *id = fh2id(fh); struct cx18 *cx = id->cx; if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; CX18_DEBUG_WARN("VIDIOC_S_CROP not implemented\n"); return -EINVAL; } static int cx18_g_crop(struct file *file, void *fh, struct v4l2_crop *crop) { struct cx18 *cx = fh2id(fh)->cx; if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; CX18_DEBUG_WARN("VIDIOC_G_CROP not implemented\n"); return -EINVAL; } static int cx18_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt) { static const struct v4l2_fmtdesc formats[] = { { 0, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0, "HM12 (YUV 4:1:1)", V4L2_PIX_FMT_HM12, { 0, 0, 0, 0 } }, { 1, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FMT_FLAG_COMPRESSED, "MPEG", V4L2_PIX_FMT_MPEG, { 0, 0, 0, 0 } }, { 2, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0, "UYVY 4:2:2", V4L2_PIX_FMT_UYVY, { 0, 0, 0, 0 } }, }; if (fmt->index > ARRAY_SIZE(formats) - 1) return -EINVAL; *fmt = formats[fmt->index]; return 0; } static int cx18_g_input(struct file *file, void *fh, unsigned int *i) { struct cx18 *cx = fh2id(fh)->cx; *i = 
cx->active_input; return 0; } int cx18_s_input(struct file *file, void *fh, unsigned int inp) { struct cx18_open_id *id = fh2id(fh); struct cx18 *cx = id->cx; if (inp >= cx->nof_inputs) return -EINVAL; if (inp == cx->active_input) { CX18_DEBUG_INFO("Input unchanged\n"); return 0; } CX18_DEBUG_INFO("Changing input from %d to %d\n", cx->active_input, inp); cx->active_input = inp; /* Set the audio input to whatever is appropriate for the input type. */ cx->audio_input = cx->card->video_inputs[inp].audio_index; /* prevent others from messing with the streams until we're finished changing inputs. */ cx18_mute(cx); cx18_video_set_io(cx); cx18_audio_set_io(cx); cx18_unmute(cx); return 0; } static int cx18_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf) { struct cx18 *cx = fh2id(fh)->cx; if (vf->tuner != 0) return -EINVAL; cx18_call_all(cx, tuner, g_frequency, vf); return 0; } int cx18_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf) { struct cx18_open_id *id = fh2id(fh); struct cx18 *cx = id->cx; if (vf->tuner != 0) return -EINVAL; cx18_mute(cx); CX18_DEBUG_INFO("v4l2 ioctl: set frequency %d\n", vf->frequency); cx18_call_all(cx, tuner, s_frequency, vf); cx18_unmute(cx); return 0; } static int cx18_g_std(struct file *file, void *fh, v4l2_std_id *std) { struct cx18 *cx = fh2id(fh)->cx; *std = cx->std; return 0; } int cx18_s_std(struct file *file, void *fh, v4l2_std_id *std) { struct cx18_open_id *id = fh2id(fh); struct cx18 *cx = id->cx; if ((*std & V4L2_STD_ALL) == 0) return -EINVAL; if (*std == cx->std) return 0; if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags) || atomic_read(&cx->ana_capturing) > 0) { /* Switching standard would turn off the radio or mess with already running streams, prevent that by returning EBUSY. */ return -EBUSY; } cx->std = *std; cx->is_60hz = (*std & V4L2_STD_525_60) ? 1 : 0; cx->is_50hz = !cx->is_60hz; cx2341x_handler_set_50hz(&cx->cxhdl, cx->is_50hz); cx->cxhdl.width = 720; cx->cxhdl.height = cx->is_50hz ? 
/*
 * _cx18_process_idx_data - unpack firmware index entries from one buffer.
 * @buf: buffer holding raw little-endian cx18_enc_idx_entry records,
 *       consumed starting at buf->readpos
 * @idx: user-visible index being filled, starting at idx->entries
 *
 * Converts whole entries (offset, 33-bit PTS, length, frame-type flags)
 * into v4l2_enc_idx_entry form until either the buffer runs out of full
 * entries or @idx reaches V4L2_ENC_IDX_ENTRIES.  Advances buf->readpos
 * past everything consumed (including any trailing partial entry, which
 * is discarded) and returns the number of bytes consumed.
 */
static int _cx18_process_idx_data(struct cx18_buffer *buf,
				  struct v4l2_enc_idx *idx)
{
	int consumed, remaining;
	struct v4l2_enc_idx_entry *e_idx;
	struct cx18_enc_idx_entry *e_buf;

	/* Frame type lookup: 1=I, 2=P, 4=B */
	const int mapping[8] = {
		-1, V4L2_ENC_IDX_FRAME_I, V4L2_ENC_IDX_FRAME_P, -1,
		V4L2_ENC_IDX_FRAME_B, -1, -1, -1
	};

	/*
	 * Assumption here is that a buf holds an integral number of
	 * struct cx18_enc_idx_entry objects and is properly aligned.
	 * This is enforced by the module options on IDX buffer sizes.
	 */
	remaining = buf->bytesused - buf->readpos;
	consumed = 0;
	e_idx = &idx->entry[idx->entries];
	e_buf = (struct cx18_enc_idx_entry *) &buf->buf[buf->readpos];

	while (remaining >= sizeof(struct cx18_enc_idx_entry) &&
	       idx->entries < V4L2_ENC_IDX_ENTRIES) {

		/* 64-bit byte offset split across two LE32 words. */
		e_idx->offset = (((u64) le32_to_cpu(e_buf->offset_high)) << 32)
				| le32_to_cpu(e_buf->offset_low);

		/* PTS is 33 bits: 1 bit from pts_high + 32 bits pts_low. */
		e_idx->pts = (((u64) (le32_to_cpu(e_buf->pts_high) & 1)) << 32)
			     | le32_to_cpu(e_buf->pts_low);

		e_idx->length = le32_to_cpu(e_buf->length);

		/* Low 3 flag bits select the frame type; -1 entries in the
		 * mapping table mark values the firmware should not emit. */
		e_idx->flags = mapping[le32_to_cpu(e_buf->flags) & 0x7];

		e_idx->reserved[0] = 0;
		e_idx->reserved[1] = 0;

		idx->entries++;
		e_idx = &idx->entry[idx->entries];
		e_buf++;

		remaining -= sizeof(struct cx18_enc_idx_entry);
		consumed += sizeof(struct cx18_enc_idx_entry);
	}

	/* Swallow any partial entries at the end, if there are any */
	if (remaining > 0 && remaining < sizeof(struct cx18_enc_idx_entry))
		consumed += remaining;

	buf->readpos += consumed;
	return consumed;
}
*/ mdl->readpos = mdl->bytesused; return 0; } list_for_each_entry_from(mdl->curr_buf, &mdl->buf_list, list) { /* Skip any empty buffers in the MDL */ if (mdl->curr_buf->readpos >= mdl->curr_buf->bytesused) continue; mdl->readpos += _cx18_process_idx_data(mdl->curr_buf, idx); /* exit when MDL drained or request satisfied */ if (idx->entries >= V4L2_ENC_IDX_ENTRIES || mdl->curr_buf->readpos < mdl->curr_buf->bytesused || mdl->readpos >= mdl->bytesused) break; } return 0; } static int cx18_g_enc_index(struct file *file, void *fh, struct v4l2_enc_idx *idx) { struct cx18 *cx = fh2id(fh)->cx; struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_IDX]; s32 tmp; struct cx18_mdl *mdl; if (!cx18_stream_enabled(s)) /* Module options inhibited IDX stream */ return -EINVAL; /* Compute the best case number of entries we can buffer */ tmp = s->buffers - s->bufs_per_mdl * CX18_ENC_STREAM_TYPE_IDX_FW_MDL_MIN; if (tmp <= 0) tmp = 1; tmp = tmp * s->buf_size / sizeof(struct cx18_enc_idx_entry); /* Fill out the header of the return structure */ idx->entries = 0; idx->entries_cap = tmp; memset(idx->reserved, 0, sizeof(idx->reserved)); /* Pull IDX MDLs and buffers from q_full and populate the entries */ do { mdl = cx18_dequeue(s, &s->q_full); if (mdl == NULL) /* No more IDX data right now */ break; /* Extract the Index entry data from the MDL and buffers */ cx18_process_idx_data(s, mdl, idx); if (mdl->readpos < mdl->bytesused) { /* We finished with data remaining, push the MDL back */ cx18_push(s, mdl, &s->q_full); break; } /* We drained this MDL, schedule it to go to the firmware */ cx18_enqueue(s, mdl, &s->q_free); } while (idx->entries < V4L2_ENC_IDX_ENTRIES); /* Tell the work handler to send free IDX MDLs to the firmware */ cx18_stream_load_fw_queue(s); return 0; } static struct videobuf_queue *cx18_vb_queue(struct cx18_open_id *id) { struct videobuf_queue *q = NULL; struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; switch (s->vb_type) { case 
V4L2_BUF_TYPE_VIDEO_CAPTURE: q = &s->vbuf_q; break; case V4L2_BUF_TYPE_VBI_CAPTURE: break; default: break; } return q; } static int cx18_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct cx18_open_id *id = file->private_data; struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; /* Start the hardware only if we're the video device */ if ((s->vb_type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && (s->vb_type != V4L2_BUF_TYPE_VBI_CAPTURE)) return -EINVAL; if (id->type != CX18_ENC_STREAM_TYPE_YUV) return -EINVAL; /* Establish a buffer timeout */ mod_timer(&s->vb_timeout, msecs_to_jiffies(2000) + jiffies); return videobuf_streamon(cx18_vb_queue(id)); } static int cx18_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct cx18_open_id *id = file->private_data; struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; /* Start the hardware only if we're the video device */ if ((s->vb_type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && (s->vb_type != V4L2_BUF_TYPE_VBI_CAPTURE)) return -EINVAL; if (id->type != CX18_ENC_STREAM_TYPE_YUV) return -EINVAL; return videobuf_streamoff(cx18_vb_queue(id)); } static int cx18_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *rb) { struct cx18_open_id *id = file->private_data; struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; if ((s->vb_type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && (s->vb_type != V4L2_BUF_TYPE_VBI_CAPTURE)) return -EINVAL; return videobuf_reqbufs(cx18_vb_queue(id), rb); } static int cx18_querybuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct cx18_open_id *id = file->private_data; struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; if ((s->vb_type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && (s->vb_type != V4L2_BUF_TYPE_VBI_CAPTURE)) return -EINVAL; return videobuf_querybuf(cx18_vb_queue(id), b); } static int cx18_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct cx18_open_id *id = 
file->private_data; struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; if ((s->vb_type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && (s->vb_type != V4L2_BUF_TYPE_VBI_CAPTURE)) return -EINVAL; return videobuf_qbuf(cx18_vb_queue(id), b); } static int cx18_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct cx18_open_id *id = file->private_data; struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; if ((s->vb_type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && (s->vb_type != V4L2_BUF_TYPE_VBI_CAPTURE)) return -EINVAL; return videobuf_dqbuf(cx18_vb_queue(id), b, file->f_flags & O_NONBLOCK); } static int cx18_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc) { struct cx18_open_id *id = fh2id(fh); struct cx18 *cx = id->cx; u32 h; switch (enc->cmd) { case V4L2_ENC_CMD_START: CX18_DEBUG_IOCTL("V4L2_ENC_CMD_START\n"); enc->flags = 0; return cx18_start_capture(id); case V4L2_ENC_CMD_STOP: CX18_DEBUG_IOCTL("V4L2_ENC_CMD_STOP\n"); enc->flags &= V4L2_ENC_CMD_STOP_AT_GOP_END; cx18_stop_capture(id, enc->flags & V4L2_ENC_CMD_STOP_AT_GOP_END); break; case V4L2_ENC_CMD_PAUSE: CX18_DEBUG_IOCTL("V4L2_ENC_CMD_PAUSE\n"); enc->flags = 0; if (!atomic_read(&cx->ana_capturing)) return -EPERM; if (test_and_set_bit(CX18_F_I_ENC_PAUSED, &cx->i_flags)) return 0; h = cx18_find_handle(cx); if (h == CX18_INVALID_TASK_HANDLE) { CX18_ERR("Can't find valid task handle for " "V4L2_ENC_CMD_PAUSE\n"); return -EBADFD; } cx18_mute(cx); cx18_vapi(cx, CX18_CPU_CAPTURE_PAUSE, 1, h); break; case V4L2_ENC_CMD_RESUME: CX18_DEBUG_IOCTL("V4L2_ENC_CMD_RESUME\n"); enc->flags = 0; if (!atomic_read(&cx->ana_capturing)) return -EPERM; if (!test_and_clear_bit(CX18_F_I_ENC_PAUSED, &cx->i_flags)) return 0; h = cx18_find_handle(cx); if (h == CX18_INVALID_TASK_HANDLE) { CX18_ERR("Can't find valid task handle for " "V4L2_ENC_CMD_RESUME\n"); return -EBADFD; } cx18_vapi(cx, CX18_CPU_CAPTURE_RESUME, 1, h); cx18_unmute(cx); break; default: CX18_DEBUG_IOCTL("Unknown cmd 
%d\n", enc->cmd); return -EINVAL; } return 0; } static int cx18_try_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc) { struct cx18 *cx = fh2id(fh)->cx; switch (enc->cmd) { case V4L2_ENC_CMD_START: CX18_DEBUG_IOCTL("V4L2_ENC_CMD_START\n"); enc->flags = 0; break; case V4L2_ENC_CMD_STOP: CX18_DEBUG_IOCTL("V4L2_ENC_CMD_STOP\n"); enc->flags &= V4L2_ENC_CMD_STOP_AT_GOP_END; break; case V4L2_ENC_CMD_PAUSE: CX18_DEBUG_IOCTL("V4L2_ENC_CMD_PAUSE\n"); enc->flags = 0; break; case V4L2_ENC_CMD_RESUME: CX18_DEBUG_IOCTL("V4L2_ENC_CMD_RESUME\n"); enc->flags = 0; break; default: CX18_DEBUG_IOCTL("Unknown cmd %d\n", enc->cmd); return -EINVAL; } return 0; } static int cx18_log_status(struct file *file, void *fh) { struct cx18 *cx = fh2id(fh)->cx; struct v4l2_input vidin; struct v4l2_audio audin; int i; CX18_INFO("================= START STATUS CARD #%d " "=================\n", cx->instance); CX18_INFO("Version: %s Card: %s\n", CX18_VERSION, cx->card_name); if (cx->hw_flags & CX18_HW_TVEEPROM) { struct tveeprom tv; cx18_read_eeprom(cx, &tv); } cx18_call_all(cx, core, log_status); cx18_get_input(cx, cx->active_input, &vidin); cx18_get_audio_input(cx, cx->audio_input, &audin); CX18_INFO("Video Input: %s\n", vidin.name); CX18_INFO("Audio Input: %s\n", audin.name); mutex_lock(&cx->gpio_lock); CX18_INFO("GPIO: direction 0x%08x, value 0x%08x\n", cx->gpio_dir, cx->gpio_val); mutex_unlock(&cx->gpio_lock); CX18_INFO("Tuner: %s\n", test_bit(CX18_F_I_RADIO_USER, &cx->i_flags) ? 
"Radio" : "TV"); v4l2_ctrl_handler_log_status(&cx->cxhdl.hdl, cx->v4l2_dev.name); CX18_INFO("Status flags: 0x%08lx\n", cx->i_flags); for (i = 0; i < CX18_MAX_STREAMS; i++) { struct cx18_stream *s = &cx->streams[i]; if (s->video_dev == NULL || s->buffers == 0) continue; CX18_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n", s->name, s->s_flags, atomic_read(&s->q_full.depth) * s->bufs_per_mdl * 100 / s->buffers, (s->buffers * s->buf_size) / 1024, s->buffers); } CX18_INFO("Read MPEG/VBI: %lld/%lld bytes\n", (long long)cx->mpg_data_received, (long long)cx->vbi_data_inserted); CX18_INFO("================== END STATUS CARD #%d " "==================\n", cx->instance); return 0; } static long cx18_default(struct file *file, void *fh, bool valid_prio, int cmd, void *arg) { struct cx18 *cx = fh2id(fh)->cx; switch (cmd) { case VIDIOC_INT_RESET: { u32 val = *(u32 *)arg; if ((val == 0) || (val & 0x01)) cx18_call_hw(cx, CX18_HW_GPIO_RESET_CTRL, core, reset, (u32) CX18_GPIO_RESET_Z8F0811); break; } default: return -EINVAL; } return 0; } long cx18_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct video_device *vfd = video_devdata(filp); struct cx18_open_id *id = file2id(filp); struct cx18 *cx = id->cx; long res; mutex_lock(&cx->serialize_lock); if (cx18_debug & CX18_DBGFLG_IOCTL) vfd->debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG; res = video_ioctl2(filp, cmd, arg); vfd->debug = 0; mutex_unlock(&cx->serialize_lock); return res; } static const struct v4l2_ioctl_ops cx18_ioctl_ops = { .vidioc_querycap = cx18_querycap, .vidioc_s_audio = cx18_s_audio, .vidioc_g_audio = cx18_g_audio, .vidioc_enumaudio = cx18_enumaudio, .vidioc_enum_input = cx18_enum_input, .vidioc_cropcap = cx18_cropcap, .vidioc_s_crop = cx18_s_crop, .vidioc_g_crop = cx18_g_crop, .vidioc_g_input = cx18_g_input, .vidioc_s_input = cx18_s_input, .vidioc_g_frequency = cx18_g_frequency, .vidioc_s_frequency = cx18_s_frequency, .vidioc_s_tuner = cx18_s_tuner, .vidioc_g_tuner = 
cx18_g_tuner, .vidioc_g_enc_index = cx18_g_enc_index, .vidioc_g_std = cx18_g_std, .vidioc_s_std = cx18_s_std, .vidioc_log_status = cx18_log_status, .vidioc_enum_fmt_vid_cap = cx18_enum_fmt_vid_cap, .vidioc_encoder_cmd = cx18_encoder_cmd, .vidioc_try_encoder_cmd = cx18_try_encoder_cmd, .vidioc_g_fmt_vid_cap = cx18_g_fmt_vid_cap, .vidioc_g_fmt_vbi_cap = cx18_g_fmt_vbi_cap, .vidioc_g_fmt_sliced_vbi_cap = cx18_g_fmt_sliced_vbi_cap, .vidioc_s_fmt_vid_cap = cx18_s_fmt_vid_cap, .vidioc_s_fmt_vbi_cap = cx18_s_fmt_vbi_cap, .vidioc_s_fmt_sliced_vbi_cap = cx18_s_fmt_sliced_vbi_cap, .vidioc_try_fmt_vid_cap = cx18_try_fmt_vid_cap, .vidioc_try_fmt_vbi_cap = cx18_try_fmt_vbi_cap, .vidioc_try_fmt_sliced_vbi_cap = cx18_try_fmt_sliced_vbi_cap, .vidioc_g_sliced_vbi_cap = cx18_g_sliced_vbi_cap, .vidioc_g_chip_ident = cx18_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = cx18_g_register, .vidioc_s_register = cx18_s_register, #endif .vidioc_default = cx18_default, .vidioc_streamon = cx18_streamon, .vidioc_streamoff = cx18_streamoff, .vidioc_reqbufs = cx18_reqbufs, .vidioc_querybuf = cx18_querybuf, .vidioc_qbuf = cx18_qbuf, .vidioc_dqbuf = cx18_dqbuf, }; void cx18_set_funcs(struct video_device *vdev) { vdev->ioctl_ops = &cx18_ioctl_ops; }
gpl-2.0
AOKP-SGS2/android_kernel_samsung_espresso
drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
2639
27283
/******************************************************************************

  Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA  02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  James P. Ketrenos <ipw2100-admin@linux.intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************

  Few modifications for Realtek's Wi-Fi drivers by
  Andrea Merello <andreamrl@tiscali.it>

  A special thanks goes to Realtek for their support !

******************************************************************************/

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
#include <linux/if_vlan.h>

#include "ieee80211.h"

/*
 * 802.11 data frame on the air:
 *
 *   ctrl(2) dur(2) addr1(6) addr2(6) addr3(6) seq(2) [frame data 0..2312] fcs(4)
 *
 * "Frame data" starts with an 802.2/SNAP header (8 bytes):
 *   DSAP 0xAA, SSAP 0xAA, ctrl 0x03 (UI), 3-byte OUI, 2-byte ethertype,
 * followed by the IP packet.  When WEP is enabled the payload is wrapped
 * with a 4-byte IV prefix and 4-byte ICV postfix.
 *
 * If the 802.3 payload exceeds ieee->fts it is split into N fragments of
 * size ieee->fts; only the first fragment carries the SNAP header, and
 * encryption overhead (IV/ICV) further reduces the per-fragment payload.
 */

/* SNAP OUIs: 802.1H bridge-tunnel encapsulation vs. plain RFC 1042. */
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };

/*
 * Write an 802.2/SNAP header followed by the 16-bit protocol id @h_proto
 * into @data.  IPX (0x8137) and AppleTalk AARP (0x80f3) use the 802.1H
 * bridge-tunnel OUI; everything else uses RFC 1042 encapsulation.
 *
 * Returns the number of bytes written (SNAP_SIZE + 2).
 */
static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
{
	struct ieee80211_snap_hdr *snap = (struct ieee80211_snap_hdr *)data;
	const u8 *oui;

	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;	/* UI frame */

	if (h_proto == 0x8137 || h_proto == 0x80f3)
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	/* NOTE(review): assumes data + SNAP_SIZE is u16-aligned, as the
	 * original code did -- confirm on alignment-strict platforms. */
	*(u16 *)(data + SNAP_SIZE) = htons(h_proto);

	return SNAP_SIZE + sizeof(u16);
}

/*
 * Encrypt one TX fragment in place using the current TX key's crypto ops.
 * @hdr_len bytes of 802.11 header at the front of @frag are left in the
 * clear; the crypto backend inserts the IV between header and payload.
 *
 * Returns 0 on success, -1 if no crypto is configured, TKIP
 * countermeasures are active, or the backend reports an error.
 */
int ieee80211_encrypt_fragment(
	struct ieee80211_device *ieee,
	struct sk_buff *frag,
	int hdr_len)
{
	struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
	int res;

	if (!(crypt && crypt->ops)) {
		/* __func__ replaces the deprecated __FUNCTION__ */
		printk("=========>%s(), crypt is null\n", __func__);
		return -1;
	}

#ifdef CONFIG_IEEE80211_CRYPT_TKIP
	/* crypt/crypt->ops are already known non-NULL here, so the old
	 * redundant re-check has been dropped. */
	if (ieee->tkip_countermeasures &&
	    strcmp(crypt->ops->name, "TKIP") == 0) {
		struct ieee80211_hdr *header =
			(struct ieee80211_hdr *)frag->data;

		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
			       "TX packet to %pM\n",
			       ieee->dev->name, header->addr1);
		}
		return -1;
	}
#endif
	/* To encrypt, frame format is:
	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */

	// PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption.
	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here. */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
		       ieee->dev->name, frag->len);
		ieee->ieee_stats.tx_discards++;
		return -1;
	}

	return 0;
}

/*
 * Release a TXB container.  NOTE(review): the fragment skbs are not freed
 * here -- the consumers of the txb appear to own them; confirm ownership
 * against the xmit paths before changing this.
 */
void ieee80211_txb_free(struct ieee80211_txb *txb)
{
	if (unlikely(!txb))
		return;
	kfree(txb);
}

/*
 * Allocate a TXB holding @nr_frags fragment skbs of @txb_size bytes each.
 * Returns NULL if the container or any fragment allocation fails (all
 * partial allocations are unwound).
 */
struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
					  int gfp_mask)
{
	struct ieee80211_txb *txb;
	int i;

	/* kzalloc replaces the original kmalloc + memset pair */
	txb = kzalloc(sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
		      gfp_mask);
	if (!txb)
		return NULL;

	txb->nr_frags = nr_frags;
	txb->frag_size = txb_size;

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = dev_alloc_skb(txb_size);
		if (unlikely(!txb->fragments[i])) {
			i--;
			break;
		}
		memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
	}

	/* a fragment allocation failed: free everything allocated so far */
	if (unlikely(i != nr_frags)) {
		while (i >= 0)
			dev_kfree_skb_any(txb->fragments[i--]);
		kfree(txb);
		return NULL;
	}

	return txb;
}

// Classify the to-be send data packet
// Need to acquire the sent queue index.
/*
 * Map the IPv4 TOS byte of an outgoing packet to an 802.11e user
 * priority (0..7).  Non-IPv4 traffic always gets priority 0.
 */
static int ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

	// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
	ip = ip_hdr(skb);
	switch (ip->tos & 0xfc) {	/* top 6 bits of TOS select priority */
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}

/* True when 12-bit sequence number a precedes b, modulo 4096. */
#define SN_LESS(a, b) (((a-b)&0x800)!=0)

/*
 * Decide whether this QoS data frame may be aggregated into an A-MPDU
 * and fill tcb_desc's aggregation fields.  Starts the ADDBA process for
 * a TS that has no admitted Block Ack record yet; a ForcedAMPDUMode
 * setting can override the negotiated result.
 */
void ieee80211_tx_query_agg_cap(struct ieee80211_device* ieee, struct sk_buff* skb, cb_desc* tcb_desc)
{
	PRT_HIGH_THROUGHPUT	pHTInfo = ieee->pHTInfo;
	PTX_TS_RECORD		pTxTs = NULL;
	struct ieee80211_hdr_1addr* hdr = (struct ieee80211_hdr_1addr*)skb->data;

	if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
		return;
	if (!IsQoSDataFrame(skb->data))
		return;

	/* group-addressed frames are never aggregated */
	if (is_multicast_ether_addr(hdr->addr1) || is_broadcast_ether_addr(hdr->addr1))
		return;
	//check packet and mode later
#ifdef TO_DO_LIST
	if(pTcb->PacketLength >= 4096)
		return;
	// For RTL819X, if pairwisekey = wep/tkip, we don't aggrregation.
	if(!Adapter->HalFunc.GetNmodeSupportBySecCfgHandler(Adapter))
		return;
#endif
	if(!ieee->GetNmodeSupportBySecCfg(ieee->dev))
	{
		return;
	}
	if(pHTInfo->bCurrentAMPDUEnable)
	{
		if (!GetTs(ieee, (PTS_COMMON_INFO*)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true))
		{
			printk("===>can't get TS\n");
			return;
		}
		if (pTxTs->TxAdmittedBARecord.bValid == false)
		{
			/* no admitted BA session yet: kick off ADDBA */
			TsStartAddBaProcess(ieee, pTxTs);
			goto FORCED_AGG_SETTING;
		}
		else if (pTxTs->bUsingBa == false)
		{
			/* start using the BA session once our next sequence
			 * number has reached the negotiated start */
			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum, (pTxTs->TxCurSeq+1)%4096))
				pTxTs->bUsingBa = true;
			else
				goto FORCED_AGG_SETTING;
		}

		if (ieee->iw_mode == IW_MODE_INFRA)
		{
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
		}
	}
FORCED_AGG_SETTING:
	/* manual override of the negotiated aggregation settings */
	switch(pHTInfo->ForcedAMPDUMode )
	{
	case HT_AGG_AUTO:
		break;
	case HT_AGG_FORCE_ENABLE:
		tcb_desc->bAMPDUEnable = true;
		tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
		tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
		break;
	case HT_AGG_FORCE_DISABLE:
		tcb_desc->bAMPDUEnable = false;
		tcb_desc->ampdu_density = 0;
		tcb_desc->ampdu_factor = 0;
		break;
	}
	return;
}

/*
 * Select long vs. short PLCP preamble.  data_rate == 2 (1 Mbps in units
 * of 500 kbps) must always use the long preamble.
 * (Function name typo "qurey" is the exported symbol -- do not rename.)
 */
extern void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device* ieee, cb_desc* tcb_desc)
{
	tcb_desc->bUseShortPreamble = false;
	if (tcb_desc->data_rate == 2)
	{//// 1M can only use Long Preamble. 11B spec
		return;
	}
	else if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
	{
		tcb_desc->bUseShortPreamble = true;
	}
	return;
}

/*
 * Set bUseShortGI from the current HT configuration: forced short GI
 * wins; otherwise use the short-GI capability matching the current
 * channel bandwidth (20 vs 40 MHz).
 */
extern void ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
	PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;

	tcb_desc->bUseShortGI = false;

	if(!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
		return;

	if(pHTInfo->bForcedShortGI)
	{
		tcb_desc->bUseShortGI = true;
		return;
	}

	if((pHTInfo->bCurBW40MHz==true) && pHTInfo->bCurShortGI40MHz)
		tcb_desc->bUseShortGI = true;
	else if((pHTInfo->bCurBW40MHz==false) && pHTInfo->bCurShortGI20MHz)
		tcb_desc->bUseShortGI = true;
}

/*
 * Decide whether this packet is sent at 40 MHz (bPacketBW).  Only HT
 * rates (bit 7 of data_rate set) on unicast frames qualify, and only
 * when both RX and TX are currently in 40 MHz mode.
 */
void ieee80211_query_BandwidthMode(struct ieee80211_device* ieee, cb_desc *tcb_desc)
{
	PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;

	tcb_desc->bPacketBW = false;

	if(!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
		return;

	if(tcb_desc->bMulticast || tcb_desc->bBroadcast)
		return;

	if((tcb_desc->data_rate & 0x80)==0) // If using legacy rate, it shall use 20MHz channel.
		return;
	//BandWidthAutoSwitch is for auto switch to 20 or 40 in long distance
	if(pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
		tcb_desc->bPacketBW = true;
	return;
}

/*
 * Choose the protection mechanism (RTS/CTS, CTS-to-self, or none) for a
 * unicast data frame, based on operating mode (11b/g vs 11n), the RTS
 * threshold, ERP protection, HT operation mode, A-MPDU use, and IOT
 * workarounds.  Results are written into tcb_desc.
 */
void ieee80211_query_protectionmode(struct ieee80211_device* ieee, cb_desc* tcb_desc, struct sk_buff* skb)
{
	// Common Settings
	tcb_desc->bRTSSTBC			= false;
	tcb_desc->bRTSUseShortGI		= false; // Since protection frames are always sent by legacy rate, ShortGI will never be used.
	tcb_desc->bCTSEnable			= false; // Most of protection using RTS/CTS
	tcb_desc->RTSSC				= 0;	// 20MHz: Don't care;  40MHz: Duplicate.
	tcb_desc->bRTSBW			= false; // RTS frame bandwidth is always 20MHz

	if(tcb_desc->bBroadcast || tcb_desc->bMulticast)//only unicast frame will use rts/cts
		return;

	if (is_broadcast_ether_addr(skb->data+16))  //check addr3 as infrastructure add3 is DA.
		return;

	if (ieee->mode < IEEE_N_24G)  //b, g mode
	{
			// (1) RTS_Threshold is compared to the MPDU, not MSDU.
			// (2) If there are more than one frag in  this MSDU, only the first frag uses protection frame.
			//		Other fragments are protected by previous fragment.
			//		So we only need to check the length of first fragment.
		if (skb->len > ieee->rts)
		{
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		}
		else if (ieee->current_network.buseprotection)
		{
			// Use CTS-to-SELF in protection mode.
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		}
		//otherwise return;
		return;
	}
	else
	{// 11n High throughput case.
		PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
		while (true)
		{
			//check ERP protection
			if (ieee->current_network.buseprotection)
			{// CTS-to-SELF
				tcb_desc->bRTSEnable = true;
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				break;
			}
			//check HT op mode
			if(pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT)
			{
				u8 HTOpMode = pHTInfo->CurrentOpMode;
				if((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) ||
					(!pHTInfo->bCurBW40MHz && HTOpMode == 3) )
				{
					tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
					tcb_desc->bRTSEnable = true;
					break;
				}
			}
			//check rts
			if (skb->len > ieee->rts)
			{
				tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
				tcb_desc->bRTSEnable = true;
				break;
			}
			//to do list: check MIMO power save condition.
			//check AMPDU aggregation for TXOP
			if(tcb_desc->bAMPDUEnable)
			{
				tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
				// According to 8190 design, firmware sends CF-End only if RTS/CTS is enabled. However, it degrads
				// throughput around 10M, so we disable of this mechanism. 2007.08.03 by Emily
				tcb_desc->bRTSEnable = false;
				break;
			}
			//check IOT action
			if(pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF)
			{
				tcb_desc->bCTSEnable	= true;
				tcb_desc->rts_rate  =	MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			}
			// Totally no protection case!!
			goto NO_PROTECTION;
		}
	}
	// For test , CTS replace with RTS
	if( 0 )
	{
		tcb_desc->bCTSEnable	= true;
		tcb_desc->rts_rate = MGN_24M;
		tcb_desc->bRTSEnable 	= true;
	}
	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	if (ieee->mode == IW_MODE_MASTER)
			goto NO_PROTECTION;
	return;
NO_PROTECTION:
	tcb_desc->bRTSEnable	= false;
	tcb_desc->bCTSEnable	= false;
	tcb_desc->rts_rate		= 0;
	tcb_desc->RTSSC			= 0;
	tcb_desc->bRTSBW		= false;
}

/*
 * Copy the device-level rate-fallback / driver-assigned-rate policy into
 * the per-packet descriptor and pick the default RATR index for
 * infrastructure and ad-hoc modes.
 */
void ieee80211_txrate_selectmode(struct ieee80211_device* ieee, cb_desc* tcb_desc)
{
#ifdef TO_DO_LIST
	if(!IsDataFrame(pFrame))
	{
		pTcb->bTxDisableRateFallBack = TRUE;
		pTcb->bTxUseDriverAssingedRate = TRUE;
		pTcb->RATRIndex = 7;
		return;
	}

	if(pMgntInfo->ForcedDataRate!= 0)
	{
		pTcb->bTxDisableRateFallBack = TRUE;
		pTcb->bTxUseDriverAssingedRate = TRUE;
		return;
	}
#endif
	if(ieee->bTxDisableRateFallBack)
		tcb_desc->bTxDisableRateFallBack = true;

	if(ieee->bTxUseDriverAssingedRate)
		tcb_desc->bTxUseDriverAssingedRate = true;
	if(!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate)
	{
		if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
			tcb_desc->RATRIndex = 0;
	}
}

/*
 * Advance the per-TS transmit sequence number (mod 4096) for a unicast
 * QoS data frame addressed to @dst.  Group-addressed and non-QoS frames
 * are left alone.
 */
void ieee80211_query_seqnum(struct ieee80211_device*ieee, struct sk_buff* skb, u8* dst)
{
	if (is_multicast_ether_addr(dst) || is_broadcast_ether_addr(dst))
		return;
	if (IsQoSDataFrame(skb->data)) //we deal qos data only
	{
		PTX_TS_RECORD pTS = NULL;
		if (!GetTs(ieee, (PTS_COMMON_INFO*)(&pTS), dst, skb->priority, TX_DIR, true))
		{
			return;
		}
		pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
	}
}

/*
 * Network-device transmit entry point: convert an 802.3 frame into a TXB
 * of one or more 802.11 fragments (SNAP on the first, optional
 * encryption, sequence numbering), fill the first fragment's cb_desc
 * (rates, aggregation, bandwidth, protection), then hand the TXB to the
 * softmac queue or the driver's hard_start_xmit.
 *
 * Runs under ieee->lock (irqsave).  Returns 0 on success/consumed, 1 on
 * TXB allocation failure (queue is stopped, skb NOT freed).
 * In raw_tx mode the skb is assumed to already carry an 802.11 header.
 */
int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_device *ieee = netdev_priv(dev);
	struct ieee80211_txb *txb = NULL;
	struct ieee80211_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct ieee80211_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;

	struct ieee80211_crypt_data* crypt;

	cb_desc *tcb_desc;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, dont' bother
	 * creating it... */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
	   ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}


	if(likely(ieee->raw_tx == 0)){
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			ieee->dev->name, skb->len);
			goto success;
		}

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		crypt = ieee->crypt[ieee->tx_keyidx];

		/* EAPOL (802.1X) frames bypass host encryption */
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;

		if (!encrypt && ieee->ieee802_1x &&
		ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
	#ifdef CONFIG_IEEE80211_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
	#endif

		/* Save source and destination addresses */
		memcpy(&dest, skb->data, ETH_ALEN);
		memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP;
		else
			fc = IEEE80211_FTYPE_DATA;

		//if(ieee->current_network.QoS_Enable)
		if(qos_actived)
			fc |= IEEE80211_STYPE_QOS_DATA;
		else
			fc |= IEEE80211_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= IEEE80211_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			Addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			Addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		* and broadcast are not fragmented) */
		if (is_multicast_ether_addr(header.addr1) ||
		is_broadcast_ether_addr(header.addr1)) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		}
		else {
			frag_size = ieee->fts;//default:392
			qos_ctl = 0;
		}

		//if (ieee->current_network.QoS_Enable)
		if(qos_actived)
		{
			hdr_len = IEEE80211_3ADDR_LEN + 2;

			skb->priority = ieee80211_classify(skb, &ieee->current_network);
			qos_ctl |= skb->priority; //set in the ieee80211_classify
			header.qos_ctl = cpu_to_le16(qos_ctl & IEEE80211_QOS_TID);
		} else {
			hdr_len = IEEE80211_3ADDR_LEN;
		}
		/* Determine amount of payload per fragment.  Regardless of if
		* this stack is providing the full 802.11 header, one will
		* eventually be affixed to this fragment -- so we must account for
		* it when determining the amount of payload space. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
			bytes_per_frag -= IEEE80211_FCS_LEN;

		/* Each fragment may need to have room for encryptiong pre/postfix */
		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
				crypt->ops->extra_postfix_len;

		/* Number of fragments is the total bytes_per_frag /
		* payload_per_fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the reserve
		* and full fragment bytes (bytes_per_frag doesn't include prefix,
		* postfix, header, FCS, etc.) */
		txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		//if (ieee->current_network.QoS_Enable)
		if(qos_actived)
		{
			txb->queue_index = UP2AC(skb->priority);
		} else {
			txb->queue_index = WME_AC_BK;
		}

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
			if(qos_actived){
				skb_frag->priority = skb->priority;//UP2AC(skb->priority);
				tcb_desc->queue_index =  UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BK;
				tcb_desc->queue_index = WME_AC_BK;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt){
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
			}
			else
			{
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the MOREFRAGS
			* bit to the frame control */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | IEEE80211_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment takes the remaining length */
				bytes = bytes_last_frag;
			}
			//if(ieee->current_network.QoS_Enable)
			if(qos_actived)
			{
				// add 1 only indicate to corresponding seq number control 2006/7/12
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
			} else {
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}

			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				ieee80211_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in order
			* to insert the IV between the header and the payload */
			if (encrypt)
				ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
			if (ieee->config &
			(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		/* bump the per-queue sequence counter (12-bit, wraps at 0xFFF) */
		if(qos_actived)
		{
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	}else{
		/* raw_tx path: copy the pre-built 802.11 frame verbatim */
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			ieee->dev->name, skb->len);
			goto success;
		}

		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
		if(!txb){
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
	}

 success:
//WB add to fill data tcb_desc here. only first fragment is considered, need to change, and you may remove to other place.
	if (txb)
	{
		cb_desc *tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		if (is_multicast_ether_addr(header.addr1))
			tcb_desc->bMulticast = 1;
		if (is_broadcast_ether_addr(header.addr1))
			tcb_desc->bBroadcast = 1;
		ieee80211_txrate_selectmode(ieee, tcb_desc);
		if ( tcb_desc->bMulticast ||  tcb_desc->bBroadcast)
			tcb_desc->data_rate = ieee->basic_rate;
		else
			//tcb_desc->data_rate = CURRENT_RATE(ieee->current_network.mode, ieee->rate, ieee->HTCurrentOperaRate);
			tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);
		ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc);
		ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
		ieee80211_query_HTCapShortGI(ieee, tcb_desc);
		ieee80211_query_BandwidthMode(ieee, tcb_desc);
		ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
		ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1);
//		IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, txb->fragments[0]->data, txb->fragments[0]->len);
		//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, tcb_desc, sizeof(cb_desc));
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
			ieee80211_softmac_xmit(txb, ieee);
		}else{
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			ieee80211_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}

EXPORT_SYMBOL(ieee80211_txb_free);
gpl-2.0
thewisenerd/kernel_sprout
arch/arm/mach-imx/mach-kzm_arm11_01.c
2639
7624
/*
 * KZM-ARM11-01 support
 *  Copyright (C) 2009  Yoichi Yuasa <yuasa@linux-mips.org>
 *
 * based on code for MX31ADS,
 *  Copyright (C) 2000 Deep Blue Solutions Ltd
 *  Copyright (C) 2002 Shane Nay (shane@minirl.com)
 *  Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/smsc911x.h>
#include <linux/types.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>

#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/memory.h>
#include <asm/setup.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>

#include "common.h"
#include "devices-imx31.h"
#include "hardware.h"
#include "iomux-mx3.h"

/*
 * Map a physical address to its static virtual mapping: try the CS4
 * then CS5 chip-select windows, falling back to the generic on-chip
 * module mapping.
 */
#define KZM_ARM11_IO_ADDRESS(x) (IOMEM( \
	IMX_IO_P2V_MODULE(x, MX31_CS4) ?: \
	IMX_IO_P2V_MODULE(x, MX31_CS5)) ?: \
	MX31_IO_ADDRESS(x))

/*
 * KZM-ARM11-01 Board Control Registers on FPGA
 */
#define	KZM_ARM11_CTL1		(MX31_CS4_BASE_ADDR + 0x1000)
#define	KZM_ARM11_CTL2		(MX31_CS4_BASE_ADDR + 0x1001)
#define	KZM_ARM11_RSW1		(MX31_CS4_BASE_ADDR + 0x1002)
#define	KZM_ARM11_BACK_LIGHT	(MX31_CS4_BASE_ADDR + 0x1004)
#define	KZM_ARM11_FPGA_REV	(MX31_CS4_BASE_ADDR + 0x1008)
#define	KZM_ARM11_7SEG_LED	(MX31_CS4_BASE_ADDR + 0x1010)
#define	KZM_ARM11_LEDS		(MX31_CS4_BASE_ADDR + 0x1020)
#define	KZM_ARM11_DIPSW2	(MX31_CS4_BASE_ADDR + 0x1003)

/*
 * External UART for touch panel on FPGA
 */
#define	KZM_ARM11_16550		(MX31_CS4_BASE_ADDR + 0x1050)

#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
/*
 * KZM-ARM11-01 has an external UART on FPGA
 */
static struct plat_serial8250_port serial_platform_data[] = {
	{
		.membase	= KZM_ARM11_IO_ADDRESS(KZM_ARM11_16550),
		.mapbase	= KZM_ARM11_16550,
		/* irq number is run-time assigned */
		.irqflags	= IRQ_TYPE_EDGE_RISING,
		.uartclk	= 14745600,
		.regshift	= 0,
		.iotype		= UPIO_MEM,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
				  UPF_BUGGY_UART,
	},
	{},
};

static struct resource serial8250_resources[] = {
	{
		.start	= KZM_ARM11_16550,
		.end	= KZM_ARM11_16550 + 0x10,
		.flags	= IORESOURCE_MEM,
	},
	{
		/* irq number is run-time assigned */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device serial_device = {
	.name		= "serial8250",
	.id		= PLAT8250_DEV_PLATFORM,
	.dev		= {
		.platform_data	= serial_platform_data,
	},
	.num_resources	= ARRAY_SIZE(serial8250_resources),
	.resource	= serial8250_resources,
};

/*
 * Route the FPGA UART interrupt through GPIO 1-1, unmask it in the
 * FPGA control register and register the 8250 platform device.
 * NOTE(review): gpio_request() return value is not checked -- matches
 * the original code; confirm before hardening.
 */
static int __init kzm_init_ext_uart(void)
{
	u8 tmp;

	/*
	 * GPIO 1-1: external UART interrupt line
	 */
	mxc_iomux_mode(IOMUX_MODE(MX31_PIN_GPIO1_1, IOMUX_CONFIG_GPIO));
	gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO1_1), "ext-uart-int");
	gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO1_1));

	/*
	 * Unmask UART interrupt
	 */
	tmp = __raw_readb(KZM_ARM11_IO_ADDRESS(KZM_ARM11_CTL1));
	tmp |= 0x2;
	__raw_writeb(tmp, KZM_ARM11_IO_ADDRESS(KZM_ARM11_CTL1));

	serial_platform_data[0].irq =
			gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_1));
	serial8250_resources[1].start =
			gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_1));
	serial8250_resources[1].end =
			gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_1));

	return platform_device_register(&serial_device);
}
#else
static inline int kzm_init_ext_uart(void)
{
	return 0;
}
#endif

/*
 * SMSC LAN9118
 */
#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
static struct smsc911x_platform_config kzm_smsc9118_config = {
	.phy_interface	= PHY_INTERFACE_MODE_MII,
	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_HIGH,
	.irq_type	= SMSC911X_IRQ_TYPE_PUSH_PULL,
	.flags		= SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
};

static struct resource kzm_smsc9118_resources[] = {
	{
		.start	= MX31_CS5_BASE_ADDR,
		.end	= MX31_CS5_BASE_ADDR + SZ_128K - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		/* irq number is run-time assigned */
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
	},
};

static struct platform_device kzm_smsc9118_device = {
	.name		= "smsc911x",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(kzm_smsc9118_resources),
	.resource	= kzm_smsc9118_resources,
	.dev		= {
		.platform_data	= &kzm_smsc9118_config,
	},
};

/* Dummy fixed regulators required by the smsc911x driver. */
static struct regulator_consumer_supply dummy_supplies[] = {
	REGULATOR_SUPPLY("vdd33a", "smsc911x"),
	REGULATOR_SUPPLY("vddvario", "smsc911x"),
};

/*
 * Configure GPIO 1-2 as the LAN9118 interrupt, register the dummy
 * supplies and the smsc911x platform device.
 */
static int __init kzm_init_smsc9118(void)
{
	/*
	 * GPIO 1-2: SMSC9118 interrupt line
	 */
	mxc_iomux_mode(IOMUX_MODE(MX31_PIN_GPIO1_2, IOMUX_CONFIG_GPIO));
	gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO1_2), "smsc9118-int");
	gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO1_2));

	regulator_register_fixed(0, dummy_supplies,
				 ARRAY_SIZE(dummy_supplies));

	kzm_smsc9118_resources[1].start =
			gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_2));
	kzm_smsc9118_resources[1].end =
			gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_2));

	return platform_device_register(&kzm_smsc9118_device);
}
#else
static inline int kzm_init_smsc9118(void)
{
	return 0;
}
#endif

#if defined(CONFIG_SERIAL_IMX) || defined(CONFIG_SERIAL_IMX_MODULE)
static const struct imxuart_platform_data uart_pdata __initconst = {
	.flags = IMXUART_HAVE_RTSCTS,
};

/* Register the two on-chip i.MX31 UARTs with RTS/CTS enabled. */
static void __init kzm_init_imx_uart(void)
{
	imx31_add_imx_uart0(&uart_pdata);

	imx31_add_imx_uart1(&uart_pdata);
}
#else
static inline void kzm_init_imx_uart(void)
{
}
#endif

/* IOMUX configuration for the UART1/UART2 pin groups. */
static int kzm_pins[] __initdata = {
	MX31_PIN_CTS1__CTS1,
	MX31_PIN_RTS1__RTS1,
	MX31_PIN_TXD1__TXD1,
	MX31_PIN_RXD1__RXD1,
	MX31_PIN_DCD_DCE1__DCD_DCE1,
	MX31_PIN_RI_DCE1__RI_DCE1,
	MX31_PIN_DSR_DCE1__DSR_DCE1,
	MX31_PIN_DTR_DCE1__DTR_DCE1,
	MX31_PIN_CTS2__CTS2,
	MX31_PIN_RTS2__RTS2,
	MX31_PIN_TXD2__TXD2,
	MX31_PIN_RXD2__RXD2,
	MX31_PIN_DCD_DTE1__DCD_DTE2,
	MX31_PIN_RI_DTE1__RI_DTE2,
	MX31_PIN_DSR_DTE1__DSR_DTE2,
	MX31_PIN_DTR_DTE1__DTR_DTE2,
};

/*
 * Board specific initialization.
 */
static void __init kzm_board_init(void)
{
	imx31_soc_init();

	mxc_iomux_setup_multiple_pins(kzm_pins,
				      ARRAY_SIZE(kzm_pins), "kzm");
	kzm_init_ext_uart();
	kzm_init_smsc9118();
	kzm_init_imx_uart();

	pr_info("Clock input source is 26MHz\n");
}

/*
 * This structure defines static mappings for the kzm-arm11-01 board.
 */
static struct map_desc kzm_io_desc[] __initdata = {
	{
		.virtual	= (unsigned long)MX31_CS4_BASE_ADDR_VIRT,
		.pfn		= __phys_to_pfn(MX31_CS4_BASE_ADDR),
		.length		= MX31_CS4_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= (unsigned long)MX31_CS5_BASE_ADDR_VIRT,
		.pfn		= __phys_to_pfn(MX31_CS5_BASE_ADDR),
		.length		= MX31_CS5_SIZE,
		.type		= MT_DEVICE
	},
};

/*
 * Set up static virtual mappings.
 */
static void __init kzm_map_io(void)
{
	mx31_map_io();
	iotable_init(kzm_io_desc, ARRAY_SIZE(kzm_io_desc));
}

/* The board runs from a 26 MHz clock input. */
static void __init kzm_timer_init(void)
{
	mx31_clocks_init(26000000);
}

MACHINE_START(KZM_ARM11_01, "Kyoto Microcomputer Co., Ltd. KZM-ARM11-01")
	.atag_offset = 0x100,
	.map_io = kzm_map_io,
	.init_early = imx31_init_early,
	.init_irq = mx31_init_irq,
	.handle_irq = imx31_handle_irq,
	.init_time = kzm_timer_init,
	.init_machine = kzm_board_init,
	.restart = mxc_restart,
MACHINE_END
gpl-2.0
arjen75/ics-lge-kernel-msm7x27-chick
drivers/video/backlight/tosa_lcd.c
2639
6232
/* * LCD / Backlight control code for Sharp SL-6000x (tosa) * * Copyright (c) 2005 Dirk Opfer * Copyright (c) 2007,2008 Dmitry Baryshkov * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/spi/spi.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/lcd.h> #include <linux/fb.h> #include <asm/mach/sharpsl_param.h> #include <mach/tosa.h> #define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) #define TG_REG0_VQV 0x0001 #define TG_REG0_COLOR 0x0002 #define TG_REG0_UD 0x0004 #define TG_REG0_LR 0x0008 #define DAC_BASE 0x4e struct tosa_lcd_data { struct spi_device *spi; struct lcd_device *lcd; struct i2c_client *i2c; int lcd_power; bool is_vga; }; static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data) { u8 buf[1]; struct spi_message msg; struct spi_transfer xfer = { .len = 1, .cs_change = 1, .tx_buf = buf, }; buf[0] = ((adrs & 0x07) << 5) | (data & 0x1f); spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); return spi_sync(spi, &msg); } int tosa_bl_enable(struct spi_device *spi, int enable) { /* bl_enable GP04=1 otherwise GP04=0*/ return tosa_tg_send(spi, TG_GPODR2, enable? 
0x01 : 0x00); } EXPORT_SYMBOL(tosa_bl_enable); static void tosa_lcd_tg_init(struct tosa_lcd_data *data) { /* TG on */ gpio_set_value(TOSA_GPIO_TG_ON, 0); mdelay(60); /* delayed 0clk TCTL signal for VGA */ tosa_tg_send(data->spi, TG_TPOSCTL, 0x00); /* GPOS0=powercontrol, GPOS1=GPIO, GPOS2=TCTL */ tosa_tg_send(data->spi, TG_GPOSR, 0x02); } static void tosa_lcd_tg_on(struct tosa_lcd_data *data) { struct spi_device *spi = data->spi; int value = TG_REG0_COLOR | TG_REG0_UD | TG_REG0_LR; if (data->is_vga) value |= TG_REG0_VQV; tosa_tg_send(spi, TG_PNLCTL, value); /* TG LCD pannel power up */ tosa_tg_send(spi, TG_PINICTL,0x4); mdelay(50); /* TG LCD GVSS */ tosa_tg_send(spi, TG_PINICTL,0x0); if (!data->i2c) { /* after the pannel is powered up the first time, we can access the i2c bus */ /* so probe for the DAC */ struct i2c_adapter *adap = i2c_get_adapter(0); struct i2c_board_info info = { .type = "tosa-bl", .addr = DAC_BASE, .platform_data = data->spi, }; data->i2c = i2c_new_device(adap, &info); } } static void tosa_lcd_tg_off(struct tosa_lcd_data *data) { struct spi_device *spi = data->spi; /* TG LCD VHSA off */ tosa_tg_send(spi, TG_PINICTL,0x4); mdelay(50); /* TG LCD signal off */ tosa_tg_send(spi, TG_PINICTL,0x6); mdelay(50); /* TG Off */ gpio_set_value(TOSA_GPIO_TG_ON, 1); mdelay(100); } int tosa_lcd_set_power(struct lcd_device *lcd, int power) { struct tosa_lcd_data *data = lcd_get_data(lcd); if (POWER_IS_ON(power) && !POWER_IS_ON(data->lcd_power)) tosa_lcd_tg_on(data); if (!POWER_IS_ON(power) && POWER_IS_ON(data->lcd_power)) tosa_lcd_tg_off(data); data->lcd_power = power; return 0; } static int tosa_lcd_get_power(struct lcd_device *lcd) { struct tosa_lcd_data *data = lcd_get_data(lcd); return data->lcd_power; } static int tosa_lcd_set_mode(struct lcd_device *lcd, struct fb_videomode *mode) { struct tosa_lcd_data *data = lcd_get_data(lcd); if (mode->xres == 320 || mode->yres == 320) data->is_vga = false; else data->is_vga = true; if (POWER_IS_ON(data->lcd_power)) 
tosa_lcd_tg_on(data); return 0; } static struct lcd_ops tosa_lcd_ops = { .set_power = tosa_lcd_set_power, .get_power = tosa_lcd_get_power, .set_mode = tosa_lcd_set_mode, }; static int __devinit tosa_lcd_probe(struct spi_device *spi) { int ret; struct tosa_lcd_data *data; data = kzalloc(sizeof(struct tosa_lcd_data), GFP_KERNEL); if (!data) return -ENOMEM; data->is_vga = true; /* default to VGA mode */ /* * bits_per_word cannot be configured in platform data */ spi->bits_per_word = 8; ret = spi_setup(spi); if (ret < 0) goto err_spi; data->spi = spi; dev_set_drvdata(&spi->dev, data); ret = gpio_request(TOSA_GPIO_TG_ON, "tg #pwr"); if (ret < 0) goto err_gpio_tg; mdelay(60); ret = gpio_direction_output(TOSA_GPIO_TG_ON, 0); if (ret < 0) goto err_gpio_dir; mdelay(60); tosa_lcd_tg_init(data); tosa_lcd_tg_on(data); data->lcd = lcd_device_register("tosa-lcd", &spi->dev, data, &tosa_lcd_ops); if (IS_ERR(data->lcd)) { ret = PTR_ERR(data->lcd); data->lcd = NULL; goto err_register; } return 0; err_register: tosa_lcd_tg_off(data); err_gpio_dir: gpio_free(TOSA_GPIO_TG_ON); err_gpio_tg: dev_set_drvdata(&spi->dev, NULL); err_spi: kfree(data); return ret; } static int __devexit tosa_lcd_remove(struct spi_device *spi) { struct tosa_lcd_data *data = dev_get_drvdata(&spi->dev); lcd_device_unregister(data->lcd); if (data->i2c) i2c_unregister_device(data->i2c); tosa_lcd_tg_off(data); gpio_free(TOSA_GPIO_TG_ON); dev_set_drvdata(&spi->dev, NULL); kfree(data); return 0; } #ifdef CONFIG_PM static int tosa_lcd_suspend(struct spi_device *spi, pm_message_t state) { struct tosa_lcd_data *data = dev_get_drvdata(&spi->dev); tosa_lcd_tg_off(data); return 0; } static int tosa_lcd_resume(struct spi_device *spi) { struct tosa_lcd_data *data = dev_get_drvdata(&spi->dev); tosa_lcd_tg_init(data); if (POWER_IS_ON(data->lcd_power)) tosa_lcd_tg_on(data); else tosa_lcd_tg_off(data); return 0; } #else #define tosa_lcd_suspend NULL #define tosa_lcd_reume NULL #endif static struct spi_driver tosa_lcd_driver = { 
.driver = { .name = "tosa-lcd", .owner = THIS_MODULE, }, .probe = tosa_lcd_probe, .remove = __devexit_p(tosa_lcd_remove), .suspend = tosa_lcd_suspend, .resume = tosa_lcd_resume, }; static int __init tosa_lcd_init(void) { return spi_register_driver(&tosa_lcd_driver); } static void __exit tosa_lcd_exit(void) { spi_unregister_driver(&tosa_lcd_driver); } module_init(tosa_lcd_init); module_exit(tosa_lcd_exit); MODULE_AUTHOR("Dmitry Baryshkov"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("LCD/Backlight control for Sharp SL-6000 PDA"); MODULE_ALIAS("spi:tosa-lcd");
gpl-2.0
KINGbabasula/android_kernel_samsung_i9105P
arch/m32r/kernel/traps.c
3919
8233
/*
 * linux/arch/m32r/kernel/traps.c
 *
 * Copyright (C) 2001, 2002 Hirokazu Takata, Hiroyuki Kondo,
 * Hitoshi Yamamoto
 */

/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/processor.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>

#include <asm/smp.h>

#include <linux/module.h>

/* Low-level trap entry points implemented in assembly (entry.S). */
asmlinkage void alignment_check(void);
asmlinkage void ei_handler(void);
asmlinkage void rie_handler(void);
asmlinkage void debug_trap(void);
asmlinkage void cache_flushing_handler(void);
asmlinkage void ill_trap(void);

#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(void);
extern void smp_invalidate_interrupt(void);
extern void smp_call_function_interrupt(void);
extern void smp_ipi_timer_interrupt(void);
extern void smp_flush_cache_all_interrupt(void);
extern void smp_call_function_single_interrupt(void);

/*
 * for Boot AP function
 *
 * Places a 32-word filler followed by a branch to startup_AP in the
 * .eit_vector4 section, so a secondary CPU entering at _AP_EI jumps
 * into the AP startup code.
 */
asm (
	" .section .eit_vector4,\"ax\" \n"
	" .global _AP_RE \n"
	" .global startup_AP \n"
	"_AP_RE: \n"
	" .fill 32, 4, 0 \n"
	"_AP_EI: bra startup_AP \n"
	" .previous \n"
);
#endif  /* CONFIG_SMP */

extern unsigned long eit_vector[];

/*
 * Build the 32-bit word stored in an EIT vector slot: the word offset
 * from the vector entry to @func, OR-ed with 0xff000000 — presumably
 * the m32r `bra` (branch) opcode, so each slot holds a PC-relative
 * branch instruction to its handler (NOTE(review): confirm encoding
 * against the M32R ISA manual).
 */
#define BRA_INSN(func, entry)	\
	((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
	+ 0xff000000UL

/*
 * Populate the EIT (Exception/Interrupt/Trap) vector table with branch
 * instructions to the individual handlers, then flush the data cache so
 * the CPU fetches the updated vectors.
 */
static void set_eit_vector_entries(void)
{
	extern void default_eit_handler(void);
	extern void system_call(void);
	extern void pie_handler(void);
	extern void ace_handler(void);
	extern void tme_handler(void);
	extern void _flush_cache_copyback_all(void);

	eit_vector[0] = 0xd0c00001;	/* seth r0, 0x01 */
	eit_vector[1] = BRA_INSN(default_eit_handler, 1);
	eit_vector[4] = 0xd0c00010;	/* seth r0, 0x10 */
	eit_vector[5] = BRA_INSN(default_eit_handler, 5);
	eit_vector[8] = BRA_INSN(rie_handler, 8);
	eit_vector[12] = BRA_INSN(alignment_check, 12);
	/* Trap vectors 16..31; most unused traps route to ill_trap. */
	eit_vector[16] = BRA_INSN(ill_trap, 16);
	eit_vector[17] = BRA_INSN(debug_trap, 17);
	eit_vector[18] = BRA_INSN(system_call, 18);
	eit_vector[19] = BRA_INSN(ill_trap, 19);
	eit_vector[20] = BRA_INSN(ill_trap, 20);
	eit_vector[21] = BRA_INSN(ill_trap, 21);
	eit_vector[22] = BRA_INSN(ill_trap, 22);
	eit_vector[23] = BRA_INSN(ill_trap, 23);
	eit_vector[24] = BRA_INSN(ill_trap, 24);
	eit_vector[25] = BRA_INSN(ill_trap, 25);
	eit_vector[26] = BRA_INSN(ill_trap, 26);
	eit_vector[27] = BRA_INSN(ill_trap, 27);
	eit_vector[28] = BRA_INSN(cache_flushing_handler, 28);
	eit_vector[29] = BRA_INSN(ill_trap, 29);
	eit_vector[30] = BRA_INSN(ill_trap, 30);
	eit_vector[31] = BRA_INSN(ill_trap, 31);
	eit_vector[32] = BRA_INSN(ei_handler, 32);
	eit_vector[64] = BRA_INSN(pie_handler, 64);
#ifdef CONFIG_MMU
	eit_vector[68] = BRA_INSN(ace_handler, 68);
	eit_vector[72] = BRA_INSN(tme_handler, 72);
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	/* IPI vectors hold raw handler addresses, not branch insns. */
	eit_vector[184] = (unsigned long)smp_reschedule_interrupt;
	eit_vector[185] = (unsigned long)smp_invalidate_interrupt;
	eit_vector[186] = (unsigned long)smp_call_function_interrupt;
	eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
	eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
	eit_vector[189] = 0;	/* CPU_BOOT_IPI */
	eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
	eit_vector[191] = 0;
#endif
	_flush_cache_copyback_all();
}

/* Arch trap initialization: install vectors, then init per-CPU state. */
void __init trap_init(void)
{
	set_eit_vector_entries();

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}

/* Maximum number of stack words printed by show_stack(). */
static int kstack_depth_to_print = 24;

/*
 * Walk the given stack and print every word that looks like a kernel
 * text address, as a (heuristic) call trace.
 */
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	unsigned long addr;

	if (!stack)
		stack = (unsigned long*)&stack;	/* use current stack */

	printk("Call Trace: ");
	while (!kstack_end(stack)) {
		addr = *stack++;
		if (__kernel_text_address(addr)) {
			printk("[<%08lx>] ", addr);
			print_symbol("%s\n", addr);
		}
	}
	printk("\n");
}

/*
 * Dump raw stack contents (up to kstack_depth_to_print words, four per
 * row) and then a call trace, for @task or the current context.
 */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	unsigned long *stack;
	int i;

	/*
	 * debugging aid: "show_stack(NULL);" prints the
	 * back trace for this cpu.
	 */

	if(sp==NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp=(unsigned long*)&sp;
	}

	stack = sp;
	for(i=0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if (i && ((i % 4) == 0))
			printk("\n ");
		printk("%08lx ", *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

/* Print a call trace for the current context, anchored at a local. */
void dump_stack(void)
{
	unsigned long stack;

	show_trace(current, &stack);
}

EXPORT_SYMBOL(dump_stack);

/*
 * Full register/stack/code dump used by die().  For kernel-mode faults
 * the stack and up to 20 bytes of code at the faulting PC are printed;
 * the `bad` label is jumped into from the __get_user failure path.
 */
static void show_registers(struct pt_regs *regs)
{
	int i = 0;
	int in_kernel = 1;
	unsigned long sp;

	printk("CPU: %d\n", smp_processor_id());
	show_regs(regs);

	/* Kernel SP is just past the saved pt_regs frame. */
	sp = (unsigned long) (1+regs);
	if (user_mode(regs)) {
		in_kernel = 0;
		sp = regs->spu;
		printk("SPU: %08lx\n", sp);
	} else {
		printk("SPI: %08lx\n", sp);
	}
	printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
		current->comm, task_pid_nr(current), 0xffff & i,
		4096+(unsigned long)current);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		printk("\nStack: ");
		show_stack(current, (unsigned long*) sp);

		printk("\nCode: ");
		if (regs->bpc < PAGE_OFFSET)
			goto bad;

		for(i=0;i<20;i++) {
			unsigned char c;
			if (__get_user(c, &((unsigned char*)regs->bpc)[i])) {
bad:
				printk(" Bad PC value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}

/* Serializes concurrent die() calls across CPUs. */
static DEFINE_SPINLOCK(die_lock);

/*
 * Fatal-error path: print a register dump under die_lock and terminate
 * the current task with SIGSEGV.  Does not return.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx\n", str, err & 0xffff);
	show_registers(regs);
	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

/* die() only if the trap came from kernel mode. */
static __inline__ void die_if_kernel(const char * str,
	struct pt_regs * regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

/*
 * Common trap dispatch: user-mode traps deliver a signal (with optional
 * siginfo) to the current task; kernel-mode traps try an exception
 * fixup and die() if none applies.
 */
static __inline__ void do_trap(int trapnr, int signr, const char * str,
	struct pt_regs * regs, long error_code, siginfo_t *info)
{
	if (user_mode(regs)) {
		/* trap_signal */
		struct task_struct *tsk = current;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	} else {
		/* kernel_trap */
		if (!fixup_exception(regs))
			die(str, regs, error_code);
		return;
	}
}

/* Generate a do_<name>() handler that raises a plain signal. */
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	do_trap(trapnr, signr, NULL, regs, error_code, NULL); \
}

/* Generate a do_<name>() handler that raises a signal with siginfo. */
#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR( 1, SIGTRAP, "debug trap", debug_trap)
DO_ERROR_INFO(0x20, SIGILL, "reserved instruction ", rie_handler, ILL_ILLOPC, regs->bpc)
DO_ERROR_INFO(0x100, SIGILL, "privileged instruction", pie_handler, ILL_PRVOPC, regs->bpc)
DO_ERROR_INFO(-1, SIGILL, "illegal trap", ill_trap, ILL_ILLTRP, regs->bpc)

extern int handle_unaligned_access(unsigned long, struct pt_regs *);

/* This code taken from arch/sh/kernel/traps.c */
/*
 * Alignment-fault handler: fetch the faulting instruction at regs->bpc
 * and ask handle_unaligned_access() to emulate it.  User-mode failures
 * (unreadable PC or unhandled insn) kill the task with SIGSEGV;
 * kernel-mode fetch failures die().  get_fs()/set_fs() switches the
 * address limit around the copy_from_user() of kernel text.
 */
asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code)
{
	mm_segment_t oldfs;
	unsigned long insn;
	int tmp;

	oldfs = get_fs();

	if (user_mode(regs)) {
		local_irq_enable();
		current->thread.error_code = error_code;
		current->thread.trap_no = 0x17;

		set_fs(USER_DS);
		if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
			set_fs(oldfs);
			goto uspace_segv;
		}
		tmp = handle_unaligned_access(insn, regs);
		set_fs(oldfs);

		if (!tmp)
			return;

uspace_segv:
		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
			"access\n", current->comm);
		force_sig(SIGSEGV, current);
	} else {
		set_fs(KERNEL_DS);
		if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
			set_fs(oldfs);
			die("insn faulting in do_address_error", regs, 0);
		}
		handle_unaligned_access(insn, regs);
		set_fs(oldfs);
	}
}
gpl-2.0
lycanthia/Find7-Kernel-Source-4.3
drivers/media/dvb/dvb-usb/anysee.c
4943
34108
/* * DVB USB Linux driver for Anysee E30 DVB-C & DVB-T USB2.0 receiver * * Copyright (C) 2007 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * TODO: * - add smart card reader support for Conditional Access (CA) * * Card reader in Anysee is nothing more than ISO 7816 card reader. * There is no hardware CAM in any Anysee device sold. * In my understanding it should be implemented by making own module * for ISO 7816 card reader, like dvb_ca_en50221 is implemented. This * module registers serial interface that can be used to communicate * with any ISO 7816 smart card. * * Any help according to implement serial smart card reader support * is highly welcome! 
*/ #include "anysee.h" #include "tda1002x.h" #include "mt352.h" #include "mt352_priv.h" #include "zl10353.h" #include "tda18212.h" #include "cx24116.h" #include "stv0900.h" #include "stv6110.h" #include "isl6423.h" #include "cxd2820r.h" /* debug */ static int dvb_usb_anysee_debug; module_param_named(debug, dvb_usb_anysee_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS); static int dvb_usb_anysee_delsys; module_param_named(delsys, dvb_usb_anysee_delsys, int, 0644); MODULE_PARM_DESC(delsys, "select delivery mode (0=DVB-C, 1=DVB-T)"); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static DEFINE_MUTEX(anysee_usb_mutex); static int anysee_ctrl_msg(struct dvb_usb_device *d, u8 *sbuf, u8 slen, u8 *rbuf, u8 rlen) { struct anysee_state *state = d->priv; int act_len, ret, i; u8 buf[64]; memcpy(&buf[0], sbuf, slen); buf[60] = state->seq++; if (mutex_lock_interruptible(&anysee_usb_mutex) < 0) return -EAGAIN; deb_xfer(">>> "); debug_dump(buf, slen, deb_xfer); /* We need receive one message more after dvb_usb_generic_rw due to weird transaction flow, which is 1 x send + 2 x receive. */ ret = dvb_usb_generic_rw(d, buf, sizeof(buf), buf, sizeof(buf), 0); if (ret) goto error_unlock; /* TODO FIXME: dvb_usb_generic_rw() fails rarely with error code -32 * (EPIPE, Broken pipe). Function supports currently msleep() as a * parameter but I would not like to use it, since according to * Documentation/timers/timers-howto.txt it should not be used such * short, under < 20ms, sleeps. Repeating failed message would be * better choice as not to add unwanted delays... 
* Fixing that correctly is one of those or both; * 1) use repeat if possible * 2) add suitable delay */ /* get answer, retry few times if error returned */ for (i = 0; i < 3; i++) { /* receive 2nd answer */ ret = usb_bulk_msg(d->udev, usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint), buf, sizeof(buf), &act_len, 2000); if (ret) { deb_info("%s: recv bulk message failed: %d", __func__, ret); } else { deb_xfer("<<< "); debug_dump(buf, rlen, deb_xfer); if (buf[63] != 0x4f) deb_info("%s: cmd failed\n", __func__); break; } } if (ret) { /* all retries failed, it is fatal */ err("%s: recv bulk message failed: %d", __func__, ret); goto error_unlock; } /* read request, copy returned data to return buf */ if (rbuf && rlen) memcpy(rbuf, buf, rlen); error_unlock: mutex_unlock(&anysee_usb_mutex); return ret; } static int anysee_read_reg(struct dvb_usb_device *d, u16 reg, u8 *val) { u8 buf[] = {CMD_REG_READ, reg >> 8, reg & 0xff, 0x01}; int ret; ret = anysee_ctrl_msg(d, buf, sizeof(buf), val, 1); deb_info("%s: reg:%04x val:%02x\n", __func__, reg, *val); return ret; } static int anysee_write_reg(struct dvb_usb_device *d, u16 reg, u8 val) { u8 buf[] = {CMD_REG_WRITE, reg >> 8, reg & 0xff, 0x01, val}; deb_info("%s: reg:%04x val:%02x\n", __func__, reg, val); return anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); } /* write single register with mask */ static int anysee_wr_reg_mask(struct dvb_usb_device *d, u16 reg, u8 val, u8 mask) { int ret; u8 tmp; /* no need for read if whole reg is written */ if (mask != 0xff) { ret = anysee_read_reg(d, reg, &tmp); if (ret) return ret; val &= mask; tmp &= ~mask; val |= tmp; } return anysee_write_reg(d, reg, val); } /* read single register with mask */ static int anysee_rd_reg_mask(struct dvb_usb_device *d, u16 reg, u8 *val, u8 mask) { int ret, i; u8 tmp; ret = anysee_read_reg(d, reg, &tmp); if (ret) return ret; tmp &= mask; /* find position of the first bit */ for (i = 0; i < 8; i++) { if ((mask >> i) & 0x01) break; } *val = tmp >> i; 
return 0; } static int anysee_get_hw_info(struct dvb_usb_device *d, u8 *id) { u8 buf[] = {CMD_GET_HW_INFO}; return anysee_ctrl_msg(d, buf, sizeof(buf), id, 3); } static int anysee_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { u8 buf[] = {CMD_STREAMING_CTRL, (u8)onoff, 0x00}; deb_info("%s: onoff:%02x\n", __func__, onoff); return anysee_ctrl_msg(adap->dev, buf, sizeof(buf), NULL, 0); } static int anysee_led_ctrl(struct dvb_usb_device *d, u8 mode, u8 interval) { u8 buf[] = {CMD_LED_AND_IR_CTRL, 0x01, mode, interval}; deb_info("%s: state:%02x interval:%02x\n", __func__, mode, interval); return anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); } static int anysee_ir_ctrl(struct dvb_usb_device *d, u8 onoff) { u8 buf[] = {CMD_LED_AND_IR_CTRL, 0x02, onoff}; deb_info("%s: onoff:%02x\n", __func__, onoff); return anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); } /* I2C */ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret = 0, inc, i = 0; u8 buf[52]; /* 4 + 48 (I2C WR USB command header + I2C WR max) */ if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; while (i < num) { if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { if (msg[i].len > 2 || msg[i+1].len > 60) { ret = -EOPNOTSUPP; break; } buf[0] = CMD_I2C_READ; buf[1] = (msg[i].addr << 1) | 0x01; buf[2] = msg[i].buf[0]; buf[3] = msg[i].buf[1]; buf[4] = msg[i].len-1; buf[5] = msg[i+1].len; ret = anysee_ctrl_msg(d, buf, 6, msg[i+1].buf, msg[i+1].len); inc = 2; } else { if (msg[i].len > 48) { ret = -EOPNOTSUPP; break; } buf[0] = CMD_I2C_WRITE; buf[1] = (msg[i].addr << 1); buf[2] = msg[i].len; buf[3] = 0x01; memcpy(&buf[4], msg[i].buf, msg[i].len); ret = anysee_ctrl_msg(d, buf, 4 + msg[i].len, NULL, 0); inc = 1; } if (ret) break; i += inc; } mutex_unlock(&d->i2c_mutex); return ret ? 
ret : i; } static u32 anysee_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm anysee_i2c_algo = { .master_xfer = anysee_master_xfer, .functionality = anysee_i2c_func, }; static int anysee_mt352_demod_init(struct dvb_frontend *fe) { static u8 clock_config[] = { CLOCK_CTL, 0x38, 0x28 }; static u8 reset[] = { RESET, 0x80 }; static u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 }; static u8 agc_cfg[] = { AGC_TARGET, 0x28, 0x20 }; static u8 gpp_ctl_cfg[] = { GPP_CTL, 0x33 }; static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 }; mt352_write(fe, clock_config, sizeof(clock_config)); udelay(200); mt352_write(fe, reset, sizeof(reset)); mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg)); mt352_write(fe, agc_cfg, sizeof(agc_cfg)); mt352_write(fe, gpp_ctl_cfg, sizeof(gpp_ctl_cfg)); mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg)); return 0; } /* Callbacks for DVB USB */ static struct tda10023_config anysee_tda10023_config = { .demod_address = (0x1a >> 1), .invert = 0, .xtal = 16000000, .pll_m = 11, .pll_p = 3, .pll_n = 1, .output_mode = TDA10023_OUTPUT_MODE_PARALLEL_C, .deltaf = 0xfeeb, }; static struct mt352_config anysee_mt352_config = { .demod_address = (0x1e >> 1), .demod_init = anysee_mt352_demod_init, }; static struct zl10353_config anysee_zl10353_config = { .demod_address = (0x1e >> 1), .parallel_ts = 1, }; static struct zl10353_config anysee_zl10353_tda18212_config2 = { .demod_address = (0x1e >> 1), .parallel_ts = 1, .disable_i2c_gate_ctrl = 1, .no_tuner = 1, .if2 = 41500, }; static struct zl10353_config anysee_zl10353_tda18212_config = { .demod_address = (0x18 >> 1), .parallel_ts = 1, .disable_i2c_gate_ctrl = 1, .no_tuner = 1, .if2 = 41500, }; static struct tda10023_config anysee_tda10023_tda18212_config = { .demod_address = (0x1a >> 1), .xtal = 16000000, .pll_m = 12, .pll_p = 3, .pll_n = 1, .output_mode = TDA10023_OUTPUT_MODE_PARALLEL_B, .deltaf = 0xba02, }; static struct tda18212_config anysee_tda18212_config = { .i2c_address 
= (0xc0 >> 1), .if_dvbt_6 = 4150, .if_dvbt_7 = 4150, .if_dvbt_8 = 4150, .if_dvbc = 5000, }; static struct tda18212_config anysee_tda18212_config2 = { .i2c_address = 0x60 /* (0xc0 >> 1) */, .if_dvbt_6 = 3550, .if_dvbt_7 = 3700, .if_dvbt_8 = 4150, .if_dvbt2_6 = 3250, .if_dvbt2_7 = 4000, .if_dvbt2_8 = 4000, .if_dvbc = 5000, }; static struct cx24116_config anysee_cx24116_config = { .demod_address = (0xaa >> 1), .mpg_clk_pos_pol = 0x00, .i2c_wr_max = 48, }; static struct stv0900_config anysee_stv0900_config = { .demod_address = (0xd0 >> 1), .demod_mode = 0, .xtal = 8000000, .clkmode = 3, .diseqc_mode = 2, .tun1_maddress = 0, .tun1_adc = 1, /* 1 Vpp */ .path1_mode = 3, }; static struct stv6110_config anysee_stv6110_config = { .i2c_address = (0xc0 >> 1), .mclk = 16000000, .clk_div = 1, }; static struct isl6423_config anysee_isl6423_config = { .current_max = SEC_CURRENT_800m, .curlim = SEC_CURRENT_LIM_OFF, .mod_extern = 1, .addr = (0x10 >> 1), }; static struct cxd2820r_config anysee_cxd2820r_config = { .i2c_address = 0x6d, /* (0xda >> 1) */ .ts_mode = 0x38, }; /* * New USB device strings: Mfr=1, Product=2, SerialNumber=0 * Manufacturer: AMT.CO.KR * * E30 VID=04b4 PID=861f HW=2 FW=2.1 Product=???????? * PCB: ? 
* parts: DNOS404ZH102A(MT352, DTT7579(?)) * * E30 VID=04b4 PID=861f HW=2 FW=2.1 "anysee-T(LP)" * PCB: PCB 507T (rev1.61) * parts: DNOS404ZH103A(ZL10353, DTT7579(?)) * OEA=0a OEB=00 OEC=00 OED=ff OEE=00 * IOA=45 IOB=ff IOC=00 IOD=ff IOE=00 * * E30 Plus VID=04b4 PID=861f HW=6 FW=1.0 "anysee" * PCB: 507CD (rev1.1) * parts: DNOS404ZH103A(ZL10353, DTT7579(?)), CST56I01 * OEA=80 OEB=00 OEC=00 OED=ff OEE=fe * IOA=4f IOB=ff IOC=00 IOD=06 IOE=01 * IOD[0] ZL10353 1=enabled * IOA[7] TS 0=enabled * tuner is not behind ZL10353 I2C-gate (no care if gate disabled or not) * * E30 C Plus VID=04b4 PID=861f HW=10 FW=1.0 "anysee-DC(LP)" * PCB: 507DC (rev0.2) * parts: TDA10023, DTOS403IH102B TM, CST56I01 * OEA=80 OEB=00 OEC=00 OED=ff OEE=fe * IOA=4f IOB=ff IOC=00 IOD=26 IOE=01 * IOD[0] TDA10023 1=enabled * * E30 S2 Plus VID=04b4 PID=861f HW=11 FW=0.1 "anysee-S2(LP)" * PCB: 507SI (rev2.1) * parts: BS2N10WCC01(CX24116, CX24118), ISL6423, TDA8024 * OEA=80 OEB=00 OEC=ff OED=ff OEE=fe * IOA=4d IOB=ff IOC=00 IOD=26 IOE=01 * IOD[0] CX24116 1=enabled * * E30 C Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)" * PCB: 507FA (rev0.4) * parts: TDA10023, DTOS403IH102B TM, TDA8024 * OEA=80 OEB=00 OEC=ff OED=ff OEE=ff * IOA=4d IOB=ff IOC=00 IOD=00 IOE=c0 * IOD[5] TDA10023 1=enabled * IOE[0] tuner 1=enabled * * E30 Combo Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)" * PCB: 507FA (rev1.1) * parts: ZL10353, TDA10023, DTOS403IH102B TM, TDA8024 * OEA=80 OEB=00 OEC=ff OED=ff OEE=ff * IOA=4d IOB=ff IOC=00 IOD=00 IOE=c0 * DVB-C: * IOD[5] TDA10023 1=enabled * IOE[0] tuner 1=enabled * DVB-T: * IOD[0] ZL10353 1=enabled * IOE[0] tuner 0=enabled * tuner is behind ZL10353 I2C-gate * * E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)" * PCB: 508TC (rev0.6) * parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212) * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4 * IOA[7] TS 1=enabled * IOE[4] TDA18212 1=enabled * DVB-C: * IOD[6] ZL10353 0=disabled * IOD[5] TDA10023 1=enabled 
* IOE[0] IF 1=enabled * DVB-T: * IOD[5] TDA10023 0=disabled * IOD[6] ZL10353 1=enabled * IOE[0] IF 0=enabled * * E7 S2 VID=1c73 PID=861f HW=19 FW=0.4 AMTCI=0.5 "anysee-E7S2(LP)" * PCB: 508S2 (rev0.7) * parts: DNBU10512IST(STV0903, STV6110), ISL6423 * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=c4 IOD=08 IOE=e4 * IOA[7] TS 1=enabled * IOE[5] STV0903 1=enabled * * E7 T2C VID=1c73 PID=861f HW=20 FW=0.1 AMTCI=0.5 "anysee-E7T2C(LP)" * PCB: 508T2C (rev0.3) * parts: DNOQ44QCH106A(CXD2820R, TDA18212), TDA8024 * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4 * IOA[7] TS 1=enabled * IOE[5] CXD2820R 1=enabled * * E7 PTC VID=1c73 PID=861f HW=21 FW=0.1 AMTCI=?? "anysee-E7PTC(LP)" * PCB: 508PTC (rev0.5) * parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212) * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4 * IOA[7] TS 1=enabled * IOE[4] TDA18212 1=enabled * DVB-C: * IOD[6] ZL10353 0=disabled * IOD[5] TDA10023 1=enabled * IOE[0] IF 1=enabled * DVB-T: * IOD[5] TDA10023 0=disabled * IOD[6] ZL10353 1=enabled * IOE[0] IF 0=enabled * * E7 PS2 VID=1c73 PID=861f HW=22 FW=0.1 AMTCI=?? 
"anysee-E7PS2(LP)" * PCB: 508PS2 (rev0.4) * parts: DNBU10512IST(STV0903, STV6110), ISL6423 * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=c4 IOD=08 IOE=e4 * IOA[7] TS 1=enabled * IOE[5] STV0903 1=enabled */ /* external I2C gate used for DNOD44CDH086A(TDA18212) tuner module */ static int anysee_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { struct dvb_usb_adapter *adap = fe->dvb->priv; /* enable / disable tuner access on IOE[4] */ return anysee_wr_reg_mask(adap->dev, REG_IOE, (enable << 4), 0x10); } static int anysee_frontend_ctrl(struct dvb_frontend *fe, int onoff) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct anysee_state *state = adap->dev->priv; int ret; deb_info("%s: fe=%d onoff=%d\n", __func__, fe->id, onoff); /* no frontend sleep control */ if (onoff == 0) return 0; switch (state->hw) { case ANYSEE_HW_507FA: /* 15 */ /* E30 Combo Plus */ /* E30 C Plus */ if ((fe->id ^ dvb_usb_anysee_delsys) == 0) { /* disable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 0), 0x01); if (ret) goto error; /* enable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 5), 0x20); if (ret) goto error; /* enable DVB-C tuner on IOE[0] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 0), 0x01); if (ret) goto error; } else { /* disable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 5), 0x20); if (ret) goto error; /* enable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* enable DVB-T tuner on IOE[0] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (0 << 0), 0x01); if (ret) goto error; } break; case ANYSEE_HW_508TC: /* 18 */ case ANYSEE_HW_508PTC: /* 21 */ /* E7 TC */ /* E7 PTC */ if ((fe->id ^ dvb_usb_anysee_delsys) == 0) { /* disable DVB-T demod on IOD[6] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 6), 0x40); if (ret) goto error; /* enable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(adap->dev, 
REG_IOD, (1 << 5), 0x20); if (ret) goto error; /* enable IF route on IOE[0] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 0), 0x01); if (ret) goto error; } else { /* disable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 5), 0x20); if (ret) goto error; /* enable DVB-T demod on IOD[6] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 6), 0x40); if (ret) goto error; /* enable IF route on IOE[0] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (0 << 0), 0x01); if (ret) goto error; } break; default: ret = 0; } error: return ret; } static int anysee_frontend_attach(struct dvb_usb_adapter *adap) { int ret; struct anysee_state *state = adap->dev->priv; u8 hw_info[3]; u8 tmp; struct i2c_msg msg[2] = { { .addr = anysee_tda18212_config.i2c_address, .flags = 0, .len = 1, .buf = "\x00", }, { .addr = anysee_tda18212_config.i2c_address, .flags = I2C_M_RD, .len = 1, .buf = &tmp, } }; /* detect hardware only once */ if (adap->fe_adap[0].fe == NULL) { /* Check which hardware we have. * We must do this call two times to get reliable values * (hw/fw bug). */ ret = anysee_get_hw_info(adap->dev, hw_info); if (ret) goto error; ret = anysee_get_hw_info(adap->dev, hw_info); if (ret) goto error; /* Meaning of these info bytes are guessed. 
*/ info("firmware version:%d.%d hardware id:%d", hw_info[1], hw_info[2], hw_info[0]); state->hw = hw_info[0]; } /* set current frondend ID for devices having two frondends */ if (adap->fe_adap[0].fe) state->fe_id++; switch (state->hw) { case ANYSEE_HW_507T: /* 2 */ /* E30 */ if (state->fe_id) break; /* attach demod */ adap->fe_adap[0].fe = dvb_attach(mt352_attach, &anysee_mt352_config, &adap->dev->i2c_adap); if (adap->fe_adap[0].fe) break; /* attach demod */ adap->fe_adap[0].fe = dvb_attach(zl10353_attach, &anysee_zl10353_config, &adap->dev->i2c_adap); break; case ANYSEE_HW_507CD: /* 6 */ /* E30 Plus */ if (state->fe_id) break; /* enable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* enable transport stream on IOA[7] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOA, (0 << 7), 0x80); if (ret) goto error; /* attach demod */ adap->fe_adap[0].fe = dvb_attach(zl10353_attach, &anysee_zl10353_config, &adap->dev->i2c_adap); break; case ANYSEE_HW_507DC: /* 10 */ /* E30 C Plus */ if (state->fe_id) break; /* enable DVB-C demod on IOD[0] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* attach demod */ adap->fe_adap[0].fe = dvb_attach(tda10023_attach, &anysee_tda10023_config, &adap->dev->i2c_adap, 0x48); break; case ANYSEE_HW_507SI: /* 11 */ /* E30 S2 Plus */ if (state->fe_id) break; /* enable DVB-S/S2 demod on IOD[0] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* attach demod */ adap->fe_adap[0].fe = dvb_attach(cx24116_attach, &anysee_cx24116_config, &adap->dev->i2c_adap); break; case ANYSEE_HW_507FA: /* 15 */ /* E30 Combo Plus */ /* E30 C Plus */ /* enable tuner on IOE[4] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 4), 0x10); if (ret) goto error; /* probe TDA18212 */ tmp = 0; ret = i2c_transfer(&adap->dev->i2c_adap, msg, 2); if (ret == 2 && tmp == 0xc7) deb_info("%s: TDA18212 found\n", __func__); else tmp = 0; /* disable 
tuner on IOE[4] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (0 << 4), 0x10); if (ret) goto error; if ((state->fe_id ^ dvb_usb_anysee_delsys) == 0) { /* disable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 0), 0x01); if (ret) goto error; /* enable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 5), 0x20); if (ret) goto error; /* attach demod */ if (tmp == 0xc7) { /* TDA18212 config */ adap->fe_adap[state->fe_id].fe = dvb_attach( tda10023_attach, &anysee_tda10023_tda18212_config, &adap->dev->i2c_adap, 0x48); } else { /* PLL config */ adap->fe_adap[state->fe_id].fe = dvb_attach( tda10023_attach, &anysee_tda10023_config, &adap->dev->i2c_adap, 0x48); } } else { /* disable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 5), 0x20); if (ret) goto error; /* enable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* attach demod */ if (tmp == 0xc7) { /* TDA18212 config */ adap->fe_adap[state->fe_id].fe = dvb_attach( zl10353_attach, &anysee_zl10353_tda18212_config2, &adap->dev->i2c_adap); } else { /* PLL config */ adap->fe_adap[state->fe_id].fe = dvb_attach( zl10353_attach, &anysee_zl10353_config, &adap->dev->i2c_adap); } } /* I2C gate for DNOD44CDH086A(TDA18212) tuner module */ if (tmp == 0xc7) { if (adap->fe_adap[state->fe_id].fe) adap->fe_adap[state->fe_id].fe->ops.i2c_gate_ctrl = anysee_i2c_gate_ctrl; } break; case ANYSEE_HW_508TC: /* 18 */ case ANYSEE_HW_508PTC: /* 21 */ /* E7 TC */ /* E7 PTC */ if ((state->fe_id ^ dvb_usb_anysee_delsys) == 0) { /* disable DVB-T demod on IOD[6] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 6), 0x40); if (ret) goto error; /* enable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 5), 0x20); if (ret) goto error; /* attach demod */ adap->fe_adap[state->fe_id].fe = dvb_attach(tda10023_attach, &anysee_tda10023_tda18212_config, &adap->dev->i2c_adap, 0x48); } 
else { /* disable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 5), 0x20); if (ret) goto error; /* enable DVB-T demod on IOD[6] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 6), 0x40); if (ret) goto error; /* attach demod */ adap->fe_adap[state->fe_id].fe = dvb_attach(zl10353_attach, &anysee_zl10353_tda18212_config, &adap->dev->i2c_adap); } /* I2C gate for DNOD44CDH086A(TDA18212) tuner module */ if (adap->fe_adap[state->fe_id].fe) adap->fe_adap[state->fe_id].fe->ops.i2c_gate_ctrl = anysee_i2c_gate_ctrl; state->has_ci = true; break; case ANYSEE_HW_508S2: /* 19 */ case ANYSEE_HW_508PS2: /* 22 */ /* E7 S2 */ /* E7 PS2 */ if (state->fe_id) break; /* enable DVB-S/S2 demod on IOE[5] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20); if (ret) goto error; /* attach demod */ adap->fe_adap[0].fe = dvb_attach(stv0900_attach, &anysee_stv0900_config, &adap->dev->i2c_adap, 0); state->has_ci = true; break; case ANYSEE_HW_508T2C: /* 20 */ /* E7 T2C */ if (state->fe_id) break; /* enable DVB-T/T2/C demod on IOE[5] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20); if (ret) goto error; /* attach demod */ adap->fe_adap[state->fe_id].fe = dvb_attach(cxd2820r_attach, &anysee_cxd2820r_config, &adap->dev->i2c_adap); state->has_ci = true; break; } if (!adap->fe_adap[0].fe) { /* we have no frontend :-( */ ret = -ENODEV; err("Unsupported Anysee version. 
" \ "Please report the <linux-media@vger.kernel.org>."); } error: return ret; } static int anysee_tuner_attach(struct dvb_usb_adapter *adap) { struct anysee_state *state = adap->dev->priv; struct dvb_frontend *fe; int ret; deb_info("%s: fe=%d\n", __func__, state->fe_id); switch (state->hw) { case ANYSEE_HW_507T: /* 2 */ /* E30 */ /* attach tuner */ fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, (0xc2 >> 1), NULL, DVB_PLL_THOMSON_DTT7579); break; case ANYSEE_HW_507CD: /* 6 */ /* E30 Plus */ /* attach tuner */ fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, (0xc2 >> 1), &adap->dev->i2c_adap, DVB_PLL_THOMSON_DTT7579); break; case ANYSEE_HW_507DC: /* 10 */ /* E30 C Plus */ /* attach tuner */ fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, (0xc0 >> 1), &adap->dev->i2c_adap, DVB_PLL_SAMSUNG_DTOS403IH102A); break; case ANYSEE_HW_507SI: /* 11 */ /* E30 S2 Plus */ /* attach LNB controller */ fe = dvb_attach(isl6423_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, &anysee_isl6423_config); break; case ANYSEE_HW_507FA: /* 15 */ /* E30 Combo Plus */ /* E30 C Plus */ /* Try first attach TDA18212 silicon tuner on IOE[4], if that * fails attach old simple PLL. 
*/ /* attach tuner */ fe = dvb_attach(tda18212_attach, adap->fe_adap[state->fe_id].fe, &adap->dev->i2c_adap, &anysee_tda18212_config); if (fe) break; /* attach tuner */ fe = dvb_attach(dvb_pll_attach, adap->fe_adap[state->fe_id].fe, (0xc0 >> 1), &adap->dev->i2c_adap, DVB_PLL_SAMSUNG_DTOS403IH102A); break; case ANYSEE_HW_508TC: /* 18 */ case ANYSEE_HW_508PTC: /* 21 */ /* E7 TC */ /* E7 PTC */ /* attach tuner */ fe = dvb_attach(tda18212_attach, adap->fe_adap[state->fe_id].fe, &adap->dev->i2c_adap, &anysee_tda18212_config); break; case ANYSEE_HW_508S2: /* 19 */ case ANYSEE_HW_508PS2: /* 22 */ /* E7 S2 */ /* E7 PS2 */ /* attach tuner */ fe = dvb_attach(stv6110_attach, adap->fe_adap[0].fe, &anysee_stv6110_config, &adap->dev->i2c_adap); if (fe) { /* attach LNB controller */ fe = dvb_attach(isl6423_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, &anysee_isl6423_config); } break; case ANYSEE_HW_508T2C: /* 20 */ /* E7 T2C */ /* attach tuner */ fe = dvb_attach(tda18212_attach, adap->fe_adap[state->fe_id].fe, &adap->dev->i2c_adap, &anysee_tda18212_config2); break; default: fe = NULL; } if (fe) ret = 0; else ret = -ENODEV; return ret; } static int anysee_rc_query(struct dvb_usb_device *d) { u8 buf[] = {CMD_GET_IR_CODE}; u8 ircode[2]; int ret; /* Remote controller is basic NEC using address byte 0x08. Anysee device RC query returns only two bytes, status and code, address byte is dropped. Also it does not return any value for NEC RCs having address byte other than 0x08. Due to that, we cannot use that device as standard NEC receiver. It could be possible make hack which reads whole code directly from device memory... 
*/
	/* tail of anysee_rc_query(): fetch {status, scancode} from the device
	 * and report the keypress under the fixed NEC address byte 0x08
	 * (see the comment above: the device drops the address byte). */
	ret = anysee_ctrl_msg(d, buf, sizeof(buf), ircode, sizeof(ircode));
	if (ret)
		return ret;

	if (ircode[0]) {
		deb_rc("%s: key pressed %02x\n", __func__, ircode[1]);
		rc_keydown(d->rc_dev, 0x08 << 8 | ircode[1], 0);
	}

	return 0;
}

/*
 * CI attribute-memory read. The high address bits are folded into the
 * command byte (0x40 | addr >> 8). Returns the byte read or a negative
 * error code from the control transfer.
 */
static int anysee_ci_read_attribute_mem(struct dvb_ca_en50221 *ci, int slot,
	int addr)
{
	struct dvb_usb_device *d = ci->data;
	int ret;
	u8 buf[] = {CMD_CI, 0x02, 0x40 | addr >> 8, addr & 0xff, 0x00, 1};
	u8 val;

	ret = anysee_ctrl_msg(d, buf, sizeof(buf), &val, 1);
	if (ret)
		return ret;

	return val;
}

/* CI attribute-memory write; returns 0 or a negative error code. */
static int anysee_ci_write_attribute_mem(struct dvb_ca_en50221 *ci, int slot,
	int addr, u8 val)
{
	struct dvb_usb_device *d = ci->data;
	int ret;
	u8 buf[] = {CMD_CI, 0x03, 0x40 | addr >> 8, addr & 0xff, 0x00, 1, val};

	ret = anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0);
	if (ret)
		return ret;

	return 0;
}

/* CI CAM control-register read; returns the byte read or a negative errno. */
static int anysee_ci_read_cam_control(struct dvb_ca_en50221 *ci, int slot,
	u8 addr)
{
	struct dvb_usb_device *d = ci->data;
	int ret;
	u8 buf[] = {CMD_CI, 0x04, 0x40, addr, 0x00, 1};
	u8 val;

	ret = anysee_ctrl_msg(d, buf, sizeof(buf), &val, 1);
	if (ret)
		return ret;

	return val;
}

/* CI CAM control-register write; returns 0 or a negative error code. */
static int anysee_ci_write_cam_control(struct dvb_ca_en50221 *ci, int slot,
	u8 addr, u8 val)
{
	struct dvb_usb_device *d = ci->data;
	int ret;
	u8 buf[] = {CMD_CI, 0x05, 0x40, addr, 0x00, 1, val};

	ret = anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0);
	if (ret)
		return ret;

	return 0;
}

/*
 * Reset the CI slot: drive IOA[7] low, hold 300 ms, release. Also arm a
 * 1 s "CAM ready" deadline that anysee_ci_poll_slot_status() checks
 * before reporting DVB_CA_EN50221_POLL_CAM_READY.
 */
static int anysee_ci_slot_reset(struct dvb_ca_en50221 *ci, int slot)
{
	struct dvb_usb_device *d = ci->data;
	int ret;
	struct anysee_state *state = d->priv;
	state->ci_cam_ready = jiffies + msecs_to_jiffies(1000);

	ret = anysee_wr_reg_mask(d, REG_IOA, (0 << 7), 0x80);
	if (ret)
		return ret;

	msleep(300);

	ret = anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80);
	if (ret)
		return ret;

	return 0;
}

/* Shut the CI slot down: pulse IOA[7] low for 30 ms, then release it. */
static int anysee_ci_slot_shutdown(struct dvb_ca_en50221 *ci, int slot)
{
	struct dvb_usb_device *d = ci->data;
	int ret;

	ret = anysee_wr_reg_mask(d, REG_IOA, (0 << 7), 0x80);
	if (ret)
		return ret;

	msleep(30);

	ret =
anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80); if (ret) return ret; return 0; } static int anysee_ci_slot_ts_enable(struct dvb_ca_en50221 *ci, int slot) { struct dvb_usb_device *d = ci->data; int ret; ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 1), 0x02); if (ret) return ret; return 0; } static int anysee_ci_poll_slot_status(struct dvb_ca_en50221 *ci, int slot, int open) { struct dvb_usb_device *d = ci->data; struct anysee_state *state = d->priv; int ret; u8 tmp; ret = anysee_rd_reg_mask(d, REG_IOC, &tmp, 0x40); if (ret) return ret; if (tmp == 0) { ret = DVB_CA_EN50221_POLL_CAM_PRESENT; if (time_after(jiffies, state->ci_cam_ready)) ret |= DVB_CA_EN50221_POLL_CAM_READY; } return ret; } static int anysee_ci_init(struct dvb_usb_device *d) { struct anysee_state *state = d->priv; int ret; state->ci.owner = THIS_MODULE; state->ci.read_attribute_mem = anysee_ci_read_attribute_mem; state->ci.write_attribute_mem = anysee_ci_write_attribute_mem; state->ci.read_cam_control = anysee_ci_read_cam_control; state->ci.write_cam_control = anysee_ci_write_cam_control; state->ci.slot_reset = anysee_ci_slot_reset; state->ci.slot_shutdown = anysee_ci_slot_shutdown; state->ci.slot_ts_enable = anysee_ci_slot_ts_enable; state->ci.poll_slot_status = anysee_ci_poll_slot_status; state->ci.data = d; ret = anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80); if (ret) return ret; ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 2)|(0 << 1)|(0 << 0), 0x07); if (ret) return ret; ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 2)|(1 << 1)|(1 << 0), 0x07); if (ret) return ret; ret = dvb_ca_en50221_init(&d->adapter[0].dvb_adap, &state->ci, 0, 1); if (ret) return ret; return 0; } static void anysee_ci_release(struct dvb_usb_device *d) { struct anysee_state *state = d->priv; /* detach CI */ if (state->has_ci) dvb_ca_en50221_release(&state->ci); return; } static int anysee_init(struct dvb_usb_device *d) { struct anysee_state *state = d->priv; int ret; /* LED light */ ret = anysee_led_ctrl(d, 0x01, 0x03); if (ret) 
return ret; /* enable IR */ ret = anysee_ir_ctrl(d, 1); if (ret) return ret; /* attach CI */ if (state->has_ci) { ret = anysee_ci_init(d); if (ret) { state->has_ci = false; return ret; } } return 0; } /* DVB USB Driver stuff */ static struct dvb_usb_device_properties anysee_properties; static int anysee_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct dvb_usb_device *d; struct usb_host_interface *alt; int ret; /* There is one interface with two alternate settings. Alternate setting 0 is for bulk transfer. Alternate setting 1 is for isochronous transfer. We use bulk transfer (alternate setting 0). */ if (intf->num_altsetting < 1) return -ENODEV; /* * Anysee is always warm (its USB-bridge, Cypress FX2, uploads * firmware from eeprom). If dvb_usb_device_init() succeeds that * means d is a valid pointer. */ ret = dvb_usb_device_init(intf, &anysee_properties, THIS_MODULE, &d, adapter_nr); if (ret) return ret; alt = usb_altnum_to_altsetting(intf, 0); if (alt == NULL) { deb_info("%s: no alt found!\n", __func__); return -ENODEV; } ret = usb_set_interface(d->udev, alt->desc.bInterfaceNumber, alt->desc.bAlternateSetting); if (ret) return ret; return anysee_init(d); } static void anysee_disconnect(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); anysee_ci_release(d); dvb_usb_device_exit(intf); return; } static struct usb_device_id anysee_table[] = { { USB_DEVICE(USB_VID_CYPRESS, USB_PID_ANYSEE) }, { USB_DEVICE(USB_VID_AMT, USB_PID_ANYSEE) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, anysee_table); static struct dvb_usb_device_properties anysee_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct anysee_state), .num_adapters = 1, .adapter = { { .num_frontends = 2, .frontend_ctrl = anysee_frontend_ctrl, .fe = { { .streaming_ctrl = anysee_streaming_ctrl, .frontend_attach = anysee_frontend_attach, .tuner_attach = anysee_tuner_attach, .stream = { .type = 
USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = (16*512), } } }, }, { .streaming_ctrl = anysee_streaming_ctrl, .frontend_attach = anysee_frontend_attach, .tuner_attach = anysee_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = (16*512), } } }, } }, } }, .rc.core = { .rc_codes = RC_MAP_ANYSEE, .protocol = RC_TYPE_OTHER, .module_name = "anysee", .rc_query = anysee_rc_query, .rc_interval = 250, /* windows driver uses 500ms */ }, .i2c_algo = &anysee_i2c_algo, .generic_bulk_ctrl_endpoint = 1, .num_device_descs = 1, .devices = { { .name = "Anysee DVB USB2.0", .cold_ids = {NULL}, .warm_ids = {&anysee_table[0], &anysee_table[1], NULL}, }, } }; static struct usb_driver anysee_driver = { .name = "dvb_usb_anysee", .probe = anysee_probe, .disconnect = anysee_disconnect, .id_table = anysee_table, }; module_usb_driver(anysee_driver); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("Driver Anysee E30 DVB-C & DVB-T USB2.0"); MODULE_LICENSE("GPL");
gpl-2.0
Dearms/android_kernel_xiaomi_msm8960
drivers/tty/serial/mpc52xx_uart.c
4943
39982
/* * Driver for the PSC of the Freescale MPC52xx PSCs configured as UARTs. * * FIXME According to the user manual the status bits in the status register * are only updated when the peripherals access the FIFO and not when the * CPU accesses them. So since we use these bits to know when we stop writing * and reading, they may not be updated in-time and a race condition may * exist. But I haven't been able to prove this and I don't care. But if * any problem arises, it might be worth checking. The TX/RX FIFO Stats * registers should be used in addition. * Update: Actually, they seem updated ... At least the bits we use. * * * Maintainer : Sylvain Munaut <tnt@246tNt.com> * * Some of the code has been inspired/copied from the 2.4 code written * by Dale Farnsworth <dfarnsworth@mvista.com>. * * Copyright (C) 2008 Freescale Semiconductor Inc. * John Rigby <jrigby@gmail.com> * Added support for MPC5121 * Copyright (C) 2006 Secret Lab Technologies Ltd. * Grant Likely <grant.likely@secretlab.ca> * Copyright (C) 2004-2006 Sylvain Munaut <tnt@246tNt.com> * Copyright (C) 2003 MontaVista, Software, Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. 
*/ #undef DEBUG #include <linux/device.h> #include <linux/module.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial.h> #include <linux/sysrq.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/clk.h> #include <asm/mpc52xx.h> #include <asm/mpc52xx_psc.h> #if defined(CONFIG_SERIAL_MPC52xx_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/serial_core.h> /* We've been assigned a range on the "Low-density serial ports" major */ #define SERIAL_PSC_MAJOR 204 #define SERIAL_PSC_MINOR 148 #define ISR_PASS_LIMIT 256 /* Max number of iteration in the interrupt */ static struct uart_port mpc52xx_uart_ports[MPC52xx_PSC_MAXNUM]; /* Rem: - We use the read_status_mask as a shadow of * psc->mpc52xx_psc_imr * - It's important that is array is all zero on start as we * use it to know if it's initialized or not ! If it's not sure * it's cleared, then a memset(...,0,...) should be added to * the console_init */ /* lookup table for matching device nodes to index numbers */ static struct device_node *mpc52xx_uart_nodes[MPC52xx_PSC_MAXNUM]; static void mpc52xx_uart_of_enumerate(void); #define PSC(port) ((struct mpc52xx_psc __iomem *)((port)->membase)) /* Forward declaration of the interruption handling routine */ static irqreturn_t mpc52xx_uart_int(int irq, void *dev_id); static irqreturn_t mpc5xxx_uart_process_int(struct uart_port *port); /* Simple macro to test if a port is console or not. This one is taken * for serial_core.c and maybe should be moved to serial_core.h ? 
*/ #ifdef CONFIG_SERIAL_CORE_CONSOLE #define uart_console(port) \ ((port)->cons && (port)->cons->index == (port)->line) #else #define uart_console(port) (0) #endif /* ======================================================================== */ /* PSC fifo operations for isolating differences between 52xx and 512x */ /* ======================================================================== */ struct psc_ops { void (*fifo_init)(struct uart_port *port); int (*raw_rx_rdy)(struct uart_port *port); int (*raw_tx_rdy)(struct uart_port *port); int (*rx_rdy)(struct uart_port *port); int (*tx_rdy)(struct uart_port *port); int (*tx_empty)(struct uart_port *port); void (*stop_rx)(struct uart_port *port); void (*start_tx)(struct uart_port *port); void (*stop_tx)(struct uart_port *port); void (*rx_clr_irq)(struct uart_port *port); void (*tx_clr_irq)(struct uart_port *port); void (*write_char)(struct uart_port *port, unsigned char c); unsigned char (*read_char)(struct uart_port *port); void (*cw_disable_ints)(struct uart_port *port); void (*cw_restore_ints)(struct uart_port *port); unsigned int (*set_baudrate)(struct uart_port *port, struct ktermios *new, struct ktermios *old); int (*clock)(struct uart_port *port, int enable); int (*fifoc_init)(void); void (*fifoc_uninit)(void); void (*get_irq)(struct uart_port *, struct device_node *); irqreturn_t (*handle_irq)(struct uart_port *port); }; /* setting the prescaler and divisor reg is common for all chips */ static inline void mpc52xx_set_divisor(struct mpc52xx_psc __iomem *psc, u16 prescaler, unsigned int divisor) { /* select prescaler */ out_be16(&psc->mpc52xx_psc_clock_select, prescaler); out_8(&psc->ctur, divisor >> 8); out_8(&psc->ctlr, divisor & 0xff); } #ifdef CONFIG_PPC_MPC52xx #define FIFO_52xx(port) ((struct mpc52xx_psc_fifo __iomem *)(PSC(port)+1)) static void mpc52xx_psc_fifo_init(struct uart_port *port) { struct mpc52xx_psc __iomem *psc = PSC(port); struct mpc52xx_psc_fifo __iomem *fifo = FIFO_52xx(port); 
/* tail of mpc52xx_psc_fifo_init(): program 52xx FIFO control/alarm
 * levels, then unmask RX/TX-ready in the IMR (read_status_mask is the
 * driver's shadow of the write-only IMR — see comment near top of file) */
	out_8(&fifo->rfcntl, 0x00);
	out_be16(&fifo->rfalarm, 0x1ff);
	out_8(&fifo->tfcntl, 0x07);
	out_be16(&fifo->tfalarm, 0x80);

	port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY;
	out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
}

/* Raw (unmasked) HW status: a received character is available. */
static int mpc52xx_psc_raw_rx_rdy(struct uart_port *port)
{
	return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY;
}

/* Raw (unmasked) HW status: the transmitter can accept a character. */
static int mpc52xx_psc_raw_tx_rdy(struct uart_port *port)
{
	return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXRDY;
}

/* Masked status: RX-ready interrupt pending AND enabled in the shadow IMR. */
static int mpc52xx_psc_rx_rdy(struct uart_port *port)
{
	return in_be16(&PSC(port)->mpc52xx_psc_isr)
		& port->read_status_mask
		& MPC52xx_PSC_IMR_RXRDY;
}

/* Masked status: TX-ready interrupt pending AND enabled in the shadow IMR. */
static int mpc52xx_psc_tx_rdy(struct uart_port *port)
{
	return in_be16(&PSC(port)->mpc52xx_psc_isr)
		& port->read_status_mask
		& MPC52xx_PSC_IMR_TXRDY;
}

/* Nonzero when the transmitter is completely empty (SR_TXEMP). */
static int mpc52xx_psc_tx_empty(struct uart_port *port)
{
	return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP;
}

/* Enable the TX-ready interrupt via the shadowed IMR. */
static void mpc52xx_psc_start_tx(struct uart_port *port)
{
	port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
	out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
}

/* Disable the TX-ready interrupt. */
static void mpc52xx_psc_stop_tx(struct uart_port *port)
{
	port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY;
	out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
}

/* Disable the RX-ready interrupt. */
static void mpc52xx_psc_stop_rx(struct uart_port *port)
{
	port->read_status_mask &= ~MPC52xx_PSC_IMR_RXRDY;
	out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
}

/* 52xx needs no explicit interrupt acknowledge; intentionally empty. */
static void mpc52xx_psc_rx_clr_irq(struct uart_port *port)
{
}

/* 52xx needs no explicit interrupt acknowledge; intentionally empty. */
static void mpc52xx_psc_tx_clr_irq(struct uart_port *port)
{
}

/* Write one character into the PSC TX buffer register. */
static void mpc52xx_psc_write_char(struct uart_port *port, unsigned char c)
{
	out_8(&PSC(port)->mpc52xx_psc_buffer_8, c);
}

/* Read one character from the PSC RX buffer register. */
static unsigned char mpc52xx_psc_read_char(struct uart_port *port)
{
	return in_8(&PSC(port)->mpc52xx_psc_buffer_8);
}

/* Console-write helper: mask all PSC interrupts (shadow mask untouched). */
static void mpc52xx_psc_cw_disable_ints(struct uart_port *port)
{
	out_be16(&PSC(port)->mpc52xx_psc_imr, 0);
}

static void mpc52xx_psc_cw_restore_ints(struct
uart_port *port) { out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask); } static unsigned int mpc5200_psc_set_baudrate(struct uart_port *port, struct ktermios *new, struct ktermios *old) { unsigned int baud; unsigned int divisor; /* The 5200 has a fixed /32 prescaler, uartclk contains the ipb freq */ baud = uart_get_baud_rate(port, new, old, port->uartclk / (32 * 0xffff) + 1, port->uartclk / 32); divisor = (port->uartclk + 16 * baud) / (32 * baud); /* enable the /32 prescaler and set the divisor */ mpc52xx_set_divisor(PSC(port), 0xdd00, divisor); return baud; } static unsigned int mpc5200b_psc_set_baudrate(struct uart_port *port, struct ktermios *new, struct ktermios *old) { unsigned int baud; unsigned int divisor; u16 prescaler; /* The 5200B has a selectable /4 or /32 prescaler, uartclk contains the * ipb freq */ baud = uart_get_baud_rate(port, new, old, port->uartclk / (32 * 0xffff) + 1, port->uartclk / 4); divisor = (port->uartclk + 2 * baud) / (4 * baud); /* select the proper prescaler and set the divisor * prefer high prescaler for more tolerance on low baudrates */ if (divisor > 0xffff || baud <= 115200) { divisor = (divisor + 4) / 8; prescaler = 0xdd00; /* /32 */ } else prescaler = 0xff00; /* /4 */ mpc52xx_set_divisor(PSC(port), prescaler, divisor); return baud; } static void mpc52xx_psc_get_irq(struct uart_port *port, struct device_node *np) { port->irqflags = 0; port->irq = irq_of_parse_and_map(np, 0); } /* 52xx specific interrupt handler. 
The caller holds the port lock */ static irqreturn_t mpc52xx_psc_handle_irq(struct uart_port *port) { return mpc5xxx_uart_process_int(port); } static struct psc_ops mpc52xx_psc_ops = { .fifo_init = mpc52xx_psc_fifo_init, .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy, .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy, .rx_rdy = mpc52xx_psc_rx_rdy, .tx_rdy = mpc52xx_psc_tx_rdy, .tx_empty = mpc52xx_psc_tx_empty, .stop_rx = mpc52xx_psc_stop_rx, .start_tx = mpc52xx_psc_start_tx, .stop_tx = mpc52xx_psc_stop_tx, .rx_clr_irq = mpc52xx_psc_rx_clr_irq, .tx_clr_irq = mpc52xx_psc_tx_clr_irq, .write_char = mpc52xx_psc_write_char, .read_char = mpc52xx_psc_read_char, .cw_disable_ints = mpc52xx_psc_cw_disable_ints, .cw_restore_ints = mpc52xx_psc_cw_restore_ints, .set_baudrate = mpc5200_psc_set_baudrate, .get_irq = mpc52xx_psc_get_irq, .handle_irq = mpc52xx_psc_handle_irq, }; static struct psc_ops mpc5200b_psc_ops = { .fifo_init = mpc52xx_psc_fifo_init, .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy, .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy, .rx_rdy = mpc52xx_psc_rx_rdy, .tx_rdy = mpc52xx_psc_tx_rdy, .tx_empty = mpc52xx_psc_tx_empty, .stop_rx = mpc52xx_psc_stop_rx, .start_tx = mpc52xx_psc_start_tx, .stop_tx = mpc52xx_psc_stop_tx, .rx_clr_irq = mpc52xx_psc_rx_clr_irq, .tx_clr_irq = mpc52xx_psc_tx_clr_irq, .write_char = mpc52xx_psc_write_char, .read_char = mpc52xx_psc_read_char, .cw_disable_ints = mpc52xx_psc_cw_disable_ints, .cw_restore_ints = mpc52xx_psc_cw_restore_ints, .set_baudrate = mpc5200b_psc_set_baudrate, .get_irq = mpc52xx_psc_get_irq, .handle_irq = mpc52xx_psc_handle_irq, }; #endif /* CONFIG_MPC52xx */ #ifdef CONFIG_PPC_MPC512x #define FIFO_512x(port) ((struct mpc512x_psc_fifo __iomem *)(PSC(port)+1)) /* PSC FIFO Controller for mpc512x */ struct psc_fifoc { u32 fifoc_cmd; u32 fifoc_int; u32 fifoc_dma; u32 fifoc_axe; u32 fifoc_debug; }; static struct psc_fifoc __iomem *psc_fifoc; static unsigned int psc_fifoc_irq; static void mpc512x_psc_fifo_init(struct uart_port *port) { /* /32 prescaler */ 
/* tail of mpc512x_psc_fifo_init(): select the /32 prescaler, then
 * reset + enable both FIFO slices with alarm level 1, interrupts
 * masked first and the FIFO-alarm interrupt unmasked last */
	out_be16(&PSC(port)->mpc52xx_psc_clock_select, 0xdd00);

	out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_RESET_SLICE);
	out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
	out_be32(&FIFO_512x(port)->txalarm, 1);
	out_be32(&FIFO_512x(port)->tximr, 0);

	out_be32(&FIFO_512x(port)->rxcmd, MPC512x_PSC_FIFO_RESET_SLICE);
	out_be32(&FIFO_512x(port)->rxcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
	out_be32(&FIFO_512x(port)->rxalarm, 1);
	out_be32(&FIFO_512x(port)->rximr, 0);

	out_be32(&FIFO_512x(port)->tximr, MPC512x_PSC_FIFO_ALARM);
	out_be32(&FIFO_512x(port)->rximr, MPC512x_PSC_FIFO_ALARM);
}

/* RX FIFO holds at least one character (not EMPTY). */
static int mpc512x_psc_raw_rx_rdy(struct uart_port *port)
{
	return !(in_be32(&FIFO_512x(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
}

/* TX FIFO has room for at least one character (not FULL). */
static int mpc512x_psc_raw_tx_rdy(struct uart_port *port)
{
	return !(in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_FULL);
}

/* RX FIFO alarm both pending (rxsr) and unmasked (rximr). */
static int mpc512x_psc_rx_rdy(struct uart_port *port)
{
	return in_be32(&FIFO_512x(port)->rxsr)
		& in_be32(&FIFO_512x(port)->rximr)
		& MPC512x_PSC_FIFO_ALARM;
}

/* TX FIFO alarm both pending (txsr) and unmasked (tximr). */
static int mpc512x_psc_tx_rdy(struct uart_port *port)
{
	return in_be32(&FIFO_512x(port)->txsr)
		& in_be32(&FIFO_512x(port)->tximr)
		& MPC512x_PSC_FIFO_ALARM;
}

/* TX FIFO completely drained. */
static int mpc512x_psc_tx_empty(struct uart_port *port)
{
	return in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_EMPTY;
}

/* Mask the RX FIFO alarm interrupt (read-modify-write of rximr). */
static void mpc512x_psc_stop_rx(struct uart_port *port)
{
	unsigned long rx_fifo_imr;

	rx_fifo_imr = in_be32(&FIFO_512x(port)->rximr);
	rx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM;
	out_be32(&FIFO_512x(port)->rximr, rx_fifo_imr);
}

/* Unmask the TX FIFO alarm interrupt. */
static void mpc512x_psc_start_tx(struct uart_port *port)
{
	unsigned long tx_fifo_imr;

	tx_fifo_imr = in_be32(&FIFO_512x(port)->tximr);
	tx_fifo_imr |= MPC512x_PSC_FIFO_ALARM;
	out_be32(&FIFO_512x(port)->tximr, tx_fifo_imr);
}

/* Mask the TX FIFO alarm interrupt. */
static void mpc512x_psc_stop_tx(struct uart_port *port)
{
	unsigned long tx_fifo_imr;

	tx_fifo_imr = in_be32(&FIFO_512x(port)->tximr);
	tx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM;
	out_be32(&FIFO_512x(port)->tximr, tx_fifo_imr);
}

static void
mpc512x_psc_rx_clr_irq(struct uart_port *port) { out_be32(&FIFO_512x(port)->rxisr, in_be32(&FIFO_512x(port)->rxisr)); } static void mpc512x_psc_tx_clr_irq(struct uart_port *port) { out_be32(&FIFO_512x(port)->txisr, in_be32(&FIFO_512x(port)->txisr)); } static void mpc512x_psc_write_char(struct uart_port *port, unsigned char c) { out_8(&FIFO_512x(port)->txdata_8, c); } static unsigned char mpc512x_psc_read_char(struct uart_port *port) { return in_8(&FIFO_512x(port)->rxdata_8); } static void mpc512x_psc_cw_disable_ints(struct uart_port *port) { port->read_status_mask = in_be32(&FIFO_512x(port)->tximr) << 16 | in_be32(&FIFO_512x(port)->rximr); out_be32(&FIFO_512x(port)->tximr, 0); out_be32(&FIFO_512x(port)->rximr, 0); } static void mpc512x_psc_cw_restore_ints(struct uart_port *port) { out_be32(&FIFO_512x(port)->tximr, (port->read_status_mask >> 16) & 0x7f); out_be32(&FIFO_512x(port)->rximr, port->read_status_mask & 0x7f); } static unsigned int mpc512x_psc_set_baudrate(struct uart_port *port, struct ktermios *new, struct ktermios *old) { unsigned int baud; unsigned int divisor; /* * The "MPC5121e Microcontroller Reference Manual, Rev. 3" says on * pg. 30-10 that the chip supports a /32 and a /10 prescaler. * Furthermore, it states that "After reset, the prescaler by 10 * for the UART mode is selected", but the reset register value is * 0x0000 which means a /32 prescaler. This is wrong. * * In reality using /32 prescaler doesn't work, as it is not supported! * Use /16 or /10 prescaler, see "MPC5121e Hardware Design Guide", * Chapter 4.1 PSC in UART Mode. * Calculate with a /16 prescaler here. 
*/ /* uartclk contains the ips freq */ baud = uart_get_baud_rate(port, new, old, port->uartclk / (16 * 0xffff) + 1, port->uartclk / 16); divisor = (port->uartclk + 8 * baud) / (16 * baud); /* enable the /16 prescaler and set the divisor */ mpc52xx_set_divisor(PSC(port), 0xdd00, divisor); return baud; } /* Init PSC FIFO Controller */ static int __init mpc512x_psc_fifoc_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-psc-fifo"); if (!np) { pr_err("%s: Can't find FIFOC node\n", __func__); return -ENODEV; } psc_fifoc = of_iomap(np, 0); if (!psc_fifoc) { pr_err("%s: Can't map FIFOC\n", __func__); of_node_put(np); return -ENODEV; } psc_fifoc_irq = irq_of_parse_and_map(np, 0); of_node_put(np); if (psc_fifoc_irq == 0) { pr_err("%s: Can't get FIFOC irq\n", __func__); iounmap(psc_fifoc); return -ENODEV; } return 0; } static void __exit mpc512x_psc_fifoc_uninit(void) { iounmap(psc_fifoc); } /* 512x specific interrupt handler. The caller holds the port lock */ static irqreturn_t mpc512x_psc_handle_irq(struct uart_port *port) { unsigned long fifoc_int; int psc_num; /* Read pending PSC FIFOC interrupts */ fifoc_int = in_be32(&psc_fifoc->fifoc_int); /* Check if it is an interrupt for this port */ psc_num = (port->mapbase & 0xf00) >> 8; if (test_bit(psc_num, &fifoc_int) || test_bit(psc_num + 16, &fifoc_int)) return mpc5xxx_uart_process_int(port); return IRQ_NONE; } static int mpc512x_psc_clock(struct uart_port *port, int enable) { struct clk *psc_clk; int psc_num; char clk_name[10]; if (uart_console(port)) return 0; psc_num = (port->mapbase & 0xf00) >> 8; snprintf(clk_name, sizeof(clk_name), "psc%d_clk", psc_num); psc_clk = clk_get(port->dev, clk_name); if (IS_ERR(psc_clk)) { dev_err(port->dev, "Failed to get PSC clock entry!\n"); return -ENODEV; } dev_dbg(port->dev, "%s %sable\n", clk_name, enable ? 
"en" : "dis"); if (enable) clk_enable(psc_clk); else clk_disable(psc_clk); return 0; } static void mpc512x_psc_get_irq(struct uart_port *port, struct device_node *np) { port->irqflags = IRQF_SHARED; port->irq = psc_fifoc_irq; } static struct psc_ops mpc512x_psc_ops = { .fifo_init = mpc512x_psc_fifo_init, .raw_rx_rdy = mpc512x_psc_raw_rx_rdy, .raw_tx_rdy = mpc512x_psc_raw_tx_rdy, .rx_rdy = mpc512x_psc_rx_rdy, .tx_rdy = mpc512x_psc_tx_rdy, .tx_empty = mpc512x_psc_tx_empty, .stop_rx = mpc512x_psc_stop_rx, .start_tx = mpc512x_psc_start_tx, .stop_tx = mpc512x_psc_stop_tx, .rx_clr_irq = mpc512x_psc_rx_clr_irq, .tx_clr_irq = mpc512x_psc_tx_clr_irq, .write_char = mpc512x_psc_write_char, .read_char = mpc512x_psc_read_char, .cw_disable_ints = mpc512x_psc_cw_disable_ints, .cw_restore_ints = mpc512x_psc_cw_restore_ints, .set_baudrate = mpc512x_psc_set_baudrate, .clock = mpc512x_psc_clock, .fifoc_init = mpc512x_psc_fifoc_init, .fifoc_uninit = mpc512x_psc_fifoc_uninit, .get_irq = mpc512x_psc_get_irq, .handle_irq = mpc512x_psc_handle_irq, }; #endif static struct psc_ops *psc_ops; /* ======================================================================== */ /* UART operations */ /* ======================================================================== */ static unsigned int mpc52xx_uart_tx_empty(struct uart_port *port) { return psc_ops->tx_empty(port) ? 
TIOCSER_TEMT : 0; } static void mpc52xx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) { if (mctrl & TIOCM_RTS) out_8(&PSC(port)->op1, MPC52xx_PSC_OP_RTS); else out_8(&PSC(port)->op0, MPC52xx_PSC_OP_RTS); } static unsigned int mpc52xx_uart_get_mctrl(struct uart_port *port) { unsigned int ret = TIOCM_DSR; u8 status = in_8(&PSC(port)->mpc52xx_psc_ipcr); if (!(status & MPC52xx_PSC_CTS)) ret |= TIOCM_CTS; if (!(status & MPC52xx_PSC_DCD)) ret |= TIOCM_CAR; return ret; } static void mpc52xx_uart_stop_tx(struct uart_port *port) { /* port->lock taken by caller */ psc_ops->stop_tx(port); } static void mpc52xx_uart_start_tx(struct uart_port *port) { /* port->lock taken by caller */ psc_ops->start_tx(port); } static void mpc52xx_uart_send_xchar(struct uart_port *port, char ch) { unsigned long flags; spin_lock_irqsave(&port->lock, flags); port->x_char = ch; if (ch) { /* Make sure tx interrupts are on */ /* Truly necessary ??? They should be anyway */ psc_ops->start_tx(port); } spin_unlock_irqrestore(&port->lock, flags); } static void mpc52xx_uart_stop_rx(struct uart_port *port) { /* port->lock taken by caller */ psc_ops->stop_rx(port); } static void mpc52xx_uart_enable_ms(struct uart_port *port) { struct mpc52xx_psc __iomem *psc = PSC(port); /* clear D_*-bits by reading them */ in_8(&psc->mpc52xx_psc_ipcr); /* enable CTS and DCD as IPC interrupts */ out_8(&psc->mpc52xx_psc_acr, MPC52xx_PSC_IEC_CTS | MPC52xx_PSC_IEC_DCD); port->read_status_mask |= MPC52xx_PSC_IMR_IPC; out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask); } static void mpc52xx_uart_break_ctl(struct uart_port *port, int ctl) { unsigned long flags; spin_lock_irqsave(&port->lock, flags); if (ctl == -1) out_8(&PSC(port)->command, MPC52xx_PSC_START_BRK); else out_8(&PSC(port)->command, MPC52xx_PSC_STOP_BRK); spin_unlock_irqrestore(&port->lock, flags); } static int mpc52xx_uart_startup(struct uart_port *port) { struct mpc52xx_psc __iomem *psc = PSC(port); int ret; if (psc_ops->clock) { ret = 
psc_ops->clock(port, 1); if (ret) return ret; } /* Request IRQ */ ret = request_irq(port->irq, mpc52xx_uart_int, port->irqflags, "mpc52xx_psc_uart", port); if (ret) return ret; /* Reset/activate the port, clear and enable interrupts */ out_8(&psc->command, MPC52xx_PSC_RST_RX); out_8(&psc->command, MPC52xx_PSC_RST_TX); out_be32(&psc->sicr, 0); /* UART mode DCD ignored */ psc_ops->fifo_init(port); out_8(&psc->command, MPC52xx_PSC_TX_ENABLE); out_8(&psc->command, MPC52xx_PSC_RX_ENABLE); return 0; } static void mpc52xx_uart_shutdown(struct uart_port *port) { struct mpc52xx_psc __iomem *psc = PSC(port); /* Shut down the port. Leave TX active if on a console port */ out_8(&psc->command, MPC52xx_PSC_RST_RX); if (!uart_console(port)) out_8(&psc->command, MPC52xx_PSC_RST_TX); port->read_status_mask = 0; out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask); if (psc_ops->clock) psc_ops->clock(port, 0); /* Release interrupt */ free_irq(port->irq, port); } static void mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new, struct ktermios *old) { struct mpc52xx_psc __iomem *psc = PSC(port); unsigned long flags; unsigned char mr1, mr2; unsigned int j; unsigned int baud; /* Prepare what we're gonna write */ mr1 = 0; switch (new->c_cflag & CSIZE) { case CS5: mr1 |= MPC52xx_PSC_MODE_5_BITS; break; case CS6: mr1 |= MPC52xx_PSC_MODE_6_BITS; break; case CS7: mr1 |= MPC52xx_PSC_MODE_7_BITS; break; case CS8: default: mr1 |= MPC52xx_PSC_MODE_8_BITS; } if (new->c_cflag & PARENB) { mr1 |= (new->c_cflag & PARODD) ? MPC52xx_PSC_MODE_PARODD : MPC52xx_PSC_MODE_PAREVEN; } else mr1 |= MPC52xx_PSC_MODE_PARNONE; mr2 = 0; if (new->c_cflag & CSTOPB) mr2 |= MPC52xx_PSC_MODE_TWO_STOP; else mr2 |= ((new->c_cflag & CSIZE) == CS5) ? 
MPC52xx_PSC_MODE_ONE_STOP_5_BITS : MPC52xx_PSC_MODE_ONE_STOP; if (new->c_cflag & CRTSCTS) { mr1 |= MPC52xx_PSC_MODE_RXRTS; mr2 |= MPC52xx_PSC_MODE_TXCTS; } /* Get the lock */ spin_lock_irqsave(&port->lock, flags); /* Do our best to flush TX & RX, so we don't lose anything */ /* But we don't wait indefinitely ! */ j = 5000000; /* Maximum wait */ /* FIXME Can't receive chars since set_termios might be called at early * boot for the console, all stuff is not yet ready to receive at that * time and that just makes the kernel oops */ /* while (j-- && mpc52xx_uart_int_rx_chars(port)); */ while (!mpc52xx_uart_tx_empty(port) && --j) udelay(1); if (!j) printk(KERN_ERR "mpc52xx_uart.c: " "Unable to flush RX & TX fifos in-time in set_termios." "Some chars may have been lost.\n"); /* Reset the TX & RX */ out_8(&psc->command, MPC52xx_PSC_RST_RX); out_8(&psc->command, MPC52xx_PSC_RST_TX); /* Send new mode settings */ out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); out_8(&psc->mode, mr1); out_8(&psc->mode, mr2); baud = psc_ops->set_baudrate(port, new, old); /* Update the per-port timeout */ uart_update_timeout(port, new->c_cflag, baud); if (UART_ENABLE_MS(port, new->c_cflag)) mpc52xx_uart_enable_ms(port); /* Reenable TX & RX */ out_8(&psc->command, MPC52xx_PSC_TX_ENABLE); out_8(&psc->command, MPC52xx_PSC_RX_ENABLE); /* We're all set, release the lock */ spin_unlock_irqrestore(&port->lock, flags); } static const char * mpc52xx_uart_type(struct uart_port *port) { /* * We keep using PORT_MPC52xx for historic reasons although it applies * for MPC512x, too, but print "MPC5xxx" to not irritate users */ return port->type == PORT_MPC52xx ? "MPC5xxx PSC" : NULL; } static void mpc52xx_uart_release_port(struct uart_port *port) { /* remapped by us ? 
*/ if (port->flags & UPF_IOREMAP) { iounmap(port->membase); port->membase = NULL; } release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc)); } static int mpc52xx_uart_request_port(struct uart_port *port) { int err; if (port->flags & UPF_IOREMAP) /* Need to remap ? */ port->membase = ioremap(port->mapbase, sizeof(struct mpc52xx_psc)); if (!port->membase) return -EINVAL; err = request_mem_region(port->mapbase, sizeof(struct mpc52xx_psc), "mpc52xx_psc_uart") != NULL ? 0 : -EBUSY; if (err && (port->flags & UPF_IOREMAP)) { iounmap(port->membase); port->membase = NULL; } return err; } static void mpc52xx_uart_config_port(struct uart_port *port, int flags) { if ((flags & UART_CONFIG_TYPE) && (mpc52xx_uart_request_port(port) == 0)) port->type = PORT_MPC52xx; } static int mpc52xx_uart_verify_port(struct uart_port *port, struct serial_struct *ser) { if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPC52xx) return -EINVAL; if ((ser->irq != port->irq) || (ser->io_type != UPIO_MEM) || (ser->baud_base != port->uartclk) || (ser->iomem_base != (void *)port->mapbase) || (ser->hub6 != 0)) return -EINVAL; return 0; } static struct uart_ops mpc52xx_uart_ops = { .tx_empty = mpc52xx_uart_tx_empty, .set_mctrl = mpc52xx_uart_set_mctrl, .get_mctrl = mpc52xx_uart_get_mctrl, .stop_tx = mpc52xx_uart_stop_tx, .start_tx = mpc52xx_uart_start_tx, .send_xchar = mpc52xx_uart_send_xchar, .stop_rx = mpc52xx_uart_stop_rx, .enable_ms = mpc52xx_uart_enable_ms, .break_ctl = mpc52xx_uart_break_ctl, .startup = mpc52xx_uart_startup, .shutdown = mpc52xx_uart_shutdown, .set_termios = mpc52xx_uart_set_termios, /* .pm = mpc52xx_uart_pm, Not supported yet */ /* .set_wake = mpc52xx_uart_set_wake, Not supported yet */ .type = mpc52xx_uart_type, .release_port = mpc52xx_uart_release_port, .request_port = mpc52xx_uart_request_port, .config_port = mpc52xx_uart_config_port, .verify_port = mpc52xx_uart_verify_port }; /* ======================================================================== */ /* Interrupt 
handling */ /* ======================================================================== */ static inline int mpc52xx_uart_int_rx_chars(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; unsigned char ch, flag; unsigned short status; /* While we can read, do so ! */ while (psc_ops->raw_rx_rdy(port)) { /* Get the char */ ch = psc_ops->read_char(port); /* Handle sysreq char */ #ifdef SUPPORT_SYSRQ if (uart_handle_sysrq_char(port, ch)) { port->sysrq = 0; continue; } #endif /* Store it */ flag = TTY_NORMAL; port->icount.rx++; status = in_be16(&PSC(port)->mpc52xx_psc_status); if (status & (MPC52xx_PSC_SR_PE | MPC52xx_PSC_SR_FE | MPC52xx_PSC_SR_RB)) { if (status & MPC52xx_PSC_SR_RB) { flag = TTY_BREAK; uart_handle_break(port); port->icount.brk++; } else if (status & MPC52xx_PSC_SR_PE) { flag = TTY_PARITY; port->icount.parity++; } else if (status & MPC52xx_PSC_SR_FE) { flag = TTY_FRAME; port->icount.frame++; } /* Clear error condition */ out_8(&PSC(port)->command, MPC52xx_PSC_RST_ERR_STAT); } tty_insert_flip_char(tty, ch, flag); if (status & MPC52xx_PSC_SR_OE) { /* * Overrun is special, since it's * reported immediately, and doesn't * affect the current character */ tty_insert_flip_char(tty, 0, TTY_OVERRUN); port->icount.overrun++; } } spin_unlock(&port->lock); tty_flip_buffer_push(tty); spin_lock(&port->lock); return psc_ops->raw_rx_rdy(port); } static inline int mpc52xx_uart_int_tx_chars(struct uart_port *port) { struct circ_buf *xmit = &port->state->xmit; /* Process out of band chars */ if (port->x_char) { psc_ops->write_char(port, port->x_char); port->icount.tx++; port->x_char = 0; return 1; } /* Nothing to do ? 
*/ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { mpc52xx_uart_stop_tx(port); return 0; } /* Send chars */ while (psc_ops->raw_tx_rdy(port)) { psc_ops->write_char(port, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; if (uart_circ_empty(xmit)) break; } /* Wake up */ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); /* Maybe we're done after all */ if (uart_circ_empty(xmit)) { mpc52xx_uart_stop_tx(port); return 0; } return 1; } static irqreturn_t mpc5xxx_uart_process_int(struct uart_port *port) { unsigned long pass = ISR_PASS_LIMIT; unsigned int keepgoing; u8 status; /* While we have stuff to do, we continue */ do { /* If we don't find anything to do, we stop */ keepgoing = 0; psc_ops->rx_clr_irq(port); if (psc_ops->rx_rdy(port)) keepgoing |= mpc52xx_uart_int_rx_chars(port); psc_ops->tx_clr_irq(port); if (psc_ops->tx_rdy(port)) keepgoing |= mpc52xx_uart_int_tx_chars(port); status = in_8(&PSC(port)->mpc52xx_psc_ipcr); if (status & MPC52xx_PSC_D_DCD) uart_handle_dcd_change(port, !(status & MPC52xx_PSC_DCD)); if (status & MPC52xx_PSC_D_CTS) uart_handle_cts_change(port, !(status & MPC52xx_PSC_CTS)); /* Limit number of iteration */ if (!(--pass)) keepgoing = 0; } while (keepgoing); return IRQ_HANDLED; } static irqreturn_t mpc52xx_uart_int(int irq, void *dev_id) { struct uart_port *port = dev_id; irqreturn_t ret; spin_lock(&port->lock); ret = psc_ops->handle_irq(port); spin_unlock(&port->lock); return ret; } /* ======================================================================== */ /* Console ( if applicable ) */ /* ======================================================================== */ #ifdef CONFIG_SERIAL_MPC52xx_CONSOLE static void __init mpc52xx_console_get_options(struct uart_port *port, int *baud, int *parity, int *bits, int *flow) { struct mpc52xx_psc __iomem *psc = PSC(port); unsigned char mr1; pr_debug("mpc52xx_console_get_options(port=%p)\n", port); /* Read the mode 
registers */ out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); mr1 = in_8(&psc->mode); /* CT{U,L}R are write-only ! */ *baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD; /* Parse them */ switch (mr1 & MPC52xx_PSC_MODE_BITS_MASK) { case MPC52xx_PSC_MODE_5_BITS: *bits = 5; break; case MPC52xx_PSC_MODE_6_BITS: *bits = 6; break; case MPC52xx_PSC_MODE_7_BITS: *bits = 7; break; case MPC52xx_PSC_MODE_8_BITS: default: *bits = 8; } if (mr1 & MPC52xx_PSC_MODE_PARNONE) *parity = 'n'; else *parity = mr1 & MPC52xx_PSC_MODE_PARODD ? 'o' : 'e'; } static void mpc52xx_console_write(struct console *co, const char *s, unsigned int count) { struct uart_port *port = &mpc52xx_uart_ports[co->index]; unsigned int i, j; /* Disable interrupts */ psc_ops->cw_disable_ints(port); /* Wait the TX buffer to be empty */ j = 5000000; /* Maximum wait */ while (!mpc52xx_uart_tx_empty(port) && --j) udelay(1); /* Write all the chars */ for (i = 0; i < count; i++, s++) { /* Line return handling */ if (*s == '\n') psc_ops->write_char(port, '\r'); /* Send the char */ psc_ops->write_char(port, *s); /* Wait the TX buffer to be empty */ j = 20000; /* Maximum wait */ while (!mpc52xx_uart_tx_empty(port) && --j) udelay(1); } /* Restore interrupt state */ psc_ops->cw_restore_ints(port); } static int __init mpc52xx_console_setup(struct console *co, char *options) { struct uart_port *port = &mpc52xx_uart_ports[co->index]; struct device_node *np = mpc52xx_uart_nodes[co->index]; unsigned int uartclk; struct resource res; int ret; int baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD; int bits = 8; int parity = 'n'; int flow = 'n'; pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n", co, co->index, options); if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) { pr_debug("PSC%x out of range\n", co->index); return -EINVAL; } if (!np) { pr_debug("PSC%x not found in device tree\n", co->index); return -EINVAL; } pr_debug("Console on ttyPSC%x is %s\n", co->index, mpc52xx_uart_nodes[co->index]->full_name); /* Fetch 
register locations */ ret = of_address_to_resource(np, 0, &res); if (ret) { pr_debug("Could not get resources for PSC%x\n", co->index); return ret; } uartclk = mpc5xxx_get_bus_frequency(np); if (uartclk == 0) { pr_debug("Could not find uart clock frequency!\n"); return -EINVAL; } /* Basic port init. Needed since we use some uart_??? func before * real init for early access */ spin_lock_init(&port->lock); port->uartclk = uartclk; port->ops = &mpc52xx_uart_ops; port->mapbase = res.start; port->membase = ioremap(res.start, sizeof(struct mpc52xx_psc)); port->irq = irq_of_parse_and_map(np, 0); if (port->membase == NULL) return -EINVAL; pr_debug("mpc52xx-psc uart at %p, mapped to %p, irq=%x, freq=%i\n", (void *)port->mapbase, port->membase, port->irq, port->uartclk); /* Setup the port parameters accoding to options */ if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else mpc52xx_console_get_options(port, &baud, &parity, &bits, &flow); pr_debug("Setting console parameters: %i %i%c1 flow=%c\n", baud, bits, parity, flow); return uart_set_options(port, co, baud, parity, bits, flow); } static struct uart_driver mpc52xx_uart_driver; static struct console mpc52xx_console = { .name = "ttyPSC", .write = mpc52xx_console_write, .device = uart_console_device, .setup = mpc52xx_console_setup, .flags = CON_PRINTBUFFER, .index = -1, /* Specified on the cmdline (e.g. 
console=ttyPSC0) */ .data = &mpc52xx_uart_driver, }; static int __init mpc52xx_console_init(void) { mpc52xx_uart_of_enumerate(); register_console(&mpc52xx_console); return 0; } console_initcall(mpc52xx_console_init); #define MPC52xx_PSC_CONSOLE &mpc52xx_console #else #define MPC52xx_PSC_CONSOLE NULL #endif /* ======================================================================== */ /* UART Driver */ /* ======================================================================== */ static struct uart_driver mpc52xx_uart_driver = { .driver_name = "mpc52xx_psc_uart", .dev_name = "ttyPSC", .major = SERIAL_PSC_MAJOR, .minor = SERIAL_PSC_MINOR, .nr = MPC52xx_PSC_MAXNUM, .cons = MPC52xx_PSC_CONSOLE, }; /* ======================================================================== */ /* OF Platform Driver */ /* ======================================================================== */ static struct of_device_id mpc52xx_uart_of_match[] = { #ifdef CONFIG_PPC_MPC52xx { .compatible = "fsl,mpc5200b-psc-uart", .data = &mpc5200b_psc_ops, }, { .compatible = "fsl,mpc5200-psc-uart", .data = &mpc52xx_psc_ops, }, /* binding used by old lite5200 device trees: */ { .compatible = "mpc5200-psc-uart", .data = &mpc52xx_psc_ops, }, /* binding used by efika: */ { .compatible = "mpc5200-serial", .data = &mpc52xx_psc_ops, }, #endif #ifdef CONFIG_PPC_MPC512x { .compatible = "fsl,mpc5121-psc-uart", .data = &mpc512x_psc_ops, }, #endif {}, }; static int __devinit mpc52xx_uart_of_probe(struct platform_device *op) { int idx = -1; unsigned int uartclk; struct uart_port *port = NULL; struct resource res; int ret; /* Check validity & presence */ for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++) if (mpc52xx_uart_nodes[idx] == op->dev.of_node) break; if (idx >= MPC52xx_PSC_MAXNUM) return -EINVAL; pr_debug("Found %s assigned to ttyPSC%x\n", mpc52xx_uart_nodes[idx]->full_name, idx); /* set the uart clock to the input clock of the psc, the different * prescalers are taken into account in the set_baudrate() methods 
* of the respective chip */ uartclk = mpc5xxx_get_bus_frequency(op->dev.of_node); if (uartclk == 0) { dev_dbg(&op->dev, "Could not find uart clock frequency!\n"); return -EINVAL; } /* Init the port structure */ port = &mpc52xx_uart_ports[idx]; spin_lock_init(&port->lock); port->uartclk = uartclk; port->fifosize = 512; port->iotype = UPIO_MEM; port->flags = UPF_BOOT_AUTOCONF | (uart_console(port) ? 0 : UPF_IOREMAP); port->line = idx; port->ops = &mpc52xx_uart_ops; port->dev = &op->dev; /* Search for IRQ and mapbase */ ret = of_address_to_resource(op->dev.of_node, 0, &res); if (ret) return ret; port->mapbase = res.start; if (!port->mapbase) { dev_dbg(&op->dev, "Could not allocate resources for PSC\n"); return -EINVAL; } psc_ops->get_irq(port, op->dev.of_node); if (port->irq == 0) { dev_dbg(&op->dev, "Could not get irq\n"); return -EINVAL; } dev_dbg(&op->dev, "mpc52xx-psc uart at %p, irq=%x, freq=%i\n", (void *)port->mapbase, port->irq, port->uartclk); /* Add the port to the uart sub-system */ ret = uart_add_one_port(&mpc52xx_uart_driver, port); if (ret) return ret; dev_set_drvdata(&op->dev, (void *)port); return 0; } static int mpc52xx_uart_of_remove(struct platform_device *op) { struct uart_port *port = dev_get_drvdata(&op->dev); dev_set_drvdata(&op->dev, NULL); if (port) uart_remove_one_port(&mpc52xx_uart_driver, port); return 0; } #ifdef CONFIG_PM static int mpc52xx_uart_of_suspend(struct platform_device *op, pm_message_t state) { struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev); if (port) uart_suspend_port(&mpc52xx_uart_driver, port); return 0; } static int mpc52xx_uart_of_resume(struct platform_device *op) { struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev); if (port) uart_resume_port(&mpc52xx_uart_driver, port); return 0; } #endif static void mpc52xx_uart_of_assign(struct device_node *np) { int i; /* Find the first free PSC number */ for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) { if (mpc52xx_uart_nodes[i] == NULL) { 
of_node_get(np); mpc52xx_uart_nodes[i] = np; return; } } } static void mpc52xx_uart_of_enumerate(void) { static int enum_done; struct device_node *np; const struct of_device_id *match; int i; if (enum_done) return; /* Assign index to each PSC in device tree */ for_each_matching_node(np, mpc52xx_uart_of_match) { match = of_match_node(mpc52xx_uart_of_match, np); psc_ops = match->data; mpc52xx_uart_of_assign(np); } enum_done = 1; for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) { if (mpc52xx_uart_nodes[i]) pr_debug("%s assigned to ttyPSC%x\n", mpc52xx_uart_nodes[i]->full_name, i); } } MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match); static struct platform_driver mpc52xx_uart_of_driver = { .probe = mpc52xx_uart_of_probe, .remove = mpc52xx_uart_of_remove, #ifdef CONFIG_PM .suspend = mpc52xx_uart_of_suspend, .resume = mpc52xx_uart_of_resume, #endif .driver = { .name = "mpc52xx-psc-uart", .owner = THIS_MODULE, .of_match_table = mpc52xx_uart_of_match, }, }; /* ======================================================================== */ /* Module */ /* ======================================================================== */ static int __init mpc52xx_uart_init(void) { int ret; printk(KERN_INFO "Serial: MPC52xx PSC UART driver\n"); ret = uart_register_driver(&mpc52xx_uart_driver); if (ret) { printk(KERN_ERR "%s: uart_register_driver failed (%i)\n", __FILE__, ret); return ret; } mpc52xx_uart_of_enumerate(); /* * Map the PSC FIFO Controller and init if on MPC512x. 
*/ if (psc_ops && psc_ops->fifoc_init) { ret = psc_ops->fifoc_init(); if (ret) return ret; } ret = platform_driver_register(&mpc52xx_uart_of_driver); if (ret) { printk(KERN_ERR "%s: platform_driver_register failed (%i)\n", __FILE__, ret); uart_unregister_driver(&mpc52xx_uart_driver); return ret; } return 0; } static void __exit mpc52xx_uart_exit(void) { if (psc_ops->fifoc_uninit) psc_ops->fifoc_uninit(); platform_driver_unregister(&mpc52xx_uart_of_driver); uart_unregister_driver(&mpc52xx_uart_driver); } module_init(mpc52xx_uart_init); module_exit(mpc52xx_uart_exit); MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>"); MODULE_DESCRIPTION("Freescale MPC52xx PSC UART"); MODULE_LICENSE("GPL");
gpl-2.0
12019/old_samsung-lt02wifi-kernel
drivers/pci/pcie/pme.c
5199
11432
/* * PCIe Native PME support * * Copyright (C) 2007 - 2009 Intel Corp * Copyright (C) 2007 - 2009 Shaohua Li <shaohua.li@intel.com> * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. * * This file is subject to the terms and conditions of the GNU General Public * License V2. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/pcieport_if.h> #include <linux/acpi.h> #include <linux/pci-acpi.h> #include <linux/pm_runtime.h> #include "../pci.h" #include "portdrv.h" /* * If this switch is set, MSI will not be used for PCIe PME signaling. This * causes the PCIe port driver to use INTx interrupts only, but it turns out * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based * wake-up from system sleep states. */ bool pcie_pme_msi_disabled; static int __init pcie_pme_setup(char *str) { if (!strncmp(str, "nomsi", 5)) pcie_pme_msi_disabled = true; return 1; } __setup("pcie_pme=", pcie_pme_setup); struct pcie_pme_service_data { spinlock_t lock; struct pcie_device *srv; struct work_struct work; bool noirq; /* Don't enable the PME interrupt used by this service. */ }; /** * pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation. * @dev: PCIe root port or event collector. * @enable: Enable or disable the interrupt. */ void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) { int rtctl_pos; u16 rtctl; rtctl_pos = pci_pcie_cap(dev) + PCI_EXP_RTCTL; pci_read_config_word(dev, rtctl_pos, &rtctl); if (enable) rtctl |= PCI_EXP_RTCTL_PMEIE; else rtctl &= ~PCI_EXP_RTCTL_PMEIE; pci_write_config_word(dev, rtctl_pos, rtctl); } /** * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#. * @bus: PCI bus to scan. 
* * Scan given PCI bus and all buses under it for devices asserting PME#. */ static bool pcie_pme_walk_bus(struct pci_bus *bus) { struct pci_dev *dev; bool ret = false; list_for_each_entry(dev, &bus->devices, bus_list) { /* Skip PCIe devices in case we started from a root port. */ if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { if (dev->pme_poll) dev->pme_poll = false; pci_wakeup_event(dev); pm_request_resume(&dev->dev); ret = true; } if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate)) ret = true; } return ret; } /** * pcie_pme_from_pci_bridge - Check if PCIe-PCI bridge generated a PME. * @bus: Secondary bus of the bridge. * @devfn: Device/function number to check. * * PME from PCI devices under a PCIe-PCI bridge may be converted to an in-band * PCIe PME message. In such that case the bridge should use the Requester ID * of device/function number 0 on its secondary bus. */ static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn) { struct pci_dev *dev; bool found = false; if (devfn) return false; dev = pci_dev_get(bus->self); if (!dev) return false; if (pci_is_pcie(dev) && dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { down_read(&pci_bus_sem); if (pcie_pme_walk_bus(bus)) found = true; up_read(&pci_bus_sem); } pci_dev_put(dev); return found; } /** * pcie_pme_handle_request - Find device that generated PME and handle it. * @port: Root port or event collector that generated the PME interrupt. * @req_id: PCIe Requester ID of the device that generated the PME. */ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id) { u8 busnr = req_id >> 8, devfn = req_id & 0xff; struct pci_bus *bus; struct pci_dev *dev; bool found = false; /* First, check if the PME is from the root port itself. 
*/ if (port->devfn == devfn && port->bus->number == busnr) { if (port->pme_poll) port->pme_poll = false; if (pci_check_pme_status(port)) { pm_request_resume(&port->dev); found = true; } else { /* * Apparently, the root port generated the PME on behalf * of a non-PCIe device downstream. If this is done by * a root port, the Requester ID field in its status * register may contain either the root port's, or the * source device's information (PCI Express Base * Specification, Rev. 2.0, Section 6.1.9). */ down_read(&pci_bus_sem); found = pcie_pme_walk_bus(port->subordinate); up_read(&pci_bus_sem); } goto out; } /* Second, find the bus the source device is on. */ bus = pci_find_bus(pci_domain_nr(port->bus), busnr); if (!bus) goto out; /* Next, check if the PME is from a PCIe-PCI bridge. */ found = pcie_pme_from_pci_bridge(bus, devfn); if (found) goto out; /* Finally, try to find the PME source on the bus. */ down_read(&pci_bus_sem); list_for_each_entry(dev, &bus->devices, bus_list) { pci_dev_get(dev); if (dev->devfn == devfn) { found = true; break; } pci_dev_put(dev); } up_read(&pci_bus_sem); if (found) { /* The device is there, but we have to check its PME status. */ found = pci_check_pme_status(dev); if (found) { if (dev->pme_poll) dev->pme_poll = false; pci_wakeup_event(dev); pm_request_resume(&dev->dev); } pci_dev_put(dev); } else if (devfn) { /* * The device is not there, but we can still try to recover by * assuming that the PME was reported by a PCIe-PCI bridge that * used devfn different from zero. */ dev_dbg(&port->dev, "PME interrupt generated for " "non-existent device %02x:%02x.%d\n", busnr, PCI_SLOT(devfn), PCI_FUNC(devfn)); found = pcie_pme_from_pci_bridge(bus, 0); } out: if (!found) dev_dbg(&port->dev, "Spurious native PME interrupt!\n"); } /** * pcie_pme_work_fn - Work handler for PCIe PME interrupt. * @work: Work structure giving access to service data. 
*/ static void pcie_pme_work_fn(struct work_struct *work) { struct pcie_pme_service_data *data = container_of(work, struct pcie_pme_service_data, work); struct pci_dev *port = data->srv->port; int rtsta_pos; u32 rtsta; rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA; spin_lock_irq(&data->lock); for (;;) { if (data->noirq) break; pci_read_config_dword(port, rtsta_pos, &rtsta); if (rtsta & PCI_EXP_RTSTA_PME) { /* * Clear PME status of the port. If there are other * pending PMEs, the status will be set again. */ pcie_clear_root_pme_status(port); spin_unlock_irq(&data->lock); pcie_pme_handle_request(port, rtsta & 0xffff); spin_lock_irq(&data->lock); continue; } /* No need to loop if there are no more PMEs pending. */ if (!(rtsta & PCI_EXP_RTSTA_PENDING)) break; spin_unlock_irq(&data->lock); cpu_relax(); spin_lock_irq(&data->lock); } if (!data->noirq) pcie_pme_interrupt_enable(port, true); spin_unlock_irq(&data->lock); } /** * pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt. * @irq: Interrupt vector. * @context: Interrupt context pointer. */ static irqreturn_t pcie_pme_irq(int irq, void *context) { struct pci_dev *port; struct pcie_pme_service_data *data; int rtsta_pos; u32 rtsta; unsigned long flags; port = ((struct pcie_device *)context)->port; data = get_service_data((struct pcie_device *)context); rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA; spin_lock_irqsave(&data->lock, flags); pci_read_config_dword(port, rtsta_pos, &rtsta); if (!(rtsta & PCI_EXP_RTSTA_PME)) { spin_unlock_irqrestore(&data->lock, flags); return IRQ_NONE; } pcie_pme_interrupt_enable(port, false); spin_unlock_irqrestore(&data->lock, flags); /* We don't use pm_wq, because it's freezable. */ schedule_work(&data->work); return IRQ_HANDLED; } /** * pcie_pme_set_native - Set the PME interrupt flag for given device. * @dev: PCI device to handle. * @ign: Ignored. 
*/ static int pcie_pme_set_native(struct pci_dev *dev, void *ign) { dev_info(&dev->dev, "Signaling PME through PCIe PME interrupt\n"); device_set_run_wake(&dev->dev, true); dev->pme_interrupt = true; return 0; } /** * pcie_pme_mark_devices - Set the PME interrupt flag for devices below a port. * @port: PCIe root port or event collector to handle. * * For each device below given root port, including the port itself (or for each * root complex integrated endpoint if @port is a root complex event collector) * set the flag indicating that it can signal run-time wake-up events via PCIe * PME interrupts. */ static void pcie_pme_mark_devices(struct pci_dev *port) { pcie_pme_set_native(port, NULL); if (port->subordinate) { pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL); } else { struct pci_bus *bus = port->bus; struct pci_dev *dev; /* Check if this is a root port event collector. */ if (port->pcie_type != PCI_EXP_TYPE_RC_EC || !bus) return; down_read(&pci_bus_sem); list_for_each_entry(dev, &bus->devices, bus_list) if (pci_is_pcie(dev) && dev->pcie_type == PCI_EXP_TYPE_RC_END) pcie_pme_set_native(dev, NULL); up_read(&pci_bus_sem); } } /** * pcie_pme_probe - Initialize PCIe PME service for given root port. * @srv: PCIe service to initialize. */ static int pcie_pme_probe(struct pcie_device *srv) { struct pci_dev *port; struct pcie_pme_service_data *data; int ret; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; spin_lock_init(&data->lock); INIT_WORK(&data->work, pcie_pme_work_fn); data->srv = srv; set_service_data(srv, data); port = srv->port; pcie_pme_interrupt_enable(port, false); pcie_clear_root_pme_status(port); ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv); if (ret) { kfree(data); } else { pcie_pme_mark_devices(port); pcie_pme_interrupt_enable(port, true); } return ret; } /** * pcie_pme_suspend - Suspend PCIe PME service device. * @srv: PCIe service device to suspend. 
*/ static int pcie_pme_suspend(struct pcie_device *srv) { struct pcie_pme_service_data *data = get_service_data(srv); struct pci_dev *port = srv->port; spin_lock_irq(&data->lock); pcie_pme_interrupt_enable(port, false); pcie_clear_root_pme_status(port); data->noirq = true; spin_unlock_irq(&data->lock); synchronize_irq(srv->irq); return 0; } /** * pcie_pme_resume - Resume PCIe PME service device. * @srv - PCIe service device to resume. */ static int pcie_pme_resume(struct pcie_device *srv) { struct pcie_pme_service_data *data = get_service_data(srv); struct pci_dev *port = srv->port; spin_lock_irq(&data->lock); data->noirq = false; pcie_clear_root_pme_status(port); pcie_pme_interrupt_enable(port, true); spin_unlock_irq(&data->lock); return 0; } /** * pcie_pme_remove - Prepare PCIe PME service device for removal. * @srv - PCIe service device to resume. */ static void pcie_pme_remove(struct pcie_device *srv) { pcie_pme_suspend(srv); free_irq(srv->irq, srv); kfree(get_service_data(srv)); } static struct pcie_port_service_driver pcie_pme_driver = { .name = "pcie_pme", .port_type = PCI_EXP_TYPE_ROOT_PORT, .service = PCIE_PORT_SERVICE_PME, .probe = pcie_pme_probe, .suspend = pcie_pme_suspend, .resume = pcie_pme_resume, .remove = pcie_pme_remove, }; /** * pcie_pme_service_init - Register the PCIe PME service driver. */ static int __init pcie_pme_service_init(void) { return pcie_port_service_register(&pcie_pme_driver); } module_init(pcie_pme_service_init);
gpl-2.0
wooshy1/android-tegra-nv-3.1
fs/ubifs/scan.c
5455
9657
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Adrian Hunter * Artem Bityutskiy (Битюцкий Артём) */ /* * This file implements the scan which is a general-purpose function for * determining what nodes are in an eraseblock. The scan is used to replay the * journal, to do garbage collection. for the TNC in-the-gaps method, and by * debugging functions. */ #include "ubifs.h" /** * scan_padding_bytes - scan for padding bytes. * @buf: buffer to scan * @len: length of buffer * * This function returns the number of padding bytes on success and * %SCANNED_GARBAGE on failure. */ static int scan_padding_bytes(void *buf, int len) { int pad_len = 0, max_pad_len = min_t(int, UBIFS_PAD_NODE_SZ, len); uint8_t *p = buf; dbg_scan("not a node"); while (pad_len < max_pad_len && *p++ == UBIFS_PADDING_BYTE) pad_len += 1; if (!pad_len || (pad_len & 7)) return SCANNED_GARBAGE; dbg_scan("%d padding bytes", pad_len); return pad_len; } /** * ubifs_scan_a_node - scan for a node or padding. * @c: UBIFS file-system description object * @buf: buffer to scan * @len: length of buffer * @lnum: logical eraseblock number * @offs: offset within the logical eraseblock * @quiet: print no messages * * This function returns a scanning code to indicate what was scanned. 
*/ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum, int offs, int quiet) { struct ubifs_ch *ch = buf; uint32_t magic; magic = le32_to_cpu(ch->magic); if (magic == 0xFFFFFFFF) { dbg_scan("hit empty space"); return SCANNED_EMPTY_SPACE; } if (magic != UBIFS_NODE_MAGIC) return scan_padding_bytes(buf, len); if (len < UBIFS_CH_SZ) return SCANNED_GARBAGE; dbg_scan("scanning %s", dbg_ntype(ch->node_type)); if (ubifs_check_node(c, buf, lnum, offs, quiet, 1)) return SCANNED_A_CORRUPT_NODE; if (ch->node_type == UBIFS_PAD_NODE) { struct ubifs_pad_node *pad = buf; int pad_len = le32_to_cpu(pad->pad_len); int node_len = le32_to_cpu(ch->len); /* Validate the padding node */ if (pad_len < 0 || offs + node_len + pad_len > c->leb_size) { if (!quiet) { ubifs_err("bad pad node at LEB %d:%d", lnum, offs); dbg_dump_node(c, pad); } return SCANNED_A_BAD_PAD_NODE; } /* Make the node pads to 8-byte boundary */ if ((node_len + pad_len) & 7) { if (!quiet) dbg_err("bad padding length %d - %d", offs, offs + node_len + pad_len); return SCANNED_A_BAD_PAD_NODE; } dbg_scan("%d bytes padded, offset now %d", pad_len, ALIGN(offs + node_len + pad_len, 8)); return node_len + pad_len; } return SCANNED_A_NODE; } /** * ubifs_start_scan - create LEB scanning information at start of scan. * @c: UBIFS file-system description object * @lnum: logical eraseblock number * @offs: offset to start at (usually zero) * @sbuf: scan buffer (must be c->leb_size) * * This function returns %0 on success and a negative error code on failure. 
*/ struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum, int offs, void *sbuf) { struct ubifs_scan_leb *sleb; int err; dbg_scan("scan LEB %d:%d", lnum, offs); sleb = kzalloc(sizeof(struct ubifs_scan_leb), GFP_NOFS); if (!sleb) return ERR_PTR(-ENOMEM); sleb->lnum = lnum; INIT_LIST_HEAD(&sleb->nodes); sleb->buf = sbuf; err = ubifs_leb_read(c, lnum, sbuf + offs, offs, c->leb_size - offs, 0); if (err && err != -EBADMSG) { ubifs_err("cannot read %d bytes from LEB %d:%d," " error %d", c->leb_size - offs, lnum, offs, err); kfree(sleb); return ERR_PTR(err); } if (err == -EBADMSG) sleb->ecc = 1; return sleb; } /** * ubifs_end_scan - update LEB scanning information at end of scan. * @c: UBIFS file-system description object * @sleb: scanning information * @lnum: logical eraseblock number * @offs: offset to start at (usually zero) * * This function returns %0 on success and a negative error code on failure. */ void ubifs_end_scan(const struct ubifs_info *c, struct ubifs_scan_leb *sleb, int lnum, int offs) { lnum = lnum; dbg_scan("stop scanning LEB %d at offset %d", lnum, offs); ubifs_assert(offs % c->min_io_size == 0); sleb->endpt = ALIGN(offs, c->min_io_size); } /** * ubifs_add_snod - add a scanned node to LEB scanning information. * @c: UBIFS file-system description object * @sleb: scanning information * @buf: buffer containing node * @offs: offset of node on flash * * This function returns %0 on success and a negative error code on failure. 
*/ int ubifs_add_snod(const struct ubifs_info *c, struct ubifs_scan_leb *sleb, void *buf, int offs) { struct ubifs_ch *ch = buf; struct ubifs_ino_node *ino = buf; struct ubifs_scan_node *snod; snod = kmalloc(sizeof(struct ubifs_scan_node), GFP_NOFS); if (!snod) return -ENOMEM; snod->sqnum = le64_to_cpu(ch->sqnum); snod->type = ch->node_type; snod->offs = offs; snod->len = le32_to_cpu(ch->len); snod->node = buf; switch (ch->node_type) { case UBIFS_INO_NODE: case UBIFS_DENT_NODE: case UBIFS_XENT_NODE: case UBIFS_DATA_NODE: /* * The key is in the same place in all keyed * nodes. */ key_read(c, &ino->key, &snod->key); break; default: invalid_key_init(c, &snod->key); break; } list_add_tail(&snod->list, &sleb->nodes); sleb->nodes_cnt += 1; return 0; } /** * ubifs_scanned_corruption - print information after UBIFS scanned corruption. * @c: UBIFS file-system description object * @lnum: LEB number of corruption * @offs: offset of corruption * @buf: buffer containing corruption */ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs, void *buf) { int len; ubifs_err("corruption at LEB %d:%d", lnum, offs); if (dbg_is_tst_rcvry(c)) return; len = c->leb_size - offs; if (len > 8192) len = 8192; dbg_err("first %d bytes from LEB %d:%d", len, lnum, offs); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 4, buf, len, 1); } /** * ubifs_scan - scan a logical eraseblock. * @c: UBIFS file-system description object * @lnum: logical eraseblock number * @offs: offset to start at (usually zero) * @sbuf: scan buffer (must be of @c->leb_size bytes in size) * @quiet: print no messages * * This function scans LEB number @lnum and returns complete information about * its contents. Returns the scaned information in case of success and, * %-EUCLEAN if the LEB neads recovery, and other negative error codes in case * of failure. * * If @quiet is non-zero, this function does not print large and scary * error messages and flash dumps in case of errors. 
 */
struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
				  int offs, void *sbuf, int quiet)
{
	void *buf = sbuf + offs;
	int err, len = c->leb_size - offs;
	struct ubifs_scan_leb *sleb;

	sleb = ubifs_start_scan(c, lnum, offs, sbuf);
	if (IS_ERR(sleb))
		return sleb;

	/*
	 * Walk the LEB node by node. 8 is the minimum amount of data a valid
	 * node (or padding run) can occupy, so anything shorter is checked
	 * as empty space below.
	 */
	while (len >= 8) {
		struct ubifs_ch *ch = buf;
		int node_len, ret;

		dbg_scan("look at LEB %d:%d (%d bytes left)",
			 lnum, offs, len);

		cond_resched();

		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet);
		if (ret > 0) {
			/* Padding bytes or a valid padding node */
			offs += ret;
			buf += ret;
			len -= ret;
			continue;
		}

		if (ret == SCANNED_EMPTY_SPACE)
			/* Empty space is checked later */
			break;

		switch (ret) {
		case SCANNED_GARBAGE:
			dbg_err("garbage");
			goto corrupted;
		case SCANNED_A_NODE:
			break;
		case SCANNED_A_CORRUPT_NODE:
		case SCANNED_A_BAD_PAD_NODE:
			dbg_err("bad node");
			goto corrupted;
		default:
			dbg_err("unknown");
			err = -EINVAL;
			goto error;
		}

		/* A good node - record it and advance past it (8-aligned) */
		err = ubifs_add_snod(c, sleb, buf, offs);
		if (err)
			goto error;

		node_len = ALIGN(le32_to_cpu(ch->len), 8);
		offs += node_len;
		buf += node_len;
		len -= node_len;
	}

	/* Empty space must begin on a min. I/O unit boundary */
	if (offs % c->min_io_size) {
		if (!quiet)
			ubifs_err("empty space starts at non-aligned offset %d",
				  offs);
		goto corrupted;
	}

	ubifs_end_scan(c, sleb, lnum, offs);

	/*
	 * Verify the rest of the LEB is erased (all 0xFF): first a fast
	 * word-at-a-time pass, then a byte-at-a-time pass for the tail and
	 * for pinpointing the first non-0xFF byte.
	 */
	for (; len > 4; offs += 4, buf = buf + 4, len -= 4)
		if (*(uint32_t *)buf != 0xffffffff)
			break;

	for (; len; offs++, buf++, len--)
		if (*(uint8_t *)buf != 0xff) {
			if (!quiet)
				ubifs_err("corrupt empty space at LEB %d:%d",
					  lnum, offs);
			goto corrupted;
		}

	return sleb;

corrupted:
	if (!quiet) {
		ubifs_scanned_corruption(c, lnum, offs, buf);
		ubifs_err("LEB %d scanning failed", lnum);
	}
	err = -EUCLEAN;
	ubifs_scan_destroy(sleb);
	return ERR_PTR(err);

error:
	ubifs_err("LEB %d scanning failed, error %d", lnum, err);
	ubifs_scan_destroy(sleb);
	return ERR_PTR(err);
}

/**
 * ubifs_scan_destroy - destroy LEB scanning information.
* @sleb: scanning information to free */ void ubifs_scan_destroy(struct ubifs_scan_leb *sleb) { struct ubifs_scan_node *node; struct list_head *head; head = &sleb->nodes; while (!list_empty(head)) { node = list_entry(head->next, struct ubifs_scan_node, list); list_del(&node->list); kfree(node); } kfree(sleb); }
gpl-2.0
PureNexusProject/android_kernel_asus_fugu
arch/arm/mach-pxa/smemc.c
6735
1713
/* * Static Memory Controller */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/syscore_ops.h> #include <mach/hardware.h> #include <mach/smemc.h> #ifdef CONFIG_PM static unsigned long msc[2]; static unsigned long sxcnfg, memclkcfg; static unsigned long csadrcfg[4]; static int pxa3xx_smemc_suspend(void) { msc[0] = __raw_readl(MSC0); msc[1] = __raw_readl(MSC1); sxcnfg = __raw_readl(SXCNFG); memclkcfg = __raw_readl(MEMCLKCFG); csadrcfg[0] = __raw_readl(CSADRCFG0); csadrcfg[1] = __raw_readl(CSADRCFG1); csadrcfg[2] = __raw_readl(CSADRCFG2); csadrcfg[3] = __raw_readl(CSADRCFG3); return 0; } static void pxa3xx_smemc_resume(void) { __raw_writel(msc[0], MSC0); __raw_writel(msc[1], MSC1); __raw_writel(sxcnfg, SXCNFG); __raw_writel(memclkcfg, MEMCLKCFG); __raw_writel(csadrcfg[0], CSADRCFG0); __raw_writel(csadrcfg[1], CSADRCFG1); __raw_writel(csadrcfg[2], CSADRCFG2); __raw_writel(csadrcfg[3], CSADRCFG3); /* CSMSADRCFG wakes up in its default state (0), so we need to set it */ __raw_writel(0x2, CSMSADRCFG); } static struct syscore_ops smemc_syscore_ops = { .suspend = pxa3xx_smemc_suspend, .resume = pxa3xx_smemc_resume, }; static int __init smemc_init(void) { if (cpu_is_pxa3xx()) { /* * The only documentation we have on the * Chip Select Configuration Register (CSMSADRCFG) is that * it must be programmed to 0x2. * Moreover, in the bit definitions, the second bit * (CSMSADRCFG[1]) is called "SETALWAYS". * Other bits are reserved in this register. */ __raw_writel(0x2, CSMSADRCFG); register_syscore_ops(&smemc_syscore_ops); } return 0; } subsys_initcall(smemc_init); #endif
gpl-2.0
ariev7x/S7270_foxkernel
arch/m68k/mac/oss.c
6991
5471
/* * Operating System Services (OSS) chip handling * Written by Joshua M. Thompson (funaho@jurai.org) * * * This chip is used in the IIfx in place of VIA #2. It acts like a fancy * VIA chip with prorammable interrupt levels. * * 990502 (jmt) - Major rewrite for new interrupt architecture as well as some * recent insights into OSS operational details. * 990610 (jmt) - Now taking full advantage of the OSS. Interrupts are mapped * to mostly match the A/UX interrupt scheme supported on the * VIA side. Also added support for enabling the ISM irq again * since we now have a functional IOP manager. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/irq.h> #include <asm/bootinfo.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_via.h> #include <asm/mac_oss.h> int oss_present; volatile struct mac_oss *oss; /* * Initialize the OSS * * The OSS "detection" code is actually in via_init() which is always called * before us. Thus we can count on oss_present being valid on entry. */ void __init oss_init(void) { int i; if (!oss_present) return; oss = (struct mac_oss *) OSS_BASE; /* Disable all interrupts. Unlike a VIA it looks like we */ /* do this by setting the source's interrupt level to zero. */ for (i = 0; i <= OSS_NUM_SOURCES; i++) { oss->irq_level[i] = 0; } } /* * Initialize OSS for Nubus access */ void __init oss_nubus_init(void) { } /* * Handle miscellaneous OSS interrupts. 
 */
static void oss_irq(unsigned int irq, struct irq_desc *desc)
{
	/* Only the sources this handler owns: SCC, SCSI and ADB/ISM */
	int events = oss->irq_pending &
		(OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM);

#ifdef DEBUG_IRQS
	if ((console_loglevel == 10) && !(events & OSS_IP_SCSI)) {
		printk("oss_irq: irq %u events = 0x%04X\n", irq,
			(int) oss->irq_pending);
	}
#endif
	/* For each pending source: ack it in the OSS, then dispatch */
	if (events & OSS_IP_IOPSCC) {
		oss->irq_pending &= ~OSS_IP_IOPSCC;
		generic_handle_irq(IRQ_MAC_SCC);
	}

	if (events & OSS_IP_SCSI) {
		oss->irq_pending &= ~OSS_IP_SCSI;
		generic_handle_irq(IRQ_MAC_SCSI);
	}

	if (events & OSS_IP_IOPISM) {
		oss->irq_pending &= ~OSS_IP_IOPISM;
		generic_handle_irq(IRQ_MAC_ADB);
	}
}

/*
 * Nubus IRQ handler, OSS style
 *
 * Unlike the VIA/RBV this is on its own autovector interrupt level.
 */

static void oss_nubus_irq(unsigned int irq, struct irq_desc *desc)
{
	int events, irq_bit, i;

	events = oss->irq_pending & OSS_IP_NUBUS;
	if (!events)
		return;

#ifdef DEBUG_NUBUS_INT
	if (console_loglevel > 7) {
		printk("oss_nubus_irq: events = 0x%04X\n", events);
	}
#endif
	/* There are only six slots on the OSS, not seven */

	/*
	 * Scan slots from highest (bit 0x20, slot E) down to lowest,
	 * acking and dispatching each pending slot; stop as soon as no
	 * lower-order bits remain set.
	 */
	i = 6;
	irq_bit = 0x40;
	do {
		--i;
		irq_bit >>= 1;
		if (events & irq_bit) {
			oss->irq_pending &= ~irq_bit;
			generic_handle_irq(NUBUS_SOURCE_BASE + i);
		}
	} while(events & (irq_bit - 1));
}

/*
 * Register the OSS and NuBus interrupt dispatchers.
 *
 * This IRQ mapping is laid out with two things in mind: first, we try to keep
 * things on their own levels to avoid having to do double-dispatches. Second,
 * the levels match as closely as possible the alternate IRQ mapping mode (aka
 * "A/UX mode") available on some VIA machines.
*/ #define OSS_IRQLEV_IOPISM IRQ_AUTO_1 #define OSS_IRQLEV_SCSI IRQ_AUTO_2 #define OSS_IRQLEV_NUBUS IRQ_AUTO_3 #define OSS_IRQLEV_IOPSCC IRQ_AUTO_4 #define OSS_IRQLEV_VIA1 IRQ_AUTO_6 void __init oss_register_interrupts(void) { irq_set_chained_handler(OSS_IRQLEV_IOPISM, oss_irq); irq_set_chained_handler(OSS_IRQLEV_SCSI, oss_irq); irq_set_chained_handler(OSS_IRQLEV_NUBUS, oss_nubus_irq); irq_set_chained_handler(OSS_IRQLEV_IOPSCC, oss_irq); irq_set_chained_handler(OSS_IRQLEV_VIA1, via1_irq); /* OSS_VIA1 gets enabled here because it has no machspec interrupt. */ oss->irq_level[OSS_VIA1] = IRQ_AUTO_6; } /* * Enable an OSS interrupt * * It looks messy but it's rather straightforward. The switch() statement * just maps the machspec interrupt numbers to the right OSS interrupt * source (if the OSS handles that interrupt) and then sets the interrupt * level for that source to nonzero, thus enabling the interrupt. */ void oss_irq_enable(int irq) { #ifdef DEBUG_IRQUSE printk("oss_irq_enable(%d)\n", irq); #endif switch(irq) { case IRQ_MAC_SCC: oss->irq_level[OSS_IOPSCC] = OSS_IRQLEV_IOPSCC; return; case IRQ_MAC_ADB: oss->irq_level[OSS_IOPISM] = OSS_IRQLEV_IOPISM; return; case IRQ_MAC_SCSI: oss->irq_level[OSS_SCSI] = OSS_IRQLEV_SCSI; return; case IRQ_NUBUS_9: case IRQ_NUBUS_A: case IRQ_NUBUS_B: case IRQ_NUBUS_C: case IRQ_NUBUS_D: case IRQ_NUBUS_E: irq -= NUBUS_SOURCE_BASE; oss->irq_level[irq] = OSS_IRQLEV_NUBUS; return; } if (IRQ_SRC(irq) == 1) via_irq_enable(irq); } /* * Disable an OSS interrupt * * Same as above except we set the source's interrupt level to zero, * to disable the interrupt. 
 */

void oss_irq_disable(int irq) {
#ifdef DEBUG_IRQUSE
	printk("oss_irq_disable(%d)\n", irq);
#endif
	/* Zeroing a source's interrupt level masks it at the OSS */
	switch(irq) {
		case IRQ_MAC_SCC:
			oss->irq_level[OSS_IOPSCC] = 0;
			return;
		case IRQ_MAC_ADB:
			oss->irq_level[OSS_IOPISM] = 0;
			return;
		case IRQ_MAC_SCSI:
			oss->irq_level[OSS_SCSI] = 0;
			return;
		case IRQ_NUBUS_9:
		case IRQ_NUBUS_A:
		case IRQ_NUBUS_B:
		case IRQ_NUBUS_C:
		case IRQ_NUBUS_D:
		case IRQ_NUBUS_E:
			irq -= NUBUS_SOURCE_BASE;
			oss->irq_level[irq] = 0;
			return;
	}
	/* Not an OSS-handled source - delegate to the VIA, if it owns it */
	if (IRQ_SRC(irq) == 1)
		via_irq_disable(irq);
}
gpl-2.0
superr/android_kernel_surnia
drivers/media/pci/mantis/mantis_vp1041.c
7503
11247
/* Mantis VP-1041 driver Copyright (C) Manu Abraham (abraham.manu@gmail.com) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/interrupt.h> #include "dmxdev.h" #include "dvbdev.h" #include "dvb_demux.h" #include "dvb_frontend.h" #include "dvb_net.h" #include "mantis_common.h" #include "mantis_ioc.h" #include "mantis_dvb.h" #include "mantis_vp1041.h" #include "stb0899_reg.h" #include "stb0899_drv.h" #include "stb0899_cfg.h" #include "stb6100_cfg.h" #include "stb6100.h" #include "lnbp21.h" #define MANTIS_MODEL_NAME "VP-1041" #define MANTIS_DEV_TYPE "DSS/DVB-S/DVB-S2" static const struct stb0899_s1_reg vp1041_stb0899_s1_init_1[] = { /* 0x0000000b, *//* SYSREG */ { STB0899_DEV_ID , 0x30 }, { STB0899_DISCNTRL1 , 0x32 }, { STB0899_DISCNTRL2 , 0x80 }, { STB0899_DISRX_ST0 , 0x04 }, { STB0899_DISRX_ST1 , 0x00 }, { STB0899_DISPARITY , 0x00 }, { STB0899_DISSTATUS , 0x20 }, { STB0899_DISF22 , 0x99 }, { STB0899_DISF22RX , 0xa8 }, /* SYSREG ? 
*/ { STB0899_ACRPRESC , 0x11 }, { STB0899_ACRDIV1 , 0x0a }, { STB0899_ACRDIV2 , 0x05 }, { STB0899_DACR1 , 0x00 }, { STB0899_DACR2 , 0x00 }, { STB0899_OUTCFG , 0x00 }, { STB0899_MODECFG , 0x00 }, { STB0899_IRQSTATUS_3 , 0xfe }, { STB0899_IRQSTATUS_2 , 0x03 }, { STB0899_IRQSTATUS_1 , 0x7c }, { STB0899_IRQSTATUS_0 , 0xf4 }, { STB0899_IRQMSK_3 , 0xf3 }, { STB0899_IRQMSK_2 , 0xfc }, { STB0899_IRQMSK_1 , 0xff }, { STB0899_IRQMSK_0 , 0xff }, { STB0899_IRQCFG , 0x00 }, { STB0899_I2CCFG , 0x88 }, { STB0899_I2CRPT , 0x58 }, { STB0899_IOPVALUE5 , 0x00 }, { STB0899_IOPVALUE4 , 0x33 }, { STB0899_IOPVALUE3 , 0x6d }, { STB0899_IOPVALUE2 , 0x90 }, { STB0899_IOPVALUE1 , 0x60 }, { STB0899_IOPVALUE0 , 0x00 }, { STB0899_GPIO00CFG , 0x82 }, { STB0899_GPIO01CFG , 0x82 }, { STB0899_GPIO02CFG , 0x82 }, { STB0899_GPIO03CFG , 0x82 }, { STB0899_GPIO04CFG , 0x82 }, { STB0899_GPIO05CFG , 0x82 }, { STB0899_GPIO06CFG , 0x82 }, { STB0899_GPIO07CFG , 0x82 }, { STB0899_GPIO08CFG , 0x82 }, { STB0899_GPIO09CFG , 0x82 }, { STB0899_GPIO10CFG , 0x82 }, { STB0899_GPIO11CFG , 0x82 }, { STB0899_GPIO12CFG , 0x82 }, { STB0899_GPIO13CFG , 0x82 }, { STB0899_GPIO14CFG , 0x82 }, { STB0899_GPIO15CFG , 0x82 }, { STB0899_GPIO16CFG , 0x82 }, { STB0899_GPIO17CFG , 0x82 }, { STB0899_GPIO18CFG , 0x82 }, { STB0899_GPIO19CFG , 0x82 }, { STB0899_GPIO20CFG , 0x82 }, { STB0899_SDATCFG , 0xb8 }, { STB0899_SCLTCFG , 0xba }, { STB0899_AGCRFCFG , 0x1c }, /* 0x11 */ { STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */ { STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */ { STB0899_DIRCLKCFG , 0x82 }, { STB0899_CLKOUT27CFG , 0x7e }, { STB0899_STDBYCFG , 0x82 }, { STB0899_CS0CFG , 0x82 }, { STB0899_CS1CFG , 0x82 }, { STB0899_DISEQCOCFG , 0x20 }, { STB0899_GPIO32CFG , 0x82 }, { STB0899_GPIO33CFG , 0x82 }, { STB0899_GPIO34CFG , 0x82 }, { STB0899_GPIO35CFG , 0x82 }, { STB0899_GPIO36CFG , 0x82 }, { STB0899_GPIO37CFG , 0x82 }, { STB0899_GPIO38CFG , 0x82 }, { STB0899_GPIO39CFG , 0x82 }, { STB0899_NCOARSE , 0x17 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 
99MHz */ { STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */ { STB0899_FILTCTRL , 0x00 }, { STB0899_SYSCTRL , 0x01 }, { STB0899_STOPCLK1 , 0x20 }, { STB0899_STOPCLK2 , 0x00 }, { STB0899_INTBUFSTATUS , 0x00 }, { STB0899_INTBUFCTRL , 0x0a }, { 0xffff , 0xff }, }; static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = { { STB0899_DEMOD , 0x00 }, { STB0899_RCOMPC , 0xc9 }, { STB0899_AGC1CN , 0x01 }, { STB0899_AGC1REF , 0x10 }, { STB0899_RTC , 0x23 }, { STB0899_TMGCFG , 0x4e }, { STB0899_AGC2REF , 0x34 }, { STB0899_TLSR , 0x84 }, { STB0899_CFD , 0xf7 }, { STB0899_ACLC , 0x87 }, { STB0899_BCLC , 0x94 }, { STB0899_EQON , 0x41 }, { STB0899_LDT , 0xf1 }, { STB0899_LDT2 , 0xe3 }, { STB0899_EQUALREF , 0xb4 }, { STB0899_TMGRAMP , 0x10 }, { STB0899_TMGTHD , 0x30 }, { STB0899_IDCCOMP , 0xfd }, { STB0899_QDCCOMP , 0xff }, { STB0899_POWERI , 0x0c }, { STB0899_POWERQ , 0x0f }, { STB0899_RCOMP , 0x6c }, { STB0899_AGCIQIN , 0x80 }, { STB0899_AGC2I1 , 0x06 }, { STB0899_AGC2I2 , 0x00 }, { STB0899_TLIR , 0x30 }, { STB0899_RTF , 0x7f }, { STB0899_DSTATUS , 0x00 }, { STB0899_LDI , 0xbc }, { STB0899_CFRM , 0xea }, { STB0899_CFRL , 0x31 }, { STB0899_NIRM , 0x2b }, { STB0899_NIRL , 0x80 }, { STB0899_ISYMB , 0x1d }, { STB0899_QSYMB , 0xa6 }, { STB0899_SFRH , 0x2f }, { STB0899_SFRM , 0x68 }, { STB0899_SFRL , 0x40 }, { STB0899_SFRUPH , 0x2f }, { STB0899_SFRUPM , 0x68 }, { STB0899_SFRUPL , 0x40 }, { STB0899_EQUAI1 , 0x02 }, { STB0899_EQUAQ1 , 0xff }, { STB0899_EQUAI2 , 0x04 }, { STB0899_EQUAQ2 , 0x05 }, { STB0899_EQUAI3 , 0x02 }, { STB0899_EQUAQ3 , 0xfd }, { STB0899_EQUAI4 , 0x03 }, { STB0899_EQUAQ4 , 0x07 }, { STB0899_EQUAI5 , 0x08 }, { STB0899_EQUAQ5 , 0xf5 }, { STB0899_DSTATUS2 , 0x00 }, { STB0899_VSTATUS , 0x00 }, { STB0899_VERROR , 0x86 }, { STB0899_IQSWAP , 0x2a }, { STB0899_ECNT1M , 0x00 }, { STB0899_ECNT1L , 0x00 }, { STB0899_ECNT2M , 0x00 }, { STB0899_ECNT2L , 0x00 }, { STB0899_ECNT3M , 0x0a }, { STB0899_ECNT3L , 0xad }, { STB0899_FECAUTO1 , 0x06 }, { 
STB0899_FECM , 0x01 }, { STB0899_VTH12 , 0xb0 }, { STB0899_VTH23 , 0x7a }, { STB0899_VTH34 , 0x58 }, { STB0899_VTH56 , 0x38 }, { STB0899_VTH67 , 0x34 }, { STB0899_VTH78 , 0x24 }, { STB0899_PRVIT , 0xff }, { STB0899_VITSYNC , 0x19 }, { STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */ { STB0899_TSULC , 0x42 }, { STB0899_RSLLC , 0x41 }, { STB0899_TSLPL , 0x12 }, { STB0899_TSCFGH , 0x0c }, { STB0899_TSCFGM , 0x00 }, { STB0899_TSCFGL , 0x00 }, { STB0899_TSOUT , 0x69 }, /* 0x0d for CAM */ { STB0899_RSSYNCDEL , 0x00 }, { STB0899_TSINHDELH , 0x02 }, { STB0899_TSINHDELM , 0x00 }, { STB0899_TSINHDELL , 0x00 }, { STB0899_TSLLSTKM , 0x1b }, { STB0899_TSLLSTKL , 0xb3 }, { STB0899_TSULSTKM , 0x00 }, { STB0899_TSULSTKL , 0x00 }, { STB0899_PCKLENUL , 0xbc }, { STB0899_PCKLENLL , 0xcc }, { STB0899_RSPCKLEN , 0xbd }, { STB0899_TSSTATUS , 0x90 }, { STB0899_ERRCTRL1 , 0xb6 }, { STB0899_ERRCTRL2 , 0x95 }, { STB0899_ERRCTRL3 , 0x8d }, { STB0899_DMONMSK1 , 0x27 }, { STB0899_DMONMSK0 , 0x03 }, { STB0899_DEMAPVIT , 0x5c }, { STB0899_PLPARM , 0x19 }, { STB0899_PDELCTRL , 0x48 }, { STB0899_PDELCTRL2 , 0x00 }, { STB0899_BBHCTRL1 , 0x00 }, { STB0899_BBHCTRL2 , 0x00 }, { STB0899_HYSTTHRESH , 0x77 }, { STB0899_MATCSTM , 0x00 }, { STB0899_MATCSTL , 0x00 }, { STB0899_UPLCSTM , 0x00 }, { STB0899_UPLCSTL , 0x00 }, { STB0899_DFLCSTM , 0x00 }, { STB0899_DFLCSTL , 0x00 }, { STB0899_SYNCCST , 0x00 }, { STB0899_SYNCDCSTM , 0x00 }, { STB0899_SYNCDCSTL , 0x00 }, { STB0899_ISI_ENTRY , 0x00 }, { STB0899_ISI_BIT_EN , 0x00 }, { STB0899_MATSTRM , 0xf0 }, { STB0899_MATSTRL , 0x02 }, { STB0899_UPLSTRM , 0x45 }, { STB0899_UPLSTRL , 0x60 }, { STB0899_DFLSTRM , 0xe3 }, { STB0899_DFLSTRL , 0x00 }, { STB0899_SYNCSTR , 0x47 }, { STB0899_SYNCDSTRM , 0x05 }, { STB0899_SYNCDSTRL , 0x18 }, { STB0899_CFGPDELSTATUS1 , 0x19 }, { STB0899_CFGPDELSTATUS2 , 0x2b }, { STB0899_BBFERRORM , 0x00 }, { STB0899_BBFERRORL , 0x01 }, { STB0899_UPKTERRORM , 0x00 }, { STB0899_UPKTERRORL , 0x00 }, { 0xffff , 0xff }, }; struct 
stb0899_config vp1041_stb0899_config = {
	/* Register-init tables for the STB0899 demodulator */
	.init_dev		= vp1041_stb0899_s1_init_1,
	.init_s2_demod		= stb0899_s2_init_2,
	.init_s1_demod		= vp1041_stb0899_s1_init_3,
	.init_s2_fec		= stb0899_s2_init_4,
	.init_tst		= stb0899_s1_init_5,

	.demod_address		= 0x68, /* 0xd0 >> 1 */

	.xtal_freq		= 27000000,
	.inversion		= IQ_SWAP_ON, /* 1 */

	.lo_clk			= 76500000,
	.hi_clk			= 99000000,

	/* DVB-S2 acquisition/tracking tuning parameters */
	.esno_ave		= STB0899_DVBS2_ESNO_AVE,
	.esno_quant		= STB0899_DVBS2_ESNO_QUANT,
	.avframes_coarse	= STB0899_DVBS2_AVFRAMES_COARSE,
	.avframes_fine		= STB0899_DVBS2_AVFRAMES_FINE,
	.miss_threshold		= STB0899_DVBS2_MISS_THRESHOLD,
	.uwp_threshold_acq	= STB0899_DVBS2_UWP_THRESHOLD_ACQ,
	.uwp_threshold_track	= STB0899_DVBS2_UWP_THRESHOLD_TRACK,
	.uwp_threshold_sof	= STB0899_DVBS2_UWP_THRESHOLD_SOF,
	.sof_search_timeout	= STB0899_DVBS2_SOF_SEARCH_TIMEOUT,

	.btr_nco_bits		= STB0899_DVBS2_BTR_NCO_BITS,
	.btr_gain_shift_offset	= STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET,
	.crl_nco_bits		= STB0899_DVBS2_CRL_NCO_BITS,
	.ldpc_max_iter		= STB0899_DVBS2_LDPC_MAX_ITER,

	/* STB6100 tuner callbacks used by the demod driver */
	.tuner_get_frequency	= stb6100_get_frequency,
	.tuner_set_frequency	= stb6100_set_frequency,
	.tuner_set_bandwidth	= stb6100_set_bandwidth,
	.tuner_get_bandwidth	= stb6100_get_bandwidth,
	.tuner_set_rfsiggain	= NULL,
};

struct stb6100_config vp1041_stb6100_config = {
	.tuner_address	= 0x60,
	.refclock	= 27000000,
};

/*
 * Power the frontend up, then attach the STB0899 demod, STB6100 tuner and
 * LNBP21 LNB supply on the card's I2C adapter. Returns 0 on success,
 * -EREMOTEIO if no demod is found, -EIO if powering on fails.
 */
static int vp1041_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
{
	struct i2c_adapter *adapter	= &mantis->adapter;

	int err = 0;

	err = mantis_frontend_power(mantis, POWER_ON);
	if (err == 0) {
		mantis_frontend_soft_reset(mantis);
		msleep(250);
		mantis->fe = dvb_attach(stb0899_attach, &vp1041_stb0899_config, adapter);
		if (mantis->fe) {
			dprintk(MANTIS_ERROR, 1,
				"found STB0899 DVB-S/DVB-S2 frontend @0x%02x",
				vp1041_stb0899_config.demod_address);

			if (dvb_attach(stb6100_attach, mantis->fe, &vp1041_stb6100_config, adapter)) {
				/* Missing LNB controller is non-fatal */
				if (!dvb_attach(lnbp21_attach, mantis->fe, adapter, 0, 0))
					dprintk(MANTIS_ERROR, 1, "No LNBP21 found!");
			}
		} else {
			return -EREMOTEIO;
		}
	} else {
		dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
			adapter->name,
			err);

		return -EIO;
	}

	dprintk(MANTIS_ERROR, 1, "Done!");
	return 0;
}

/* Board description consumed by the mantis core driver */
struct mantis_hwconfig vp1041_config = {
	.model_name	= MANTIS_MODEL_NAME,
	.dev_type	= MANTIS_DEV_TYPE,
	.ts_size	= MANTIS_TS_188,

	.baud_rate	= MANTIS_BAUD_9600,
	.parity		= MANTIS_PARITY_NONE,
	.bytes		= 0,

	.frontend_init	= vp1041_frontend_init,
	.power		= GPIF_A12,
	.reset		= GPIF_A13,
};
gpl-2.0
Ca1ne/EnochPrima
drivers/platform/x86/tc1100-wmi.c
8015
6756
/* * HP Compaq TC1100 Tablet WMI Extras Driver * * Copyright (C) 2007 Carlos Corbacho <carlos@strangeworlds.co.uk> * Copyright (C) 2004 Jamey Hicks <jamey.hicks@hp.com> * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/platform_device.h>

#define GUID "C364AC71-36DB-495A-8494-B439D472A505"

/* WMI instance numbers for the two controllable devices */
#define TC1100_INSTANCE_WIRELESS		1
#define TC1100_INSTANCE_JOGDIAL		2

MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho");
MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras");
MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:C364AC71-36DB-495A-8494-B439D472A505");

static struct platform_device *tc1100_device;

/* State saved across suspend/resume */
struct tc1100_data {
	u32 wireless;	/* wireless on/off */
	u32 jogdial;	/* jogdial enabled/disabled */
};

static struct tc1100_data suspend_data;

/* --------------------------------------------------------------------------
				Device Management
   -------------------------------------------------------------------------- */

/*
 * Query a WMI instance and normalize the raw firmware value to 0/1.
 * Returns 0 on success, -EINVAL/-ENODEV on failure.
 */
static int get_state(u32 *out, u8 instance)
{
	u32 tmp;
	acpi_status status;
	struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;

	if (!out)
		return -EINVAL;

	if (instance > 2)
		return -ENODEV;

	status = wmi_query_block(GUID, instance, &result);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	obj = (union acpi_object *) result.pointer;
	if (obj && obj->type == ACPI_TYPE_INTEGER) {
		tmp = obj->integer.value;
	} else {
		tmp = 0;
	}

	if (result.length > 0 && result.pointer)
		kfree(result.pointer);

	/* Firmware encodings differ per instance - map both to boolean */
	switch (instance) {
	case TC1100_INSTANCE_WIRELESS:
		*out = (tmp == 3) ? 1 : 0;
		return 0;
	case TC1100_INSTANCE_JOGDIAL:
		*out = (tmp == 1) ? 0 : 1;
		return 0;
	default:
		return -ENODEV;
	}
}

/*
 * Translate a 0/1 request back into the firmware encoding and write it
 * to the WMI instance. Returns 0 on success, negative errno on failure.
 */
static int set_state(u32 *in, u8 instance)
{
	u32 value;
	acpi_status status;
	struct acpi_buffer input;

	if (!in)
		return -EINVAL;

	if (instance > 2)
		return -ENODEV;

	switch (instance) {
	case TC1100_INSTANCE_WIRELESS:
		value = (*in) ? 1 : 2;
		break;
	case TC1100_INSTANCE_JOGDIAL:
		value = (*in) ? 0 : 1;
		break;
	default:
		return -ENODEV;
	}

	input.length = sizeof(u32);
	input.pointer = &value;

	status = wmi_set_block(GUID, instance, &input);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	return 0;
}

/* --------------------------------------------------------------------------
				FS Interface (/sys)
   -------------------------------------------------------------------------- */

/*
 * Read/ write bool sysfs macro
 *
 * NOTE(review): get_state()/set_state() return int error codes, which are
 * stored in an acpi_status here; this works because 0 == AE_OK, but the
 * type mixing is worth confirming/cleaning up.
 */
#define show_set_bool(value, instance) \
static ssize_t \
show_bool_##value(struct device *dev, struct device_attribute *attr, \
	char *buf) \
{ \
	u32 result; \
	acpi_status status = get_state(&result, instance); \
	if (ACPI_SUCCESS(status)) \
		return sprintf(buf, "%d\n", result); \
	return sprintf(buf, "Read error\n"); \
} \
\
static ssize_t \
set_bool_##value(struct device *dev, struct device_attribute *attr, \
	const char *buf, size_t count) \
{ \
	u32 tmp = simple_strtoul(buf, NULL, 10); \
	acpi_status status = set_state(&tmp, instance); \
	if (ACPI_FAILURE(status)) \
		return -EINVAL; \
	return count; \
} \
static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
	show_bool_##value, set_bool_##value);

show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
show_set_bool(jogdial, TC1100_INSTANCE_JOGDIAL);

static struct attribute *tc1100_attributes[] = {
	&dev_attr_wireless.attr,
	&dev_attr_jogdial.attr,
	NULL
};

static struct attribute_group tc1100_attribute_group = {
	.attrs	= tc1100_attributes,
};

/* --------------------------------------------------------------------------
				Driver Model
   -------------------------------------------------------------------------- */

static int __init tc1100_probe(struct platform_device *device)
{
	return sysfs_create_group(&device->dev.kobj, &tc1100_attribute_group);
}


static int __devexit tc1100_remove(struct platform_device *device)
{
	sysfs_remove_group(&device->dev.kobj, &tc1100_attribute_group);

	return 0;
}

#ifdef CONFIG_PM
/* Save both device states so resume can restore them */
static int tc1100_suspend(struct device *dev)
{
	int ret;

	ret = get_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
	if (ret)
		return ret;

	ret = get_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
	if (ret)
		return ret;

	return 0;
}

/* Re-apply the states captured in tc1100_suspend() */
static int tc1100_resume(struct device *dev)
{
	int ret;

	ret = set_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
	if (ret)
		return ret;

	ret = set_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
	if (ret)
		return ret;

	return 0;
}

static const struct dev_pm_ops tc1100_pm_ops = {
	.suspend	= tc1100_suspend,
	.resume		= tc1100_resume,
	.freeze		= tc1100_suspend,
	.restore	= tc1100_resume,
};
#endif

static struct platform_driver tc1100_driver = {
	.driver = {
		.name	= "tc1100-wmi",
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &tc1100_pm_ops,
#endif
	},
	.remove = __devexit_p(tc1100_remove),
};

static int __init tc1100_init(void)
{
	int error;

	if (!wmi_has_guid(GUID))
		return -ENODEV;

	tc1100_device = platform_device_alloc("tc1100-wmi", -1);
	if (!tc1100_device)
		return -ENOMEM;

	error = platform_device_add(tc1100_device);
	if (error)
		goto err_device_put;

	error = platform_driver_probe(&tc1100_driver, tc1100_probe);
	if (error)
		goto err_device_del;

	pr_info("HP Compaq TC1100 Tablet WMI Extras loaded\n");
	return 0;

 err_device_del:
	platform_device_del(tc1100_device);
 err_device_put:
	platform_device_put(tc1100_device);
	return error;
}

static void __exit tc1100_exit(void)
{
	platform_device_unregister(tc1100_device);
	platform_driver_unregister(&tc1100_driver);
}

module_init(tc1100_init);
module_exit(tc1100_exit);
gpl-2.0
sktjdgns1189/android_kernel_lge_f180k
arch/mips/kernel/binfmt_elfo32.c
9295
4508
/*
 * Support for o32 Linux/MIPS ELF binaries.
 *
 * Copyright (C) 1999, 2001 Ralf Baechle
 * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
 *
 * Heavily inspired by the 32-bit Sparc compat code which is
 * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
 * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#define ELF_ARCH	EM_MIPS
#define ELF_CLASS	ELFCLASS32
#ifdef __MIPSEB__
/*
 * NOTE(review): the trailing ';' is preserved from the original; ELF_DATA
 * appears to be expanded only in statement context - confirm before removing.
 */
#define ELF_DATA	ELFDATA2MSB;
#else /* __MIPSEL__ */
#define ELF_DATA	ELFDATA2LSB;
#endif

/* ELF register definitions */
#define ELF_NGREG	45
#define ELF_NFPREG	33

typedef unsigned int elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

/*
 * This is used to ensure we don't load something for the wrong architecture.
 * Accepts only 32-bit o32 MIPS binaries (rejects ABI2/N32 and any other
 * EF_MIPS_ABI value than o32).
 */
#define elf_check_arch(hdr)						\
({									\
	int __res = 1;							\
	struct elfhdr *__h = (hdr);					\
									\
	if (__h->e_machine != EM_MIPS)					\
		__res = 0;						\
	if (__h->e_ident[EI_CLASS] != ELFCLASS32)			\
		__res = 0;						\
	if ((__h->e_flags & EF_MIPS_ABI2) != 0)				\
		__res = 0;						\
	if (((__h->e_flags & EF_MIPS_ABI) != 0) &&			\
	    ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32))		\
		__res = 0;						\
									\
	__res;								\
})

#define TASK32_SIZE		0x7fff8000UL
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE		(TASK32_SIZE / 3 * 2)

#include <asm/processor.h>

/*
 * When this file is selected, we are definitely running a 64bit kernel.
 * So using the right regs define in asm/reg.h
 */
#define WANT_COMPAT_REG_H

/* These MUST be defined before elf.h gets included */
extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs);
#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
#define ELF_CORE_COPY_TASK_REGS(_tsk, _dest)				\
({									\
	int __res = 1;							\
	elf32_core_copy_regs(*(_dest), task_pt_regs(_tsk));		\
	__res;								\
})

#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
#include <linux/math64.h>

/* 32-bit layouts of the core-dump note structures */
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
{
	struct elf_siginfo pr_info;	/* Info associated with signal */
	short	pr_cursig;		/* Current signal */
	unsigned int pr_sigpend;	/* Set of pending signals */
	unsigned int pr_sighold;	/* Set of held signals */
	pid_t	pr_pid;
	pid_t	pr_ppid;
	pid_t	pr_pgrp;
	pid_t	pr_sid;
	struct compat_timeval pr_utime;	/* User time */
	struct compat_timeval pr_stime;	/* System time */
	struct compat_timeval pr_cutime;/* Cumulative user time */
	struct compat_timeval pr_cstime;/* Cumulative system time */
	elf_gregset_t pr_reg;	/* GP registers */
	int pr_fpvalid;		/* True if math co-processor being used.  */
};

#define elf_prpsinfo elf_prpsinfo32
struct elf_prpsinfo32
{
	char	pr_state;	/* numeric process state */
	char	pr_sname;	/* char for pr_state */
	char	pr_zomb;	/* zombie */
	char	pr_nice;	/* nice val */
	unsigned int pr_flag;	/* flags */
	__kernel_uid_t	pr_uid;
	__kernel_gid_t	pr_gid;
	pid_t	pr_pid, pr_ppid, pr_pgrp, pr_sid;
	/* Lots missing */
	char	pr_fname[16];	/* filename of executable */
	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
};

#define elf_caddr_t	u32
#define init_elf_binfmt init_elf32_binfmt

#define jiffies_to_timeval jiffies_to_compat_timeval
static inline void
jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
{
	/*
	 * Convert jiffies to nanoseconds and separate with
	 * one divide.
	 */
	u64 nsec = (u64)jiffies * TICK_NSEC;
	u32 rem;
	value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
	value->tv_usec = rem / NSEC_PER_USEC;
}

/*
 * Fill a 32-bit elf_gregset_t from a 64-bit pt_regs for core dumps.
 * Registers are truncated to 32 bits; EF_R26/EF_R27 (k0/k1) are zeroed.
 */
void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
{
	int i;

	for (i = 0; i < EF_R0; i++)
		grp[i] = 0;
	grp[EF_R0] = 0;
	for (i = 1; i <= 31; i++)
		grp[EF_R0 + i] = (elf_greg_t) regs->regs[i];
	grp[EF_R26] = 0;
	grp[EF_R27] = 0;
	grp[EF_LO] = (elf_greg_t) regs->lo;
	grp[EF_HI] = (elf_greg_t) regs->hi;
	grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
	grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
	grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
	grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
#ifdef EF_UNUSED0
	grp[EF_UNUSED0] = 0;
#endif
}

MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries");
MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");

#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR

#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32

#include "../../../fs/binfmt_elf.c"
gpl-2.0
PRJosh/kernel_msm
drivers/isdn/pcbit/edss1.c
9295
9083
/* * DSS.1 Finite State Machine * base: ITU-T Rec Q.931 * * Copyright (C) 1996 Universidade de Lisboa * * Written by Pedro Roque Marques (roque@di.fc.ul.pt) * * This software may be used and distributed according to the terms of * the GNU General Public License, incorporated herein by reference. */ /* * TODO: complete the FSM * move state/event descriptions to a user space logger */ #include <linux/string.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/skbuff.h> #include <linux/timer.h> #include <asm/io.h> #include <linux/isdnif.h> #include "pcbit.h" #include "edss1.h" #include "layer2.h" #include "callbacks.h" const char * const isdn_state_table[] = { "Closed", "Call initiated", "Overlap sending", "Outgoing call proceeding", "NOT DEFINED", "Call delivered", "Call present", "Call received", "Connect request", "Incoming call proceeding", "Active", "Disconnect request", "Disconnect indication", "NOT DEFINED", "NOT DEFINED", "Suspend request", "NOT DEFINED", "Resume request", "NOT DEFINED", "Release Request", "NOT DEFINED", "NOT DEFINED", "NOT DEFINED", "NOT DEFINED", "NOT DEFINED", "Overlap receiving", "Select protocol on B-Channel", "Activate B-channel protocol" }; #ifdef DEBUG_ERRS static struct CauseValue { byte nr; char *descr; } cvlist[] = { {0x01, "Unallocated (unassigned) number"}, {0x02, "No route to specified transit network"}, {0x03, "No route to destination"}, {0x04, "Send special information tone"}, {0x05, "Misdialled trunk prefix"}, {0x06, "Channel unacceptable"}, {0x07, "Channel awarded and being delivered in an established channel"}, {0x08, "Preemption"}, {0x09, "Preemption - circuit reserved for reuse"}, {0x10, "Normal call clearing"}, {0x11, "User busy"}, {0x12, "No user responding"}, {0x13, "No answer from user (user alerted)"}, {0x14, "Subscriber absent"}, {0x15, "Call rejected"}, {0x16, "Number changed"}, {0x1a, "non-selected user clearing"}, {0x1b, "Destination out of order"}, {0x1c, "Invalid number 
format (address incomplete)"}, {0x1d, "Facility rejected"}, {0x1e, "Response to Status enquiry"}, {0x1f, "Normal, unspecified"}, {0x22, "No circuit/channel available"}, {0x26, "Network out of order"}, {0x27, "Permanent frame mode connection out-of-service"}, {0x28, "Permanent frame mode connection operational"}, {0x29, "Temporary failure"}, {0x2a, "Switching equipment congestion"}, {0x2b, "Access information discarded"}, {0x2c, "Requested circuit/channel not available"}, {0x2e, "Precedence call blocked"}, {0x2f, "Resource unavailable, unspecified"}, {0x31, "Quality of service unavailable"}, {0x32, "Requested facility not subscribed"}, {0x35, "Outgoing calls barred within CUG"}, {0x37, "Incoming calls barred within CUG"}, {0x39, "Bearer capability not authorized"}, {0x3a, "Bearer capability not presently available"}, {0x3e, "Inconsistency in designated outgoing access information and subscriber class"}, {0x3f, "Service or option not available, unspecified"}, {0x41, "Bearer capability not implemented"}, {0x42, "Channel type not implemented"}, {0x43, "Requested facility not implemented"}, {0x44, "Only restricted digital information bearer capability is available"}, {0x4f, "Service or option not implemented"}, {0x51, "Invalid call reference value"}, {0x52, "Identified channel does not exist"}, {0x53, "A suspended call exists, but this call identity does not"}, {0x54, "Call identity in use"}, {0x55, "No call suspended"}, {0x56, "Call having the requested call identity has been cleared"}, {0x57, "User not member of CUG"}, {0x58, "Incompatible destination"}, {0x5a, "Non-existent CUG"}, {0x5b, "Invalid transit network selection"}, {0x5f, "Invalid message, unspecified"}, {0x60, "Mandatory information element is missing"}, {0x61, "Message type non-existent or not implemented"}, {0x62, "Message not compatible with call state or message type non-existent or not implemented"}, {0x63, "Information element/parameter non-existent or not implemented"}, {0x64, "Invalid information 
element contents"}, {0x65, "Message not compatible with call state"}, {0x66, "Recovery on timer expiry"}, {0x67, "Parameter non-existent or not implemented - passed on"}, {0x6e, "Message with unrecognized parameter discarded"}, {0x6f, "Protocol error, unspecified"}, {0x7f, "Interworking, unspecified"} }; #endif static struct isdn_event_desc { unsigned short ev; char *desc; } isdn_event_table[] = { {EV_USR_SETUP_REQ, "CC->L3: Setup Request"}, {EV_USR_SETUP_RESP, "CC->L3: Setup Response"}, {EV_USR_PROCED_REQ, "CC->L3: Proceeding Request"}, {EV_USR_RELEASE_REQ, "CC->L3: Release Request"}, {EV_NET_SETUP, "NET->TE: setup "}, {EV_NET_CALL_PROC, "NET->TE: call proceeding"}, {EV_NET_SETUP_ACK, "NET->TE: setup acknowledge (more info needed)"}, {EV_NET_CONN, "NET->TE: connect"}, {EV_NET_CONN_ACK, "NET->TE: connect acknowledge"}, {EV_NET_DISC, "NET->TE: disconnect indication"}, {EV_NET_RELEASE, "NET->TE: release"}, {EV_NET_RELEASE_COMP, "NET->TE: release complete"}, {EV_NET_SELP_RESP, "Board: Select B-channel protocol ack"}, {EV_NET_ACTV_RESP, "Board: Activate B-channel protocol ack"}, {EV_TIMER, "Timeout"}, {0, "NULL"} }; char *strisdnevent(ushort ev) { struct isdn_event_desc *entry; for (entry = isdn_event_table; entry->ev; entry++) if (entry->ev == ev) break; return entry->desc; } /* * Euro ISDN finite state machine */ static struct fsm_timer_entry fsm_timers[] = { {ST_CALL_PROC, 10}, {ST_DISC_REQ, 2}, {ST_ACTIVE_SELP, 5}, {ST_ACTIVE_ACTV, 5}, {ST_INCM_PROC, 10}, {ST_CONN_REQ, 2}, {0xff, 0} }; static struct fsm_entry fsm_table[] = { /* Connect Phase */ /* Outgoing */ {ST_NULL, ST_CALL_INIT, EV_USR_SETUP_REQ, cb_out_1}, {ST_CALL_INIT, ST_OVER_SEND, EV_NET_SETUP_ACK, cb_notdone}, {ST_CALL_INIT, ST_CALL_PROC, EV_NET_CALL_PROC, NULL}, {ST_CALL_INIT, ST_NULL, EV_NET_DISC, cb_out_2}, {ST_CALL_PROC, ST_ACTIVE_SELP, EV_NET_CONN, cb_out_2}, {ST_CALL_PROC, ST_NULL, EV_NET_DISC, cb_disc_1}, {ST_CALL_PROC, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2}, /* Incoming */ {ST_NULL, 
ST_CALL_PRES, EV_NET_SETUP, NULL}, {ST_CALL_PRES, ST_INCM_PROC, EV_USR_PROCED_REQ, cb_in_1}, {ST_CALL_PRES, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2}, {ST_INCM_PROC, ST_CONN_REQ, EV_USR_SETUP_RESP, cb_in_2}, {ST_INCM_PROC, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2}, {ST_CONN_REQ, ST_ACTIVE_SELP, EV_NET_CONN_ACK, cb_in_3}, /* Active */ {ST_ACTIVE, ST_NULL, EV_NET_DISC, cb_disc_1}, {ST_ACTIVE, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2}, {ST_ACTIVE, ST_NULL, EV_NET_RELEASE, cb_disc_3}, /* Disconnect */ {ST_DISC_REQ, ST_NULL, EV_NET_DISC, cb_disc_1}, {ST_DISC_REQ, ST_NULL, EV_NET_RELEASE, cb_disc_3}, /* protocol selection */ {ST_ACTIVE_SELP, ST_ACTIVE_ACTV, EV_NET_SELP_RESP, cb_selp_1}, {ST_ACTIVE_SELP, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2}, {ST_ACTIVE_ACTV, ST_ACTIVE, EV_NET_ACTV_RESP, cb_open}, {ST_ACTIVE_ACTV, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2}, /* Timers */ {ST_CALL_PROC, ST_DISC_REQ, EV_TIMER, cb_disc_2}, {ST_DISC_REQ, ST_NULL, EV_TIMER, cb_disc_3}, {ST_ACTIVE_SELP, ST_DISC_REQ, EV_TIMER, cb_disc_2}, {ST_ACTIVE_ACTV, ST_DISC_REQ, EV_TIMER, cb_disc_2}, {ST_INCM_PROC, ST_DISC_REQ, EV_TIMER, cb_disc_2}, {ST_CONN_REQ, ST_CONN_REQ, EV_TIMER, cb_in_2}, {0xff, 0, 0, NULL} }; static void pcbit_fsm_timer(unsigned long data) { struct pcbit_dev *dev; struct pcbit_chan *chan; chan = (struct pcbit_chan *) data; del_timer(&chan->fsm_timer); chan->fsm_timer.function = NULL; dev = chan2dev(chan); if (dev == NULL) { printk(KERN_WARNING "pcbit: timer for unknown device\n"); return; } pcbit_fsm_event(dev, chan, EV_TIMER, NULL); } void pcbit_fsm_event(struct pcbit_dev *dev, struct pcbit_chan *chan, unsigned short event, struct callb_data *data) { struct fsm_entry *action; struct fsm_timer_entry *tentry; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); for (action = fsm_table; action->init != 0xff; action++) if (action->init == chan->fsm_state && action->event == event) break; if (action->init == 0xff) { spin_unlock_irqrestore(&dev->lock, flags); 
printk(KERN_DEBUG "fsm error: event %x on state %x\n", event, chan->fsm_state); return; } if (chan->fsm_timer.function) { del_timer(&chan->fsm_timer); chan->fsm_timer.function = NULL; } chan->fsm_state = action->final; pcbit_state_change(dev, chan, action->init, event, action->final); for (tentry = fsm_timers; tentry->init != 0xff; tentry++) if (tentry->init == chan->fsm_state) break; if (tentry->init != 0xff) { init_timer(&chan->fsm_timer); chan->fsm_timer.function = &pcbit_fsm_timer; chan->fsm_timer.data = (ulong) chan; chan->fsm_timer.expires = jiffies + tentry->timeout * HZ; add_timer(&chan->fsm_timer); } spin_unlock_irqrestore(&dev->lock, flags); if (action->callb) action->callb(dev, chan, data); }
gpl-2.0
HackLinux/u-boot-pandaboard-ES-RevB3
arch/arm/cpu/arm926ejs/davinci/cpu.c
80
5644
/* * Copyright (C) 2004 Texas Instruments. * Copyright (C) 2009 David Brownell * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <common.h> #include <netdev.h> #include <asm/arch/hardware.h> #include <asm/io.h> DECLARE_GLOBAL_DATA_PTR; /* offsets from PLL controller base */ #define PLLC_PLLCTL 0x100 #define PLLC_PLLM 0x110 #define PLLC_PREDIV 0x114 #define PLLC_PLLDIV1 0x118 #define PLLC_PLLDIV2 0x11c #define PLLC_PLLDIV3 0x120 #define PLLC_POSTDIV 0x128 #define PLLC_BPDIV 0x12c #define PLLC_PLLDIV4 0x160 #define PLLC_PLLDIV5 0x164 #define PLLC_PLLDIV6 0x168 #define PLLC_PLLDIV7 0x16c #define PLLC_PLLDIV8 0x170 #define PLLC_PLLDIV9 0x174 #define BIT(x) (1 << (x)) /* SOC-specific pll info */ #ifdef CONFIG_SOC_DM355 #define ARM_PLLDIV PLLC_PLLDIV1 #define DDR_PLLDIV PLLC_PLLDIV1 #endif #ifdef CONFIG_SOC_DM644X #define ARM_PLLDIV PLLC_PLLDIV2 #define DSP_PLLDIV PLLC_PLLDIV1 #define DDR_PLLDIV PLLC_PLLDIV2 #endif #ifdef CONFIG_SOC_DM646X #define DSP_PLLDIV PLLC_PLLDIV1 #define ARM_PLLDIV PLLC_PLLDIV2 #define DDR_PLLDIV PLLC_PLLDIV1 #endif #ifdef CONFIG_SOC_DA8XX unsigned int sysdiv[9] = { PLLC_PLLDIV1, PLLC_PLLDIV2, PLLC_PLLDIV3, PLLC_PLLDIV4, PLLC_PLLDIV5, PLLC_PLLDIV6, PLLC_PLLDIV7, PLLC_PLLDIV8, PLLC_PLLDIV9 }; int clk_get(enum davinci_clk_ids id) { int 
pre_div; int pllm; int post_div; int pll_out; unsigned int pll_base; pll_out = CONFIG_SYS_OSCIN_FREQ; if (id == DAVINCI_AUXCLK_CLKID) goto out; if ((id >> 16) == 1) pll_base = (unsigned int)davinci_pllc1_regs; else pll_base = (unsigned int)davinci_pllc0_regs; id &= 0xFFFF; /* * Lets keep this simple. Combining operations can result in * unexpected approximations */ pre_div = (readl(pll_base + PLLC_PREDIV) & DAVINCI_PLLC_DIV_MASK) + 1; pllm = readl(pll_base + PLLC_PLLM) + 1; pll_out /= pre_div; pll_out *= pllm; if (id == DAVINCI_PLLM_CLKID) goto out; post_div = (readl(pll_base + PLLC_POSTDIV) & DAVINCI_PLLC_DIV_MASK) + 1; pll_out /= post_div; if (id == DAVINCI_PLLC_CLKID) goto out; pll_out /= (readl(pll_base + sysdiv[id - 1]) & DAVINCI_PLLC_DIV_MASK) + 1; out: return pll_out; } int set_cpu_clk_info(void) { gd->bd->bi_arm_freq = clk_get(DAVINCI_ARM_CLKID) / 1000000; /* DDR PHY uses an x2 input clock */ gd->bd->bi_ddr_freq = cpu_is_da830() ? 0 : (clk_get(DAVINCI_DDR_CLKID) / 1000000); gd->bd->bi_dsp_freq = 0; return 0; } #else /* CONFIG_SOC_DA8XX */ static unsigned pll_div(volatile void *pllbase, unsigned offset) { u32 div; div = REG(pllbase + offset); return (div & BIT(15)) ? 
(1 + (div & 0x1f)) : 1; } static inline unsigned pll_prediv(volatile void *pllbase) { #ifdef CONFIG_SOC_DM355 /* this register read seems to fail on pll0 */ if (pllbase == (volatile void *)DAVINCI_PLL_CNTRL0_BASE) return 8; else return pll_div(pllbase, PLLC_PREDIV); #elif defined(CONFIG_SOC_DM365) return pll_div(pllbase, PLLC_PREDIV); #endif return 1; } static inline unsigned pll_postdiv(volatile void *pllbase) { #if defined(CONFIG_SOC_DM355) || defined(CONFIG_SOC_DM365) return pll_div(pllbase, PLLC_POSTDIV); #elif defined(CONFIG_SOC_DM6446) if (pllbase == (volatile void *)DAVINCI_PLL_CNTRL0_BASE) return pll_div(pllbase, PLLC_POSTDIV); #endif return 1; } static unsigned pll_sysclk_mhz(unsigned pll_addr, unsigned div) { volatile void *pllbase = (volatile void *) pll_addr; #ifdef CONFIG_SOC_DM646X unsigned base = CONFIG_REFCLK_FREQ / 1000; #else unsigned base = CONFIG_SYS_HZ_CLOCK / 1000; #endif /* the PLL might be bypassed */ if (readl(pllbase + PLLC_PLLCTL) & BIT(0)) { base /= pll_prediv(pllbase); #if defined(CONFIG_SOC_DM365) base *= 2 * (readl(pllbase + PLLC_PLLM) & 0x0ff); #else base *= 1 + (REG(pllbase + PLLC_PLLM) & 0x0ff); #endif base /= pll_postdiv(pllbase); } return DIV_ROUND_UP(base, 1000 * pll_div(pllbase, div)); } #ifdef DAVINCI_DM6467EVM unsigned int davinci_arm_clk_get() { return pll_sysclk_mhz(DAVINCI_PLL_CNTRL0_BASE, ARM_PLLDIV) * 1000000; } #endif #if defined(CONFIG_SOC_DM365) unsigned int davinci_clk_get(unsigned int div) { return pll_sysclk_mhz(DAVINCI_PLL_CNTRL0_BASE, div) * 1000000; } #endif int set_cpu_clk_info(void) { unsigned int pllbase = DAVINCI_PLL_CNTRL0_BASE; #if defined(CONFIG_SOC_DM365) pllbase = DAVINCI_PLL_CNTRL1_BASE; #endif gd->bd->bi_arm_freq = pll_sysclk_mhz(pllbase, ARM_PLLDIV); #ifdef DSP_PLLDIV gd->bd->bi_dsp_freq = pll_sysclk_mhz(DAVINCI_PLL_CNTRL0_BASE, DSP_PLLDIV); #else gd->bd->bi_dsp_freq = 0; #endif pllbase = DAVINCI_PLL_CNTRL1_BASE; #if defined(CONFIG_SOC_DM365) pllbase = DAVINCI_PLL_CNTRL0_BASE; #endif 
gd->bd->bi_ddr_freq = pll_sysclk_mhz(pllbase, DDR_PLLDIV) / 2; return 0; } #endif /* !CONFIG_SOC_DA8XX */ /* * Initializes on-chip ethernet controllers. * to override, implement board_eth_init() */ int cpu_eth_init(bd_t *bis) { #if defined(CONFIG_DRIVER_TI_EMAC) davinci_emac_initialize(); #endif return 0; }
gpl-2.0
vaginessa/android_kernel_samsung_golden
drivers/sensor/accelerometer/yas_acc_driver-kxdm.c
80
23183
/* * Copyright (c) 2010 Yamaha Corporation * * This software is provided 'as-is', without any express or implied * warranty. In no event will the authors be held liable for any damages * arising from the use of this software. * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. If you use this software * in a product, an acknowledgment in the product documentation would be * appreciated but is not required. * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software. * 3. This notice may not be removed or altered from any source distribution. */ #include "yas.h" #define YAS_KXDM_RESOLUTION 256 /* Axes data range [um/s^2] */ #define YAS_KXDM_GRAVITY_EARTH 9806550 #define YAS_KXDM_ABSMIN_2G (-YAS_KXDM_GRAVITY_EARTH * 2) #define YAS_KXDM_ABSMAX_2G (YAS_KXDM_GRAVITY_EARTH * 2) /* Default parameters */ #define YAS_KXDM_DEFAULT_DELAY 100 #define YAS_KXDM_DEFAULT_POSITION 0 #define YAS_KXDM_MAX_DELAY 200 #define YAS_KXDM_MIN_DELAY 10 /* Registers */ #define YAS_KXDM_WHO_AM_I_REG 0x0f #define YAS_KXDM_WHO_AM_I 0x33 #define YAS_KXDM_CTRL_REG1 0x20 #define YAS_KXDM_CTRL_REG2 0x21 #define YAS_KXDM_CTRL_REG3 0x22 #define YAS_KXDM_CTRL_REG4 0x23 #define YAS_KXDM_CTRL_REG5 0x24 #define YAS_KXDM_CTRL_REG6 0x25 #define YAS_KXDM_X_ENABLE 0x01 #define YAS_KXDM_Y_ENABLE 0x02 #define YAS_KXDM_Z_ENABLE 0x04 #define YAS_KXDM_XYZ_ENABLE 0x07 #define YAS_KXDM_FS_2G 0x00 #define YAS_KXDM_FS_4G 0x10 #define YAS_KXDM_FS_8G 0x20 #define YAS_KXDM_FS_16 0x30 #define YAS_KXDM_HR_ENABLE 0x08 #define YAS_KXDM_ODR_1HZ 0x10 #define YAS_KXDM_ODR_10HZ 0x20 #define YAS_KXDM_ODR_25HZ 0x30 #define YAS_KXDM_ODR_50HZ 0x40 #define YAS_KXDM_ODR_100HZ 0x50 #define YAS_KXDM_ODR_200HZ 
0x60 #define YAS_KXDM_ODR_400HZ 0x70 #define YAS_KXDM_ACC_REG 0x29 /* -------------------------------------------------------------------------- */ /* Structure definition */ /* -------------------------------------------------------------------------- */ /* Output data rate */ struct yas_kxdm_odr { unsigned long delay; /* min delay (msec) in the range of ODR */ unsigned char odr; /* bandwidth register value */ }; /* Axes data */ struct yas_kxdm_acceleration { int x; int y; int z; int x_raw; int y_raw; int z_raw; }; /* Driver private data */ struct yas_kxdm_data { int initialize; int i2c_open; int enable; int delay; int position; int threshold; int filter_enable; uint8_t odr; struct yas_vector offset; struct yas_kxdm_acceleration last; }; /* -------------------------------------------------------------------------- */ /* Data */ /* -------------------------------------------------------------------------- */ /* Control block */ static struct yas_acc_driver cb; static struct yas_acc_driver *pcb; static struct yas_kxdm_data acc_data; /* Output data rate */ static const struct yas_kxdm_odr yas_kxdm_odr_tbl[] = { {3, YAS_KXDM_ODR_400HZ}, {5, YAS_KXDM_ODR_200HZ}, {10, YAS_KXDM_ODR_100HZ}, {20, YAS_KXDM_ODR_50HZ}, {40, YAS_KXDM_ODR_25HZ}, {100, YAS_KXDM_ODR_10HZ}, {1000, YAS_KXDM_ODR_1HZ}, }; /* Transformation matrix for chip mounting position */ static const int yas_kxdm_position_map[][3][3] = { { {-1, 0, 0}, { 0, -1, 0}, { 0, 0, 1} }, /* top/upper-left */ { { 0, -1, 0}, { 1, 0, 0}, { 0, 0, 1} }, /* top/upper-right */ { { 1, 0, 0}, { 0, 1, 0}, { 0, 0, 1} }, /* top/lower-right */ { { 0, 1, 0}, {-1, 0, 0}, { 0, 0, 1} }, /* top/lower-left */ { { 1, 0, 0}, { 0, -1, 0}, { 0, 0, -1} }, /* bottom/upper-left */ { { 0, 1, 0}, { 1, 0, 0}, { 0, 0, -1} }, /* bottom/upper-right */ { {-1, 0, 0}, { 0, 1, 0}, { 0, 0, -1} }, /* bottom/lower-right */ { { 0, -1, 0}, {-1, 0, 0}, { 0, 0, -1} }, /* bottom/lower-right */ }; /* 
-------------------------------------------------------------------------- */ /* Prototype declaration */ /* -------------------------------------------------------------------------- */ static void yas_kxdm_init_data(void); static int yas_kxdm_ischg_enable(int); static int yas_kxdm_read_reg(unsigned char, unsigned char *, unsigned char); static int yas_kxdm_write_reg(unsigned char, unsigned char *, unsigned char); static int yas_kxdm_read_reg_byte(unsigned char); static int yas_kxdm_write_reg_byte(int, int); static int yas_kxdm_lock(void); static int yas_kxdm_unlock(void); static int yas_kxdm_i2c_open(void); static int yas_kxdm_i2c_close(void); #if 0 static int yas_kxdm_msleep(int); #endif static int yas_kxdm_power_up(void); static int yas_kxdm_power_down(void); static int yas_kxdm_init(void); static int yas_kxdm_term(void); static int yas_kxdm_get_delay(void); static int yas_kxdm_set_delay(int); static int yas_kxdm_get_offset(struct yas_vector *); static int yas_kxdm_set_offset(struct yas_vector *); static int yas_kxdm_get_enable(void); static int yas_kxdm_set_enable(int); static int yas_kxdm_get_filter(struct yas_acc_filter *); static int yas_kxdm_set_filter(struct yas_acc_filter *); static int yas_kxdm_get_filter_enable(void); static int yas_kxdm_set_filter_enable(int); static int yas_kxdm_get_position(void); static int yas_kxdm_set_position(int); static int yas_kxdm_measure(int *, int *); /* -------------------------------------------------------------------------- */ /* Local functions */ /* -------------------------------------------------------------------------- */ static void yas_kxdm_init_data(void) { acc_data.initialize = 0; acc_data.enable = 0; acc_data.delay = YAS_KXDM_DEFAULT_DELAY; acc_data.offset.v[0] = 0; acc_data.offset.v[1] = 0; acc_data.offset.v[2] = 0; acc_data.position = YAS_KXDM_DEFAULT_POSITION; acc_data.threshold = YAS_ACC_DEFAULT_FILTER_THRESH; acc_data.filter_enable = 0; acc_data.odr = 0; acc_data.last.x = 0; acc_data.last.y = 0; 
acc_data.last.z = 0; acc_data.last.x_raw = 0; acc_data.last.y_raw = 0; acc_data.last.z_raw = 0; } static int yas_kxdm_ischg_enable(int enable) { if (acc_data.enable == enable) return 0; return 1; } /* register access functions */ static int yas_kxdm_read_reg(unsigned char adr, unsigned char *buf, unsigned char len) { struct yas_acc_driver_callback *cbk = &pcb->callback; int err; if (acc_data.i2c_open) { err = cbk->device_read(adr, buf, len); if (err != 0) return err; return err; } return YAS_NO_ERROR; } static int yas_kxdm_write_reg(unsigned char adr, unsigned char *buf, unsigned char len) { struct yas_acc_driver_callback *cbk = &pcb->callback; int err; if (acc_data.i2c_open) { err = cbk->device_write(adr, buf, len); if (err != 0) return err; return err; } return YAS_NO_ERROR; } static int yas_kxdm_read_reg_byte(unsigned char adr) { unsigned char buf; int err; err = yas_kxdm_read_reg(adr, &buf, 1); if (err == 0) return buf; return 0; } static int yas_kxdm_write_reg_byte(int adr, int val) { return yas_kxdm_write_reg((unsigned char)adr, (unsigned char *)&val, 1); } #define yas_kxdm_read_bits(r) \ ((yas_kxdm_read_reg_byte(r##_REG) & r##_MASK) >> r##_SHIFT) #define yas_kxdm_update_bits(r, v) \ yas_kxdm_write_reg_byte(r##_REG, \ ((yas_kxdm_read_reg_byte(r##_REG) & \ ~r##_MASK) | ((v) << r##_SHIFT))) static int yas_kxdm_lock(void) { struct yas_acc_driver_callback *cbk = &pcb->callback; int err; if (cbk->lock != NULL && cbk->unlock != NULL) err = cbk->lock(); else err = YAS_NO_ERROR; return err; } static int yas_kxdm_unlock(void) { struct yas_acc_driver_callback *cbk = &pcb->callback; int err; if (cbk->lock != NULL && cbk->unlock != NULL) err = cbk->unlock(); else err = YAS_NO_ERROR; return err; } static int yas_kxdm_i2c_open(void) { struct yas_acc_driver_callback *cbk = &pcb->callback; int err; if (acc_data.i2c_open == 0) { err = cbk->device_open(); if (err != YAS_NO_ERROR) return YAS_ERROR_DEVICE_COMMUNICATION; acc_data.i2c_open = 1; } return YAS_NO_ERROR; } static int 
yas_kxdm_i2c_close(void) { struct yas_acc_driver_callback *cbk = &pcb->callback; int err; if (acc_data.i2c_open != 0) { err = cbk->device_close(); if (err != YAS_NO_ERROR) return YAS_ERROR_DEVICE_COMMUNICATION; acc_data.i2c_open = 0; } return YAS_NO_ERROR; } #if 0 static int yas_kxdm_msleep(int msec) { struct yas_acc_driver_callback *cbk = &pcb->callback; if (msec <= 0) return YAS_ERROR_ARG; cbk->msleep(msec); return YAS_NO_ERROR; } #endif static int yas_kxdm_power_up(void) { yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG1, acc_data.odr | YAS_KXDM_XYZ_ENABLE); //yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG4, YAS_KXDM_HR_ENABLE); return YAS_NO_ERROR; } static int yas_kxdm_power_down(void) { yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG1, YAS_KXDM_XYZ_ENABLE); yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG4, 0); return YAS_NO_ERROR; } static int yas_kxdm_init(void) { struct yas_acc_filter filter; int err; int id; /* Check intialize */ if (acc_data.initialize == 1) return YAS_ERROR_NOT_INITIALIZED; /* Init data */ yas_kxdm_init_data(); /* Open i2c */ err = yas_kxdm_i2c_open(); if (err != YAS_NO_ERROR) return err; /* Check id */ id = yas_kxdm_read_reg_byte(YAS_KXDM_WHO_AM_I_REG); if (id != YAS_KXDM_WHO_AM_I) { yas_kxdm_i2c_close(); return YAS_ERROR_CHIP_ID; } yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG1, YAS_KXDM_XYZ_ENABLE); yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG2, 0); yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG3, 0); yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG4, YAS_KXDM_FS_2G); yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG5, 0); yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG6, 0); acc_data.initialize = 1; yas_kxdm_set_delay(YAS_KXDM_DEFAULT_DELAY); yas_kxdm_set_position(YAS_KXDM_DEFAULT_POSITION); filter.threshold = YAS_ACC_DEFAULT_FILTER_THRESH; yas_kxdm_set_filter(&filter); return YAS_NO_ERROR; } static int yas_kxdm_term(void) { /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; yas_kxdm_set_enable(0); /* Close I2C */ yas_kxdm_i2c_close(); 
acc_data.initialize = 0; return YAS_NO_ERROR; } static int yas_kxdm_get_delay(void) { /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; return acc_data.delay; } static int yas_kxdm_set_delay(int delay) { int i; /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; /* Determine optimum odr */ for (i = 1; i < (int)(sizeof(yas_kxdm_odr_tbl) / sizeof(struct yas_kxdm_odr)) && delay >= (int)yas_kxdm_odr_tbl[i].delay; i++) ; acc_data.odr = yas_kxdm_odr_tbl[i-1].odr; acc_data.delay = delay; if (yas_kxdm_get_enable()) yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG1, acc_data.odr| YAS_KXDM_XYZ_ENABLE); else yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG1, YAS_KXDM_XYZ_ENABLE); return YAS_NO_ERROR; } static int yas_kxdm_get_offset(struct yas_vector *offset) { /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; *offset = acc_data.offset; return YAS_NO_ERROR; } static int yas_kxdm_set_offset(struct yas_vector *offset) { /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; acc_data.offset = *offset; return YAS_NO_ERROR; } static int yas_kxdm_get_enable(void) { /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; return acc_data.enable; } static int yas_kxdm_set_enable(int enable) { int err; /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; if (yas_kxdm_ischg_enable(enable)) { if (enable) { /* Open i2c */ err = yas_kxdm_i2c_open(); if (err != YAS_NO_ERROR) return err; yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG1, YAS_KXDM_XYZ_ENABLE); yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG2, 0); yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG3, 0); yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG4, YAS_KXDM_FS_2G); yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG5, 0); yas_kxdm_write_reg_byte(YAS_KXDM_CTRL_REG6, 0); yas_kxdm_set_delay(acc_data.delay); yas_kxdm_power_up(); } else { yas_kxdm_power_down(); err = 
yas_kxdm_i2c_close(); if (err != YAS_NO_ERROR) return err; } } acc_data.enable = enable; return YAS_NO_ERROR; } static int yas_kxdm_get_filter(struct yas_acc_filter *filter) { /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; filter->threshold = acc_data.threshold; return YAS_NO_ERROR; } static int yas_kxdm_set_filter(struct yas_acc_filter *filter) { /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; acc_data.threshold = filter->threshold; return YAS_NO_ERROR; } static int yas_kxdm_get_filter_enable(void) { /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; return acc_data.filter_enable; } static int yas_kxdm_set_filter_enable(int enable) { /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; acc_data.filter_enable = enable; return YAS_NO_ERROR; } static int yas_kxdm_get_position(void) { /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; return acc_data.position; } static int yas_kxdm_set_position(int position) { /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; acc_data.position = position; return YAS_NO_ERROR; } static int yas_kxdm_data_filter(int data[], int raw[], struct yas_kxdm_acceleration *accel) { int filter_enable = acc_data.filter_enable; int threshold = acc_data.threshold; if (filter_enable) { if ((ABS(acc_data.last.x - data[0]) > threshold) || (ABS(acc_data.last.y - data[1]) > threshold) || (ABS(acc_data.last.z - data[2]) > threshold)) { accel->x = data[0]; accel->y = data[1]; accel->z = data[2]; accel->x_raw = raw[0]; accel->y_raw = raw[1]; accel->z_raw = raw[2]; } else { *accel = acc_data.last; } } else { accel->x = data[0]; accel->y = data[1]; accel->z = data[2]; accel->x_raw = raw[0]; accel->y_raw = raw[1]; accel->z_raw = raw[2]; } return YAS_NO_ERROR; } static int yas_kxdm_measure(int *out_data, int *out_raw) { struct 
yas_kxdm_acceleration accel; unsigned char buf[5]; int raw[3], data[3]; int pos = acc_data.position; int i, j; /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; /* Read acceleration data */ if (yas_kxdm_read_reg(YAS_KXDM_ACC_REG | 0x80, buf, 5) != 0) for (i = 0; i < 3; i++) raw[i] = 0; else for (i = 0; i < 3; i++) raw[i] = (int)((s8)buf[2*i] * 4); /* for X, Y, Z axis */ for (i = 0; i < 3; i++) { /* coordinate transformation */ data[i] = 0; for (j = 0; j < 3; j++) data[i] += raw[j] * yas_kxdm_position_map[pos][i][j]; /* normalization */ data[i] *= (YAS_KXDM_GRAVITY_EARTH/ YAS_KXDM_RESOLUTION); } yas_kxdm_data_filter(data, raw, &accel); out_data[0] = accel.x - acc_data.offset.v[0]; out_data[1] = accel.y - acc_data.offset.v[1]; out_data[2] = accel.z - acc_data.offset.v[2]; out_raw[0] = accel.x_raw; out_raw[1] = accel.y_raw; out_raw[2] = accel.z_raw; acc_data.last = accel; return YAS_NO_ERROR; } /* -------------------------------------------------------------------------- */ static int yas_init(void) { int err; /* Check intialize */ if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; yas_kxdm_lock(); err = yas_kxdm_init(); yas_kxdm_unlock(); return err; } static int yas_term(void) { int err; /* Check intialize */ if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; yas_kxdm_lock(); err = yas_kxdm_term(); yas_kxdm_unlock(); return err; } static int yas_get_delay(void) { int ret; /* Check intialize */ if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; yas_kxdm_lock(); ret = yas_kxdm_get_delay(); yas_kxdm_unlock(); return ret; } static int yas_set_delay(int delay) { int err; /* Check intialize */ if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; if (delay < 0 || delay > YAS_KXDM_MAX_DELAY) return YAS_ERROR_ARG; else if (delay < YAS_KXDM_MIN_DELAY) delay = YAS_KXDM_MIN_DELAY; yas_kxdm_lock(); err = yas_kxdm_set_delay(delay); yas_kxdm_unlock(); return err; } static int yas_get_offset(struct yas_vector *offset) { int err; /* Check intialize 
*/
/*
 * Thin wrapper layer for the KXDM accelerometer driver.  Each yas_* entry
 * point validates its arguments, checks that the driver has been initialized
 * (pcb != NULL), then forwards to the matching yas_kxdm_* worker while
 * holding the driver lock.  yas_acc_driver_init() wires these wrappers into
 * the caller-supplied struct yas_acc_driver vtable.
 */
 if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; if (offset == NULL) return YAS_ERROR_ARG; yas_kxdm_lock(); err = yas_kxdm_get_offset(offset); yas_kxdm_unlock(); return err; } static int yas_set_offset(struct yas_vector *offset) { int err; /* Check initialize */ if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; if (offset == NULL || offset->v[0] < YAS_KXDM_ABSMIN_2G || YAS_KXDM_ABSMAX_2G < offset->v[0] || offset->v[1] < YAS_KXDM_ABSMIN_2G || YAS_KXDM_ABSMAX_2G < offset->v[1] || offset->v[2] < YAS_KXDM_ABSMIN_2G || YAS_KXDM_ABSMAX_2G < offset->v[2]) return YAS_ERROR_ARG; yas_kxdm_lock(); err = yas_kxdm_set_offset(offset); yas_kxdm_unlock(); return err; } static int yas_get_enable(void) { int err; /* Check initialize */ if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; yas_kxdm_lock(); err = yas_kxdm_get_enable(); yas_kxdm_unlock(); return err; } static int yas_set_enable(int enable) { int err; /* Check initialize */ if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; if (enable != 0) enable = 1; yas_kxdm_lock(); err = yas_kxdm_set_enable(enable); yas_kxdm_unlock(); return err; } static int yas_get_filter(struct yas_acc_filter *filter) { int err; /* Check initialize */ if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; if (filter == NULL) return YAS_ERROR_ARG; yas_kxdm_lock(); err = yas_kxdm_get_filter(filter); yas_kxdm_unlock(); return err; } static int yas_set_filter(struct yas_acc_filter *filter) { int err; /* Check initialize */ if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; if (filter == NULL || filter->threshold < 0 || filter->threshold > YAS_KXDM_ABSMAX_2G) return YAS_ERROR_ARG; yas_kxdm_lock(); err = yas_kxdm_set_filter(filter); yas_kxdm_unlock(); return err; } static int yas_get_filter_enable(void) { int err; /* Check initialize */ if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; yas_kxdm_lock(); err = yas_kxdm_get_filter_enable(); yas_kxdm_unlock(); return err; } static int yas_set_filter_enable(int enable) { int err; /* Check initialize */ if (pcb ==
NULL) return YAS_ERROR_NOT_INITIALIZED; if (enable != 0) enable = 1; yas_kxdm_lock(); err = yas_kxdm_set_filter_enable(enable); yas_kxdm_unlock(); return err; } static int yas_get_position(void) { int err; /* Check initialize */ if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; yas_kxdm_lock(); err = yas_kxdm_get_position(); yas_kxdm_unlock(); return err; } static int yas_set_position(int position) { int err; /* Check initialize */ if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; if (!((position >= 0) && (position <= 7))) return YAS_ERROR_ARG; yas_kxdm_lock(); err = yas_kxdm_set_position(position); yas_kxdm_unlock(); return err; } static int yas_measure(struct yas_acc_data *data) { int err; /* Check initialize */ if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; if (data == NULL) return YAS_ERROR_ARG; yas_kxdm_lock(); err = yas_kxdm_measure(data->xyz.v, data->raw.v); yas_kxdm_unlock(); return err; } #if DEBUG static int yas_get_register(uint8_t adr, uint8_t *val) { if (pcb == NULL) return YAS_ERROR_NOT_INITIALIZED; /* Check initialize */ if (acc_data.initialize == 0) return YAS_ERROR_NOT_INITIALIZED; *val = yas_kxdm_read_reg_byte(adr); return YAS_NO_ERROR; } #endif /* -------------------------------------------------------------------------- */ /* Global function */ /* -------------------------------------------------------------------------- */ int yas_acc_driver_init(struct yas_acc_driver *f) { struct yas_acc_driver_callback *cbk; /* Check parameter */ if (f == NULL) return YAS_ERROR_ARG; cbk = &f->callback; if (cbk->device_open == NULL || cbk->device_close == NULL || cbk->device_write == NULL || cbk->device_read == NULL || cbk->msleep == NULL) return YAS_ERROR_ARG; /* Clear initialize */ yas_kxdm_term(); /* Set callback interface */ cb.callback = *cbk; /* Set driver interface */ f->init = yas_init; f->term = yas_term; f->get_delay = yas_get_delay; f->set_delay = yas_set_delay; f->get_offset = yas_get_offset; f->set_offset = yas_set_offset; f->get_enable =
yas_get_enable; f->set_enable = yas_set_enable; f->get_filter = yas_get_filter; f->set_filter = yas_set_filter; f->get_filter_enable = yas_get_filter_enable; f->set_filter_enable = yas_set_filter_enable; f->get_position = yas_get_position; f->set_position = yas_set_position; f->measure = yas_measure; #if DEBUG f->get_register = yas_get_register; #endif pcb = &cb; return YAS_NO_ERROR; }
gpl-2.0
MinimumLaw/uccu-kernel
drivers/infiniband/core/cm.c
592
108504
/* * Copyright (c) 2004-2007 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/
/*
 * InfiniBand CM core (cm.c): module-wide state (struct ib_cm cm), per-port
 * sysfs counter plumbing, the central data structures (cm_port, cm_device,
 * cm_av, cm_work, cm_timewait_info, cm_id_private), and the MAD send-buffer
 * allocation/free helpers used by every CM message path.
 */
 #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/err.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/random.h> #include <linux/rbtree.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/workqueue.h> #include <linux/kdev_t.h> #include <rdma/ib_cache.h> #include <rdma/ib_cm.h> #include "cm_msgs.h" MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("InfiniBand CM"); MODULE_LICENSE("Dual BSD/GPL"); static void cm_add_one(struct ib_device *device); static void cm_remove_one(struct ib_device *device); static struct ib_client cm_client = { .name = "cm", .add = cm_add_one, .remove = cm_remove_one }; static struct ib_cm { spinlock_t lock; struct list_head device_list; rwlock_t device_lock; struct rb_root listen_service_table; u64 listen_service_id; /* struct rb_root peer_service_table; todo: fix peer to peer */ struct rb_root remote_qp_table; struct rb_root remote_id_table; struct rb_root remote_sidr_table; struct idr local_id_table; __be32 random_id_operand; struct list_head timewait_list; struct workqueue_struct *wq; } cm; /* Counter indexes ordered by attribute ID */ enum { CM_REQ_COUNTER, CM_MRA_COUNTER, CM_REJ_COUNTER, CM_REP_COUNTER, CM_RTU_COUNTER, CM_DREQ_COUNTER, CM_DREP_COUNTER, CM_SIDR_REQ_COUNTER, CM_SIDR_REP_COUNTER, CM_LAP_COUNTER, CM_APR_COUNTER, CM_ATTR_COUNT, CM_ATTR_ID_OFFSET = 0x0010, }; enum { CM_XMIT, CM_XMIT_RETRIES, CM_RECV, CM_RECV_DUPLICATES, CM_COUNTER_GROUPS }; static char const counter_group_names[CM_COUNTER_GROUPS] [sizeof("cm_rx_duplicates")] = { "cm_tx_msgs", "cm_tx_retries", "cm_rx_msgs", "cm_rx_duplicates" }; struct cm_counter_group { struct kobject obj; atomic_long_t counter[CM_ATTR_COUNT]; }; struct cm_counter_attribute { struct attribute attr; int index; }; #define CM_COUNTER_ATTR(_name, _index) \ struct cm_counter_attribute cm_##_name##_counter_attr = { \ .attr = { .name = __stringify(_name), .mode = 0444 }, \ .index = 
_index \ } static CM_COUNTER_ATTR(req, CM_REQ_COUNTER); static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER); static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER); static CM_COUNTER_ATTR(rep, CM_REP_COUNTER); static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER); static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER); static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER); static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER); static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER); static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER); static CM_COUNTER_ATTR(apr, CM_APR_COUNTER); static struct attribute *cm_counter_default_attrs[] = { &cm_req_counter_attr.attr, &cm_mra_counter_attr.attr, &cm_rej_counter_attr.attr, &cm_rep_counter_attr.attr, &cm_rtu_counter_attr.attr, &cm_dreq_counter_attr.attr, &cm_drep_counter_attr.attr, &cm_sidr_req_counter_attr.attr, &cm_sidr_rep_counter_attr.attr, &cm_lap_counter_attr.attr, &cm_apr_counter_attr.attr, NULL }; struct cm_port { struct cm_device *cm_dev; struct ib_mad_agent *mad_agent; struct kobject port_obj; u8 port_num; struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; }; struct cm_device { struct list_head list; struct ib_device *ib_device; struct device *device; u8 ack_delay; struct cm_port *port[0]; }; struct cm_av { struct cm_port *port; union ib_gid dgid; struct ib_ah_attr ah_attr; u16 pkey_index; u8 timeout; }; struct cm_work { struct delayed_work work; struct list_head list; struct cm_port *port; struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ __be32 local_id; /* Established / timewait */ __be32 remote_id; struct ib_cm_event cm_event; struct ib_sa_path_rec path[0]; }; struct cm_timewait_info { struct cm_work work; /* Must be first. 
*/ struct list_head list; struct rb_node remote_qp_node; struct rb_node remote_id_node; __be64 remote_ca_guid; __be32 remote_qpn; u8 inserted_remote_qp; u8 inserted_remote_id; }; struct cm_id_private { struct ib_cm_id id; struct rb_node service_node; struct rb_node sidr_id_node; spinlock_t lock; /* Do not acquire inside cm.lock */ struct completion comp; atomic_t refcount; struct ib_mad_send_buf *msg; struct cm_timewait_info *timewait_info; /* todo: use alternate port on send failure */ struct cm_av av; struct cm_av alt_av; struct ib_cm_compare_data *compare_data; void *private_data; __be64 tid; __be32 local_qpn; __be32 remote_qpn; enum ib_qp_type qp_type; __be32 sq_psn; __be32 rq_psn; int timeout_ms; enum ib_mtu path_mtu; __be16 pkey; u8 private_data_len; u8 max_cm_retries; u8 peer_to_peer; u8 responder_resources; u8 initiator_depth; u8 retry_count; u8 rnr_retry_count; u8 service_timeout; u8 target_ack_delay; struct list_head work_list; atomic_t work_count; }; static void cm_work_handler(struct work_struct *work); static inline void cm_deref_id(struct cm_id_private *cm_id_priv) { if (atomic_dec_and_test(&cm_id_priv->refcount)) complete(&cm_id_priv->comp); } static int cm_alloc_msg(struct cm_id_private *cm_id_priv, struct ib_mad_send_buf **msg) { struct ib_mad_agent *mad_agent; struct ib_mad_send_buf *m; struct ib_ah *ah; mad_agent = cm_id_priv->av.port->mad_agent; ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr); if (IS_ERR(ah)) return PTR_ERR(ah); m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, cm_id_priv->av.pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC); if (IS_ERR(m)) { ib_destroy_ah(ah); return PTR_ERR(m); } /* Timeout set by caller if response is expected. 
*/ m->ah = ah; m->retries = cm_id_priv->max_cm_retries; atomic_inc(&cm_id_priv->refcount); m->context[0] = cm_id_priv; *msg = m; return 0; } static int cm_alloc_response_msg(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, struct ib_mad_send_buf **msg) { struct ib_mad_send_buf *m; struct ib_ah *ah; ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc, mad_recv_wc->recv_buf.grh, port->port_num); if (IS_ERR(ah)) return PTR_ERR(ah); m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC); if (IS_ERR(m)) { ib_destroy_ah(ah); return PTR_ERR(m); } m->ah = ah; *msg = m; return 0; } static void cm_free_msg(struct ib_mad_send_buf *msg) { ib_destroy_ah(msg->ah); if (msg->context[0]) cm_deref_id(msg->context[0]); ib_free_send_mad(msg); } static void * cm_copy_private_data(const void *private_data, u8 private_data_len) { void *data; if (!private_data || !private_data_len) return NULL; data = kmemdup(private_data, private_data_len, GFP_KERNEL); if (!data) return ERR_PTR(-ENOMEM); return data; } static void cm_set_private_data(struct cm_id_private *cm_id_priv, void *private_data, u8 private_data_len) { if (cm_id_priv->private_data && cm_id_priv->private_data_len) kfree(cm_id_priv->private_data); cm_id_priv->private_data = private_data; cm_id_priv->private_data_len = private_data_len; } static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, struct ib_grh *grh, struct cm_av *av) { av->port = port; av->pkey_index = wc->pkey_index; ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc, grh, &av->ah_attr); } static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) { struct cm_device *cm_dev; struct cm_port *port = NULL; unsigned long flags; int ret; u8 p; read_lock_irqsave(&cm.device_lock, flags); list_for_each_entry(cm_dev, &cm.device_list, list) { if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid, &p, NULL)) { port = cm_dev->port[p-1]; 
/*
 * Local-id allocation (idr-based, XORed with a random operand to make ids
 * hard to guess), compare-data masking helpers, endian-stable rb-tree
 * comparators, and the listen-service / remote-id / remote-qpn rb-tree
 * insert-and-lookup routines.  Callers must hold cm.lock for all tree
 * mutations.
 */
break; } } read_unlock_irqrestore(&cm.device_lock, flags); if (!port) return -EINVAL; ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num, be16_to_cpu(path->pkey), &av->pkey_index); if (ret) return ret; av->port = port; ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path, &av->ah_attr); av->timeout = path->packet_life_time + 1; return 0; } static int cm_alloc_id(struct cm_id_private *cm_id_priv) { unsigned long flags; int ret, id; static int next_id; do { spin_lock_irqsave(&cm.lock, flags); ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id, &id); if (!ret) next_id = ((unsigned) id + 1) & MAX_ID_MASK; spin_unlock_irqrestore(&cm.lock, flags); } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; return ret; } static void cm_free_id(__be32 local_id) { spin_lock_irq(&cm.lock); idr_remove(&cm.local_id_table, (__force int) (local_id ^ cm.random_id_operand)); spin_unlock_irq(&cm.lock); } static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id) { struct cm_id_private *cm_id_priv; cm_id_priv = idr_find(&cm.local_id_table, (__force int) (local_id ^ cm.random_id_operand)); if (cm_id_priv) { if (cm_id_priv->id.remote_id == remote_id) atomic_inc(&cm_id_priv->refcount); else cm_id_priv = NULL; } return cm_id_priv; } static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id) { struct cm_id_private *cm_id_priv; spin_lock_irq(&cm.lock); cm_id_priv = cm_get_id(local_id, remote_id); spin_unlock_irq(&cm.lock); return cm_id_priv; } static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask) { int i; for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++) ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] & ((unsigned long *) mask)[i]; } static int cm_compare_data(struct ib_cm_compare_data *src_data, struct ib_cm_compare_data *dst_data) { u8 src[IB_CM_COMPARE_SIZE]; u8 dst[IB_CM_COMPARE_SIZE]; if (!src_data || !dst_data) 
return 0; cm_mask_copy(src, src_data->data, dst_data->mask); cm_mask_copy(dst, dst_data->data, src_data->mask); return memcmp(src, dst, IB_CM_COMPARE_SIZE); } static int cm_compare_private_data(u8 *private_data, struct ib_cm_compare_data *dst_data) { u8 src[IB_CM_COMPARE_SIZE]; if (!dst_data) return 0; cm_mask_copy(src, private_data, dst_data->mask); return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE); } /* * Trivial helpers to strip endian annotation and compare; the * endianness doesn't actually matter since we just need a stable * order for the RB tree. */ static int be32_lt(__be32 a, __be32 b) { return (__force u32) a < (__force u32) b; } static int be32_gt(__be32 a, __be32 b) { return (__force u32) a > (__force u32) b; } static int be64_lt(__be64 a, __be64 b) { return (__force u64) a < (__force u64) b; } static int be64_gt(__be64 a, __be64 b) { return (__force u64) a > (__force u64) b; } static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv) { struct rb_node **link = &cm.listen_service_table.rb_node; struct rb_node *parent = NULL; struct cm_id_private *cur_cm_id_priv; __be64 service_id = cm_id_priv->id.service_id; __be64 service_mask = cm_id_priv->id.service_mask; int data_cmp; while (*link) { parent = *link; cur_cm_id_priv = rb_entry(parent, struct cm_id_private, service_node); data_cmp = cm_compare_data(cm_id_priv->compare_data, cur_cm_id_priv->compare_data); if ((cur_cm_id_priv->id.service_mask & service_id) == (service_mask & cur_cm_id_priv->id.service_id) && (cm_id_priv->id.device == cur_cm_id_priv->id.device) && !data_cmp) return cur_cm_id_priv; if (cm_id_priv->id.device < cur_cm_id_priv->id.device) link = &(*link)->rb_left; else if (cm_id_priv->id.device > cur_cm_id_priv->id.device) link = &(*link)->rb_right; else if (be64_lt(service_id, cur_cm_id_priv->id.service_id)) link = &(*link)->rb_left; else if (be64_gt(service_id, cur_cm_id_priv->id.service_id)) link = &(*link)->rb_right; else if (data_cmp < 0) link = 
&(*link)->rb_left; else link = &(*link)->rb_right; } rb_link_node(&cm_id_priv->service_node, parent, link); rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table); return NULL; } static struct cm_id_private * cm_find_listen(struct ib_device *device, __be64 service_id, u8 *private_data) { struct rb_node *node = cm.listen_service_table.rb_node; struct cm_id_private *cm_id_priv; int data_cmp; while (node) { cm_id_priv = rb_entry(node, struct cm_id_private, service_node); data_cmp = cm_compare_private_data(private_data, cm_id_priv->compare_data); if ((cm_id_priv->id.service_mask & service_id) == cm_id_priv->id.service_id && (cm_id_priv->id.device == device) && !data_cmp) return cm_id_priv; if (device < cm_id_priv->id.device) node = node->rb_left; else if (device > cm_id_priv->id.device) node = node->rb_right; else if (be64_lt(service_id, cm_id_priv->id.service_id)) node = node->rb_left; else if (be64_gt(service_id, cm_id_priv->id.service_id)) node = node->rb_right; else if (data_cmp < 0) node = node->rb_left; else node = node->rb_right; } return NULL; } static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info *timewait_info) { struct rb_node **link = &cm.remote_id_table.rb_node; struct rb_node *parent = NULL; struct cm_timewait_info *cur_timewait_info; __be64 remote_ca_guid = timewait_info->remote_ca_guid; __be32 remote_id = timewait_info->work.remote_id; while (*link) { parent = *link; cur_timewait_info = rb_entry(parent, struct cm_timewait_info, remote_id_node); if (be32_lt(remote_id, cur_timewait_info->work.remote_id)) link = &(*link)->rb_left; else if (be32_gt(remote_id, cur_timewait_info->work.remote_id)) link = &(*link)->rb_right; else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_left; else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_right; else return cur_timewait_info; } timewait_info->inserted_remote_id = 1; 
rb_link_node(&timewait_info->remote_id_node, parent, link); rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table); return NULL; } static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid, __be32 remote_id) { struct rb_node *node = cm.remote_id_table.rb_node; struct cm_timewait_info *timewait_info; while (node) { timewait_info = rb_entry(node, struct cm_timewait_info, remote_id_node); if (be32_lt(remote_id, timewait_info->work.remote_id)) node = node->rb_left; else if (be32_gt(remote_id, timewait_info->work.remote_id)) node = node->rb_right; else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid)) node = node->rb_left; else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid)) node = node->rb_right; else return timewait_info; } return NULL; } static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info *timewait_info) { struct rb_node **link = &cm.remote_qp_table.rb_node; struct rb_node *parent = NULL; struct cm_timewait_info *cur_timewait_info; __be64 remote_ca_guid = timewait_info->remote_ca_guid; __be32 remote_qpn = timewait_info->remote_qpn; while (*link) { parent = *link; cur_timewait_info = rb_entry(parent, struct cm_timewait_info, remote_qp_node); if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn)) link = &(*link)->rb_left; else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn)) link = &(*link)->rb_right; else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_left; else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_right; else return cur_timewait_info; } timewait_info->inserted_remote_qp = 1; rb_link_node(&timewait_info->remote_qp_node, parent, link); rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table); return NULL; } static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private *cm_id_priv) { struct rb_node **link = &cm.remote_sidr_table.rb_node; struct rb_node *parent = NULL; struct 
cm_id_private *cur_cm_id_priv; union ib_gid *port_gid = &cm_id_priv->av.dgid; __be32 remote_id = cm_id_priv->id.remote_id; while (*link) { parent = *link; cur_cm_id_priv = rb_entry(parent, struct cm_id_private, sidr_id_node); if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id)) link = &(*link)->rb_left; else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id)) link = &(*link)->rb_right; else { int cmp; cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid, sizeof *port_gid); if (cmp < 0) link = &(*link)->rb_left; else if (cmp > 0) link = &(*link)->rb_right; else return cur_cm_id_priv; } } rb_link_node(&cm_id_priv->sidr_id_node, parent, link); rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); return NULL; } static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv, enum ib_cm_sidr_status status) { struct ib_cm_sidr_rep_param param; memset(&param, 0, sizeof param); param.status = status; ib_send_cm_sidr_rep(&cm_id_priv->id, &param); } struct ib_cm_id *ib_create_cm_id(struct ib_device *device, ib_cm_handler cm_handler, void *context) { struct cm_id_private *cm_id_priv; int ret; cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL); if (!cm_id_priv) return ERR_PTR(-ENOMEM); cm_id_priv->id.state = IB_CM_IDLE; cm_id_priv->id.device = device; cm_id_priv->id.cm_handler = cm_handler; cm_id_priv->id.context = context; cm_id_priv->id.remote_cm_qpn = 1; ret = cm_alloc_id(cm_id_priv); if (ret) goto error; spin_lock_init(&cm_id_priv->lock); init_completion(&cm_id_priv->comp); INIT_LIST_HEAD(&cm_id_priv->work_list); atomic_set(&cm_id_priv->work_count, -1); atomic_set(&cm_id_priv->refcount, 1); return &cm_id_priv->id; error: kfree(cm_id_priv); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(ib_create_cm_id); static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv) { struct cm_work *work; if (list_empty(&cm_id_priv->work_list)) return NULL; work = list_entry(cm_id_priv->work_list.next, struct cm_work, list); list_del(&work->list); return work; } static 
/*
 * Work-item teardown, IBA time-code conversion, ack-timeout arithmetic,
 * timewait bookkeeping, and cm_destroy_id()'s state-machine driven
 * teardown of a connection id.
 */
void cm_free_work(struct cm_work *work) { if (work->mad_recv_wc) ib_free_recv_mad(work->mad_recv_wc); kfree(work); } static inline int cm_convert_to_ms(int iba_time) { /* approximate conversion to ms from 4.096us x 2^iba_time */ return 1 << max(iba_time - 8, 0); } /* * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time * Because of how ack_timeout is stored, adding one doubles the timeout. * To avoid large timeouts, select the max(ack_delay, life_time + 1), and * increment it (round up) only if the other is within 50%. */ static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time) { int ack_timeout = packet_life_time + 1; if (ack_timeout >= ca_ack_delay) ack_timeout += (ca_ack_delay >= (ack_timeout - 1)); else ack_timeout = ca_ack_delay + (ack_timeout >= (ca_ack_delay - 1)); return min(31, ack_timeout); } static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info) { if (timewait_info->inserted_remote_id) { rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table); timewait_info->inserted_remote_id = 0; } if (timewait_info->inserted_remote_qp) { rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table); timewait_info->inserted_remote_qp = 0; } } static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id) { struct cm_timewait_info *timewait_info; timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL); if (!timewait_info) return ERR_PTR(-ENOMEM); timewait_info->work.local_id = local_id; INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler); timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; return timewait_info; } static void cm_enter_timewait(struct cm_id_private *cm_id_priv) { int wait_time; unsigned long flags; spin_lock_irqsave(&cm.lock, flags); cm_cleanup_timewait(cm_id_priv->timewait_info); list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list); spin_unlock_irqrestore(&cm.lock, flags); /* * The cm_id could be destroyed by the user before we exit timewait. 
* To protect against this, we search for the cm_id after exiting * timewait before notifying the user that we've exited timewait. */ cm_id_priv->id.state = IB_CM_TIMEWAIT; wait_time = cm_convert_to_ms(cm_id_priv->av.timeout); queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, msecs_to_jiffies(wait_time)); cm_id_priv->timewait_info = NULL; } static void cm_reset_to_idle(struct cm_id_private *cm_id_priv) { unsigned long flags; cm_id_priv->id.state = IB_CM_IDLE; if (cm_id_priv->timewait_info) { spin_lock_irqsave(&cm.lock, flags); cm_cleanup_timewait(cm_id_priv->timewait_info); spin_unlock_irqrestore(&cm.lock, flags); kfree(cm_id_priv->timewait_info); cm_id_priv->timewait_info = NULL; } } static void cm_destroy_id(struct ib_cm_id *cm_id, int err) { struct cm_id_private *cm_id_priv; struct cm_work *work; cm_id_priv = container_of(cm_id, struct cm_id_private, id); retest: spin_lock_irq(&cm_id_priv->lock); switch (cm_id->state) { case IB_CM_LISTEN: cm_id->state = IB_CM_IDLE; spin_unlock_irq(&cm_id_priv->lock); spin_lock_irq(&cm.lock); rb_erase(&cm_id_priv->service_node, &cm.listen_service_table); spin_unlock_irq(&cm.lock); break; case IB_CM_SIDR_REQ_SENT: cm_id->state = IB_CM_IDLE; ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); spin_unlock_irq(&cm_id_priv->lock); break; case IB_CM_SIDR_REQ_RCVD: spin_unlock_irq(&cm_id_priv->lock); cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); break; case IB_CM_REQ_SENT: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); spin_unlock_irq(&cm_id_priv->lock); ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, &cm_id_priv->id.device->node_guid, sizeof cm_id_priv->id.device->node_guid, NULL, 0); break; case IB_CM_REQ_RCVD: if (err == -ENOMEM) { /* Do not reject to allow future retries. 
*/ cm_reset_to_idle(cm_id_priv); spin_unlock_irq(&cm_id_priv->lock); } else { spin_unlock_irq(&cm_id_priv->lock); ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); } break; case IB_CM_MRA_REQ_RCVD: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); /* Fall through */ case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: spin_unlock_irq(&cm_id_priv->lock); ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); break; case IB_CM_ESTABLISHED: spin_unlock_irq(&cm_id_priv->lock); ib_send_cm_dreq(cm_id, NULL, 0); goto retest; case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_enter_timewait(cm_id_priv); spin_unlock_irq(&cm_id_priv->lock); break; case IB_CM_DREQ_RCVD: spin_unlock_irq(&cm_id_priv->lock); ib_send_cm_drep(cm_id, NULL, 0); break; default: spin_unlock_irq(&cm_id_priv->lock); break; } cm_free_id(cm_id->local_id); cm_deref_id(cm_id_priv); wait_for_completion(&cm_id_priv->comp); while ((work = cm_dequeue_work(cm_id_priv)) != NULL) cm_free_work(work); kfree(cm_id_priv->compare_data); kfree(cm_id_priv->private_data); kfree(cm_id_priv); } void ib_destroy_cm_id(struct ib_cm_id *cm_id) { cm_destroy_id(cm_id, 0); } EXPORT_SYMBOL(ib_destroy_cm_id); int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask, struct ib_cm_compare_data *compare_data) { struct cm_id_private *cm_id_priv, *cur_cm_id_priv; unsigned long flags; int ret = 0; service_mask = service_mask ? 
/*
 * Listen registration, TID/MAD-header formatting, REQ message construction
 * from the user's ib_cm_req_param, parameter validation, and the
 * ib_send_cm_req() entry point that posts the REQ MAD.
 */
service_mask : ~cpu_to_be64(0); service_id &= service_mask; if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID && (service_id != IB_CM_ASSIGN_SERVICE_ID)) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); if (cm_id->state != IB_CM_IDLE) return -EINVAL; if (compare_data) { cm_id_priv->compare_data = kzalloc(sizeof *compare_data, GFP_KERNEL); if (!cm_id_priv->compare_data) return -ENOMEM; cm_mask_copy(cm_id_priv->compare_data->data, compare_data->data, compare_data->mask); memcpy(cm_id_priv->compare_data->mask, compare_data->mask, IB_CM_COMPARE_SIZE); } cm_id->state = IB_CM_LISTEN; spin_lock_irqsave(&cm.lock, flags); if (service_id == IB_CM_ASSIGN_SERVICE_ID) { cm_id->service_id = cpu_to_be64(cm.listen_service_id++); cm_id->service_mask = ~cpu_to_be64(0); } else { cm_id->service_id = service_id; cm_id->service_mask = service_mask; } cur_cm_id_priv = cm_insert_listen(cm_id_priv); spin_unlock_irqrestore(&cm.lock, flags); if (cur_cm_id_priv) { cm_id->state = IB_CM_IDLE; kfree(cm_id_priv->compare_data); cm_id_priv->compare_data = NULL; ret = -EBUSY; } return ret; } EXPORT_SYMBOL(ib_cm_listen); static __be64 cm_form_tid(struct cm_id_private *cm_id_priv, enum cm_msg_sequence msg_seq) { u64 hi_tid, low_tid; hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32; low_tid = (u64) ((__force u32)cm_id_priv->id.local_id | (msg_seq << 30)); return cpu_to_be64(hi_tid | low_tid); } static void cm_format_mad_hdr(struct ib_mad_hdr *hdr, __be16 attr_id, __be64 tid) { hdr->base_version = IB_MGMT_BASE_VERSION; hdr->mgmt_class = IB_MGMT_CLASS_CM; hdr->class_version = IB_CM_CLASS_VERSION; hdr->method = IB_MGMT_METHOD_SEND; hdr->attr_id = attr_id; hdr->tid = tid; } static void cm_format_req(struct cm_req_msg *req_msg, struct cm_id_private *cm_id_priv, struct ib_cm_req_param *param) { struct ib_sa_path_rec *pri_path = param->primary_path; struct ib_sa_path_rec *alt_path = param->alternate_path; cm_format_mad_hdr(&req_msg->hdr, 
CM_REQ_ATTR_ID, cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ)); req_msg->local_comm_id = cm_id_priv->id.local_id; req_msg->service_id = param->service_id; req_msg->local_ca_guid = cm_id_priv->id.device->node_guid; cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num)); cm_req_set_resp_res(req_msg, param->responder_resources); cm_req_set_init_depth(req_msg, param->initiator_depth); cm_req_set_remote_resp_timeout(req_msg, param->remote_cm_response_timeout); cm_req_set_qp_type(req_msg, param->qp_type); cm_req_set_flow_ctrl(req_msg, param->flow_control); cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn)); cm_req_set_local_resp_timeout(req_msg, param->local_cm_response_timeout); cm_req_set_retry_count(req_msg, param->retry_count); req_msg->pkey = param->primary_path->pkey; cm_req_set_path_mtu(req_msg, param->primary_path->mtu); cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count); cm_req_set_max_cm_retries(req_msg, param->max_cm_retries); cm_req_set_srq(req_msg, param->srq); if (pri_path->hop_limit <= 1) { req_msg->primary_local_lid = pri_path->slid; req_msg->primary_remote_lid = pri_path->dlid; } else { /* Work-around until there's a way to obtain remote LID info */ req_msg->primary_local_lid = IB_LID_PERMISSIVE; req_msg->primary_remote_lid = IB_LID_PERMISSIVE; } req_msg->primary_local_gid = pri_path->sgid; req_msg->primary_remote_gid = pri_path->dgid; cm_req_set_primary_flow_label(req_msg, pri_path->flow_label); cm_req_set_primary_packet_rate(req_msg, pri_path->rate); req_msg->primary_traffic_class = pri_path->traffic_class; req_msg->primary_hop_limit = pri_path->hop_limit; cm_req_set_primary_sl(req_msg, pri_path->sl); cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1)); cm_req_set_primary_local_ack_timeout(req_msg, cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, pri_path->packet_life_time)); if (alt_path) { if (alt_path->hop_limit <= 1) { req_msg->alt_local_lid = alt_path->slid; req_msg->alt_remote_lid = 
alt_path->dlid; } else { req_msg->alt_local_lid = IB_LID_PERMISSIVE; req_msg->alt_remote_lid = IB_LID_PERMISSIVE; } req_msg->alt_local_gid = alt_path->sgid; req_msg->alt_remote_gid = alt_path->dgid; cm_req_set_alt_flow_label(req_msg, alt_path->flow_label); cm_req_set_alt_packet_rate(req_msg, alt_path->rate); req_msg->alt_traffic_class = alt_path->traffic_class; req_msg->alt_hop_limit = alt_path->hop_limit; cm_req_set_alt_sl(req_msg, alt_path->sl); cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1)); cm_req_set_alt_local_ack_timeout(req_msg, cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, alt_path->packet_life_time)); } if (param->private_data && param->private_data_len) memcpy(req_msg->private_data, param->private_data, param->private_data_len); } static int cm_validate_req_param(struct ib_cm_req_param *param) { /* peer-to-peer not supported */ if (param->peer_to_peer) return -EINVAL; if (!param->primary_path) return -EINVAL; if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC) return -EINVAL; if (param->private_data && param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE) return -EINVAL; if (param->alternate_path && (param->alternate_path->pkey != param->primary_path->pkey || param->alternate_path->mtu != param->primary_path->mtu)) return -EINVAL; return 0; } int ib_send_cm_req(struct ib_cm_id *cm_id, struct ib_cm_req_param *param) { struct cm_id_private *cm_id_priv; struct cm_req_msg *req_msg; unsigned long flags; int ret; ret = cm_validate_req_param(param); if (ret) return ret; /* Verify that we're not in timewait. 
*/ cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_IDLE) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); ret = -EINVAL; goto out; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> id.local_id); if (IS_ERR(cm_id_priv->timewait_info)) { ret = PTR_ERR(cm_id_priv->timewait_info); goto out; } ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); if (ret) goto error1; if (param->alternate_path) { ret = cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); if (ret) goto error1; } cm_id->service_id = param->service_id; cm_id->service_mask = ~cpu_to_be64(0); cm_id_priv->timeout_ms = cm_convert_to_ms( param->primary_path->packet_life_time) * 2 + cm_convert_to_ms( param->remote_cm_response_timeout); cm_id_priv->max_cm_retries = param->max_cm_retries; cm_id_priv->initiator_depth = param->initiator_depth; cm_id_priv->responder_resources = param->responder_resources; cm_id_priv->retry_count = param->retry_count; cm_id_priv->path_mtu = param->primary_path->mtu; cm_id_priv->pkey = param->primary_path->pkey; cm_id_priv->qp_type = param->qp_type; ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg); if (ret) goto error1; req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; cm_format_req(req_msg, cm_id_priv, param); cm_id_priv->tid = req_msg->hdr.tid; cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms; cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT; cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg); cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg); spin_lock_irqsave(&cm_id_priv->lock, flags); ret = ib_post_send_mad(cm_id_priv->msg, NULL); if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); goto error2; } BUG_ON(cm_id->state != IB_CM_IDLE); cm_id->state = IB_CM_REQ_SENT; spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; error2: cm_free_msg(cm_id_priv->msg); error1: 
kfree(cm_id_priv->timewait_info); out: return ret; } EXPORT_SYMBOL(ib_send_cm_req); static int cm_issue_rej(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, enum ib_cm_rej_reason reason, enum cm_msg_response msg_rejected, void *ari, u8 ari_length) { struct ib_mad_send_buf *msg = NULL; struct cm_rej_msg *rej_msg, *rcv_msg; int ret; ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); if (ret) return ret; /* We just need common CM header information. Cast to any message. */ rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad; rej_msg = (struct cm_rej_msg *) msg->mad; cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid); rej_msg->remote_comm_id = rcv_msg->local_comm_id; rej_msg->local_comm_id = rcv_msg->remote_comm_id; cm_rej_set_msg_rejected(rej_msg, msg_rejected); rej_msg->reason = cpu_to_be16(reason); if (ari && ari_length) { cm_rej_set_reject_info_len(rej_msg, ari_length); memcpy(rej_msg->ari, ari, ari_length); } ret = ib_post_send_mad(msg, NULL); if (ret) cm_free_msg(msg); return ret; } static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid, __be32 local_qpn, __be32 remote_qpn) { return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) || ((local_ca_guid == remote_ca_guid) && (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn)))); } static void cm_format_paths_from_req(struct cm_req_msg *req_msg, struct ib_sa_path_rec *primary_path, struct ib_sa_path_rec *alt_path) { memset(primary_path, 0, sizeof *primary_path); primary_path->dgid = req_msg->primary_local_gid; primary_path->sgid = req_msg->primary_remote_gid; primary_path->dlid = req_msg->primary_local_lid; primary_path->slid = req_msg->primary_remote_lid; primary_path->flow_label = cm_req_get_primary_flow_label(req_msg); primary_path->hop_limit = req_msg->primary_hop_limit; primary_path->traffic_class = req_msg->primary_traffic_class; primary_path->reversible = 1; primary_path->pkey = req_msg->pkey; primary_path->sl = cm_req_get_primary_sl(req_msg); 
primary_path->mtu_selector = IB_SA_EQ; primary_path->mtu = cm_req_get_path_mtu(req_msg); primary_path->rate_selector = IB_SA_EQ; primary_path->rate = cm_req_get_primary_packet_rate(req_msg); primary_path->packet_life_time_selector = IB_SA_EQ; primary_path->packet_life_time = cm_req_get_primary_local_ack_timeout(req_msg); primary_path->packet_life_time -= (primary_path->packet_life_time > 0); if (req_msg->alt_local_lid) { memset(alt_path, 0, sizeof *alt_path); alt_path->dgid = req_msg->alt_local_gid; alt_path->sgid = req_msg->alt_remote_gid; alt_path->dlid = req_msg->alt_local_lid; alt_path->slid = req_msg->alt_remote_lid; alt_path->flow_label = cm_req_get_alt_flow_label(req_msg); alt_path->hop_limit = req_msg->alt_hop_limit; alt_path->traffic_class = req_msg->alt_traffic_class; alt_path->reversible = 1; alt_path->pkey = req_msg->pkey; alt_path->sl = cm_req_get_alt_sl(req_msg); alt_path->mtu_selector = IB_SA_EQ; alt_path->mtu = cm_req_get_path_mtu(req_msg); alt_path->rate_selector = IB_SA_EQ; alt_path->rate = cm_req_get_alt_packet_rate(req_msg); alt_path->packet_life_time_selector = IB_SA_EQ; alt_path->packet_life_time = cm_req_get_alt_local_ack_timeout(req_msg); alt_path->packet_life_time -= (alt_path->packet_life_time > 0); } } static void cm_format_req_event(struct cm_work *work, struct cm_id_private *cm_id_priv, struct ib_cm_id *listen_id) { struct cm_req_msg *req_msg; struct ib_cm_req_event_param *param; req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.req_rcvd; param->listen_id = listen_id; param->port = cm_id_priv->av.port->port_num; param->primary_path = &work->path[0]; if (req_msg->alt_local_lid) param->alternate_path = &work->path[1]; else param->alternate_path = NULL; param->remote_ca_guid = req_msg->local_ca_guid; param->remote_qkey = be32_to_cpu(req_msg->local_qkey); param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg)); param->qp_type = cm_req_get_qp_type(req_msg); param->starting_psn = 
be32_to_cpu(cm_req_get_starting_psn(req_msg)); param->responder_resources = cm_req_get_init_depth(req_msg); param->initiator_depth = cm_req_get_resp_res(req_msg); param->local_cm_response_timeout = cm_req_get_remote_resp_timeout(req_msg); param->flow_control = cm_req_get_flow_ctrl(req_msg); param->remote_cm_response_timeout = cm_req_get_local_resp_timeout(req_msg); param->retry_count = cm_req_get_retry_count(req_msg); param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg); param->srq = cm_req_get_srq(req_msg); work->cm_event.private_data = &req_msg->private_data; } static void cm_process_work(struct cm_id_private *cm_id_priv, struct cm_work *work) { int ret; /* We will typically only have the current event to report. */ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); cm_free_work(work); while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) { spin_lock_irq(&cm_id_priv->lock); work = cm_dequeue_work(cm_id_priv); spin_unlock_irq(&cm_id_priv->lock); BUG_ON(!work); ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); cm_free_work(work); } cm_deref_id(cm_id_priv); if (ret) cm_destroy_id(&cm_id_priv->id, ret); } static void cm_format_mra(struct cm_mra_msg *mra_msg, struct cm_id_private *cm_id_priv, enum cm_msg_response msg_mraed, u8 service_timeout, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid); cm_mra_set_msg_mraed(mra_msg, msg_mraed); mra_msg->local_comm_id = cm_id_priv->id.local_id; mra_msg->remote_comm_id = cm_id_priv->id.remote_id; cm_mra_set_service_timeout(mra_msg, service_timeout); if (private_data && private_data_len) memcpy(mra_msg->private_data, private_data, private_data_len); } static void cm_format_rej(struct cm_rej_msg *rej_msg, struct cm_id_private *cm_id_priv, enum ib_cm_rej_reason reason, void *ari, u8 ari_length, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid); 
/* (continuation of cm_format_rej) */
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		/* Peer does not know our local ID yet. */
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

/*
 * Handle a duplicate REQ for an existing connection: resend the MRA if
 * we had MRA'd the REQ, or a stale-connection REJ if the connection is
 * already in TIMEWAIT.  Duplicates in other states are simply dropped.
 */
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}

/*
 * Match a received REQ against duplicates, stale connections, and the
 * listening cm_ids.  On success the listener is returned with an extra
 * reference and the new cm_id is moved to REQ_RCVD; on failure a REJ is
 * issued (or a duplicate handled) and NULL is returned.
 */
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		/* Remote ID already known: route to the existing cm_id. */
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);
out:
	return listen_cm_id_priv;
}

/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!cm_req_get_primary_subnet_local(req_msg)) {
		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
			req_msg->primary_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_primary_sl(req_msg, wc->sl);
		}

		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
			req_msg->primary_remote_lid =
					cpu_to_be16(wc->dlid_path_bits);
	}

	if (!cm_req_get_alt_subnet_local(req_msg)) {
		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
			req_msg->alt_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_alt_sl(req_msg, wc->sl);
		}

		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
			req_msg->alt_remote_lid =
					cpu_to_be16(wc->dlid_path_bits);
	}
}

/*
 * Process a received REQ: create a new cm_id, match it against the
 * listeners, resolve the path(s), populate connection parameters from
 * the message, and report IB_CM_REQ_RECEIVED to the client.
 */
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	/* Inherit the callback and context from the listening cm_id. */
	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		/* Report the GID we would have used in the REJ's ARI. */
		ib_get_cached_gid(work->port->cm_dev->ib_device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	/* (continuation of cm_req_handler) */
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}

/* Format a REP message from the client-supplied reply parameters. */
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg,
				    cm_id_priv->av.port->cm_dev->ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

/*
 * Send a REP in response to a received REQ; only valid from the
 * REQ_RCVD or MRA_REQ_SENT states.  Moves the cm_id to REP_SENT.
 */
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

/* Format an RTU (Ready To Use) message. */
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

/*
 * Send an RTU to complete connection establishment; only valid from the
 * REP_RCVD or MRA_REP_SENT states.  Moves the cm_id to ESTABLISHED.
 */
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	/* Copy the private data up front; it is stored on the cm_id. */
	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

/* Fill in the IB_CM_REP_RECEIVED event parameters from a received REP. */
static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	/* Swapped on purpose: the peer's resources are our depth and v.v. */
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

/*
 * Handle a duplicate REP: resend the RTU if already established, or the
 * MRA if we had MRA'd the REP.  Drops the duplicate otherwise.
 */
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
	/* (continuation of cm_dup_rep_handler) */
			counter[CM_REP_COUNTER]);
	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}

/*
 * Process a received REP: validate state, record remote connection
 * info (rejecting duplicate REPs and stale connections), cancel the
 * outstanding REQ, and report IB_CM_REP_RECEIVED to the client.
 */
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	/* Remote ID is unknown until the REP is processed (hence 0). */
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	/* cm.lock nests inside cm_id_priv->lock here. */
	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		/* Back out the remote-ID insertion made just above. */
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	/* Fold the target's ack delay into each AV's timeout. */
	cm_id_priv->av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->av.timeout - 1);
	cm_id_priv->alt_av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->alt_av.timeout - 1);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:	cm_deref_id(cm_id_priv);
	return ret;
}

/*
 * Deliver a locally generated "connection established" event; cancels
 * any outstanding (REP) MAD for the cm_id.
 */
static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/*
 * Process a received RTU: move the connection to ESTABLISHED and cancel
 * the outstanding REP.  Out-of-state RTUs are counted as duplicates.
 */
static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_RTU_COUNTER]);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/* Format a DREQ (Disconnect Request) message. */
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	/* DREQs use a fresh TID from the DREQ sequence space. */
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}

/*
 * Send a DREQ to tear down an established connection.  On allocation or
 * send failure the cm_id is moved directly into TIMEWAIT.
 */
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

static void
     /* Format a DREP (Disconnect Reply) message. */
     cm_format_drep(struct cm_drep_msg *drep_msg,
		    struct cm_id_private *cm_id_priv,
		    const void *private_data,
		    u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}

/*
 * Send a DREP in response to a received DREQ; only valid from the
 * DREQ_RCVD state.  Enters TIMEWAIT regardless of send success.
 */
int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);

/*
 * Issue a DREP for a DREQ that arrived for an unknown connection, so
 * the peer can finish its disconnect.
 */
static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

/*
 * Process a received DREQ: move the connection to DREQ_RCVD and report
 * it to the client.  Unknown connections get an immediate DREP; repeats
 * in TIMEWAIT resend the DREP directly.
 */
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv) {
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		cm_issue_drep(work->port, work->mad_recv_wc);
		return -EINVAL;
	}

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	/* The DREQ must target the QPN this connection is using. */
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		/* Already disconnected: resend the DREP to the peer. */
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_DREQ_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/*
 * Process a received DREP: enter TIMEWAIT, cancel the outstanding DREQ,
 * and report IB_CM_DREP_RECEIVED to the client.
 */
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/*
 * Send a REJ; valid from any pre-established state.  The cm_id is reset
 * to IDLE (pre-REP states) or moved to TIMEWAIT (post-REP states).
 */
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data &&
	     private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case
IB_CM_MRA_REP_SENT: ret = cm_alloc_msg(cm_id_priv, &msg); if (!ret) cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv, reason, ari, ari_length, private_data, private_data_len); cm_reset_to_idle(cm_id_priv); break; case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: ret = cm_alloc_msg(cm_id_priv, &msg); if (!ret) cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv, reason, ari, ari_length, private_data, private_data_len); cm_enter_timewait(cm_id_priv); break; default: ret = -EINVAL; goto out; } if (ret) goto out; ret = ib_post_send_mad(msg, NULL); if (ret) cm_free_msg(msg); out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_rej); static void cm_format_rej_event(struct cm_work *work) { struct cm_rej_msg *rej_msg; struct ib_cm_rej_event_param *param; rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.rej_rcvd; param->ari = rej_msg->ari; param->ari_length = cm_rej_get_reject_info_len(rej_msg); param->reason = __be16_to_cpu(rej_msg->reason); work->cm_event.private_data = &rej_msg->private_data; } static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) { struct cm_timewait_info *timewait_info; struct cm_id_private *cm_id_priv; __be32 remote_id; remote_id = rej_msg->local_comm_id; if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) { spin_lock_irq(&cm.lock); timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari), remote_id); if (!timewait_info) { spin_unlock_irq(&cm.lock); return NULL; } cm_id_priv = idr_find(&cm.local_id_table, (__force int) (timewait_info->work.local_id ^ cm.random_id_operand)); if (cm_id_priv) { if (cm_id_priv->id.remote_id == remote_id) atomic_inc(&cm_id_priv->refcount); else cm_id_priv = NULL; } spin_unlock_irq(&cm.lock); } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ) cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0); else cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id); return 
cm_id_priv; } static int cm_rej_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_rej_msg *rej_msg; int ret; rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_rejected_id(rej_msg); if (!cm_id_priv) return -EINVAL; cm_format_rej_event(work); spin_lock_irq(&cm_id_priv->lock); switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); /* fall through */ case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN) cm_enter_timewait(cm_id_priv); else cm_reset_to_idle(cm_id_priv); break; case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); /* fall through */ case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: case IB_CM_ESTABLISHED: cm_enter_timewait(cm_id_priv); break; default: spin_unlock_irq(&cm_id_priv->lock); ret = -EINVAL; goto out; } ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); spin_unlock_irq(&cm_id_priv->lock); if (ret) cm_process_work(cm_id_priv, work); else cm_deref_id(cm_id_priv); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } int ib_send_cm_mra(struct ib_cm_id *cm_id, u8 service_timeout, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; enum ib_cm_state cm_state; enum ib_cm_lap_state lap_state; enum cm_msg_response msg_response; void *data; unsigned long flags; int ret; if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE) return -EINVAL; data = cm_copy_private_data(private_data, private_data_len); if (IS_ERR(data)) return PTR_ERR(data); cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); switch(cm_id_priv->id.state) { case IB_CM_REQ_RCVD: cm_state = IB_CM_MRA_REQ_SENT; lap_state = 
			    cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REQ;
		break;
	case IB_CM_REP_RCVD:
		cm_state = IB_CM_MRA_REP_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REP;
		break;
	case IB_CM_ESTABLISHED:
		cm_state = cm_id->state;
		lap_state = IB_CM_MRA_LAP_SENT;
		msg_response = CM_MSG_RESPONSE_OTHER;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}

	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      msg_response, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
	}

	cm_id->state = cm_state;
	cm_id->lap_state = lap_state;
	cm_id_priv->service_timeout = service_timeout;
	/* Stash the private data for replies to duplicate messages. */
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

/* Look up (with a reference) the cm_id an MRA is acknowledging. */
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}

/*
 * Handle a received MRA: extend the timeout of the outstanding REQ, REP
 * or LAP MAD by the peer's service timeout and record the MRA-received
 * state.  Duplicate MRAs only bump the duplicate counter.
 */
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.timeout);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout)) {
			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
				atomic_long_inc(&work->port->
						counter_group[CM_RECV_DUPLICATES].
						counter[CM_MRA_COUNTER]);
			goto out;
		}
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_MRA_REP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_MRA_COUNTER]);
		/* fall through */
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/* Build a LAP (load alternate path) MAD from the given alternate path. */
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       alternate_path->packet_life_time));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data,
		       private_data_len);
}

/*
 * Send a LAP to load an alternate path.  Only valid on an established
 * connection with no LAP exchange already in progress.
 */
int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
	if (ret)
		goto out;
	cm_id_priv->alt_av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->alt_av.timeout - 1);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return
	       ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);

/* Convert a received LAP into an ib_sa_path_rec for the consumer. */
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}

/*
 * Handle a received LAP on an established connection: record the
 * proposed alternate path and deliver the event.  Duplicate LAPs are
 * answered with the previously stored MRA.
 */
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_LAP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:
	spin_unlock_irq(&cm_id_priv->lock);
deref:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/* Build an APR (alternate path response) MAD. */
static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}

/*
 * Send an APR in reply to a received LAP, completing the LAP exchange
 * and returning the lap state to idle.
 */
int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

/*
 * Handle a received APR: complete the LAP exchange by cancelling the
 * outstanding LAP MAD and returning the lap state to idle.
 */
static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply.
				 */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/*
 * Handle expiry of the timewait period: move the cm_id from TIMEWAIT
 * to IDLE (if it still refers to the same remote QP) and deliver the
 * timewait-exit event.
 */
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/* Build a SIDR REQ (service ID resolution request) MAD. */
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = param->path->pkey;
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}

/*
 * Send a SIDR REQ to resolve a service ID to a QP.  Only valid on an
 * idle cm_id.
 */
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

/* Translate a received SIDR REQ into the consumer-visible event fields. */
static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}
/*
 * Handle a received SIDR REQ: create a new cm_id, record it in the
 * remote SIDR table for duplicate detection, and match it against a
 * listening service; unmatched requests are rejected.
 */
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irq(&cm.lock);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_SIDR_REQ_COUNTER]);
		goto out; /* Duplicate message. */
	}
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id,
					sidr_req_msg->private_data);
	if (!cur_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
		goto out; /* No match.
			   */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irq(&cm.lock);

	/* Inherit handler/context from the matching listener. */
	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

/* Build a SIDR REP MAD from the consumer-supplied parameters. */
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

/*
 * Send a SIDR REP in reply to a received SIDR REQ, return the cm_id to
 * IDLE and remove it from the remote SIDR table.
 */
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;
error:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

/* Translate a received SIDR REP into the consumer-visible event fields. */
static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}

/*
 * Handle a received SIDR REP: cancel the outstanding SIDR REQ MAD and
 * deliver the reply to the consumer.
 */
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/*
 * Handle a failed send completion for a CM MAD: translate the failure
 * into the matching *_ERROR event for the cm_id's current state.
 */
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response.
	 */
	spin_lock_irq(&cm_id_priv->lock);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irq(&cm_id_priv->lock);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_free_msg(msg);
}

/*
 * MAD-agent send completion callback: update xmit counters and, on
 * error, route the failure to cm_process_send_error().
 */
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
	struct cm_port *port;
	u16 attr_index;

	port = mad_agent->context;
	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;

	/*
	 * If the send was in response to a received message (context[0] is not
	 * set to a cm_id), and is not a REJ, then it is a send that was
	 * manually retried.
	 */
	if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
		msg->retries = 1;

	atomic_long_add(1 + msg->retries,
			&port->counter_group[CM_XMIT].counter[attr_index]);
	if (msg->retries)
		atomic_long_add(msg->retries,
				&port->counter_group[CM_XMIT_RETRIES].
				counter[attr_index]);

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}

/* Workqueue entry point: dispatch a queued CM event to its handler. */
static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

/*
 * Mark the connection established on behalf of the consumer.  The event
 * is delivered via a deferred work item rather than by calling the
 * handler directly (see comment below).
 */
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_delayed_work(cm.wq, &work->work, 0);
out:
	return ret;
}

/* Switch the primary path to the previously loaded alternate path. */
static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		cm_id_priv->av = cm_id_priv->alt_av;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

/* Notify the CM of a QP event (comm established / path migrated). */
int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);

/*
 * MAD-agent receive callback: map the MAD attribute ID to a CM event,
 * bump the receive counter, and queue a work item (sized for however
 * many paths the message can carry) for cm_work_handler.
 */
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_port *port = mad_agent->context;
	struct cm_work *work;
	enum ib_cm_event_type event;
	u16 attr_id;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		/* A REQ carries a second path when an alternate LID is set. */
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
	atomic_long_inc(&port->counter_group[CM_RECV].
			counter[attr_id - CM_ATTR_ID_OFFSET]);

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = port;
	queue_delayed_work(cm.wq, &work->work, 0);
}

/* Fill QP attributes for the RESET->INIT transition. */
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

/* Fill QP attributes for the INIT->RTR transition. */
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

/*
 * Fill QP attributes for the RTR->RTS transition, or re-arm path
 * migration once a LAP exchange has run.
 */
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			if (cm_id_priv->qp_type == IB_QPT_RC) {
				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
						 IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->timeout = cm_id_priv->av.timeout;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic =
						cm_id_priv->initiator_depth;
			}
			if (cm_id_priv->alt_av.ah_attr.dlid) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

/* Fill QP attributes for the requested qp_state transition. */
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

/* Cache the local CA's ack delay; 0 means fall back to packet life time. */
static void cm_get_ack_delay(struct cm_device *cm_dev)
{
	struct ib_device_attr attr;

	if (ib_query_device(cm_dev->ib_device, &attr))
		cm_dev->ack_delay = 0; /* acks will rely on packet life time */
	else
		cm_dev->ack_delay = attr.local_ca_ack_delay;
}

/* sysfs show callback for a single CM counter. */
static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sprintf(buf, "%ld\n",
		       atomic_long_read(&group->counter[cm_attr->index]));
}

static const struct sysfs_ops cm_counter_ops = {
	.show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
	.sysfs_ops = &cm_counter_ops,
	.default_attrs = cm_counter_default_attrs
};

/* kobject release for a cm_port; frees the port allocation. */
static void cm_release_port_obj(struct kobject *obj)
{
	struct cm_port *cm_port;

	cm_port = container_of(obj, struct cm_port, port_obj);
	kfree(cm_port);
}

static struct kobj_type cm_port_obj_type = {
	.release = cm_release_port_obj
};

struct class cm_class = {
	.name =
"infiniband_cm", }; EXPORT_SYMBOL(cm_class); static int cm_create_port_fs(struct cm_port *port) { int i, ret; ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type, &port->cm_dev->device->kobj, "%d", port->port_num); if (ret) { kfree(port); return ret; } for (i = 0; i < CM_COUNTER_GROUPS; i++) { ret = kobject_init_and_add(&port->counter_group[i].obj, &cm_counter_obj_type, &port->port_obj, "%s", counter_group_names[i]); if (ret) goto error; } return 0; error: while (i--) kobject_put(&port->counter_group[i].obj); kobject_put(&port->port_obj); return ret; } static void cm_remove_port_fs(struct cm_port *port) { int i; for (i = 0; i < CM_COUNTER_GROUPS; i++) kobject_put(&port->counter_group[i].obj); kobject_put(&port->port_obj); } static void cm_add_one(struct ib_device *ib_device) { struct cm_device *cm_dev; struct cm_port *port; struct ib_mad_reg_req reg_req = { .mgmt_class = IB_MGMT_CLASS_CM, .mgmt_class_version = IB_CM_CLASS_VERSION }; struct ib_port_modify port_modify = { .set_port_cap_mask = IB_PORT_CM_SUP }; unsigned long flags; int ret; u8 i; if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB) return; cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) * ib_device->phys_port_cnt, GFP_KERNEL); if (!cm_dev) return; cm_dev->ib_device = ib_device; cm_get_ack_delay(cm_dev); cm_dev->device = device_create(&cm_class, &ib_device->dev, MKDEV(0, 0), NULL, "%s", ib_device->name); if (IS_ERR(cm_dev->device)) { kfree(cm_dev); return; } set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); for (i = 1; i <= ib_device->phys_port_cnt; i++) { port = kzalloc(sizeof *port, GFP_KERNEL); if (!port) goto error1; cm_dev->port[i-1] = port; port->cm_dev = cm_dev; port->port_num = i; ret = cm_create_port_fs(port); if (ret) goto error1; port->mad_agent = ib_register_mad_agent(ib_device, i, IB_QPT_GSI, &reg_req, 0, cm_send_handler, cm_recv_handler, port); if (IS_ERR(port->mad_agent)) goto error2; ret = ib_modify_port(ib_device, i, 0, &port_modify); if (ret) goto 
error3; } ib_set_client_data(ib_device, &cm_client, cm_dev); write_lock_irqsave(&cm.device_lock, flags); list_add_tail(&cm_dev->list, &cm.device_list); write_unlock_irqrestore(&cm.device_lock, flags); return; error3: ib_unregister_mad_agent(port->mad_agent); error2: cm_remove_port_fs(port); error1: port_modify.set_port_cap_mask = 0; port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; while (--i) { port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); ib_unregister_mad_agent(port->mad_agent); cm_remove_port_fs(port); } device_unregister(cm_dev->device); kfree(cm_dev); } static void cm_remove_one(struct ib_device *ib_device) { struct cm_device *cm_dev; struct cm_port *port; struct ib_port_modify port_modify = { .clr_port_cap_mask = IB_PORT_CM_SUP }; unsigned long flags; int i; cm_dev = ib_get_client_data(ib_device, &cm_client); if (!cm_dev) return; write_lock_irqsave(&cm.device_lock, flags); list_del(&cm_dev->list); write_unlock_irqrestore(&cm.device_lock, flags); for (i = 1; i <= ib_device->phys_port_cnt; i++) { port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); ib_unregister_mad_agent(port->mad_agent); flush_workqueue(cm.wq); cm_remove_port_fs(port); } device_unregister(cm_dev->device); kfree(cm_dev); } static int __init ib_cm_init(void) { int ret; memset(&cm, 0, sizeof cm); INIT_LIST_HEAD(&cm.device_list); rwlock_init(&cm.device_lock); spin_lock_init(&cm.lock); cm.listen_service_table = RB_ROOT; cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); cm.remote_id_table = RB_ROOT; cm.remote_qp_table = RB_ROOT; cm.remote_sidr_table = RB_ROOT; idr_init(&cm.local_id_table); get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand); idr_pre_get(&cm.local_id_table, GFP_KERNEL); INIT_LIST_HEAD(&cm.timewait_list); ret = class_register(&cm_class); if (ret) return -ENOMEM; cm.wq = create_workqueue("ib_cm"); if (!cm.wq) { ret = -ENOMEM; goto error1; } ret = ib_register_client(&cm_client); if (ret) 
goto error2; return 0; error2: destroy_workqueue(cm.wq); error1: class_unregister(&cm_class); return ret; } static void __exit ib_cm_cleanup(void) { struct cm_timewait_info *timewait_info, *tmp; spin_lock_irq(&cm.lock); list_for_each_entry(timewait_info, &cm.timewait_list, list) cancel_delayed_work(&timewait_info->work.work); spin_unlock_irq(&cm.lock); ib_unregister_client(&cm_client); destroy_workqueue(cm.wq); list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) { list_del(&timewait_info->list); kfree(timewait_info); } class_unregister(&cm_class); idr_destroy(&cm.local_id_table); } module_init(ib_cm_init); module_exit(ib_cm_cleanup);
gpl-2.0
delafer/YP-GI1CW
drivers/net/sc92031.c
848
40960
/* Silan SC92031 PCI Fast Ethernet Adapter driver * * Based on vendor drivers: * Silan Fast Ethernet Netcard Driver: * MODULE_AUTHOR ("gaoyonghong"); * MODULE_DESCRIPTION ("SILAN Fast Ethernet driver"); * MODULE_LICENSE("GPL"); * 8139D Fast Ethernet driver: * (C) 2002 by gaoyonghong * MODULE_AUTHOR ("gaoyonghong"); * MODULE_DESCRIPTION ("Rsltek 8139D PCI Fast Ethernet Adapter driver"); * MODULE_LICENSE("GPL"); * Both are almost identical and seem to be based on pci-skeleton.c * * Rewritten for 2.6 by Cesar Eduardo Barros * * A datasheet for this chip can be found at * http://www.silan.com.cn/english/products/pdf/SC92031AY.pdf */ /* Note about set_mac_address: I don't know how to change the hardware * matching, so you need to enable IFF_PROMISC when using it. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <asm/irq.h> #define SC92031_NAME "sc92031" /* BAR 0 is MMIO, BAR 1 is PIO */ #ifndef SC92031_USE_BAR #define SC92031_USE_BAR 0 #endif /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */ static int multicast_filter_limit = 64; module_param(multicast_filter_limit, int, 0); MODULE_PARM_DESC(multicast_filter_limit, "Maximum number of filtered multicast addresses"); static int media; module_param(media, int, 0); MODULE_PARM_DESC(media, "Media type (0x00 = autodetect," " 0x01 = 10M half, 0x02 = 10M full," " 0x04 = 100M half, 0x08 = 100M full)"); /* Size of the in-memory receive ring. */ #define RX_BUF_LEN_IDX 3 /* 0==8K, 1==16K, 2==32K, 3==64K ,4==128K*/ #define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX) /* Number of Tx descriptor registers. */ #define NUM_TX_DESC 4 /* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/ #define MAX_ETH_FRAME_SIZE 1536 /* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). 
*/ #define TX_BUF_SIZE MAX_ETH_FRAME_SIZE #define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC) /* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */ #define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */ /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (4*HZ) #define SILAN_STATS_NUM 2 /* number of ETHTOOL_GSTATS */ /* media options */ #define AUTOSELECT 0x00 #define M10_HALF 0x01 #define M10_FULL 0x02 #define M100_HALF 0x04 #define M100_FULL 0x08 /* Symbolic offsets to registers. */ enum silan_registers { Config0 = 0x00, // Config0 Config1 = 0x04, // Config1 RxBufWPtr = 0x08, // Rx buffer writer poiter IntrStatus = 0x0C, // Interrupt status IntrMask = 0x10, // Interrupt mask RxbufAddr = 0x14, // Rx buffer start address RxBufRPtr = 0x18, // Rx buffer read pointer Txstatusall = 0x1C, // Transmit status of all descriptors TxStatus0 = 0x20, // Transmit status (Four 32bit registers). TxAddr0 = 0x30, // Tx descriptors (also four 32bit). RxConfig = 0x40, // Rx configuration MAC0 = 0x44, // Ethernet hardware address. MAR0 = 0x4C, // Multicast filter. 
RxStatus0 = 0x54, // Rx status TxConfig = 0x5C, // Tx configuration PhyCtrl = 0x60, // physical control FlowCtrlConfig = 0x64, // flow control Miicmd0 = 0x68, // Mii command0 register Miicmd1 = 0x6C, // Mii command1 register Miistatus = 0x70, // Mii status register Timercnt = 0x74, // Timer counter register TimerIntr = 0x78, // Timer interrupt register PMConfig = 0x7C, // Power Manager configuration CRC0 = 0x80, // Power Manager CRC ( Two 32bit regisers) Wakeup0 = 0x88, // power Manager wakeup( Eight 64bit regiser) LSBCRC0 = 0xC8, // power Manager LSBCRC(Two 32bit regiser) TestD0 = 0xD0, TestD4 = 0xD4, TestD8 = 0xD8, }; #define MII_BMCR 0 // Basic mode control register #define MII_BMSR 1 // Basic mode status register #define MII_JAB 16 #define MII_OutputStatus 24 #define BMCR_FULLDPLX 0x0100 // Full duplex #define BMCR_ANRESTART 0x0200 // Auto negotiation restart #define BMCR_ANENABLE 0x1000 // Enable auto negotiation #define BMCR_SPEED100 0x2000 // Select 100Mbps #define BMSR_LSTATUS 0x0004 // Link status #define PHY_16_JAB_ENB 0x1000 #define PHY_16_PORT_ENB 0x1 enum IntrStatusBits { LinkFail = 0x80000000, LinkOK = 0x40000000, TimeOut = 0x20000000, RxOverflow = 0x0040, RxOK = 0x0020, TxOK = 0x0001, IntrBits = LinkFail|LinkOK|TimeOut|RxOverflow|RxOK|TxOK, }; enum TxStatusBits { TxCarrierLost = 0x20000000, TxAborted = 0x10000000, TxOutOfWindow = 0x08000000, TxNccShift = 22, EarlyTxThresShift = 16, TxStatOK = 0x8000, TxUnderrun = 0x4000, TxOwn = 0x2000, }; enum RxStatusBits { RxStatesOK = 0x80000, RxBadAlign = 0x40000, RxHugeFrame = 0x20000, RxSmallFrame = 0x10000, RxCRCOK = 0x8000, RxCrlFrame = 0x4000, Rx_Broadcast = 0x2000, Rx_Multicast = 0x1000, RxAddrMatch = 0x0800, MiiErr = 0x0400, }; enum RxConfigBits { RxFullDx = 0x80000000, RxEnb = 0x40000000, RxSmall = 0x20000000, RxHuge = 0x10000000, RxErr = 0x08000000, RxAllphys = 0x04000000, RxMulticast = 0x02000000, RxBroadcast = 0x01000000, RxLoopBack = (1 << 23) | (1 << 22), LowThresholdShift = 12, HighThresholdShift = 
2, }; enum TxConfigBits { TxFullDx = 0x80000000, TxEnb = 0x40000000, TxEnbPad = 0x20000000, TxEnbHuge = 0x10000000, TxEnbFCS = 0x08000000, TxNoBackOff = 0x04000000, TxEnbPrem = 0x02000000, TxCareLostCrs = 0x1000000, TxExdCollNum = 0xf00000, TxDataRate = 0x80000, }; enum PhyCtrlconfigbits { PhyCtrlAne = 0x80000000, PhyCtrlSpd100 = 0x40000000, PhyCtrlSpd10 = 0x20000000, PhyCtrlPhyBaseAddr = 0x1f000000, PhyCtrlDux = 0x800000, PhyCtrlReset = 0x400000, }; enum FlowCtrlConfigBits { FlowCtrlFullDX = 0x80000000, FlowCtrlEnb = 0x40000000, }; enum Config0Bits { Cfg0_Reset = 0x80000000, Cfg0_Anaoff = 0x40000000, Cfg0_LDPS = 0x20000000, }; enum Config1Bits { Cfg1_EarlyRx = 1 << 31, Cfg1_EarlyTx = 1 << 30, //rx buffer size Cfg1_Rcv8K = 0x0, Cfg1_Rcv16K = 0x1, Cfg1_Rcv32K = 0x3, Cfg1_Rcv64K = 0x7, Cfg1_Rcv128K = 0xf, }; enum MiiCmd0Bits { Mii_Divider = 0x20000000, Mii_WRITE = 0x400000, Mii_READ = 0x200000, Mii_SCAN = 0x100000, Mii_Tamod = 0x80000, Mii_Drvmod = 0x40000, Mii_mdc = 0x20000, Mii_mdoen = 0x10000, Mii_mdo = 0x8000, Mii_mdi = 0x4000, }; enum MiiStatusBits { Mii_StatusBusy = 0x80000000, }; enum PMConfigBits { PM_Enable = 1 << 31, PM_LongWF = 1 << 30, PM_Magic = 1 << 29, PM_LANWake = 1 << 28, PM_LWPTN = (1 << 27 | 1<< 26), PM_LinkUp = 1 << 25, PM_WakeUp = 1 << 24, }; /* Locking rules: * priv->lock protects most of the fields of priv and most of the * hardware registers. It does not have to protect against softirqs * between sc92031_disable_interrupts and sc92031_enable_interrupts; * it also does not need to be used in ->open and ->stop while the * device interrupts are off. * Not having to protect against softirqs is very useful due to heavy * use of mdelay() at _sc92031_reset. * Functions prefixed with _sc92031_ must be called with the lock held; * functions prefixed with sc92031_ must be called without the lock held. * Use mmiowb() before unlocking if the hardware was written to. 
*/ /* Locking rules for the interrupt: * - the interrupt and the tasklet never run at the same time * - neither run between sc92031_disable_interrupts and * sc92031_enable_interrupt */ struct sc92031_priv { spinlock_t lock; /* iomap.h cookie */ void __iomem *port_base; /* pci device structure */ struct pci_dev *pdev; /* tasklet */ struct tasklet_struct tasklet; /* CPU address of rx ring */ void *rx_ring; /* PCI address of rx ring */ dma_addr_t rx_ring_dma_addr; /* PCI address of rx ring read pointer */ dma_addr_t rx_ring_tail; /* tx ring write index */ unsigned tx_head; /* tx ring read index */ unsigned tx_tail; /* CPU address of tx bounce buffer */ void *tx_bufs; /* PCI address of tx bounce buffer */ dma_addr_t tx_bufs_dma_addr; /* copies of some hardware registers */ u32 intr_status; atomic_t intr_mask; u32 rx_config; u32 tx_config; u32 pm_config; /* copy of some flags from dev->flags */ unsigned int mc_flags; /* for ETHTOOL_GSTATS */ u64 tx_timeouts; u64 rx_loss; /* for dev->get_stats */ long rx_value; }; /* I don't know which registers can be safely read; however, I can guess * MAC0 is one of them. 
*/ static inline void _sc92031_dummy_read(void __iomem *port_base) { ioread32(port_base + MAC0); } static u32 _sc92031_mii_wait(void __iomem *port_base) { u32 mii_status; do { udelay(10); mii_status = ioread32(port_base + Miistatus); } while (mii_status & Mii_StatusBusy); return mii_status; } static u32 _sc92031_mii_cmd(void __iomem *port_base, u32 cmd0, u32 cmd1) { iowrite32(Mii_Divider, port_base + Miicmd0); _sc92031_mii_wait(port_base); iowrite32(cmd1, port_base + Miicmd1); iowrite32(Mii_Divider | cmd0, port_base + Miicmd0); return _sc92031_mii_wait(port_base); } static void _sc92031_mii_scan(void __iomem *port_base) { _sc92031_mii_cmd(port_base, Mii_SCAN, 0x1 << 6); } static u16 _sc92031_mii_read(void __iomem *port_base, unsigned reg) { return _sc92031_mii_cmd(port_base, Mii_READ, reg << 6) >> 13; } static void _sc92031_mii_write(void __iomem *port_base, unsigned reg, u16 val) { _sc92031_mii_cmd(port_base, Mii_WRITE, (reg << 6) | ((u32)val << 11)); } static void sc92031_disable_interrupts(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; /* tell the tasklet/interrupt not to enable interrupts */ atomic_set(&priv->intr_mask, 0); wmb(); /* stop interrupts */ iowrite32(0, port_base + IntrMask); _sc92031_dummy_read(port_base); mmiowb(); /* wait for any concurrent interrupt/tasklet to finish */ synchronize_irq(dev->irq); tasklet_disable(&priv->tasklet); } static void sc92031_enable_interrupts(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; tasklet_enable(&priv->tasklet); atomic_set(&priv->intr_mask, IntrBits); wmb(); iowrite32(IntrBits, port_base + IntrMask); mmiowb(); } static void _sc92031_disable_tx_rx(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; priv->rx_config &= ~RxEnb; priv->tx_config &= ~TxEnb; iowrite32(priv->rx_config, port_base + RxConfig); 
iowrite32(priv->tx_config, port_base + TxConfig); } static void _sc92031_enable_tx_rx(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; priv->rx_config |= RxEnb; priv->tx_config |= TxEnb; iowrite32(priv->rx_config, port_base + RxConfig); iowrite32(priv->tx_config, port_base + TxConfig); } static void _sc92031_tx_clear(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); while (priv->tx_head - priv->tx_tail > 0) { priv->tx_tail++; dev->stats.tx_dropped++; } priv->tx_head = priv->tx_tail = 0; } static void _sc92031_set_mar(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; u32 mar0 = 0, mar1 = 0; if ((dev->flags & IFF_PROMISC) || netdev_mc_count(dev) > multicast_filter_limit || (dev->flags & IFF_ALLMULTI)) mar0 = mar1 = 0xffffffff; else if (dev->flags & IFF_MULTICAST) { struct netdev_hw_addr *ha; netdev_for_each_mc_addr(ha, dev) { u32 crc; unsigned bit = 0; crc = ~ether_crc(ETH_ALEN, ha->addr); crc >>= 24; if (crc & 0x01) bit |= 0x02; if (crc & 0x02) bit |= 0x01; if (crc & 0x10) bit |= 0x20; if (crc & 0x20) bit |= 0x10; if (crc & 0x40) bit |= 0x08; if (crc & 0x80) bit |= 0x04; if (bit > 31) mar0 |= 0x1 << (bit - 32); else mar1 |= 0x1 << bit; } } iowrite32(mar0, port_base + MAR0); iowrite32(mar1, port_base + MAR0 + 4); } static void _sc92031_set_rx_config(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; unsigned int old_mc_flags; u32 rx_config_bits = 0; old_mc_flags = priv->mc_flags; if (dev->flags & IFF_PROMISC) rx_config_bits |= RxSmall | RxHuge | RxErr | RxBroadcast | RxMulticast | RxAllphys; if (dev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) rx_config_bits |= RxMulticast; if (dev->flags & IFF_BROADCAST) rx_config_bits |= RxBroadcast; priv->rx_config &= ~(RxSmall | RxHuge | RxErr | RxBroadcast | RxMulticast | RxAllphys); priv->rx_config |= rx_config_bits; 
priv->mc_flags = dev->flags & (IFF_PROMISC | IFF_ALLMULTI | IFF_MULTICAST | IFF_BROADCAST); if (netif_carrier_ok(dev) && priv->mc_flags != old_mc_flags) iowrite32(priv->rx_config, port_base + RxConfig); } static bool _sc92031_check_media(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; u16 bmsr; bmsr = _sc92031_mii_read(port_base, MII_BMSR); rmb(); if (bmsr & BMSR_LSTATUS) { bool speed_100, duplex_full; u32 flow_ctrl_config = 0; u16 output_status = _sc92031_mii_read(port_base, MII_OutputStatus); _sc92031_mii_scan(port_base); speed_100 = output_status & 0x2; duplex_full = output_status & 0x4; /* Initial Tx/Rx configuration */ priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift); priv->tx_config = 0x48800000; /* NOTE: vendor driver had dead code here to enable tx padding */ if (!speed_100) priv->tx_config |= 0x80000; // configure rx mode _sc92031_set_rx_config(dev); if (duplex_full) { priv->rx_config |= RxFullDx; priv->tx_config |= TxFullDx; flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb; } else { priv->rx_config &= ~RxFullDx; priv->tx_config &= ~TxFullDx; } _sc92031_set_mar(dev); _sc92031_set_rx_config(dev); _sc92031_enable_tx_rx(dev); iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig); netif_carrier_on(dev); if (printk_ratelimit()) printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n", dev->name, speed_100 ? "100" : "10", duplex_full ? 
"full" : "half"); return true; } else { _sc92031_mii_scan(port_base); netif_carrier_off(dev); _sc92031_disable_tx_rx(dev); if (printk_ratelimit()) printk(KERN_INFO "%s: link down\n", dev->name); return false; } } static void _sc92031_phy_reset(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; u32 phy_ctrl; phy_ctrl = ioread32(port_base + PhyCtrl); phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10); phy_ctrl |= PhyCtrlAne | PhyCtrlReset; switch (media) { default: case AUTOSELECT: phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10; break; case M10_HALF: phy_ctrl |= PhyCtrlSpd10; break; case M10_FULL: phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10; break; case M100_HALF: phy_ctrl |= PhyCtrlSpd100; break; case M100_FULL: phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100; break; } iowrite32(phy_ctrl, port_base + PhyCtrl); mdelay(10); phy_ctrl &= ~PhyCtrlReset; iowrite32(phy_ctrl, port_base + PhyCtrl); mdelay(1); _sc92031_mii_write(port_base, MII_JAB, PHY_16_JAB_ENB | PHY_16_PORT_ENB); _sc92031_mii_scan(port_base); netif_carrier_off(dev); netif_stop_queue(dev); } static void _sc92031_reset(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; /* disable PM */ iowrite32(0, port_base + PMConfig); /* soft reset the chip */ iowrite32(Cfg0_Reset, port_base + Config0); mdelay(200); iowrite32(0, port_base + Config0); mdelay(10); /* disable interrupts */ iowrite32(0, port_base + IntrMask); /* clear multicast address */ iowrite32(0, port_base + MAR0); iowrite32(0, port_base + MAR0 + 4); /* init rx ring */ iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr); priv->rx_ring_tail = priv->rx_ring_dma_addr; /* init tx ring */ _sc92031_tx_clear(dev); /* clear old register values */ priv->intr_status = 0; atomic_set(&priv->intr_mask, 0); priv->rx_config = 0; priv->tx_config = 0; priv->mc_flags = 0; /* configure rx buffer size */ /* NOTE: vendor driver had dead code here 
to enable early tx/rx */ iowrite32(Cfg1_Rcv64K, port_base + Config1); _sc92031_phy_reset(dev); _sc92031_check_media(dev); /* calculate rx fifo overflow */ priv->rx_value = 0; /* enable PM */ iowrite32(priv->pm_config, port_base + PMConfig); /* clear intr register */ ioread32(port_base + IntrStatus); } static void _sc92031_tx_tasklet(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; unsigned old_tx_tail; unsigned entry; u32 tx_status; old_tx_tail = priv->tx_tail; while (priv->tx_head - priv->tx_tail > 0) { entry = priv->tx_tail % NUM_TX_DESC; tx_status = ioread32(port_base + TxStatus0 + entry * 4); if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted))) break; priv->tx_tail++; if (tx_status & TxStatOK) { dev->stats.tx_bytes += tx_status & 0x1fff; dev->stats.tx_packets++; /* Note: TxCarrierLost is always asserted at 100mbps. */ dev->stats.collisions += (tx_status >> 22) & 0xf; } if (tx_status & (TxOutOfWindow | TxAborted)) { dev->stats.tx_errors++; if (tx_status & TxAborted) dev->stats.tx_aborted_errors++; if (tx_status & TxCarrierLost) dev->stats.tx_carrier_errors++; if (tx_status & TxOutOfWindow) dev->stats.tx_window_errors++; } if (tx_status & TxUnderrun) dev->stats.tx_fifo_errors++; } if (priv->tx_tail != old_tx_tail) if (netif_queue_stopped(dev)) netif_wake_queue(dev); } static void _sc92031_rx_tasklet_error(struct net_device *dev, u32 rx_status, unsigned rx_size) { if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) { dev->stats.rx_errors++; dev->stats.rx_length_errors++; } if (!(rx_status & RxStatesOK)) { dev->stats.rx_errors++; if (rx_status & (RxHugeFrame | RxSmallFrame)) dev->stats.rx_length_errors++; if (rx_status & RxBadAlign) dev->stats.rx_frame_errors++; if (!(rx_status & RxCRCOK)) dev->stats.rx_crc_errors++; } else { struct sc92031_priv *priv = netdev_priv(dev); priv->rx_loss++; } } static void _sc92031_rx_tasklet(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); 
void __iomem *port_base = priv->port_base; dma_addr_t rx_ring_head; unsigned rx_len; unsigned rx_ring_offset; void *rx_ring = priv->rx_ring; rx_ring_head = ioread32(port_base + RxBufWPtr); rmb(); /* rx_ring_head is only 17 bits in the RxBufWPtr register. * we need to change it to 32 bits physical address */ rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1); rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1); if (rx_ring_head < priv->rx_ring_dma_addr) rx_ring_head += RX_BUF_LEN; if (rx_ring_head >= priv->rx_ring_tail) rx_len = rx_ring_head - priv->rx_ring_tail; else rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head); if (!rx_len) return; if (unlikely(rx_len > RX_BUF_LEN)) { if (printk_ratelimit()) printk(KERN_ERR "%s: rx packets length > rx buffer\n", dev->name); return; } rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN; while (rx_len) { u32 rx_status; unsigned rx_size, rx_size_align, pkt_size; struct sk_buff *skb; rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset)); rmb(); rx_size = rx_status >> 20; rx_size_align = (rx_size + 3) & ~3; // for 4 bytes aligned pkt_size = rx_size - 4; // Omit the four octet CRC from the length. 
rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN; if (unlikely(rx_status == 0 || rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16 || !(rx_status & RxStatesOK))) { _sc92031_rx_tasklet_error(dev, rx_status, rx_size); break; } if (unlikely(rx_size_align + 4 > rx_len)) { if (printk_ratelimit()) printk(KERN_ERR "%s: rx_len is too small\n", dev->name); break; } rx_len -= rx_size_align + 4; skb = netdev_alloc_skb_ip_align(dev, pkt_size); if (unlikely(!skb)) { if (printk_ratelimit()) printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n", dev->name, pkt_size); goto next; } if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) { memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset), rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset); memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)), rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset)); } else { memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size); } skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_bytes += pkt_size; dev->stats.rx_packets++; if (rx_status & Rx_Multicast) dev->stats.multicast++; next: rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN; } mb(); priv->rx_ring_tail = rx_ring_head; iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr); } static void _sc92031_link_tasklet(struct net_device *dev) { if (_sc92031_check_media(dev)) netif_wake_queue(dev); else { netif_stop_queue(dev); dev->stats.tx_carrier_errors++; } } static void sc92031_tasklet(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; u32 intr_status, intr_mask; intr_status = priv->intr_status; spin_lock(&priv->lock); if (unlikely(!netif_running(dev))) goto out; if (intr_status & TxOK) _sc92031_tx_tasklet(dev); if (intr_status & RxOK) _sc92031_rx_tasklet(dev); if (intr_status & RxOverflow) dev->stats.rx_errors++; if (intr_status & TimeOut) { dev->stats.rx_errors++; 
dev->stats.rx_length_errors++; } if (intr_status & (LinkFail | LinkOK)) _sc92031_link_tasklet(dev); out: intr_mask = atomic_read(&priv->intr_mask); rmb(); iowrite32(intr_mask, port_base + IntrMask); mmiowb(); spin_unlock(&priv->lock); } static irqreturn_t sc92031_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; u32 intr_status, intr_mask; /* mask interrupts before clearing IntrStatus */ iowrite32(0, port_base + IntrMask); _sc92031_dummy_read(port_base); intr_status = ioread32(port_base + IntrStatus); if (unlikely(intr_status == 0xffffffff)) return IRQ_NONE; // hardware has gone missing intr_status &= IntrBits; if (!intr_status) goto out_none; priv->intr_status = intr_status; tasklet_schedule(&priv->tasklet); return IRQ_HANDLED; out_none: intr_mask = atomic_read(&priv->intr_mask); rmb(); iowrite32(intr_mask, port_base + IntrMask); mmiowb(); return IRQ_NONE; } static struct net_device_stats *sc92031_get_stats(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; // FIXME I do not understand what is this trying to do. if (netif_running(dev)) { int temp; spin_lock_bh(&priv->lock); /* Update the error count. 
*/ temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff; if (temp == 0xffff) { priv->rx_value += temp; dev->stats.rx_fifo_errors = priv->rx_value; } else dev->stats.rx_fifo_errors = temp + priv->rx_value; spin_unlock_bh(&priv->lock); } return &dev->stats; } static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; unsigned len; unsigned entry; u32 tx_status; if (unlikely(skb->len > TX_BUF_SIZE)) { dev->stats.tx_dropped++; goto out; } spin_lock(&priv->lock); if (unlikely(!netif_carrier_ok(dev))) { dev->stats.tx_dropped++; goto out_unlock; } BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC); entry = priv->tx_head++ % NUM_TX_DESC; skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); len = skb->len; if (len < ETH_ZLEN) { memset(priv->tx_bufs + entry * TX_BUF_SIZE + len, 0, ETH_ZLEN - len); len = ETH_ZLEN; } wmb(); if (len < 100) tx_status = len; else if (len < 300) tx_status = 0x30000 | len; else tx_status = 0x50000 | len; iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE, port_base + TxAddr0 + entry * 4); iowrite32(tx_status, port_base + TxStatus0 + entry * 4); mmiowb(); if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC) netif_stop_queue(dev); out_unlock: spin_unlock(&priv->lock); out: dev_kfree_skb(skb); return NETDEV_TX_OK; } static int sc92031_open(struct net_device *dev) { int err; struct sc92031_priv *priv = netdev_priv(dev); struct pci_dev *pdev = priv->pdev; priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN, &priv->rx_ring_dma_addr); if (unlikely(!priv->rx_ring)) { err = -ENOMEM; goto out_alloc_rx_ring; } priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN, &priv->tx_bufs_dma_addr); if (unlikely(!priv->tx_bufs)) { err = -ENOMEM; goto out_alloc_tx_bufs; } priv->tx_head = priv->tx_tail = 0; err = request_irq(pdev->irq, sc92031_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(err < 0)) goto out_request_irq; 
priv->pm_config = 0; /* Interrupts already disabled by sc92031_stop or sc92031_probe */ spin_lock_bh(&priv->lock); _sc92031_reset(dev); mmiowb(); spin_unlock_bh(&priv->lock); sc92031_enable_interrupts(dev); if (netif_carrier_ok(dev)) netif_start_queue(dev); else netif_tx_disable(dev); return 0; out_request_irq: pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs, priv->tx_bufs_dma_addr); out_alloc_tx_bufs: pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring, priv->rx_ring_dma_addr); out_alloc_rx_ring: return err; } static int sc92031_stop(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); struct pci_dev *pdev = priv->pdev; netif_tx_disable(dev); /* Disable interrupts, stop Tx and Rx. */ sc92031_disable_interrupts(dev); spin_lock_bh(&priv->lock); _sc92031_disable_tx_rx(dev); _sc92031_tx_clear(dev); mmiowb(); spin_unlock_bh(&priv->lock); free_irq(pdev->irq, dev); pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs, priv->tx_bufs_dma_addr); pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring, priv->rx_ring_dma_addr); return 0; } static void sc92031_set_multicast_list(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); spin_lock_bh(&priv->lock); _sc92031_set_mar(dev); _sc92031_set_rx_config(dev); mmiowb(); spin_unlock_bh(&priv->lock); } static void sc92031_tx_timeout(struct net_device *dev) { struct sc92031_priv *priv = netdev_priv(dev); /* Disable interrupts by clearing the interrupt mask.*/ sc92031_disable_interrupts(dev); spin_lock(&priv->lock); priv->tx_timeouts++; _sc92031_reset(dev); mmiowb(); spin_unlock(&priv->lock); /* enable interrupts */ sc92031_enable_interrupts(dev); if (netif_carrier_ok(dev)) netif_wake_queue(dev); } #ifdef CONFIG_NET_POLL_CONTROLLER static void sc92031_poll_controller(struct net_device *dev) { disable_irq(dev->irq); if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE) sc92031_tasklet((unsigned long)dev); enable_irq(dev->irq); } #endif static int sc92031_ethtool_get_settings(struct net_device 
*dev, struct ethtool_cmd *cmd) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; u8 phy_address; u32 phy_ctrl; u16 output_status; spin_lock_bh(&priv->lock); phy_address = ioread32(port_base + Miicmd1) >> 27; phy_ctrl = ioread32(port_base + PhyCtrl); output_status = _sc92031_mii_read(port_base, MII_OutputStatus); _sc92031_mii_scan(port_base); mmiowb(); spin_unlock_bh(&priv->lock); cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII; cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10)) == (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10)) cmd->advertising |= ADVERTISED_Autoneg; if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10) cmd->advertising |= ADVERTISED_10baseT_Half; if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux)) == (PhyCtrlSpd10 | PhyCtrlDux)) cmd->advertising |= ADVERTISED_10baseT_Full; if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100) cmd->advertising |= ADVERTISED_100baseT_Half; if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux)) == (PhyCtrlSpd100 | PhyCtrlDux)) cmd->advertising |= ADVERTISED_100baseT_Full; if (phy_ctrl & PhyCtrlAne) cmd->advertising |= ADVERTISED_Autoneg; cmd->speed = (output_status & 0x2) ? SPEED_100 : SPEED_10; cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF; cmd->port = PORT_MII; cmd->phy_address = phy_address; cmd->transceiver = XCVR_INTERNAL; cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; return 0; } static int sc92031_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; u32 phy_ctrl; u32 old_phy_ctrl; if (!(cmd->speed == SPEED_10 || cmd->speed == SPEED_100)) return -EINVAL; if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL)) return -EINVAL; if (!(cmd->port == PORT_MII)) return -EINVAL; if (!(cmd->phy_address == 0x1f)) return -EINVAL; if (!(cmd->transceiver == XCVR_INTERNAL)) return -EINVAL; if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE)) return -EINVAL; if (cmd->autoneg == AUTONEG_ENABLE) { if (!(cmd->advertising & (ADVERTISED_Autoneg | ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half))) return -EINVAL; phy_ctrl = PhyCtrlAne; // FIXME: I'm not sure what the original code was trying to do if (cmd->advertising & ADVERTISED_Autoneg) phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10; if (cmd->advertising & ADVERTISED_100baseT_Full) phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100; if (cmd->advertising & ADVERTISED_100baseT_Half) phy_ctrl |= PhyCtrlSpd100; if (cmd->advertising & ADVERTISED_10baseT_Full) phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux; if (cmd->advertising & ADVERTISED_10baseT_Half) phy_ctrl |= PhyCtrlSpd10; } else { // FIXME: Whole branch guessed phy_ctrl = 0; if (cmd->speed == SPEED_10) phy_ctrl |= PhyCtrlSpd10; else /* cmd->speed == SPEED_100 */ phy_ctrl |= PhyCtrlSpd100; if (cmd->duplex == DUPLEX_FULL) phy_ctrl |= PhyCtrlDux; } spin_lock_bh(&priv->lock); old_phy_ctrl = ioread32(port_base + PhyCtrl); phy_ctrl |= old_phy_ctrl & ~(PhyCtrlAne | PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10); if (phy_ctrl != old_phy_ctrl) iowrite32(phy_ctrl, port_base + PhyCtrl); spin_unlock_bh(&priv->lock); return 0; } static void sc92031_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { struct sc92031_priv *priv 
= netdev_priv(dev); struct pci_dev *pdev = priv->pdev; strcpy(drvinfo->driver, SC92031_NAME); strcpy(drvinfo->bus_info, pci_name(pdev)); } static void sc92031_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; u32 pm_config; spin_lock_bh(&priv->lock); pm_config = ioread32(port_base + PMConfig); spin_unlock_bh(&priv->lock); // FIXME: Guessed wolinfo->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; wolinfo->wolopts = 0; if (pm_config & PM_LinkUp) wolinfo->wolopts |= WAKE_PHY; if (pm_config & PM_Magic) wolinfo->wolopts |= WAKE_MAGIC; if (pm_config & PM_WakeUp) // FIXME: Guessed wolinfo->wolopts |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; } static int sc92031_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; u32 pm_config; spin_lock_bh(&priv->lock); pm_config = ioread32(port_base + PMConfig) & ~(PM_LinkUp | PM_Magic | PM_WakeUp); if (wolinfo->wolopts & WAKE_PHY) pm_config |= PM_LinkUp; if (wolinfo->wolopts & WAKE_MAGIC) pm_config |= PM_Magic; // FIXME: Guessed if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) pm_config |= PM_WakeUp; priv->pm_config = pm_config; iowrite32(pm_config, port_base + PMConfig); mmiowb(); spin_unlock_bh(&priv->lock); return 0; } static int sc92031_ethtool_nway_reset(struct net_device *dev) { int err = 0; struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; u16 bmcr; spin_lock_bh(&priv->lock); bmcr = _sc92031_mii_read(port_base, MII_BMCR); if (!(bmcr & BMCR_ANENABLE)) { err = -EINVAL; goto out; } _sc92031_mii_write(port_base, MII_BMCR, bmcr | BMCR_ANRESTART); out: _sc92031_mii_scan(port_base); mmiowb(); spin_unlock_bh(&priv->lock); return err; } static const char sc92031_ethtool_stats_strings[SILAN_STATS_NUM][ETH_GSTRING_LEN] = { "tx_timeout", 
"rx_loss", }; static void sc92031_ethtool_get_strings(struct net_device *dev, u32 stringset, u8 *data) { if (stringset == ETH_SS_STATS) memcpy(data, sc92031_ethtool_stats_strings, SILAN_STATS_NUM * ETH_GSTRING_LEN); } static int sc92031_ethtool_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return SILAN_STATS_NUM; default: return -EOPNOTSUPP; } } static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct sc92031_priv *priv = netdev_priv(dev); spin_lock_bh(&priv->lock); data[0] = priv->tx_timeouts; data[1] = priv->rx_loss; spin_unlock_bh(&priv->lock); } static const struct ethtool_ops sc92031_ethtool_ops = { .get_settings = sc92031_ethtool_get_settings, .set_settings = sc92031_ethtool_set_settings, .get_drvinfo = sc92031_ethtool_get_drvinfo, .get_wol = sc92031_ethtool_get_wol, .set_wol = sc92031_ethtool_set_wol, .nway_reset = sc92031_ethtool_nway_reset, .get_link = ethtool_op_get_link, .get_strings = sc92031_ethtool_get_strings, .get_sset_count = sc92031_ethtool_get_sset_count, .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats, }; static const struct net_device_ops sc92031_netdev_ops = { .ndo_get_stats = sc92031_get_stats, .ndo_start_xmit = sc92031_start_xmit, .ndo_open = sc92031_open, .ndo_stop = sc92031_stop, .ndo_set_multicast_list = sc92031_set_multicast_list, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_tx_timeout = sc92031_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = sc92031_poll_controller, #endif }; static int __devinit sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int err; void __iomem* port_base; struct net_device *dev; struct sc92031_priv *priv; u32 mac0, mac1; unsigned long base_addr; err = pci_enable_device(pdev); if (unlikely(err < 0)) goto out_enable_device; pci_set_master(pdev); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if 
(unlikely(err < 0)) goto out_set_dma_mask; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (unlikely(err < 0)) goto out_set_dma_mask; err = pci_request_regions(pdev, SC92031_NAME); if (unlikely(err < 0)) goto out_request_regions; port_base = pci_iomap(pdev, SC92031_USE_BAR, 0); if (unlikely(!port_base)) { err = -EIO; goto out_iomap; } dev = alloc_etherdev(sizeof(struct sc92031_priv)); if (unlikely(!dev)) { err = -ENOMEM; goto out_alloc_etherdev; } pci_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); #if SC92031_USE_BAR == 0 dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR); dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR); #elif SC92031_USE_BAR == 1 dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR); #endif dev->irq = pdev->irq; /* faked with skb_copy_and_csum_dev */ dev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA; dev->netdev_ops = &sc92031_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->ethtool_ops = &sc92031_ethtool_ops; priv = netdev_priv(dev); spin_lock_init(&priv->lock); priv->port_base = port_base; priv->pdev = pdev; tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev); /* Fudge tasklet count so the call to sc92031_enable_interrupts at * sc92031_open will work correctly */ tasklet_disable_nosync(&priv->tasklet); /* PCI PM Wakeup */ iowrite32((~PM_LongWF & ~PM_LWPTN) | PM_Enable, port_base + PMConfig); mac0 = ioread32(port_base + MAC0); mac1 = ioread32(port_base + MAC0 + 4); dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24; dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16; dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8; dev->dev_addr[3] = dev->perm_addr[3] = mac0; dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8; dev->dev_addr[5] = dev->perm_addr[5] = mac1; err = register_netdev(dev); if (err < 0) goto out_register_netdev; #if SC92031_USE_BAR == 0 base_addr = dev->mem_start; #elif SC92031_USE_BAR == 1 base_addr = dev->base_addr; #endif printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, 
IRQ %d\n", dev->name, base_addr, dev->dev_addr, dev->irq); return 0; out_register_netdev: free_netdev(dev); out_alloc_etherdev: pci_iounmap(pdev, port_base); out_iomap: pci_release_regions(pdev); out_request_regions: out_set_dma_mask: pci_disable_device(pdev); out_enable_device: return err; } static void __devexit sc92031_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct sc92031_priv *priv = netdev_priv(dev); void __iomem* port_base = priv->port_base; unregister_netdev(dev); free_netdev(dev); pci_iounmap(pdev, port_base); pci_release_regions(pdev); pci_disable_device(pdev); } static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pdev); struct sc92031_priv *priv = netdev_priv(dev); pci_save_state(pdev); if (!netif_running(dev)) goto out; netif_device_detach(dev); /* Disable interrupts, stop Tx and Rx. */ sc92031_disable_interrupts(dev); spin_lock_bh(&priv->lock); _sc92031_disable_tx_rx(dev); _sc92031_tx_clear(dev); mmiowb(); spin_unlock_bh(&priv->lock); out: pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int sc92031_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct sc92031_priv *priv = netdev_priv(dev); pci_restore_state(pdev); pci_set_power_state(pdev, PCI_D0); if (!netif_running(dev)) goto out; /* Interrupts already disabled by sc92031_suspend */ spin_lock_bh(&priv->lock); _sc92031_reset(dev); mmiowb(); spin_unlock_bh(&priv->lock); sc92031_enable_interrupts(dev); netif_device_attach(dev); if (netif_carrier_ok(dev)) netif_wake_queue(dev); else netif_tx_disable(dev); out: return 0; } static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = { { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) }, { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) }, { PCI_DEVICE(0x1088, 0x2031) }, { 0, } }; MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table); static struct pci_driver sc92031_pci_driver = { .name = SC92031_NAME, .id_table 
= sc92031_pci_device_id_table, .probe = sc92031_probe, .remove = __devexit_p(sc92031_remove), .suspend = sc92031_suspend, .resume = sc92031_resume, }; static int __init sc92031_init(void) { return pci_register_driver(&sc92031_pci_driver); } static void __exit sc92031_exit(void) { pci_unregister_driver(&sc92031_pci_driver); } module_init(sc92031_init); module_exit(sc92031_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>"); MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver");
gpl-2.0
zhantss/linux
drivers/regulator/ab8500-ext.c
1104
12235
/* * Copyright (C) ST-Ericsson SA 2010 * * License Terms: GNU General Public License v2 * * Authors: Bengt Jonsson <bengt.g.jonsson@stericsson.com> * * This file is based on drivers/regulator/ab8500.c * * AB8500 external regulators * * ab8500-ext supports the following regulators: * - VextSupply3 */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/regulator/of_regulator.h> #include <linux/mfd/abx500.h> #include <linux/mfd/abx500/ab8500.h> #include <linux/regulator/ab8500.h> /** * struct ab8500_ext_regulator_info - ab8500 regulator information * @dev: device pointer * @desc: regulator description * @rdev: regulator device * @cfg: regulator configuration (extension of regulator FW configuration) * @update_bank: bank to control on/off * @update_reg: register to control on/off * @update_mask: mask to enable/disable and set mode of regulator * @update_val: bits holding the regulator current mode * @update_val_hp: bits to set EN pin active (LPn pin deactive) * normally this means high power mode * @update_val_lp: bits to set EN pin active and LPn pin active * normally this means low power mode * @update_val_hw: bits to set regulator pins in HW control * SysClkReq pins and logic will choose mode */ struct ab8500_ext_regulator_info { struct device *dev; struct regulator_desc desc; struct regulator_dev *rdev; struct ab8500_ext_regulator_cfg *cfg; u8 update_bank; u8 update_reg; u8 update_mask; u8 update_val; u8 update_val_hp; u8 update_val_lp; u8 update_val_hw; }; static int ab8500_ext_regulator_enable(struct regulator_dev *rdev) { int ret; struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); u8 regval; if (info == NULL) { dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; } /* * To satisfy both HW high power request and SW request, the 
regulator * must be on in high power. */ if (info->cfg && info->cfg->hwreq) regval = info->update_val_hp; else regval = info->update_val; ret = abx500_mask_and_set_register_interruptible(info->dev, info->update_bank, info->update_reg, info->update_mask, regval); if (ret < 0) { dev_err(rdev_get_dev(info->rdev), "couldn't set enable bits for regulator\n"); return ret; } dev_dbg(rdev_get_dev(rdev), "%s-enable (bank, reg, mask, value): 0x%02x, 0x%02x, 0x%02x, 0x%02x\n", info->desc.name, info->update_bank, info->update_reg, info->update_mask, regval); return 0; } static int ab8500_ext_regulator_disable(struct regulator_dev *rdev) { int ret; struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); u8 regval; if (info == NULL) { dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; } /* * Set the regulator in HW request mode if configured */ if (info->cfg && info->cfg->hwreq) regval = info->update_val_hw; else regval = 0; ret = abx500_mask_and_set_register_interruptible(info->dev, info->update_bank, info->update_reg, info->update_mask, regval); if (ret < 0) { dev_err(rdev_get_dev(info->rdev), "couldn't set disable bits for regulator\n"); return ret; } dev_dbg(rdev_get_dev(rdev), "%s-disable (bank, reg, mask, value):" " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n", info->desc.name, info->update_bank, info->update_reg, info->update_mask, regval); return 0; } static int ab8500_ext_regulator_is_enabled(struct regulator_dev *rdev) { int ret; struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); u8 regval; if (info == NULL) { dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; } ret = abx500_get_register_interruptible(info->dev, info->update_bank, info->update_reg, &regval); if (ret < 0) { dev_err(rdev_get_dev(rdev), "couldn't read 0x%x register\n", info->update_reg); return ret; } dev_dbg(rdev_get_dev(rdev), "%s-is_enabled (bank, reg, mask, value):" " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n", info->desc.name, 
info->update_bank, info->update_reg, info->update_mask, regval); if (((regval & info->update_mask) == info->update_val_lp) || ((regval & info->update_mask) == info->update_val_hp)) return 1; else return 0; } static int ab8500_ext_regulator_set_mode(struct regulator_dev *rdev, unsigned int mode) { int ret = 0; struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); u8 regval; if (info == NULL) { dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; } switch (mode) { case REGULATOR_MODE_NORMAL: regval = info->update_val_hp; break; case REGULATOR_MODE_IDLE: regval = info->update_val_lp; break; default: return -EINVAL; } /* If regulator is enabled and info->cfg->hwreq is set, the regulator must be on in high power, so we don't need to write the register with the same value. */ if (ab8500_ext_regulator_is_enabled(rdev) && !(info->cfg && info->cfg->hwreq)) { ret = abx500_mask_and_set_register_interruptible(info->dev, info->update_bank, info->update_reg, info->update_mask, regval); if (ret < 0) { dev_err(rdev_get_dev(rdev), "Could not set regulator mode.\n"); return ret; } dev_dbg(rdev_get_dev(rdev), "%s-set_mode (bank, reg, mask, value): " "0x%x, 0x%x, 0x%x, 0x%x\n", info->desc.name, info->update_bank, info->update_reg, info->update_mask, regval); } info->update_val = regval; return 0; } static unsigned int ab8500_ext_regulator_get_mode(struct regulator_dev *rdev) { struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); int ret; if (info == NULL) { dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; } if (info->update_val == info->update_val_hp) ret = REGULATOR_MODE_NORMAL; else if (info->update_val == info->update_val_lp) ret = REGULATOR_MODE_IDLE; else ret = -EINVAL; return ret; } static int ab8500_ext_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned *selector) { struct regulation_constraints *regu_constraints = rdev->constraints; if (!regu_constraints) { 
dev_err(rdev_get_dev(rdev), "No regulator constraints\n"); return -EINVAL; } if (regu_constraints->min_uV == min_uV && regu_constraints->max_uV == max_uV) return 0; dev_err(rdev_get_dev(rdev), "Requested min %duV max %duV != constrained min %duV max %duV\n", min_uV, max_uV, regu_constraints->min_uV, regu_constraints->max_uV); return -EINVAL; } static int ab8500_ext_list_voltage(struct regulator_dev *rdev, unsigned selector) { struct regulation_constraints *regu_constraints = rdev->constraints; if (regu_constraints == NULL) { dev_err(rdev_get_dev(rdev), "regulator constraints null pointer\n"); return -EINVAL; } /* return the uV for the fixed regulators */ if (regu_constraints->min_uV && regu_constraints->max_uV) { if (regu_constraints->min_uV == regu_constraints->max_uV) return regu_constraints->min_uV; } return -EINVAL; } static struct regulator_ops ab8500_ext_regulator_ops = { .enable = ab8500_ext_regulator_enable, .disable = ab8500_ext_regulator_disable, .is_enabled = ab8500_ext_regulator_is_enabled, .set_mode = ab8500_ext_regulator_set_mode, .get_mode = ab8500_ext_regulator_get_mode, .set_voltage = ab8500_ext_set_voltage, .list_voltage = ab8500_ext_list_voltage, }; static struct ab8500_ext_regulator_info ab8500_ext_regulator_info[AB8500_NUM_EXT_REGULATORS] = { [AB8500_EXT_SUPPLY1] = { .desc = { .name = "VEXTSUPPLY1", .ops = &ab8500_ext_regulator_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_EXT_SUPPLY1, .owner = THIS_MODULE, .n_voltages = 1, }, .update_bank = 0x04, .update_reg = 0x08, .update_mask = 0x03, .update_val = 0x01, .update_val_hp = 0x01, .update_val_lp = 0x03, .update_val_hw = 0x02, }, [AB8500_EXT_SUPPLY2] = { .desc = { .name = "VEXTSUPPLY2", .ops = &ab8500_ext_regulator_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_EXT_SUPPLY2, .owner = THIS_MODULE, .n_voltages = 1, }, .update_bank = 0x04, .update_reg = 0x08, .update_mask = 0x0c, .update_val = 0x04, .update_val_hp = 0x04, .update_val_lp = 0x0c, .update_val_hw = 0x08, }, [AB8500_EXT_SUPPLY3] = { .desc = 
{ .name = "VEXTSUPPLY3", .ops = &ab8500_ext_regulator_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_EXT_SUPPLY3, .owner = THIS_MODULE, .n_voltages = 1, }, .update_bank = 0x04, .update_reg = 0x08, .update_mask = 0x30, .update_val = 0x10, .update_val_hp = 0x10, .update_val_lp = 0x30, .update_val_hw = 0x20, }, }; static struct of_regulator_match ab8500_ext_regulator_match[] = { { .name = "ab8500_ext1", .driver_data = (void *) AB8500_EXT_SUPPLY1, }, { .name = "ab8500_ext2", .driver_data = (void *) AB8500_EXT_SUPPLY2, }, { .name = "ab8500_ext3", .driver_data = (void *) AB8500_EXT_SUPPLY3, }, }; static int ab8500_ext_regulator_probe(struct platform_device *pdev) { struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); struct ab8500_platform_data *ppdata; struct ab8500_regulator_platform_data *pdata; struct device_node *np = pdev->dev.of_node; struct regulator_config config = { }; int i, err; if (np) { err = of_regulator_match(&pdev->dev, np, ab8500_ext_regulator_match, ARRAY_SIZE(ab8500_ext_regulator_match)); if (err < 0) { dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", err); return err; } } if (!ab8500) { dev_err(&pdev->dev, "null mfd parent\n"); return -EINVAL; } ppdata = dev_get_platdata(ab8500->dev); if (!ppdata) { dev_err(&pdev->dev, "null parent pdata\n"); return -EINVAL; } pdata = ppdata->regulator; if (!pdata) { dev_err(&pdev->dev, "null pdata\n"); return -EINVAL; } /* make sure the platform data has the correct size */ if (pdata->num_ext_regulator != ARRAY_SIZE(ab8500_ext_regulator_info)) { dev_err(&pdev->dev, "Configuration error: size mismatch.\n"); return -EINVAL; } /* check for AB8500 2.x */ if (is_ab8500_2p0_or_earlier(ab8500)) { struct ab8500_ext_regulator_info *info; /* VextSupply3LPn is inverted on AB8500 2.x */ info = &ab8500_ext_regulator_info[AB8500_EXT_SUPPLY3]; info->update_val = 0x30; info->update_val_hp = 0x30; info->update_val_lp = 0x10; } /* register all regulators */ for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) 
{ struct ab8500_ext_regulator_info *info = NULL; /* assign per-regulator data */ info = &ab8500_ext_regulator_info[i]; info->dev = &pdev->dev; info->cfg = (struct ab8500_ext_regulator_cfg *) pdata->ext_regulator[i].driver_data; config.dev = &pdev->dev; config.driver_data = info; config.of_node = ab8500_ext_regulator_match[i].of_node; config.init_data = (np) ? ab8500_ext_regulator_match[i].init_data : &pdata->ext_regulator[i]; /* register regulator with framework */ info->rdev = devm_regulator_register(&pdev->dev, &info->desc, &config); if (IS_ERR(info->rdev)) { err = PTR_ERR(info->rdev); dev_err(&pdev->dev, "failed to register regulator %s\n", info->desc.name); return err; } dev_dbg(rdev_get_dev(info->rdev), "%s-probed\n", info->desc.name); } return 0; } static struct platform_driver ab8500_ext_regulator_driver = { .probe = ab8500_ext_regulator_probe, .driver = { .name = "ab8500-ext-regulator", }, }; static int __init ab8500_ext_regulator_init(void) { int ret; ret = platform_driver_register(&ab8500_ext_regulator_driver); if (ret) pr_err("Failed to register ab8500 ext regulator: %d\n", ret); return ret; } subsys_initcall(ab8500_ext_regulator_init); static void __exit ab8500_ext_regulator_exit(void) { platform_driver_unregister(&ab8500_ext_regulator_driver); } module_exit(ab8500_ext_regulator_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com>"); MODULE_DESCRIPTION("AB8500 external regulator driver"); MODULE_ALIAS("platform:ab8500-ext-regulator");
gpl-2.0
AndroidGX/SimpleGX-KK-4.4.4_G901F
drivers/regulator/arizona-ldo1.c
1104
6702
/* * arizona-ldo1.c -- LDO1 supply for Arizona devices * * Copyright 2012 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/mfd/arizona/core.h> #include <linux/mfd/arizona/pdata.h> #include <linux/mfd/arizona/registers.h> struct arizona_ldo1 { struct regulator_dev *regulator; struct arizona *arizona; struct regulator_consumer_supply supply; struct regulator_init_data init_data; }; static int arizona_ldo1_hc_list_voltage(struct regulator_dev *rdev, unsigned int selector) { if (selector >= rdev->desc->n_voltages) return -EINVAL; if (selector == rdev->desc->n_voltages - 1) return 1800000; else return rdev->desc->min_uV + (rdev->desc->uV_step * selector); } static int arizona_ldo1_hc_map_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) { int sel; sel = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); if (sel >= rdev->desc->n_voltages) sel = rdev->desc->n_voltages - 1; return sel; } static int arizona_ldo1_hc_set_voltage_sel(struct regulator_dev *rdev, unsigned sel) { struct arizona_ldo1 *ldo = rdev_get_drvdata(rdev); struct regmap *regmap = ldo->arizona->regmap; unsigned int val; int ret; if (sel == rdev->desc->n_voltages - 1) val = ARIZONA_LDO1_HI_PWR; else val = 0; ret = regmap_update_bits(regmap, ARIZONA_LDO1_CONTROL_2, ARIZONA_LDO1_HI_PWR, val); if (ret != 0) return ret; ret = regmap_update_bits(regmap, ARIZONA_DYNAMIC_FREQUENCY_SCALING_1, 
ARIZONA_SUBSYS_MAX_FREQ, val); if (ret != 0) return ret; if (val) return 0; val = sel << ARIZONA_LDO1_VSEL_SHIFT; return regmap_update_bits(regmap, ARIZONA_LDO1_CONTROL_1, ARIZONA_LDO1_VSEL_MASK, val); } static int arizona_ldo1_hc_get_voltage_sel(struct regulator_dev *rdev) { struct arizona_ldo1 *ldo = rdev_get_drvdata(rdev); struct regmap *regmap = ldo->arizona->regmap; unsigned int val; int ret; ret = regmap_read(regmap, ARIZONA_LDO1_CONTROL_2, &val); if (ret != 0) return ret; if (val & ARIZONA_LDO1_HI_PWR) return rdev->desc->n_voltages - 1; ret = regmap_read(regmap, ARIZONA_LDO1_CONTROL_1, &val); if (ret != 0) return ret; return (val & ARIZONA_LDO1_VSEL_MASK) >> ARIZONA_LDO1_VSEL_SHIFT; } static struct regulator_ops arizona_ldo1_hc_ops = { .list_voltage = arizona_ldo1_hc_list_voltage, .map_voltage = arizona_ldo1_hc_map_voltage, .get_voltage_sel = arizona_ldo1_hc_get_voltage_sel, .set_voltage_sel = arizona_ldo1_hc_set_voltage_sel, .get_bypass = regulator_get_bypass_regmap, .set_bypass = regulator_set_bypass_regmap, }; static const struct regulator_desc arizona_ldo1_hc = { .name = "LDO1", .supply_name = "LDOVDD", .type = REGULATOR_VOLTAGE, .ops = &arizona_ldo1_hc_ops, .bypass_reg = ARIZONA_LDO1_CONTROL_1, .bypass_mask = ARIZONA_LDO1_BYPASS, .min_uV = 900000, .uV_step = 50000, .n_voltages = 8, .enable_time = 1500, .owner = THIS_MODULE, }; static struct regulator_ops arizona_ldo1_ops = { .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_bypass = regulator_get_bypass_regmap, .set_bypass = regulator_set_bypass_regmap, }; static const struct regulator_desc arizona_ldo1 = { .name = "LDO1", .supply_name = "LDOVDD", .type = REGULATOR_VOLTAGE, .ops = &arizona_ldo1_ops, .vsel_reg = ARIZONA_LDO1_CONTROL_1, .vsel_mask = ARIZONA_LDO1_VSEL_MASK, .bypass_reg = ARIZONA_LDO1_CONTROL_1, .bypass_mask = ARIZONA_LDO1_BYPASS, .min_uV = 
900000, .uV_step = 50000, .n_voltages = 7, .enable_time = 500, .owner = THIS_MODULE, }; static const struct regulator_init_data arizona_ldo1_dvfs = { .constraints = { .min_uV = 1200000, .max_uV = 1800000, .valid_ops_mask = REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_VOLTAGE, }, .num_consumer_supplies = 1, }; static const struct regulator_init_data arizona_ldo1_default = { .constraints = { .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = 1, }; static int arizona_ldo1_probe(struct platform_device *pdev) { struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); const struct regulator_desc *desc; struct regulator_config config = { }; struct arizona_ldo1 *ldo1; int ret; ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL); if (ldo1 == NULL) { dev_err(&pdev->dev, "Unable to allocate private data\n"); return -ENOMEM; } ldo1->arizona = arizona; /* * Since the chip usually supplies itself we provide some * default init_data for it. This will be overridden with * platform data if provided. 
*/ switch (arizona->type) { case WM5102: desc = &arizona_ldo1_hc; ldo1->init_data = arizona_ldo1_dvfs; break; default: desc = &arizona_ldo1; ldo1->init_data = arizona_ldo1_default; break; } ldo1->init_data.consumer_supplies = &ldo1->supply; ldo1->supply.supply = "DCVDD"; ldo1->supply.dev_name = dev_name(arizona->dev); config.dev = arizona->dev; config.driver_data = ldo1; config.regmap = arizona->regmap; config.ena_gpio = arizona->pdata.ldoena; if (arizona->pdata.ldo1) config.init_data = arizona->pdata.ldo1; else config.init_data = &ldo1->init_data; ldo1->regulator = regulator_register(desc, &config); if (IS_ERR(ldo1->regulator)) { ret = PTR_ERR(ldo1->regulator); dev_err(arizona->dev, "Failed to register LDO1 supply: %d\n", ret); return ret; } platform_set_drvdata(pdev, ldo1); return 0; } static int arizona_ldo1_remove(struct platform_device *pdev) { struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev); regulator_unregister(ldo1->regulator); return 0; } static struct platform_driver arizona_ldo1_driver = { .probe = arizona_ldo1_probe, .remove = arizona_ldo1_remove, .driver = { .name = "arizona-ldo1", .owner = THIS_MODULE, }, }; module_platform_driver(arizona_ldo1_driver); /* Module information */ MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_DESCRIPTION("Arizona LDO1 driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:arizona-ldo1");
gpl-2.0
sktjdgns1189/android_kernel_samsung_SHW-M240S
drivers/video/backlight/generic_bl.c
1104
3655
/* * Generic Backlight Driver * * Copyright (c) 2004-2008 Richard Purdie * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/fb.h> #include <linux/backlight.h> static int genericbl_intensity; static struct backlight_device *generic_backlight_device; static struct generic_bl_info *bl_machinfo; /* Flag to signal when the battery is low */ #define GENERICBL_BATTLOW BL_CORE_DRIVER1 static int genericbl_send_intensity(struct backlight_device *bd) { int intensity = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.state & BL_CORE_FBBLANK) intensity = 0; if (bd->props.state & BL_CORE_SUSPENDED) intensity = 0; if (bd->props.state & GENERICBL_BATTLOW) intensity &= bl_machinfo->limit_mask; bl_machinfo->set_bl_intensity(intensity); genericbl_intensity = intensity; if (bl_machinfo->kick_battery) bl_machinfo->kick_battery(); return 0; } static int genericbl_get_intensity(struct backlight_device *bd) { return genericbl_intensity; } /* * Called when the battery is low to limit the backlight intensity. 
* If limit==0 clear any limit, otherwise limit the intensity */ void corgibl_limit_intensity(int limit) { struct backlight_device *bd = generic_backlight_device; mutex_lock(&bd->ops_lock); if (limit) bd->props.state |= GENERICBL_BATTLOW; else bd->props.state &= ~GENERICBL_BATTLOW; backlight_update_status(generic_backlight_device); mutex_unlock(&bd->ops_lock); } EXPORT_SYMBOL(corgibl_limit_intensity); static const struct backlight_ops genericbl_ops = { .options = BL_CORE_SUSPENDRESUME, .get_brightness = genericbl_get_intensity, .update_status = genericbl_send_intensity, }; static int genericbl_probe(struct platform_device *pdev) { struct backlight_properties props; struct generic_bl_info *machinfo = pdev->dev.platform_data; const char *name = "generic-bl"; struct backlight_device *bd; bl_machinfo = machinfo; if (!machinfo->limit_mask) machinfo->limit_mask = -1; if (machinfo->name) name = machinfo->name; memset(&props, 0, sizeof(struct backlight_properties)); props.max_brightness = machinfo->max_intensity; bd = backlight_device_register(name, &pdev->dev, NULL, &genericbl_ops, &props); if (IS_ERR (bd)) return PTR_ERR (bd); platform_set_drvdata(pdev, bd); bd->props.power = FB_BLANK_UNBLANK; bd->props.brightness = machinfo->default_intensity; backlight_update_status(bd); generic_backlight_device = bd; printk("Generic Backlight Driver Initialized.\n"); return 0; } static int genericbl_remove(struct platform_device *pdev) { struct backlight_device *bd = platform_get_drvdata(pdev); bd->props.power = 0; bd->props.brightness = 0; backlight_update_status(bd); backlight_device_unregister(bd); printk("Generic Backlight Driver Unloaded\n"); return 0; } static struct platform_driver genericbl_driver = { .probe = genericbl_probe, .remove = genericbl_remove, .driver = { .name = "generic-bl", }, }; static int __init genericbl_init(void) { return platform_driver_register(&genericbl_driver); } static void __exit genericbl_exit(void) { platform_driver_unregister(&genericbl_driver); } 
module_init(genericbl_init); module_exit(genericbl_exit); MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>"); MODULE_DESCRIPTION("Generic Backlight Driver"); MODULE_LICENSE("GPL");
gpl-2.0
diegoheusser/linux
arch/arm/mach-tegra/flowctrl.c
1616
4479
/* * arch/arm/mach-tegra/flowctrl.c * * functions and macros to control the flowcontroller * * Copyright (c) 2010-2012, NVIDIA Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/cpumask.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_address.h> #include <soc/tegra/fuse.h> #include "flowctrl.h" static u8 flowctrl_offset_halt_cpu[] = { FLOW_CTRL_HALT_CPU0_EVENTS, FLOW_CTRL_HALT_CPU1_EVENTS, FLOW_CTRL_HALT_CPU1_EVENTS + 8, FLOW_CTRL_HALT_CPU1_EVENTS + 16, }; static u8 flowctrl_offset_cpu_csr[] = { FLOW_CTRL_CPU0_CSR, FLOW_CTRL_CPU1_CSR, FLOW_CTRL_CPU1_CSR + 8, FLOW_CTRL_CPU1_CSR + 16, }; static void __iomem *tegra_flowctrl_base; static void flowctrl_update(u8 offset, u32 value) { writel(value, tegra_flowctrl_base + offset); /* ensure the update has reached the flow controller */ wmb(); readl_relaxed(tegra_flowctrl_base + offset); } u32 flowctrl_read_cpu_csr(unsigned int cpuid) { u8 offset = flowctrl_offset_cpu_csr[cpuid]; return readl(tegra_flowctrl_base + offset); } void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value) { return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value); } void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value) { return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value); } void flowctrl_cpu_suspend_enter(unsigned int cpuid) { unsigned int reg; int i; reg = 
flowctrl_read_cpu_csr(cpuid); switch (tegra_get_chip_id()) { case TEGRA20: /* clear wfe bitmap */ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP; /* clear wfi bitmap */ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP; /* pwr gating on wfe */ reg |= TEGRA20_FLOW_CTRL_CSR_WFE_CPU0 << cpuid; break; case TEGRA30: case TEGRA114: case TEGRA124: /* clear wfe bitmap */ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP; /* clear wfi bitmap */ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP; /* pwr gating on wfi */ reg |= TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 << cpuid; break; } reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr flag */ reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event flag */ reg |= FLOW_CTRL_CSR_ENABLE; /* pwr gating */ flowctrl_write_cpu_csr(cpuid, reg); for (i = 0; i < num_possible_cpus(); i++) { if (i == cpuid) continue; reg = flowctrl_read_cpu_csr(i); reg |= FLOW_CTRL_CSR_EVENT_FLAG; reg |= FLOW_CTRL_CSR_INTR_FLAG; flowctrl_write_cpu_csr(i, reg); } } void flowctrl_cpu_suspend_exit(unsigned int cpuid) { unsigned int reg; /* Disable powergating via flow controller for CPU0 */ reg = flowctrl_read_cpu_csr(cpuid); switch (tegra_get_chip_id()) { case TEGRA20: /* clear wfe bitmap */ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP; /* clear wfi bitmap */ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP; break; case TEGRA30: case TEGRA114: case TEGRA124: /* clear wfe bitmap */ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP; /* clear wfi bitmap */ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP; break; } reg &= ~FLOW_CTRL_CSR_ENABLE; /* clear enable */ reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr */ reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event */ flowctrl_write_cpu_csr(cpuid, reg); } static const struct of_device_id matches[] __initconst = { { .compatible = "nvidia,tegra124-flowctrl" }, { .compatible = "nvidia,tegra114-flowctrl" }, { .compatible = "nvidia,tegra30-flowctrl" }, { .compatible = "nvidia,tegra20-flowctrl" }, { } }; void __init tegra_flowctrl_init(void) { /* hardcoded fallback if device tree node is missing */ 
unsigned long base = 0x60007000; unsigned long size = SZ_4K; struct device_node *np; np = of_find_matching_node(NULL, matches); if (np) { struct resource res; if (of_address_to_resource(np, 0, &res) == 0) { size = resource_size(&res); base = res.start; } of_node_put(np); } tegra_flowctrl_base = ioremap_nocache(base, size); }
gpl-2.0
sbreen94/Zeus_N8013
drivers/clocksource/sh_cmt.c
2384
16971
/* * SuperH Timer Support - CMT * * Copyright (C) 2008 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/irq.h> #include <linux/err.h> #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/sh_timer.h> #include <linux/slab.h> struct sh_cmt_priv { void __iomem *mapbase; struct clk *clk; unsigned long width; /* 16 or 32 bit version of hardware block */ unsigned long overflow_bit; unsigned long clear_bits; struct irqaction irqaction; struct platform_device *pdev; unsigned long flags; unsigned long match_value; unsigned long next_match_value; unsigned long max_match_value; unsigned long rate; spinlock_t lock; struct clock_event_device ced; struct clocksource cs; unsigned long total_cycles; }; static DEFINE_SPINLOCK(sh_cmt_lock); #define CMSTR -1 /* shared register */ #define CMCSR 0 /* channel register */ #define CMCNT 1 /* channel register */ #define CMCOR 2 /* channel register */ static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr) { struct sh_timer_config *cfg = p->pdev->dev.platform_data; void __iomem *base = p->mapbase; unsigned long offs; if (reg_nr == CMSTR) { offs = 0; base -= cfg->channel_offset; } else offs 
= reg_nr; if (p->width == 16) offs <<= 1; else { offs <<= 2; if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) return ioread32(base + offs); } return ioread16(base + offs); } static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr, unsigned long value) { struct sh_timer_config *cfg = p->pdev->dev.platform_data; void __iomem *base = p->mapbase; unsigned long offs; if (reg_nr == CMSTR) { offs = 0; base -= cfg->channel_offset; } else offs = reg_nr; if (p->width == 16) offs <<= 1; else { offs <<= 2; if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) { iowrite32(value, base + offs); return; } } iowrite16(value, base + offs); } static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p, int *has_wrapped) { unsigned long v1, v2, v3; int o1, o2; o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit; /* Make sure the timer value is stable. Stolen from acpi_pm.c */ do { o2 = o1; v1 = sh_cmt_read(p, CMCNT); v2 = sh_cmt_read(p, CMCNT); v3 = sh_cmt_read(p, CMCNT); o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit; } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2))); *has_wrapped = o1; return v2; } static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) { struct sh_timer_config *cfg = p->pdev->dev.platform_data; unsigned long flags, value; /* start stop register shared by multiple timer channels */ spin_lock_irqsave(&sh_cmt_lock, flags); value = sh_cmt_read(p, CMSTR); if (start) value |= 1 << cfg->timer_bit; else value &= ~(1 << cfg->timer_bit); sh_cmt_write(p, CMSTR, value); spin_unlock_irqrestore(&sh_cmt_lock, flags); } static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) { int ret; /* enable clock */ ret = clk_enable(p->clk); if (ret) { dev_err(&p->pdev->dev, "cannot enable clock\n"); return ret; } /* make sure channel is disabled */ sh_cmt_start_stop_ch(p, 0); /* configure channel, periodic mode and maximum timeout */ if (p->width == 16) { *rate = clk_get_rate(p->clk) / 512; sh_cmt_write(p, CMCSR, 0x43); } else 
{ *rate = clk_get_rate(p->clk) / 8; sh_cmt_write(p, CMCSR, 0x01a4); } sh_cmt_write(p, CMCOR, 0xffffffff); sh_cmt_write(p, CMCNT, 0); /* enable channel */ sh_cmt_start_stop_ch(p, 1); return 0; } static void sh_cmt_disable(struct sh_cmt_priv *p) { /* disable channel */ sh_cmt_start_stop_ch(p, 0); /* disable interrupts in CMT block */ sh_cmt_write(p, CMCSR, 0); /* stop clock */ clk_disable(p->clk); } /* private flags */ #define FLAG_CLOCKEVENT (1 << 0) #define FLAG_CLOCKSOURCE (1 << 1) #define FLAG_REPROGRAM (1 << 2) #define FLAG_SKIPEVENT (1 << 3) #define FLAG_IRQCONTEXT (1 << 4) static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p, int absolute) { unsigned long new_match; unsigned long value = p->next_match_value; unsigned long delay = 0; unsigned long now = 0; int has_wrapped; now = sh_cmt_get_counter(p, &has_wrapped); p->flags |= FLAG_REPROGRAM; /* force reprogram */ if (has_wrapped) { /* we're competing with the interrupt handler. * -> let the interrupt handler reprogram the timer. * -> interrupt number two handles the event. */ p->flags |= FLAG_SKIPEVENT; return; } if (absolute) now = 0; do { /* reprogram the timer hardware, * but don't save the new match value yet. */ new_match = now + value + delay; if (new_match > p->max_match_value) new_match = p->max_match_value; sh_cmt_write(p, CMCOR, new_match); now = sh_cmt_get_counter(p, &has_wrapped); if (has_wrapped && (new_match > p->match_value)) { /* we are changing to a greater match value, * so this wrap must be caused by the counter * matching the old value. * -> first interrupt reprograms the timer. * -> interrupt number two handles the event. */ p->flags |= FLAG_SKIPEVENT; break; } if (has_wrapped) { /* we are changing to a smaller match value, * so the wrap must be caused by the counter * matching the new value. * -> save programmed match value. * -> let isr handle the event. 
*/ p->match_value = new_match; break; } /* be safe: verify hardware settings */ if (now < new_match) { /* timer value is below match value, all good. * this makes sure we won't miss any match events. * -> save programmed match value. * -> let isr handle the event. */ p->match_value = new_match; break; } /* the counter has reached a value greater * than our new match value. and since the * has_wrapped flag isn't set we must have * programmed a too close event. * -> increase delay and retry. */ if (delay) delay <<= 1; else delay = 1; if (!delay) dev_warn(&p->pdev->dev, "too long delay\n"); } while (delay); } static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) { if (delta > p->max_match_value) dev_warn(&p->pdev->dev, "delta out of range\n"); p->next_match_value = delta; sh_cmt_clock_event_program_verify(p, 0); } static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) { unsigned long flags; spin_lock_irqsave(&p->lock, flags); __sh_cmt_set_next(p, delta); spin_unlock_irqrestore(&p->lock, flags); } static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) { struct sh_cmt_priv *p = dev_id; /* clear flags */ sh_cmt_write(p, CMCSR, sh_cmt_read(p, CMCSR) & p->clear_bits); /* update clock source counter to begin with if enabled * the wrap flag should be cleared by the timer specific * isr before we end up here. 
*/ if (p->flags & FLAG_CLOCKSOURCE) p->total_cycles += p->match_value + 1; if (!(p->flags & FLAG_REPROGRAM)) p->next_match_value = p->max_match_value; p->flags |= FLAG_IRQCONTEXT; if (p->flags & FLAG_CLOCKEVENT) { if (!(p->flags & FLAG_SKIPEVENT)) { if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) { p->next_match_value = p->max_match_value; p->flags |= FLAG_REPROGRAM; } p->ced.event_handler(&p->ced); } } p->flags &= ~FLAG_SKIPEVENT; if (p->flags & FLAG_REPROGRAM) { p->flags &= ~FLAG_REPROGRAM; sh_cmt_clock_event_program_verify(p, 1); if (p->flags & FLAG_CLOCKEVENT) if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN) || (p->match_value == p->next_match_value)) p->flags &= ~FLAG_REPROGRAM; } p->flags &= ~FLAG_IRQCONTEXT; return IRQ_HANDLED; } static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) { int ret = 0; unsigned long flags; spin_lock_irqsave(&p->lock, flags); if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) ret = sh_cmt_enable(p, &p->rate); if (ret) goto out; p->flags |= flag; /* setup timeout if no clockevent */ if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) __sh_cmt_set_next(p, p->max_match_value); out: spin_unlock_irqrestore(&p->lock, flags); return ret; } static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) { unsigned long flags; unsigned long f; spin_lock_irqsave(&p->lock, flags); f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); p->flags &= ~flag; if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) sh_cmt_disable(p); /* adjust the timeout to maximum if only clocksource left */ if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) __sh_cmt_set_next(p, p->max_match_value); spin_unlock_irqrestore(&p->lock, flags); } static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) { return container_of(cs, struct sh_cmt_priv, cs); } static cycle_t sh_cmt_clocksource_read(struct clocksource *cs) { struct sh_cmt_priv *p = cs_to_sh_cmt(cs); unsigned long flags, raw; unsigned long value; int 
has_wrapped; spin_lock_irqsave(&p->lock, flags); value = p->total_cycles; raw = sh_cmt_get_counter(p, &has_wrapped); if (unlikely(has_wrapped)) raw += p->match_value + 1; spin_unlock_irqrestore(&p->lock, flags); return value + raw; } static int sh_cmt_clocksource_enable(struct clocksource *cs) { int ret; struct sh_cmt_priv *p = cs_to_sh_cmt(cs); p->total_cycles = 0; ret = sh_cmt_start(p, FLAG_CLOCKSOURCE); if (!ret) __clocksource_updatefreq_hz(cs, p->rate); return ret; } static void sh_cmt_clocksource_disable(struct clocksource *cs) { sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE); } static void sh_cmt_clocksource_resume(struct clocksource *cs) { sh_cmt_start(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE); } static int sh_cmt_register_clocksource(struct sh_cmt_priv *p, char *name, unsigned long rating) { struct clocksource *cs = &p->cs; cs->name = name; cs->rating = rating; cs->read = sh_cmt_clocksource_read; cs->enable = sh_cmt_clocksource_enable; cs->disable = sh_cmt_clocksource_disable; cs->suspend = sh_cmt_clocksource_disable; cs->resume = sh_cmt_clocksource_resume; cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8); cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; dev_info(&p->pdev->dev, "used as clock source\n"); /* Register with dummy 1 Hz value, gets updated in ->enable() */ clocksource_register_hz(cs, 1); return 0; } static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced) { return container_of(ced, struct sh_cmt_priv, ced); } static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic) { struct clock_event_device *ced = &p->ced; sh_cmt_start(p, FLAG_CLOCKEVENT); /* TODO: calculate good shift from rate and counter bit width */ ced->shift = 32; ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced); ced->min_delta_ns = clockevent_delta2ns(0x1f, ced); if (periodic) sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1); else sh_cmt_set_next(p, p->max_match_value); } static void 
sh_cmt_clock_event_mode(enum clock_event_mode mode, struct clock_event_device *ced) { struct sh_cmt_priv *p = ced_to_sh_cmt(ced); /* deal with old setting first */ switch (ced->mode) { case CLOCK_EVT_MODE_PERIODIC: case CLOCK_EVT_MODE_ONESHOT: sh_cmt_stop(p, FLAG_CLOCKEVENT); break; default: break; } switch (mode) { case CLOCK_EVT_MODE_PERIODIC: dev_info(&p->pdev->dev, "used for periodic clock events\n"); sh_cmt_clock_event_start(p, 1); break; case CLOCK_EVT_MODE_ONESHOT: dev_info(&p->pdev->dev, "used for oneshot clock events\n"); sh_cmt_clock_event_start(p, 0); break; case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: sh_cmt_stop(p, FLAG_CLOCKEVENT); break; default: break; } } static int sh_cmt_clock_event_next(unsigned long delta, struct clock_event_device *ced) { struct sh_cmt_priv *p = ced_to_sh_cmt(ced); BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT); if (likely(p->flags & FLAG_IRQCONTEXT)) p->next_match_value = delta - 1; else sh_cmt_set_next(p, delta - 1); return 0; } static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, char *name, unsigned long rating) { struct clock_event_device *ced = &p->ced; memset(ced, 0, sizeof(*ced)); ced->name = name; ced->features = CLOCK_EVT_FEAT_PERIODIC; ced->features |= CLOCK_EVT_FEAT_ONESHOT; ced->rating = rating; ced->cpumask = cpumask_of(0); ced->set_next_event = sh_cmt_clock_event_next; ced->set_mode = sh_cmt_clock_event_mode; dev_info(&p->pdev->dev, "used for clock events\n"); clockevents_register_device(ced); } static int sh_cmt_register(struct sh_cmt_priv *p, char *name, unsigned long clockevent_rating, unsigned long clocksource_rating) { if (p->width == (sizeof(p->max_match_value) * 8)) p->max_match_value = ~0; else p->max_match_value = (1 << p->width) - 1; p->match_value = p->max_match_value; spin_lock_init(&p->lock); if (clockevent_rating) sh_cmt_register_clockevent(p, name, clockevent_rating); if (clocksource_rating) sh_cmt_register_clocksource(p, name, clocksource_rating); return 0; } static int 
sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) { struct sh_timer_config *cfg = pdev->dev.platform_data; struct resource *res; int irq, ret; ret = -ENXIO; memset(p, 0, sizeof(*p)); p->pdev = pdev; if (!cfg) { dev_err(&p->pdev->dev, "missing platform data\n"); goto err0; } platform_set_drvdata(pdev, p); res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&p->pdev->dev, "failed to get I/O memory\n"); goto err0; } irq = platform_get_irq(p->pdev, 0); if (irq < 0) { dev_err(&p->pdev->dev, "failed to get irq\n"); goto err0; } /* map memory, let mapbase point to our channel */ p->mapbase = ioremap_nocache(res->start, resource_size(res)); if (p->mapbase == NULL) { dev_err(&p->pdev->dev, "failed to remap I/O memory\n"); goto err0; } /* request irq using setup_irq() (too early for request_irq()) */ p->irqaction.name = dev_name(&p->pdev->dev); p->irqaction.handler = sh_cmt_interrupt; p->irqaction.dev_id = p; p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ IRQF_IRQPOLL | IRQF_NOBALANCING; /* get hold of clock */ p->clk = clk_get(&p->pdev->dev, "cmt_fck"); if (IS_ERR(p->clk)) { dev_err(&p->pdev->dev, "cannot get clock\n"); ret = PTR_ERR(p->clk); goto err1; } if (resource_size(res) == 6) { p->width = 16; p->overflow_bit = 0x80; p->clear_bits = ~0x80; } else { p->width = 32; p->overflow_bit = 0x8000; p->clear_bits = ~0xc000; } ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev), cfg->clockevent_rating, cfg->clocksource_rating); if (ret) { dev_err(&p->pdev->dev, "registration failed\n"); goto err1; } ret = setup_irq(irq, &p->irqaction); if (ret) { dev_err(&p->pdev->dev, "failed to request irq %d\n", irq); goto err1; } return 0; err1: iounmap(p->mapbase); err0: return ret; } static int __devinit sh_cmt_probe(struct platform_device *pdev) { struct sh_cmt_priv *p = platform_get_drvdata(pdev); int ret; if (p) { dev_info(&pdev->dev, "kept as earlytimer\n"); return 0; } p = kmalloc(sizeof(*p), GFP_KERNEL); if (p == NULL) { 
dev_err(&pdev->dev, "failed to allocate driver data\n"); return -ENOMEM; } ret = sh_cmt_setup(p, pdev); if (ret) { kfree(p); platform_set_drvdata(pdev, NULL); } return ret; } static int __devexit sh_cmt_remove(struct platform_device *pdev) { return -EBUSY; /* cannot unregister clockevent and clocksource */ } static struct platform_driver sh_cmt_device_driver = { .probe = sh_cmt_probe, .remove = __devexit_p(sh_cmt_remove), .driver = { .name = "sh_cmt", } }; static int __init sh_cmt_init(void) { return platform_driver_register(&sh_cmt_device_driver); } static void __exit sh_cmt_exit(void) { platform_driver_unregister(&sh_cmt_device_driver); } early_platform_init("earlytimer", &sh_cmt_device_driver); module_init(sh_cmt_init); module_exit(sh_cmt_exit); MODULE_AUTHOR("Magnus Damm"); MODULE_DESCRIPTION("SuperH CMT Timer Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
ReconInstruments/jet_kernel
drivers/staging/gma500/psb_intel_display.c
2384
42487
/* * Copyright © 2006-2007 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Authors: * Eric Anholt <eric@anholt.net> */ #include <linux/i2c.h> #include <linux/pm_runtime.h> #include <drm/drmP.h> #include "psb_fb.h" #include "psb_drv.h" #include "psb_intel_drv.h" #include "psb_intel_reg.h" #include "psb_intel_display.h" #include "psb_powermgmt.h" struct psb_intel_clock_t { /* given values */ int n; int m1, m2; int p1, p2; /* derived values */ int dot; int vco; int m; int p; }; struct psb_intel_range_t { int min, max; }; struct psb_intel_p2_t { int dot_limit; int p2_slow, p2_fast; }; #define INTEL_P2_NUM 2 struct psb_intel_limit_t { struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1; struct psb_intel_p2_t p2; }; #define I8XX_DOT_MIN 25000 #define I8XX_DOT_MAX 350000 #define I8XX_VCO_MIN 930000 #define I8XX_VCO_MAX 1400000 #define I8XX_N_MIN 3 #define I8XX_N_MAX 16 #define I8XX_M_MIN 96 #define I8XX_M_MAX 140 #define I8XX_M1_MIN 18 #define I8XX_M1_MAX 26 #define I8XX_M2_MIN 6 #define I8XX_M2_MAX 16 #define I8XX_P_MIN 4 #define I8XX_P_MAX 128 #define I8XX_P1_MIN 2 #define I8XX_P1_MAX 33 #define I8XX_P1_LVDS_MIN 1 #define I8XX_P1_LVDS_MAX 6 #define I8XX_P2_SLOW 4 #define I8XX_P2_FAST 2 #define I8XX_P2_LVDS_SLOW 14 #define I8XX_P2_LVDS_FAST 14 /* No fast option */ #define I8XX_P2_SLOW_LIMIT 165000 #define I9XX_DOT_MIN 20000 #define I9XX_DOT_MAX 400000 
#define I9XX_VCO_MIN 1400000 #define I9XX_VCO_MAX 2800000 #define I9XX_N_MIN 3 #define I9XX_N_MAX 8 #define I9XX_M_MIN 70 #define I9XX_M_MAX 120 #define I9XX_M1_MIN 10 #define I9XX_M1_MAX 20 #define I9XX_M2_MIN 5 #define I9XX_M2_MAX 9 #define I9XX_P_SDVO_DAC_MIN 5 #define I9XX_P_SDVO_DAC_MAX 80 #define I9XX_P_LVDS_MIN 7 #define I9XX_P_LVDS_MAX 98 #define I9XX_P1_MIN 1 #define I9XX_P1_MAX 8 #define I9XX_P2_SDVO_DAC_SLOW 10 #define I9XX_P2_SDVO_DAC_FAST 5 #define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000 #define I9XX_P2_LVDS_SLOW 14 #define I9XX_P2_LVDS_FAST 7 #define I9XX_P2_LVDS_SLOW_LIMIT 112000 #define INTEL_LIMIT_I8XX_DVO_DAC 0 #define INTEL_LIMIT_I8XX_LVDS 1 #define INTEL_LIMIT_I9XX_SDVO_DAC 2 #define INTEL_LIMIT_I9XX_LVDS 3 static const struct psb_intel_limit_t psb_intel_limits[] = { { /* INTEL_LIMIT_I8XX_DVO_DAC */ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX}, .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX}, .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX}, .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX}, .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX}, .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX}, .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX}, .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX}, .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT, .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST}, }, { /* INTEL_LIMIT_I8XX_LVDS */ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX}, .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX}, .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX}, .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX}, .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX}, .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX}, .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX}, .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX}, .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT, .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST}, }, { /* INTEL_LIMIT_I9XX_SDVO_DAC */ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX}, .n = {.min = I9XX_N_MIN, .max = 
I9XX_N_MAX}, .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX}, .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX}, .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX}, .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX}, .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX}, .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST}, }, { /* INTEL_LIMIT_I9XX_LVDS */ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX}, .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX}, .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX}, .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX}, .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX}, .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX}, .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX}, /* The single-channel range is 25-112Mhz, and dual-channel * is 80-224Mhz. Prefer single channel as much as possible. */ .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST}, }, }; static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc) { const struct psb_intel_limit_t *limit; if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS]; else limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; return limit; } /** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock) { clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); clock->p = clock->p1 * clock->p2; clock->vco = refclk * clock->m / (clock->n + 2); clock->dot = clock->vco / clock->p; } /** Derive the pixel clock for the given refclk and divisors for 9xx chips. 
*/ static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock) { clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); clock->p = clock->p1 * clock->p2; clock->vco = refclk * clock->m / (clock->n + 2); clock->dot = clock->vco / clock->p; } static void psb_intel_clock(struct drm_device *dev, int refclk, struct psb_intel_clock_t *clock) { return i9xx_clock(refclk, clock); } /** * Returns whether any output on the specified pipe is of the specified type */ bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type) { struct drm_device *dev = crtc->dev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *l_entry; list_for_each_entry(l_entry, &mode_config->connector_list, head) { if (l_entry->encoder && l_entry->encoder->crtc == crtc) { struct psb_intel_output *psb_intel_output = to_psb_intel_output(l_entry); if (psb_intel_output->type == type) return true; } } return false; } #define INTELPllInvalid(s) { /* ErrorF (s) */; return false; } /** * Returns whether the given set of divisors are valid for a given refclk with * the given connectors. 
*/ static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc, struct psb_intel_clock_t *clock) { const struct psb_intel_limit_t *limit = psb_intel_limit(crtc); if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) INTELPllInvalid("p1 out of range\n"); if (clock->p < limit->p.min || limit->p.max < clock->p) INTELPllInvalid("p out of range\n"); if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) INTELPllInvalid("m2 out of range\n"); if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) INTELPllInvalid("m1 out of range\n"); if (clock->m1 <= clock->m2) INTELPllInvalid("m1 <= m2\n"); if (clock->m < limit->m.min || limit->m.max < clock->m) INTELPllInvalid("m out of range\n"); if (clock->n < limit->n.min || limit->n.max < clock->n) INTELPllInvalid("n out of range\n"); if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) INTELPllInvalid("vco out of range\n"); /* XXX: We may need to be checking "Dot clock" * depending on the multiplier, connector, etc., * rather than just a single range. */ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) INTELPllInvalid("dot out of range\n"); return true; } /** * Returns a set of divisors for the desired target clock with the given * refclk, or FALSE. The returned values represent the clock equation: * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target, int refclk, struct psb_intel_clock_t *best_clock) { struct drm_device *dev = crtc->dev; struct psb_intel_clock_t clock; const struct psb_intel_limit_t *limit = psb_intel_limit(crtc); int err = target; if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && (REG_READ(LVDS) & LVDS_PORT_EN) != 0) { /* * For LVDS, if the panel is on, just rely on its current * settings for dual-channel. We haven't figured out how to * reliably set up different single/dual channel state, if we * even can. 
*/ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) clock.p2 = limit->p2.p2_fast; else clock.p2 = limit->p2.p2_slow; } else { if (target < limit->p2.dot_limit) clock.p2 = limit->p2.p2_slow; else clock.p2 = limit->p2.p2_fast; } memset(best_clock, 0, sizeof(*best_clock)); for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 && clock.m2 <= limit->m2.max; clock.m2++) { for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; clock.p1++) { int this_err; psb_intel_clock(dev, refclk, &clock); if (!psb_intel_PLL_is_valid (crtc, &clock)) continue; this_err = abs(clock.dot - target); if (this_err < err) { *best_clock = clock; err = this_err; } } } } } return err != target; } void psb_intel_wait_for_vblank(struct drm_device *dev) { /* Wait for 20ms, i.e. one cycle at 50hz. */ udelay(20000); } int psb_intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; /* struct drm_i915_master_private *master_priv; */ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); int pipe = psb_intel_crtc->pipe; unsigned long start, offset; int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE); int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; int dspcntr_reg = (pipe == 0) ? 
DSPACNTR : DSPBCNTR; u32 dspcntr; int ret = 0; PSB_DEBUG_ENTRY("\n"); /* no fb bound */ if (!crtc->fb) { DRM_DEBUG("No FB bound\n"); return 0; } if (!gma_power_begin(dev, true)) return 0; /* We are displaying this buffer, make sure it is actually loaded into the GTT */ ret = psb_gtt_pin(psbfb->gtt); if (ret < 0) goto psb_intel_pipe_set_base_exit; start = psbfb->gtt->offset; offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); REG_WRITE(dspstride, crtc->fb->pitch); dspcntr = REG_READ(dspcntr_reg); dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; switch (crtc->fb->bits_per_pixel) { case 8: dspcntr |= DISPPLANE_8BPP; break; case 16: if (crtc->fb->depth == 15) dspcntr |= DISPPLANE_15_16BPP; else dspcntr |= DISPPLANE_16BPP; break; case 24: case 32: dspcntr |= DISPPLANE_32BPP_NO_ALPHA; break; default: DRM_ERROR("Unknown color depth\n"); ret = -EINVAL; psb_gtt_unpin(psbfb->gtt); goto psb_intel_pipe_set_base_exit; } REG_WRITE(dspcntr_reg, dspcntr); DRM_DEBUG("Writing base %08lX %08lX %d %d\n", start, offset, x, y); if (0 /* FIXMEAC - check what PSB needs */) { REG_WRITE(dspbase, offset); REG_READ(dspbase); REG_WRITE(dspsurf, start); REG_READ(dspsurf); } else { REG_WRITE(dspbase, start + offset); REG_READ(dspbase); } /* If there was a previous display we can now unpin it */ if (old_fb) psb_gtt_unpin(to_psb_fb(old_fb)->gtt); psb_intel_pipe_set_base_exit: gma_power_end(dev); return ret; } /** * Sets the power management mode of the pipe and plane. * * This code should probably grow support for turning the cursor off and back * on appropriately at the same time as we're turning the pipe off/on. */ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; /* struct drm_i915_master_private *master_priv; */ /* struct drm_i915_private *dev_priv = dev->dev_private; */ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int pipe = psb_intel_crtc->pipe; int dpll_reg = (pipe == 0) ? 
DPLL_A : DPLL_B; int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE; int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; u32 temp; bool enabled; /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. */ switch (mode) { case DRM_MODE_DPMS_ON: case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: /* Enable the DPLL */ temp = REG_READ(dpll_reg); if ((temp & DPLL_VCO_ENABLE) == 0) { REG_WRITE(dpll_reg, temp); REG_READ(dpll_reg); /* Wait for the clocks to stabilize. */ udelay(150); REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); REG_READ(dpll_reg); /* Wait for the clocks to stabilize. */ udelay(150); REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); REG_READ(dpll_reg); /* Wait for the clocks to stabilize. */ udelay(150); } /* Enable the pipe */ temp = REG_READ(pipeconf_reg); if ((temp & PIPEACONF_ENABLE) == 0) REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); /* Enable the plane */ temp = REG_READ(dspcntr_reg); if ((temp & DISPLAY_PLANE_ENABLE) == 0) { REG_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); /* Flush the plane changes */ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); } psb_intel_crtc_load_lut(crtc); /* Give the overlay scaler a chance to enable * if it's on this pipe */ /* psb_intel_crtc_dpms_video(crtc, true); TODO */ break; case DRM_MODE_DPMS_OFF: /* Give the overlay scaler a chance to disable * if it's on this pipe */ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */ /* Disable the VGA plane that we never use */ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); /* Disable display plane */ temp = REG_READ(dspcntr_reg); if ((temp & DISPLAY_PLANE_ENABLE) != 0) { REG_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); /* Flush the plane changes */ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); REG_READ(dspbase_reg); } /* Next, disable display pipes */ temp = REG_READ(pipeconf_reg); if ((temp & PIPEACONF_ENABLE) != 0) { 
REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); REG_READ(pipeconf_reg); } /* Wait for vblank for the disable to take effect. */ psb_intel_wait_for_vblank(dev); temp = REG_READ(dpll_reg); if ((temp & DPLL_VCO_ENABLE) != 0) { REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); REG_READ(dpll_reg); } /* Wait for the clocks to turn off. */ udelay(150); break; } enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; /*Set FIFO Watermarks*/ REG_WRITE(DSPARB, 0x3F3E); } static void psb_intel_crtc_prepare(struct drm_crtc *crtc) { struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); } static void psb_intel_crtc_commit(struct drm_crtc *crtc) { struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); } void psb_intel_encoder_prepare(struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; /* lvds has its own version of prepare see psb_intel_lvds_prepare */ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); } void psb_intel_encoder_commit(struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; /* lvds has its own version of commit see psb_intel_lvds_commit */ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); } static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } /** * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use */ static int psb_intel_panel_fitter_pipe(struct drm_device *dev) { u32 pfit_control; pfit_control = REG_READ(PFIT_CONTROL); /* See if the panel fitter is in use */ if ((pfit_control & PFIT_ENABLE) == 0) return -1; /* Must be on PIPE 1 for PSB */ return 1; } static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct 
drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int pipe = psb_intel_crtc->pipe; int fp_reg = (pipe == 0) ? FPA0 : FPB0; int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; int refclk; struct psb_intel_clock_t clock; u32 dpll = 0, fp = 0, dspcntr, pipeconf; bool ok, is_sdvo = false, is_dvo = false; bool is_crt = false, is_lvds = false, is_tv = false; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; list_for_each_entry(connector, &mode_config->connector_list, head) { struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); if (!connector->encoder || connector->encoder->crtc != crtc) continue; switch (psb_intel_output->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; break; case INTEL_OUTPUT_SDVO: is_sdvo = true; break; case INTEL_OUTPUT_DVO: is_dvo = true; break; case INTEL_OUTPUT_TVOUT: is_tv = true; break; case INTEL_OUTPUT_ANALOG: is_crt = true; break; } } refclk = 96000; ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); if (!ok) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); return 0; } fp = clock.n << 16 | clock.m1 << 8 | clock.m2; dpll = DPLL_VGA_MODE_DIS; if (is_lvds) { dpll |= DPLLB_MODE_LVDS; dpll |= DPLL_DVO_HIGH_SPEED; } else dpll |= DPLLB_MODE_DAC_SERIAL; if (is_sdvo) { int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; dpll 
|= DPLL_DVO_HIGH_SPEED; dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; } /* compute bitmask from p1 value */ dpll |= (1 << (clock.p1 - 1)) << 16; switch (clock.p2) { case 5: dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; break; case 7: dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; break; case 10: dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; break; case 14: dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; break; } if (is_tv) { /* XXX: just matching BIOS for now */ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ dpll |= 3; } dpll |= PLL_REF_INPUT_DREFCLK; /* setup pipeconf */ pipeconf = REG_READ(pipeconf_reg); /* Set up the display plane register */ dspcntr = DISPPLANE_GAMMA_ENABLE; if (pipe == 0) dspcntr |= DISPPLANE_SEL_PIPE_A; else dspcntr |= DISPPLANE_SEL_PIPE_B; dspcntr |= DISPLAY_PLANE_ENABLE; pipeconf |= PIPEACONF_ENABLE; dpll |= DPLL_VCO_ENABLE; /* Disable the panel fitter if it was on our pipe */ if (psb_intel_panel_fitter_pipe(dev) == pipe) REG_WRITE(PFIT_CONTROL, 0); DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); drm_mode_debug_printmodeline(mode); if (dpll & DPLL_VCO_ENABLE) { REG_WRITE(fp_reg, fp); REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); REG_READ(dpll_reg); udelay(150); } /* The LVDS pin pair needs to be on before the DPLLs are enabled. * This is an exception to the general rule that mode_set doesn't turn * things on. */ if (is_lvds) { u32 lvds = REG_READ(LVDS); lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; /* Set the B0-B3 data pairs corresponding to * whether we're going to * set the DPLLs for dual-channel mode or not. */ if (clock.p2 == 7) lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; else lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) * appropriately here, but we need to look more * thoroughly into how panels behave in the two modes. 
*/ REG_WRITE(LVDS, lvds); REG_READ(LVDS); } REG_WRITE(fp_reg, fp); REG_WRITE(dpll_reg, dpll); REG_READ(dpll_reg); /* Wait for the clocks to stabilize. */ udelay(150); /* write it again -- the BIOS does, after all */ REG_WRITE(dpll_reg, dpll); REG_READ(dpll_reg); /* Wait for the clocks to stabilize. */ udelay(150); REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16)); REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); /* pipesrc and dspsize control the size that is scaled from, * which should always be the user's requested size. */ REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); REG_WRITE(dsppos_reg, 0); REG_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); REG_WRITE(pipeconf_reg, pipeconf); REG_READ(pipeconf_reg); psb_intel_wait_for_vblank(dev); REG_WRITE(dspcntr_reg, dspcntr); /* Flush the plane changes */ { struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc_funcs->mode_set_base(crtc, x, y, old_fb); } psb_intel_wait_for_vblank(dev); return 0; } /** Loads the palette/gamma unit for the CRTC with the prepared values */ void psb_intel_crtc_load_lut(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private; struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int palreg = PALETTE_A; int i; /* The clocks have to be on to load the palette. 
*/ if (!crtc->enabled) return; switch (psb_intel_crtc->pipe) { case 0: break; case 1: palreg = PALETTE_B; break; case 2: palreg = PALETTE_C; break; default: DRM_ERROR("Illegal Pipe Number.\n"); return; } if (gma_power_begin(dev, false)) { for (i = 0; i < 256; i++) { REG_WRITE(palreg + 4 * i, ((psb_intel_crtc->lut_r[i] + psb_intel_crtc->lut_adj[i]) << 16) | ((psb_intel_crtc->lut_g[i] + psb_intel_crtc->lut_adj[i]) << 8) | (psb_intel_crtc->lut_b[i] + psb_intel_crtc->lut_adj[i])); } gma_power_end(dev); } else { for (i = 0; i < 256; i++) { dev_priv->save_palette_a[i] = ((psb_intel_crtc->lut_r[i] + psb_intel_crtc->lut_adj[i]) << 16) | ((psb_intel_crtc->lut_g[i] + psb_intel_crtc->lut_adj[i]) << 8) | (psb_intel_crtc->lut_b[i] + psb_intel_crtc->lut_adj[i]); } } } /** * Save HW states of giving crtc */ static void psb_intel_crtc_save(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; /* struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private; */ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; int pipeA = (psb_intel_crtc->pipe == 0); uint32_t paletteReg; int i; DRM_DEBUG("\n"); if (!crtc_state) { DRM_DEBUG("No CRTC state found\n"); return; } crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR); crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF); crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC); crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0); crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1); crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B); crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B); crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B); crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B); crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B); crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B); crtc_state->saveVSYNC = REG_READ(pipeA ? 
VSYNC_A : VSYNC_B); crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE); /*NOTE: DSPSIZE DSPPOS only for psb*/ crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE); crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS); crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE); DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n", crtc_state->saveDSPCNTR, crtc_state->savePIPECONF, crtc_state->savePIPESRC, crtc_state->saveFP0, crtc_state->saveFP1, crtc_state->saveDPLL, crtc_state->saveHTOTAL, crtc_state->saveHBLANK, crtc_state->saveHSYNC, crtc_state->saveVTOTAL, crtc_state->saveVBLANK, crtc_state->saveVSYNC, crtc_state->saveDSPSTRIDE, crtc_state->saveDSPSIZE, crtc_state->saveDSPPOS, crtc_state->saveDSPBASE ); paletteReg = pipeA ? PALETTE_A : PALETTE_B; for (i = 0; i < 256; ++i) crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2)); } /** * Restore HW states of giving crtc */ static void psb_intel_crtc_restore(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; /* struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private; */ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */ int pipeA = (psb_intel_crtc->pipe == 0); uint32_t paletteReg; int i; DRM_DEBUG("\n"); if (!crtc_state) { DRM_DEBUG("No crtc state\n"); return; } DRM_DEBUG( "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n", REG_READ(pipeA ? DSPACNTR : DSPBCNTR), REG_READ(pipeA ? PIPEACONF : PIPEBCONF), REG_READ(pipeA ? PIPEASRC : PIPEBSRC), REG_READ(pipeA ? FPA0 : FPB0), REG_READ(pipeA ? FPA1 : FPB1), REG_READ(pipeA ? DPLL_A : DPLL_B), REG_READ(pipeA ? HTOTAL_A : HTOTAL_B), REG_READ(pipeA ? HBLANK_A : HBLANK_B), REG_READ(pipeA ? HSYNC_A : HSYNC_B), REG_READ(pipeA ? VTOTAL_A : VTOTAL_B), REG_READ(pipeA ? VBLANK_A : VBLANK_B), REG_READ(pipeA ? 
VSYNC_A : VSYNC_B), REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE), REG_READ(pipeA ? DSPASIZE : DSPBSIZE), REG_READ(pipeA ? DSPAPOS : DSPBPOS), REG_READ(pipeA ? DSPABASE : DSPBBASE) ); DRM_DEBUG( "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n", crtc_state->saveDSPCNTR, crtc_state->savePIPECONF, crtc_state->savePIPESRC, crtc_state->saveFP0, crtc_state->saveFP1, crtc_state->saveDPLL, crtc_state->saveHTOTAL, crtc_state->saveHBLANK, crtc_state->saveHSYNC, crtc_state->saveVTOTAL, crtc_state->saveVBLANK, crtc_state->saveVSYNC, crtc_state->saveDSPSTRIDE, crtc_state->saveDSPSIZE, crtc_state->saveDSPPOS, crtc_state->saveDSPBASE ); if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) { REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL & ~DPLL_VCO_ENABLE); REG_READ(pipeA ? DPLL_A : DPLL_B); DRM_DEBUG("write dpll: %x\n", REG_READ(pipeA ? DPLL_A : DPLL_B)); udelay(150); } REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0); REG_READ(pipeA ? FPA0 : FPB0); REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1); REG_READ(pipeA ? FPA1 : FPB1); REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL); REG_READ(pipeA ? DPLL_A : DPLL_B); udelay(150); REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL); REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK); REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC); REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL); REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK); REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC); REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE); REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE); REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS); REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC); REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE); REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF); psb_intel_wait_for_vblank(dev); REG_WRITE(pipeA ? 
DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR); REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE); psb_intel_wait_for_vblank(dev); paletteReg = pipeA ? PALETTE_A : PALETTE_B; for (i = 0; i < 256; ++i) REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]); } static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height) { struct drm_device *dev = crtc->dev; struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int pipe = psb_intel_crtc->pipe; uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; uint32_t temp; size_t addr = 0; struct gtt_range *gt; struct drm_gem_object *obj; int ret; DRM_DEBUG("\n"); /* if we want to turn of the cursor ignore width and height */ if (!handle) { DRM_DEBUG("cursor off\n"); /* turn off the cursor */ temp = CURSOR_MODE_DISABLE; if (gma_power_begin(dev, false)) { REG_WRITE(control, temp); REG_WRITE(base, 0); gma_power_end(dev); } /* Unpin the old GEM object */ if (psb_intel_crtc->cursor_obj) { gt = container_of(psb_intel_crtc->cursor_obj, struct gtt_range, gem); psb_gtt_unpin(gt); drm_gem_object_unreference(psb_intel_crtc->cursor_obj); psb_intel_crtc->cursor_obj = NULL; } return 0; } /* Currently we only support 64x64 cursors */ if (width != 64 || height != 64) { DRM_ERROR("we currently only support 64x64 cursors\n"); return -EINVAL; } obj = drm_gem_object_lookup(dev, file_priv, handle); if (!obj) return -ENOENT; if (obj->size < width * height * 4) { DRM_ERROR("buffer is to small\n"); return -ENOMEM; } gt = container_of(obj, struct gtt_range, gem); /* Pin the memory into the GTT */ ret = psb_gtt_pin(gt); if (ret) { DRM_ERROR("Can not pin down handle 0x%x\n", handle); return ret; } addr = gt->offset; /* Or resource.start ??? 
*/ psb_intel_crtc->cursor_addr = addr; temp = 0; /* set the pipe for the cursor */ temp |= (pipe << 28); temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; if (gma_power_begin(dev, false)) { REG_WRITE(control, temp); REG_WRITE(base, addr); gma_power_end(dev); } /* unpin the old bo */ if (psb_intel_crtc->cursor_obj && psb_intel_crtc->cursor_obj != obj) { gt = container_of(psb_intel_crtc->cursor_obj, struct gtt_range, gem); psb_gtt_unpin(gt); drm_gem_object_unreference(psb_intel_crtc->cursor_obj); psb_intel_crtc->cursor_obj = obj; } return 0; } static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct drm_device *dev = crtc->dev; struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int pipe = psb_intel_crtc->pipe; uint32_t temp = 0; uint32_t addr; if (x < 0) { temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT); x = -x; } if (y < 0) { temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT); y = -y; } temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT); temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT); addr = psb_intel_crtc->cursor_addr; if (gma_power_begin(dev, false)) { REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); REG_WRITE((pipe == 0) ? 
CURABASE : CURBBASE, addr); gma_power_end(dev); } return 0; } static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t type, uint32_t size) { struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int i; if (size != 256) return; for (i = 0; i < 256; i++) { psb_intel_crtc->lut_r[i] = red[i] >> 8; psb_intel_crtc->lut_g[i] = green[i] >> 8; psb_intel_crtc->lut_b[i] = blue[i] >> 8; } psb_intel_crtc_load_lut(crtc); } static int psb_crtc_set_config(struct drm_mode_set *set) { int ret; struct drm_device *dev = set->crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; if (!dev_priv->rpm_enabled) return drm_crtc_helper_set_config(set); pm_runtime_forbid(&dev->pdev->dev); ret = drm_crtc_helper_set_config(set); pm_runtime_allow(&dev->pdev->dev); return ret; } /* Returns the clock of the currently programmed mode of the given pipe. */ static int psb_intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) { struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int pipe = psb_intel_crtc->pipe; u32 dpll; u32 fp; struct psb_intel_clock_t clock; bool is_lvds; struct drm_psb_private *dev_priv = dev->dev_private; if (gma_power_begin(dev, false)) { dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B); if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) fp = REG_READ((pipe == 0) ? FPA0 : FPB0); else fp = REG_READ((pipe == 0) ? FPA1 : FPB1); is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN); gma_power_end(dev); } else { dpll = (pipe == 0) ? dev_priv->saveDPLL_A : dev_priv->saveDPLL_B; if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) fp = (pipe == 0) ? dev_priv->saveFPA0 : dev_priv->saveFPB0; else fp = (pipe == 0) ? 
dev_priv->saveFPA1 : dev_priv->saveFPB1; is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN); } clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; if (is_lvds) { clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> DPLL_FPA01_P1_POST_DIV_SHIFT); clock.p2 = 14; if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) { /* XXX: might not be 66MHz */ i8xx_clock(66000, &clock); } else i8xx_clock(48000, &clock); } else { if (dpll & PLL_P1_DIVIDE_BY_TWO) clock.p1 = 2; else { clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; } if (dpll & PLL_P2_DIVIDE_BY_4) clock.p2 = 4; else clock.p2 = 2; i8xx_clock(48000, &clock); } /* XXX: It would be nice to validate the clocks, but we can't reuse * i830PllIsValid() because it relies on the xf86_config connector * configuration being accurate, which it isn't necessarily. */ return clock.dot; } /** Returns the currently programmed mode of the given pipe. */ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc) { struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int pipe = psb_intel_crtc->pipe; struct drm_display_mode *mode; int htot; int hsync; int vtot; int vsync; struct drm_psb_private *dev_priv = dev->dev_private; if (gma_power_begin(dev, false)) { htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B); hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B); vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B); vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B); gma_power_end(dev); } else { htot = (pipe == 0) ? dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B; hsync = (pipe == 0) ? dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B; vtot = (pipe == 0) ? dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B; vsync = (pipe == 0) ? 
dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B; } mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) return NULL; mode->clock = psb_intel_crtc_clock_get(dev, crtc); mode->hdisplay = (htot & 0xffff) + 1; mode->htotal = ((htot & 0xffff0000) >> 16) + 1; mode->hsync_start = (hsync & 0xffff) + 1; mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; mode->vdisplay = (vtot & 0xffff) + 1; mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; mode->vsync_start = (vsync & 0xffff) + 1; mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; drm_mode_set_name(mode); drm_mode_set_crtcinfo(mode, 0); return mode; } static void psb_intel_crtc_destroy(struct drm_crtc *crtc) { struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); struct gtt_range *gt; /* Unpin the old GEM object */ if (psb_intel_crtc->cursor_obj) { gt = container_of(psb_intel_crtc->cursor_obj, struct gtt_range, gem); psb_gtt_unpin(gt); drm_gem_object_unreference(psb_intel_crtc->cursor_obj); psb_intel_crtc->cursor_obj = NULL; } kfree(psb_intel_crtc->crtc_state); drm_crtc_cleanup(crtc); kfree(psb_intel_crtc); } static const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { .dpms = psb_intel_crtc_dpms, .mode_fixup = psb_intel_crtc_mode_fixup, .mode_set = psb_intel_crtc_mode_set, .mode_set_base = psb_intel_pipe_set_base, .prepare = psb_intel_crtc_prepare, .commit = psb_intel_crtc_commit, }; const struct drm_crtc_funcs psb_intel_crtc_funcs = { .save = psb_intel_crtc_save, .restore = psb_intel_crtc_restore, .cursor_set = psb_intel_crtc_cursor_set, .cursor_move = psb_intel_crtc_cursor_move, .gamma_set = psb_intel_crtc_gamma_set, .set_config = psb_crtc_set_config, .destroy = psb_intel_crtc_destroy, }; void psb_intel_crtc_init(struct drm_device *dev, int pipe, struct psb_intel_mode_device *mode_dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_crtc *psb_intel_crtc; int i; uint16_t *r_base, *g_base, *b_base; PSB_DEBUG_ENTRY("\n"); /* We allocate a extra array of drm_connector pointers * for 
fbdev after the crtc */ psb_intel_crtc = kzalloc(sizeof(struct psb_intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); if (psb_intel_crtc == NULL) return; psb_intel_crtc->crtc_state = kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL); if (!psb_intel_crtc->crtc_state) { DRM_INFO("Crtc state error: No memory\n"); kfree(psb_intel_crtc); return; } drm_crtc_init(dev, &psb_intel_crtc->base, &psb_intel_crtc_funcs); drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256); psb_intel_crtc->pipe = pipe; psb_intel_crtc->plane = pipe; r_base = psb_intel_crtc->base.gamma_store; g_base = r_base + 256; b_base = g_base + 256; for (i = 0; i < 256; i++) { psb_intel_crtc->lut_r[i] = i; psb_intel_crtc->lut_g[i] = i; psb_intel_crtc->lut_b[i] = i; r_base[i] = i << 8; g_base[i] = i << 8; b_base[i] = i << 8; psb_intel_crtc->lut_adj[i] = 0; } psb_intel_crtc->mode_dev = mode_dev; psb_intel_crtc->cursor_addr = 0; if (IS_MRST(dev)) drm_crtc_helper_add(&psb_intel_crtc->base, &mrst_helper_funcs); else drm_crtc_helper_add(&psb_intel_crtc->base, &psb_intel_helper_funcs); /* Setup the array of drm_connector pointer array */ psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base; BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL); dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] = &psb_intel_crtc->base; dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] = &psb_intel_crtc->base; psb_intel_crtc->mode_set.connectors = (struct drm_connector **) (psb_intel_crtc + 1); psb_intel_crtc->mode_set.num_connectors = 0; } int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_psb_private *dev_priv = dev->dev_private; struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data; struct drm_mode_object *drmmode_obj; struct psb_intel_crtc *crtc; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } 
drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, DRM_MODE_OBJECT_CRTC); if (!drmmode_obj) { DRM_ERROR("no such CRTC id\n"); return -EINVAL; } crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj)); pipe_from_crtc_id->pipe = crtc->pipe; return 0; } struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) { struct drm_crtc *crtc = NULL; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); if (psb_intel_crtc->pipe == pipe) break; } return crtc; } int psb_intel_connector_clones(struct drm_device *dev, int type_mask) { int index_mask = 0; struct drm_connector *connector; int entry = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); if (type_mask & (1 << psb_intel_output->type)) index_mask |= (1 << entry); entry++; } return index_mask; } void psb_intel_modeset_cleanup(struct drm_device *dev) { drm_mode_config_cleanup(dev); } /* current intel driver doesn't take advantage of encoders always give back the encoder for the connector */ struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector) { struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); return &psb_intel_output->enc; }
gpl-2.0
javelinanddart/kernel_samsung_msm8660
arch/mips/cavium-octeon/executive/octeon-model.c
2896
7952
/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * File defining functions for working with different Octeon
 * models.
 */
#include <asm/octeon/octeon.h>

/**
 * Given the chip processor ID from COP0, this function returns a
 * string representing the chip model number. The string is of the
 * form CNXXXXpX.X-FREQ-SUFFIX.
 * - XXXX = The chip model number
 * - X.X = Chip pass number
 * - FREQ = Current frequency in Mhz
 * - SUFFIX = NSP, EXP, SCP, SSP, or CP
 *
 * NOTE: the returned pointer refers to a single static buffer, so this
 * function is not reentrant and the result is overwritten by the next
 * call.  Use octeon_model_get_string_buffer() for a caller-owned buffer.
 *
 * @chip_id: Chip ID
 *
 * Returns Model string
 */
const char *octeon_model_get_string(uint32_t chip_id)
{
	static char buffer[32];
	return octeon_model_get_string_buffer(chip_id, buffer);
}

/*
 * Version of octeon_model_get_string() that takes buffer as argument,
 * as running early in u-boot static/global variables don't work when
 * running from flash.
*/ const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer) { const char *family; const char *core_model; char pass[4]; int clock_mhz; const char *suffix; union cvmx_l2d_fus3 fus3; int num_cores; union cvmx_mio_fus_dat2 fus_dat2; union cvmx_mio_fus_dat3 fus_dat3; char fuse_model[10]; uint32_t fuse_data = 0; fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3); fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2); fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3); num_cores = cvmx_octeon_num_cores(); /* Make sure the non existent devices look disabled */ switch ((chip_id >> 8) & 0xff) { case 6: /* CN50XX */ case 2: /* CN30XX */ fus_dat3.s.nodfa_dte = 1; fus_dat3.s.nozip = 1; break; case 4: /* CN57XX or CN56XX */ fus_dat3.s.nodfa_dte = 1; break; default: break; } /* Make a guess at the suffix */ /* NSP = everything */ /* EXP = No crypto */ /* SCP = No DFA, No zip */ /* CP = No DFA, No crypto, No zip */ if (fus_dat3.s.nodfa_dte) { if (fus_dat2.s.nocrypto) suffix = "CP"; else suffix = "SCP"; } else if (fus_dat2.s.nocrypto) suffix = "EXP"; else suffix = "NSP"; /* * Assume pass number is encoded using <5:3><2:0>. Exceptions * will be fixed later. */ sprintf(pass, "%u.%u", ((chip_id >> 3) & 7) + 1, chip_id & 7); /* * Use the number of cores to determine the last 2 digits of * the model number. There are some exceptions that are fixed * later. 
*/ switch (num_cores) { case 16: core_model = "60"; break; case 15: core_model = "58"; break; case 14: core_model = "55"; break; case 13: core_model = "52"; break; case 12: core_model = "50"; break; case 11: core_model = "48"; break; case 10: core_model = "45"; break; case 9: core_model = "42"; break; case 8: core_model = "40"; break; case 7: core_model = "38"; break; case 6: core_model = "34"; break; case 5: core_model = "32"; break; case 4: core_model = "30"; break; case 3: core_model = "25"; break; case 2: core_model = "20"; break; case 1: core_model = "10"; break; default: core_model = "XX"; break; } /* Now figure out the family, the first two digits */ switch ((chip_id >> 8) & 0xff) { case 0: /* CN38XX, CN37XX or CN36XX */ if (fus3.cn38xx.crip_512k) { /* * For some unknown reason, the 16 core one is * called 37 instead of 36. */ if (num_cores >= 16) family = "37"; else family = "36"; } else family = "38"; /* * This series of chips didn't follow the standard * pass numbering. */ switch (chip_id & 0xf) { case 0: strcpy(pass, "1.X"); break; case 1: strcpy(pass, "2.X"); break; case 3: strcpy(pass, "3.X"); break; default: strcpy(pass, "X.X"); break; } break; case 1: /* CN31XX or CN3020 */ if ((chip_id & 0x10) || fus3.cn31xx.crip_128k) family = "30"; else family = "31"; /* * This series of chips didn't follow the standard * pass numbering. */ switch (chip_id & 0xf) { case 0: strcpy(pass, "1.0"); break; case 2: strcpy(pass, "1.1"); break; default: strcpy(pass, "X.X"); break; } break; case 2: /* CN3010 or CN3005 */ family = "30"; /* A chip with half cache is an 05 */ if (fus3.cn30xx.crip_64k) core_model = "05"; /* * This series of chips didn't follow the standard * pass numbering. */ switch (chip_id & 0xf) { case 0: strcpy(pass, "1.0"); break; case 2: strcpy(pass, "1.1"); break; default: strcpy(pass, "X.X"); break; } break; case 3: /* CN58XX */ family = "58"; /* Special case. 
4 core, no crypto */ if ((num_cores == 4) && fus_dat2.cn38xx.nocrypto) core_model = "29"; /* Pass 1 uses different encodings for pass numbers */ if ((chip_id & 0xFF) < 0x8) { switch (chip_id & 0x3) { case 0: strcpy(pass, "1.0"); break; case 1: strcpy(pass, "1.1"); break; case 3: strcpy(pass, "1.2"); break; default: strcpy(pass, "1.X"); break; } } break; case 4: /* CN57XX, CN56XX, CN55XX, CN54XX */ if (fus_dat2.cn56xx.raid_en) { if (fus3.cn56xx.crip_1024k) family = "55"; else family = "57"; if (fus_dat2.cn56xx.nocrypto) suffix = "SP"; else suffix = "SSP"; } else { if (fus_dat2.cn56xx.nocrypto) suffix = "CP"; else { suffix = "NSP"; if (fus_dat3.s.nozip) suffix = "SCP"; } if (fus3.cn56xx.crip_1024k) family = "54"; else family = "56"; } break; case 6: /* CN50XX */ family = "50"; break; case 7: /* CN52XX */ if (fus3.cn52xx.crip_256k) family = "51"; else family = "52"; break; default: family = "XX"; core_model = "XX"; strcpy(pass, "X.X"); suffix = "XXX"; break; } clock_mhz = octeon_get_clock_rate() / 1000000; if (family[0] != '3') { /* Check for model in fuses, overrides normal decode */ /* This is _not_ valid for Octeon CN3XXX models */ fuse_data |= cvmx_fuse_read_byte(51); fuse_data = fuse_data << 8; fuse_data |= cvmx_fuse_read_byte(50); fuse_data = fuse_data << 8; fuse_data |= cvmx_fuse_read_byte(49); fuse_data = fuse_data << 8; fuse_data |= cvmx_fuse_read_byte(48); if (fuse_data & 0x7ffff) { int model = fuse_data & 0x3fff; int suffix = (fuse_data >> 14) & 0x1f; if (suffix && model) { /* * Have both number and suffix in * fuses, so both */ sprintf(fuse_model, "%d%c", model, 'A' + suffix - 1); core_model = ""; family = fuse_model; } else if (suffix && !model) { /* * Only have suffix, so add suffix to * 'normal' model number. */ sprintf(fuse_model, "%s%c", core_model, 'A' + suffix - 1); core_model = fuse_model; } else { /* * Don't have suffix, so just use * model from fuses. 
*/ sprintf(fuse_model, "%d", model); core_model = ""; family = fuse_model; } } } sprintf(buffer, "CN%s%sp%s-%d-%s", family, core_model, pass, clock_mhz, suffix); return buffer; }
gpl-2.0
TEAM-RAZOR-DEVICES/kernel_oneplus_msm8994
drivers/clk/tegra/clk-audio-sync.c
4176
2309
/*
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/err.h>

#include "clk.h"

/*
 * A "sync source" is a root clock whose rate is not measured from
 * hardware but simply remembered: set_rate() stores the value and
 * recalc_rate() reports it back.
 */

/* Report the rate last stored by clk_sync_source_set_rate(). */
static unsigned long clk_sync_source_recalc_rate(struct clk_hw *hw,
						 unsigned long parent_rate)
{
	struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);

	return sync->rate;
}

/* Accept any rate up to max_rate; reject anything above it. */
static long clk_sync_source_round_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long *prate)
{
	struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);

	if (rate > sync->max_rate)
		return -EINVAL;
	else
		return rate;
}

/* Remember the requested rate; no hardware is touched. */
static int clk_sync_source_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);

	sync->rate = rate;
	return 0;
}

const struct clk_ops tegra_clk_sync_source_ops = {
	.round_rate = clk_sync_source_round_rate,
	.set_rate = clk_sync_source_set_rate,
	.recalc_rate = clk_sync_source_recalc_rate,
};

/**
 * tegra_clk_register_sync_source - register a software-tracked root clock.
 * @name:     clock name
 * @rate:     initial rate to report
 * @max_rate: highest rate accepted by round_rate/set_rate
 *
 * Returns the registered struct clk, or an ERR_PTR on failure.  The
 * allocation is freed again if clk_register() fails.
 */
struct clk *tegra_clk_register_sync_source(const char *name,
					   unsigned long rate,
					   unsigned long max_rate)
{
	struct tegra_clk_sync_source *sync;
	struct clk_init_data init;
	struct clk *clk;

	sync = kzalloc(sizeof(*sync), GFP_KERNEL);
	if (!sync)
		/* no error message: the allocator already logs OOM */
		return ERR_PTR(-ENOMEM);

	sync->rate = rate;
	sync->max_rate = max_rate;

	init.ops = &tegra_clk_sync_source_ops;
	init.name = name;
	init.flags = CLK_IS_ROOT;
	init.parent_names = NULL;
	init.num_parents = 0;

	/* Data in .init is copied by clk_register(), so stack variable OK */
	sync->hw.init = &init;

	clk = clk_register(NULL, &sync->hw);
	if (IS_ERR(clk))
		kfree(sync);

	return clk;
}
gpl-2.0
gearslam/JB_LS970ZVC_Viper
sound/soc/codecs/lm4857.c
4944
6396
/*
 * LM4857 AMP driver
 *
 * Copyright 2007 Wolfson Microelectronics PLC.
 * Author: Graeme Gregory
 *         graeme.gregory@wolfsonmicro.com
 * Copyright 2011 Lars-Peter Clausen <lars@metafoo.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/soc.h>
#include <sound/tlv.h>

/* Per-device driver state, allocated at I2C probe time. */
struct lm4857 {
	struct i2c_client *i2c;
	uint8_t mode;	/* currently selected output mode (index into lm4857_mode[]) */
};

/* Power-on defaults for the four chip registers (register cache seed). */
static const uint8_t lm4857_default_regs[] = {
	0x00, 0x00, 0x00, 0x00,
};

/* The register offsets in the cache array */
#define LM4857_MVOL 0
#define LM4857_LVOL 1
#define LM4857_RVOL 2
#define LM4857_CTRL 3

/* the shifts required to set these bits */
#define LM4857_3D 5
#define LM4857_WAKEUP 5
#define LM4857_EPGAIN 4

/*
 * Write a register: update the cache first, then send one byte on the
 * wire.  The chip takes the 2-bit register address in the top two bits
 * of the data byte and the 6-bit value in the rest.
 */
static int lm4857_write(struct snd_soc_codec *codec, unsigned int reg,
		unsigned int value)
{
	uint8_t data;
	int ret;

	ret = snd_soc_cache_write(codec, reg, value);
	if (ret < 0)
		return ret;

	data = (reg << 6) | value;
	ret = i2c_master_send(codec->control_data, &data, 1);
	if (ret != 1) {
		dev_err(codec->dev, "Failed to write register: %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * Read a register from the cache only — the LM4857 is write-only over
 * I2C.  Returns (unsigned)-1 if the cache lookup fails.
 */
static unsigned int lm4857_read(struct snd_soc_codec *codec,
		unsigned int reg)
{
	unsigned int val;
	int ret;

	ret = snd_soc_cache_read(codec, reg, &val);
	if (ret)
		return -1;

	return val;
}

/* ALSA control get: report the cached output mode. */
static int lm4857_get_mode(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct lm4857 *lm4857 = snd_soc_codec_get_drvdata(codec);

	ucontrol->value.integer.value[0] = lm4857->mode;

	return 0;
}

/*
 * ALSA control put: remember the new mode and, only if the codec is
 * fully powered (BIAS_ON), program it into the CTRL register.  The
 * "+ 6" maps the enum index onto the chip's mode field encoding.
 * Returns 1 to signal the control value changed.
 */
static int lm4857_set_mode(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct lm4857 *lm4857 = snd_soc_codec_get_drvdata(codec);
	uint8_t value = ucontrol->value.integer.value[0];

	lm4857->mode = value;

	if (codec->dapm.bias_level == SND_SOC_BIAS_ON)
		snd_soc_update_bits(codec, LM4857_CTRL, 0x0F, value + 6);

	return 1;
}

/*
 * Bias-level transitions: BIAS_ON restores the selected mode,
 * BIAS_STANDBY clears the mode field (amp off); other levels are
 * no-ops for this chip.
 */
static int lm4857_set_bias_level(struct snd_soc_codec *codec,
	enum snd_soc_bias_level level)
{
	struct lm4857 *lm4857 = snd_soc_codec_get_drvdata(codec);

	switch (level) {
	case SND_SOC_BIAS_ON:
		snd_soc_update_bits(codec, LM4857_CTRL, 0x0F,
			lm4857->mode + 6);
		break;
	case SND_SOC_BIAS_STANDBY:
		snd_soc_update_bits(codec, LM4857_CTRL, 0x0F, 0);
		break;
	default:
		break;
	}

	codec->dapm.bias_level = level;

	return 0;
}

static const char *lm4857_mode[] = {
	"Earpiece",
	"Loudspeaker",
	"Loudspeaker + Headphone",
	"Headphone",
};

static const struct soc_enum lm4857_mode_enum =
	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lm4857_mode), lm4857_mode);

static const struct snd_soc_dapm_widget lm4857_dapm_widgets[] = {
	SND_SOC_DAPM_INPUT("IN"),

	SND_SOC_DAPM_OUTPUT("LS"),
	SND_SOC_DAPM_OUTPUT("HP"),
	SND_SOC_DAPM_OUTPUT("EP"),
};

/* Volume scales: -40.5dB..+6dB (stereo) and -34.5dB..+12dB (mono), 1.5dB steps. */
static const DECLARE_TLV_DB_SCALE(stereo_tlv, -4050, 150, 0);
static const DECLARE_TLV_DB_SCALE(mono_tlv, -3450, 150, 0);

static const struct snd_kcontrol_new lm4857_controls[] = {
	SOC_SINGLE_TLV("Left Playback Volume", LM4857_LVOL, 0, 31, 0,
		stereo_tlv),
	SOC_SINGLE_TLV("Right Playback Volume", LM4857_RVOL, 0, 31, 0,
		stereo_tlv),
	SOC_SINGLE_TLV("Mono Playback Volume", LM4857_MVOL, 0, 31, 0,
		mono_tlv),
	SOC_SINGLE("Spk 3D Playback Switch", LM4857_LVOL, LM4857_3D, 1, 0),
	SOC_SINGLE("HP 3D Playback Switch", LM4857_RVOL, LM4857_3D, 1, 0),
	SOC_SINGLE("Fast Wakeup Playback Switch", LM4857_CTRL,
		LM4857_WAKEUP, 1, 0),
	SOC_SINGLE("Earpiece 6dB Playback Switch", LM4857_CTRL,
		LM4857_EPGAIN, 1, 0),
	SOC_ENUM_EXT("Mode", lm4857_mode_enum,
		lm4857_get_mode, lm4857_set_mode),
};

/* There is a demux between the input signal and the output signals.
 * Currently there is no easy way to model it in ASoC and since it does not make
 * much of a difference in practice simply connect the input direclty to the
 * outputs. */
static const struct snd_soc_dapm_route lm4857_routes[] = {
	{"LS", NULL, "IN"},
	{"HP", NULL, "IN"},
	{"EP", NULL, "IN"},
};

/* Codec probe: wire up controls, widgets and routes for this instance. */
static int lm4857_probe(struct snd_soc_codec *codec)
{
	struct lm4857 *lm4857 = snd_soc_codec_get_drvdata(codec);
	struct snd_soc_dapm_context *dapm = &codec->dapm;
	int ret;

	codec->control_data = lm4857->i2c;

	ret = snd_soc_add_codec_controls(codec, lm4857_controls,
			ARRAY_SIZE(lm4857_controls));
	if (ret)
		return ret;

	ret = snd_soc_dapm_new_controls(dapm, lm4857_dapm_widgets,
			ARRAY_SIZE(lm4857_dapm_widgets));
	if (ret)
		return ret;

	ret = snd_soc_dapm_add_routes(dapm, lm4857_routes,
			ARRAY_SIZE(lm4857_routes));
	if (ret)
		return ret;

	snd_soc_dapm_new_widgets(dapm);

	return 0;
}

static struct snd_soc_codec_driver soc_codec_dev_lm4857 = {
	.write = lm4857_write,
	.read = lm4857_read,
	.probe = lm4857_probe,
	.reg_cache_size = ARRAY_SIZE(lm4857_default_regs),
	.reg_word_size = sizeof(uint8_t),
	.reg_cache_default = lm4857_default_regs,
	.set_bias_level = lm4857_set_bias_level,
};

/* I2C probe: allocate driver state (devm — freed automatically) and register the codec. */
static int __devinit lm4857_i2c_probe(struct i2c_client *i2c,
	const struct i2c_device_id *id)
{
	struct lm4857 *lm4857;
	int ret;

	lm4857 = devm_kzalloc(&i2c->dev, sizeof(*lm4857), GFP_KERNEL);
	if (!lm4857)
		return -ENOMEM;

	i2c_set_clientdata(i2c, lm4857);

	lm4857->i2c = i2c;

	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_lm4857, NULL, 0);

	return ret;
}

static int __devexit lm4857_i2c_remove(struct i2c_client *i2c)
{
	snd_soc_unregister_codec(&i2c->dev);
	return 0;
}

static const struct i2c_device_id lm4857_i2c_id[] = {
	{ "lm4857", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, lm4857_i2c_id);

static struct i2c_driver lm4857_i2c_driver = {
	.driver = {
		.name = "lm4857",
		.owner = THIS_MODULE,
	},
	.probe = lm4857_i2c_probe,
	.remove = __devexit_p(lm4857_i2c_remove),
	.id_table = lm4857_i2c_id,
};

static int __init lm4857_init(void)
{
	return i2c_add_driver(&lm4857_i2c_driver);
}
module_init(lm4857_init);

static void __exit lm4857_exit(void)
{
	i2c_del_driver(&lm4857_i2c_driver);
}
module_exit(lm4857_exit);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("LM4857 amplifier driver");
MODULE_LICENSE("GPL");
gpl-2.0
invisiblek/caf_kernel_msm
arch/mips/mipssim/sim_setup.c
5200
2178
/*
 * Copyright (C) 2005 MIPS Technologies, Inc.  All rights reserved.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 */
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/ioport.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/prom.h>
#include <asm/time.h>
#include <asm/mips-boards/sim.h>
#include <asm/mips-boards/simint.h>
#include <asm/smp-ops.h>

static void __init serial_init(void);

unsigned int _isbonito;

const char *get_system_type(void)
{
	return "MIPSsim";
}

/* Platform memory/IO setup: fix the I/O port base and bring up the UART. */
void __init plat_mem_setup(void)
{
	set_io_port_base(0xbfd00000);
	serial_init();
}

extern struct plat_smp_ops ssmtc_smp_ops;

/*
 * Early PROM init: set the I/O base (also needed this early), record
 * memory from the simulator, then pick SMP ops — prefer VSMP on an
 * MT-capable CPU, fall back to SMTC if configured, else uniprocessor.
 */
void __init prom_init(void)
{
	set_io_port_base(0xbfd00000);

	prom_meminit();

	if (cpu_has_mipsmt) {
		if (!register_vsmp_smp_ops())
			return;

#ifdef CONFIG_MIPS_MT_SMTC
		register_smp_ops(&ssmtc_smp_ops);
		return;
#endif
	}
	register_up_smp_ops();
}

/* Register the simulator's 8250-compatible UART with the serial core. */
static void __init serial_init(void)
{
#ifdef CONFIG_SERIAL_8250
	struct uart_port s;

	memset(&s, 0, sizeof(s));

	s.iobase = 0x3f8;

	/* hardware int 4 - the serial int, is CPU int 6
	 but poll for now */
	s.irq =  0;
	s.uartclk = 1843200;
	s.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST;
	s.iotype = UPIO_PORT;
	s.regshift = 0;
	s.timeout = 4;

	if (early_serial_setup(&s) != 0) {
		printk(KERN_ERR "Serial setup failed!\n");
	}
#endif
}
gpl-2.0
justindriggers/android_kernel_glass_glass-1
arch/powerpc/platforms/ps3/htab.c
7760
5397
/*
 * PS3 pagetable management routines.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006, 2007 Sony Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/memblock.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/ps3fb.h>

#include "platform.h"

/**
 * enum lpar_vas_id - id of LPAR virtual address space.
 * @lpar_vas_id_current: Current selected virtual address space
 *
 * Identify the target LPAR address space.
 */

enum ps3_lpar_vas_id {
	PS3_LPAR_VAS_ID_CURRENT = 0,
};

/* Serializes all hypervisor HPTE operations below. */
static DEFINE_SPINLOCK(ps3_htab_lock);

/*
 * Insert an HPTE via the lv1 hypervisor.  The hypervisor picks a victim
 * among non-bolted entries in either the primary or secondary PTE group,
 * so HPTE_V_SECONDARY is cleared before the call.  The return value
 * encodes the slot within the group, with bit 3 set if the entry landed
 * in the secondary group (determined by reading the entry back).
 */
static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
	unsigned long pa, unsigned long rflags, unsigned long vflags,
	int psize, int ssize)
{
	int result;
	u64 hpte_v, hpte_r;
	u64 inserted_index;
	u64 evicted_v, evicted_r;
	u64 hpte_v_array[4], hpte_rs;
	unsigned long flags;
	long ret = -1;

	/*
	 * lv1_insert_htab_entry() will search for victim
	 * entry in both primary and secondary pte group
	 */
	vflags &= ~HPTE_V_SECONDARY;

	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;

	spin_lock_irqsave(&ps3_htab_lock, flags);

	/* talk hvc to replace entries BOLTED == 0 */
	result = lv1_insert_htab_entry(PS3_LPAR_VAS_ID_CURRENT, hpte_group,
				       hpte_v, hpte_r,
				       HPTE_V_BOLTED, 0,
				       &inserted_index,
				       &evicted_v, &evicted_r);

	if (result) {
		/* all entries bolted !*/
		pr_info("%s:result=%d va=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
			__func__, result, va, pa, hpte_group, hpte_v, hpte_r);
		BUG();
	}

	/*
	 * see if the entry is inserted into secondary pteg
	 */
	result = lv1_read_htab_entries(PS3_LPAR_VAS_ID_CURRENT,
				       inserted_index & ~0x3UL,
				       &hpte_v_array[0], &hpte_v_array[1],
				       &hpte_v_array[2], &hpte_v_array[3],
				       &hpte_rs);
	BUG_ON(result);

	if (hpte_v_array[inserted_index % 4] & HPTE_V_SECONDARY)
		ret = (inserted_index & 7) | (1 << 3);
	else
		ret = inserted_index & 7;

	spin_unlock_irqrestore(&ps3_htab_lock, flags);

	return ret;
}

/* Not needed on PS3: the hypervisor evicts entries itself on insert. */
static long ps3_hpte_remove(unsigned long hpte_group)
{
	panic("ps3_hpte_remove() not implemented");
	return 0;
}

/*
 * "Update" protection bits on an HPTE.  Because the hypervisor read
 * call does not return the RPN, the entry cannot be rewritten in
 * place; a matching valid entry is invalidated instead and -1 is
 * returned so the caller re-inserts it via ps3_hpte_insert().
 */
static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
	unsigned long va, int psize, int ssize, int local)
{
	int result;
	u64 hpte_v, want_v, hpte_rs;
	u64 hpte_v_array[4];
	unsigned long flags;
	long ret;

	want_v = hpte_encode_v(va, psize, ssize);

	spin_lock_irqsave(&ps3_htab_lock, flags);

	result = lv1_read_htab_entries(PS3_LPAR_VAS_ID_CURRENT, slot & ~0x3UL,
				       &hpte_v_array[0], &hpte_v_array[1],
				       &hpte_v_array[2], &hpte_v_array[3],
				       &hpte_rs);

	if (result) {
		pr_info("%s: res=%d read va=%lx slot=%lx psize=%d\n",
			__func__, result, va, slot, psize);
		BUG();
	}

	hpte_v = hpte_v_array[slot % 4];

	/*
	 * As lv1_read_htab_entries() does not give us the RPN, we can
	 * not synthesize the new hpte_r value here, and therefore can
	 * not update the hpte with lv1_insert_htab_entry(), so we
	 * instead invalidate it and ask the caller to update it via
	 * ps3_hpte_insert() by returning a -1 value.
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		/* not found */
		ret = -1;
	} else {
		/* entry found, just invalidate it */
		result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT,
					      slot, 0, 0);
		ret = -1;
	}

	spin_unlock_irqrestore(&ps3_htab_lock, flags);
	return ret;
}

static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
	int psize, int ssize)
{
	panic("ps3_hpte_updateboltedpp() not implemented");
}

/* Invalidate one HPTE by writing zeros to its slot. */
static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
	int psize, int ssize, int local)
{
	unsigned long flags;
	int result;

	spin_lock_irqsave(&ps3_htab_lock, flags);

	result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0);

	if (result) {
		pr_info("%s: res=%d va=%lx slot=%lx psize=%d\n",
			__func__, result, va, slot, psize);
		BUG();
	}

	spin_unlock_irqrestore(&ps3_htab_lock, flags);
}

/*
 * Wipe the whole hash table (ppc64_pft_size log2 bytes, 16 bytes per
 * entry), then tear down the PS3 memory-management state.  Used at
 * shutdown/kexec; no locking needed at that point.
 */
static void ps3_hpte_clear(void)
{
	unsigned long hpte_count = (1UL << ppc64_pft_size) >> 4;
	u64 i;

	for (i = 0; i < hpte_count; i++)
		lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, i, 0, 0);

	ps3_mm_shutdown();
	ps3_mm_vas_destroy();
}

/* Install the PS3 HPTE callbacks into the machine descriptor. */
void __init ps3_hpte_init(unsigned long htab_size)
{
	ppc_md.hpte_invalidate = ps3_hpte_invalidate;
	ppc_md.hpte_updatepp = ps3_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = ps3_hpte_updateboltedpp;
	ppc_md.hpte_insert = ps3_hpte_insert;
	ppc_md.hpte_remove = ps3_hpte_remove;
	ppc_md.hpte_clear_all = ps3_hpte_clear;
	ppc64_pft_size = __ilog2(htab_size);
}
gpl-2.0
ardX/android_kernel_k-touch_msm8x25q
net/atm/resources.c
8272
10119
/* net/atm/resources.c - Statically allocated resources */

/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */

/* Fixes
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 * 2002/01 - don't free the whole struct sock on sk->destruct time,
 * 	     use the default destruct function initialized by sock_init_data */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/atmdev.h>
#include <linux/sonet.h>
#include <linux/kernel.h> /* for barrier */
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <net/sock.h>	 /* for struct sock */

#include "common.h"
#include "resources.h"
#include "addr.h"

/* Global list of registered ATM devices, protected by atm_dev_mutex. */
LIST_HEAD(atm_devs);
DEFINE_MUTEX(atm_dev_mutex);

/* Allocate and minimally initialize a new atm_dev (no number assigned yet). */
static struct atm_dev *__alloc_atm_dev(const char *type)
{
	struct atm_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	dev->type = type;
	dev->signal = ATM_PHY_SIG_UNKNOWN;
	dev->link_rate = ATM_OC3_PCR;
	spin_lock_init(&dev->lock);
	INIT_LIST_HEAD(&dev->local);
	INIT_LIST_HEAD(&dev->lecs);

	return dev;
}

/*
 * Find a device by number and take a reference.  Caller must hold
 * atm_dev_mutex; the returned device must be released with atm_dev_put().
 */
static struct atm_dev *__atm_dev_lookup(int number)
{
	struct atm_dev *dev;
	struct list_head *p;

	list_for_each(p, &atm_devs) {
		dev = list_entry(p, struct atm_dev, dev_list);
		if (dev->number == number) {
			atm_dev_hold(dev);
			return dev;
		}
	}
	return NULL;
}

/* Locked wrapper around __atm_dev_lookup(). */
struct atm_dev *atm_dev_lookup(int number)
{
	struct atm_dev *dev;

	mutex_lock(&atm_dev_mutex);
	dev = __atm_dev_lookup(number);
	mutex_unlock(&atm_dev_mutex);
	return dev;
}
EXPORT_SYMBOL(atm_dev_lookup);

/*
 * Register a new ATM device.  @number is either an explicit device
 * number (fails if already taken) or -1 to auto-assign the lowest free
 * one.  Returns the new device with one reference held, or NULL.
 */
struct atm_dev *atm_dev_register(const char *type, struct device *parent,
				 const struct atmdev_ops *ops, int number,
				 unsigned long *flags)
{
	struct atm_dev *dev, *inuse;

	dev = __alloc_atm_dev(type);
	if (!dev) {
		pr_err("no space for dev %s\n", type);
		return NULL;
	}
	mutex_lock(&atm_dev_mutex);
	if (number != -1) {
		inuse = __atm_dev_lookup(number);
		if (inuse) {
			atm_dev_put(inuse);
			mutex_unlock(&atm_dev_mutex);
			kfree(dev);
			return NULL;
		}
		dev->number = number;
	} else {
		dev->number = 0;
		while ((inuse = __atm_dev_lookup(dev->number))) {
			atm_dev_put(inuse);
			dev->number++;
		}
	}

	dev->ops = ops;
	if (flags)
		dev->flags = *flags;
	else
		memset(&dev->flags, 0, sizeof(dev->flags));
	memset(&dev->stats, 0, sizeof(dev->stats));
	atomic_set(&dev->refcnt, 1);

	if (atm_proc_dev_register(dev) < 0) {
		pr_err("atm_proc_dev_register failed for dev %s\n", type);
		goto out_fail;
	}

	if (atm_register_sysfs(dev, parent) < 0) {
		pr_err("atm_register_sysfs failed for dev %s\n", type);
		atm_proc_dev_deregister(dev);
		goto out_fail;
	}

	list_add_tail(&dev->dev_list, &atm_devs);

out:
	mutex_unlock(&atm_dev_mutex);
	return dev;

out_fail:
	kfree(dev);
	dev = NULL;
	goto out;
}
EXPORT_SYMBOL(atm_dev_register);

/* Unregister a device: unlink, release its VCCs, tear down proc/sysfs. */
void atm_dev_deregister(struct atm_dev *dev)
{
	BUG_ON(test_bit(ATM_DF_REMOVED, &dev->flags));
	set_bit(ATM_DF_REMOVED, &dev->flags);

	/*
	 * if we remove current device from atm_devs list, new device
	 * with same number can appear, such we need deregister proc,
	 * release async all vccs and remove them from vccs list too
	 */
	mutex_lock(&atm_dev_mutex);
	list_del(&dev->dev_list);
	mutex_unlock(&atm_dev_mutex);

	atm_dev_release_vccs(dev);
	atm_unregister_sysfs(dev);
	atm_proc_dev_deregister(dev);

	atm_dev_put(dev);
}
EXPORT_SYMBOL(atm_dev_deregister);

/* Snapshot atomic per-AAL counters into the user-visible struct. */
static void copy_aal_stats(struct k_atm_aal_stats *from,
    struct atm_aal_stats *to)
{
#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
	__AAL_STAT_ITEMS
#undef __HANDLE_ITEM
}

/* Subtract a previously-copied snapshot (used to "zero" live counters). */
static void subtract_aal_stats(struct k_atm_aal_stats *from,
    struct atm_aal_stats *to)
{
#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
	__AAL_STAT_ITEMS
#undef __HANDLE_ITEM
}

/*
 * Copy device statistics to userspace; if @zero is set (ATM_GETSTATZ)
 * also subtract the reported values so the counters restart from zero.
 */
static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg,
		       int zero)
{
	struct atm_dev_stats tmp;
	int error = 0;

	copy_aal_stats(&dev->stats.aal0, &tmp.aal0);
	copy_aal_stats(&dev->stats.aal34, &tmp.aal34);
	copy_aal_stats(&dev->stats.aal5, &tmp.aal5);
	if (arg)
		error = copy_to_user(arg, &tmp, sizeof(tmp));
	if (zero && !error) {
		subtract_aal_stats(&dev->stats.aal0, &tmp.aal0);
		subtract_aal_stats(&dev->stats.aal34, &tmp.aal34);
		subtract_aal_stats(&dev->stats.aal5, &tmp.aal5);
	}
	return error ? -EFAULT : 0;
}

/*
 * Device-level ioctl dispatcher (native and 32-bit compat entry).
 * ATM_GETNAMES is handled first because it addresses all devices; every
 * other command resolves one device (auto-loading its module if needed),
 * holds a reference for the duration, and releases it at "done".
 */
int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
{
	void __user *buf;
	int error, len, number, size = 0;
	struct atm_dev *dev;
	struct list_head *p;
	int *tmp_buf, *tmp_p;
	int __user *sioc_len;
	int __user *iobuf_len;

#ifndef CONFIG_COMPAT
	compat = 0; /* Just so the compiler _knows_ */
#endif

	switch (cmd) {
	case ATM_GETNAMES:
		/* Return the list of device numbers into the user's buffer. */
		if (compat) {
#ifdef CONFIG_COMPAT
			struct compat_atm_iobuf __user *ciobuf = arg;
			compat_uptr_t cbuf;
			iobuf_len = &ciobuf->length;
			if (get_user(cbuf, &ciobuf->buffer))
				return -EFAULT;
			buf = compat_ptr(cbuf);
#endif
		} else {
			struct atm_iobuf __user *iobuf = arg;
			iobuf_len = &iobuf->length;
			if (get_user(buf, &iobuf->buffer))
				return -EFAULT;
		}
		if (get_user(len, iobuf_len))
			return -EFAULT;
		mutex_lock(&atm_dev_mutex);
		list_for_each(p, &atm_devs)
			size += sizeof(int);
		if (size > len) {
			mutex_unlock(&atm_dev_mutex);
			return -E2BIG;
		}
		tmp_buf = kmalloc(size, GFP_ATOMIC);
		if (!tmp_buf) {
			mutex_unlock(&atm_dev_mutex);
			return -ENOMEM;
		}
		tmp_p = tmp_buf;
		list_for_each(p, &atm_devs) {
			dev = list_entry(p, struct atm_dev, dev_list);
			*tmp_p++ = dev->number;
		}
		mutex_unlock(&atm_dev_mutex);
		error = ((copy_to_user(buf, tmp_buf, size)) ||
			 put_user(size, iobuf_len))
			? -EFAULT : 0;
		kfree(tmp_buf);
		return error;
	default:
		break;
	}

	/* All remaining commands carry an atmif_sioc (or compat variant). */
	if (compat) {
#ifdef CONFIG_COMPAT
		struct compat_atmif_sioc __user *csioc = arg;
		compat_uptr_t carg;

		sioc_len = &csioc->length;
		if (get_user(carg, &csioc->arg))
			return -EFAULT;
		buf = compat_ptr(carg);

		if (get_user(len, &csioc->length))
			return -EFAULT;
		if (get_user(number, &csioc->number))
			return -EFAULT;
#endif
	} else {
		struct atmif_sioc __user *sioc = arg;

		sioc_len = &sioc->length;
		if (get_user(buf, &sioc->arg))
			return -EFAULT;
		if (get_user(len, &sioc->length))
			return -EFAULT;
		if (get_user(number, &sioc->number))
			return -EFAULT;
	}

	/* May load the device's driver module on demand. */
	dev = try_then_request_module(atm_dev_lookup(number), "atm-device-%d",
				      number);
	if (!dev)
		return -ENODEV;

	switch (cmd) {
	case ATM_GETTYPE:
		size = strlen(dev->type) + 1;
		if (copy_to_user(buf, dev->type, size)) {
			error = -EFAULT;
			goto done;
		}
		break;
	case ATM_GETESI:
		size = ESI_LEN;
		if (copy_to_user(buf, dev->esi, size)) {
			error = -EFAULT;
			goto done;
		}
		break;
	case ATM_SETESI:
	{
		int i;

		/* ESI may only be set once; refuse if any byte is non-zero. */
		for (i = 0; i < ESI_LEN; i++)
			if (dev->esi[i]) {
				error = -EEXIST;
				goto done;
			}
	}
	/* fall through */
	case ATM_SETESIF:
	{
		unsigned char esi[ESI_LEN];

		if (!capable(CAP_NET_ADMIN)) {
			error = -EPERM;
			goto done;
		}
		if (copy_from_user(esi, buf, ESI_LEN)) {
			error = -EFAULT;
			goto done;
		}
		memcpy(dev->esi, esi, ESI_LEN);
		error = ESI_LEN;
		goto done;
	}
	case ATM_GETSTATZ:
		if (!capable(CAP_NET_ADMIN)) {
			error = -EPERM;
			goto done;
		}
		/* fall through */
	case ATM_GETSTAT:
		size = sizeof(struct atm_dev_stats);
		error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
		if (error)
			goto done;
		break;
	case ATM_GETCIRANGE:
		size = sizeof(struct atm_cirange);
		if (copy_to_user(buf, &dev->ci_range, size)) {
			error = -EFAULT;
			goto done;
		}
		break;
	case ATM_GETLINKRATE:
		size = sizeof(int);
		if (copy_to_user(buf, &dev->link_rate, size)) {
			error = -EFAULT;
			goto done;
		}
		break;
	case ATM_RSTADDR:
		if (!capable(CAP_NET_ADMIN)) {
			error = -EPERM;
			goto done;
		}
		atm_reset_addr(dev, ATM_ADDR_LOCAL);
		break;
	case ATM_ADDADDR:
	case ATM_DELADDR:
	case ATM_ADDLECSADDR:
	case ATM_DELLECSADDR:
	{
		struct sockaddr_atmsvc addr;

		if (!capable(CAP_NET_ADMIN)) {
			error = -EPERM;
			goto done;
		}

		if (copy_from_user(&addr, buf, sizeof(addr))) {
			error = -EFAULT;
			goto done;
		}
		if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR)
			error = atm_add_addr(dev, &addr,
					     (cmd == ATM_ADDADDR ?
					      ATM_ADDR_LOCAL : ATM_ADDR_LECS));
		else
			error = atm_del_addr(dev, &addr,
					     (cmd == ATM_DELADDR ?
					      ATM_ADDR_LOCAL : ATM_ADDR_LECS));
		goto done;
	}
	case ATM_GETADDR:
	case ATM_GETLECSADDR:
		error = atm_get_addr(dev, buf, len,
				     (cmd == ATM_GETADDR ?
				      ATM_ADDR_LOCAL : ATM_ADDR_LECS));
		if (error < 0)
			goto done;
		size = error;
		/* may return 0, but later on size == 0 means "don't
		   write the length" */
		error = put_user(size, sioc_len) ? -EFAULT : 0;
		goto done;
	case ATM_SETLOOP:
		/* reject local loopback ranges that exceed the remote range */
		if (__ATM_LM_XTRMT((int) (unsigned long) buf) &&
		    __ATM_LM_XTLOC((int) (unsigned long) buf) >
		    __ATM_LM_XTRMT((int) (unsigned long) buf)) {
			error = -EINVAL;
			goto done;
		}
		/* fall through */
	case ATM_SETCIRANGE:
	case SONET_GETSTATZ:
	case SONET_SETDIAG:
	case SONET_CLRDIAG:
	case SONET_SETFRAMING:
		if (!capable(CAP_NET_ADMIN)) {
			error = -EPERM;
			goto done;
		}
		/* fall through */
	default:
		/* Anything else is delegated to the device driver. */
		if (compat) {
#ifdef CONFIG_COMPAT
			if (!dev->ops->compat_ioctl) {
				error = -EINVAL;
				goto done;
			}
			size = dev->ops->compat_ioctl(dev, cmd, buf);
#endif
		} else {
			if (!dev->ops->ioctl) {
				error = -EINVAL;
				goto done;
			}
			size = dev->ops->ioctl(dev, cmd, buf);
		}
		if (size < 0) {
			error = (size == -ENOIOCTLCMD ? -EINVAL : size);
			goto done;
		}
	}

	if (size)
		error = put_user(size, sioc_len) ? -EFAULT : 0;
	else
		error = 0;
done:
	atm_dev_put(dev);
	return error;
}

/* seq_file iterator over atm_devs; the mutex is held from start to stop. */
void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
{
	mutex_lock(&atm_dev_mutex);
	return seq_list_start_head(&atm_devs, *pos);
}

void atm_dev_seq_stop(struct seq_file *seq, void *v)
{
	mutex_unlock(&atm_dev_mutex);
}

void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &atm_devs, pos);
}
gpl-2.0
penhoi/linux-3.13.11.lbrpmu
drivers/net/wireless/ath/ath9k/mci.c
81
21423
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/dma-mapping.h> #include <linux/slab.h> #include "ath9k.h" #include "mci.h" static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 }; static struct ath_mci_profile_info* ath_mci_find_profile(struct ath_mci_profile *mci, struct ath_mci_profile_info *info) { struct ath_mci_profile_info *entry; if (list_empty(&mci->info)) return NULL; list_for_each_entry(entry, &mci->info, list) { if (entry->conn_handle == info->conn_handle) return entry; } return NULL; } static bool ath_mci_add_profile(struct ath_common *common, struct ath_mci_profile *mci, struct ath_mci_profile_info *info) { struct ath_mci_profile_info *entry; u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 }; if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) && (info->type == MCI_GPM_COEX_PROFILE_VOICE)) return false; if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) && (info->type != MCI_GPM_COEX_PROFILE_VOICE)) return false; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return false; memcpy(entry, info, 10); INC_PROF(mci, info); list_add_tail(&entry->list, &mci->info); if (info->type == MCI_GPM_COEX_PROFILE_VOICE) { if (info->voice_type < sizeof(voice_priority)) 
mci->voice_priority = voice_priority[info->voice_type]; else mci->voice_priority = 110; } return true; } static void ath_mci_del_profile(struct ath_common *common, struct ath_mci_profile *mci, struct ath_mci_profile_info *entry) { if (!entry) return; DEC_PROF(mci, entry); list_del(&entry->list); kfree(entry); } void ath_mci_flush_profile(struct ath_mci_profile *mci) { struct ath_mci_profile_info *info, *tinfo; mci->aggr_limit = 0; mci->num_mgmt = 0; if (list_empty(&mci->info)) return; list_for_each_entry_safe(info, tinfo, &mci->info, list) { list_del(&info->list); DEC_PROF(mci, info); kfree(info); } } static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex) { struct ath_mci_profile *mci = &btcoex->mci; u32 wlan_airtime = btcoex->btcoex_period * (100 - btcoex->duty_cycle) / 100; /* * Scale: wlan_airtime is in ms, aggr_limit is in 0.25 ms. * When wlan_airtime is less than 4ms, aggregation limit has to be * adjusted half of wlan_airtime to ensure that the aggregation can fit * without collision with BT traffic. */ if ((wlan_airtime <= 4) && (!mci->aggr_limit || (mci->aggr_limit > (2 * wlan_airtime)))) mci->aggr_limit = 2 * wlan_airtime; } static void ath_mci_update_scheme(struct ath_softc *sc) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_btcoex *btcoex = &sc->btcoex; struct ath_mci_profile *mci = &btcoex->mci; struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci; struct ath_mci_profile_info *info; u32 num_profile = NUM_PROF(mci); if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING) goto skip_tuning; mci->aggr_limit = 0; btcoex->duty_cycle = ath_mci_duty_cycle[num_profile]; btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD; if (NUM_PROF(mci)) btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW; else btcoex->bt_stomp_type = mci->num_mgmt ? 
ATH_BTCOEX_STOMP_ALL : ATH_BTCOEX_STOMP_LOW; if (num_profile == 1) { info = list_first_entry(&mci->info, struct ath_mci_profile_info, list); if (mci->num_sco) { if (info->T == 12) mci->aggr_limit = 8; else if (info->T == 6) { mci->aggr_limit = 6; btcoex->duty_cycle = 30; } else mci->aggr_limit = 6; ath_dbg(common, MCI, "Single SCO, aggregation limit %d 1/4 ms\n", mci->aggr_limit); } else if (mci->num_pan || mci->num_other_acl) { /* * For single PAN/FTP profile, allocate 35% for BT * to improve WLAN throughput. */ btcoex->duty_cycle = AR_SREV_9565(sc->sc_ah) ? 40 : 35; btcoex->btcoex_period = 53; ath_dbg(common, MCI, "Single PAN/FTP bt period %d ms dutycycle %d\n", btcoex->duty_cycle, btcoex->btcoex_period); } else if (mci->num_hid) { btcoex->duty_cycle = 30; mci->aggr_limit = 6; ath_dbg(common, MCI, "Multiple attempt/timeout single HID " "aggregation limit 1.5 ms dutycycle 30%%\n"); } } else if (num_profile == 2) { if (mci->num_hid == 2) btcoex->duty_cycle = 30; mci->aggr_limit = 6; ath_dbg(common, MCI, "Two BT profiles aggr limit 1.5 ms dutycycle %d%%\n", btcoex->duty_cycle); } else if (num_profile >= 3) { mci->aggr_limit = 4; ath_dbg(common, MCI, "Three or more profiles aggregation limit 1 ms\n"); } skip_tuning: if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) { if (IS_CHAN_HT(sc->sc_ah->curchan)) ath_mci_adjust_aggr_limit(btcoex); else btcoex->btcoex_period >>= 1; } ath9k_btcoex_timer_pause(sc); ath9k_hw_btcoex_disable(sc->sc_ah); if (IS_CHAN_5GHZ(sc->sc_ah->curchan)) return; btcoex->duty_cycle += (mci->num_bdr ? 
ATH_MCI_BDR_DUTY_CYCLE : 0); if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE) btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE; btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 * (100 - btcoex->duty_cycle) / 100; ath9k_hw_btcoex_enable(sc->sc_ah); ath9k_btcoex_timer_resume(sc); } static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) { struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; u32 payload[4] = {0, 0, 0, 0}; switch (opcode) { case MCI_GPM_BT_CAL_REQ: if (mci_hw->bt_state == MCI_BT_AWAKE) { mci_hw->bt_state = MCI_BT_CAL_START; ath9k_queue_reset(sc, RESET_TYPE_MCI); } ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state); break; case MCI_GPM_BT_CAL_GRANT: MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE); ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload, 16, false, true); break; default: ath_dbg(common, MCI, "Unknown GPM CAL message\n"); break; } } static void ath9k_mci_work(struct work_struct *work) { struct ath_softc *sc = container_of(work, struct ath_softc, mci_work); ath_mci_update_scheme(sc); } static void ath_mci_update_stomp_txprio(u8 cur_txprio, u8 *stomp_prio) { if (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_NONE]) stomp_prio[ATH_BTCOEX_STOMP_NONE] = cur_txprio; if (cur_txprio > stomp_prio[ATH_BTCOEX_STOMP_ALL]) stomp_prio[ATH_BTCOEX_STOMP_ALL] = cur_txprio; if ((cur_txprio > ATH_MCI_HI_PRIO) && (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_LOW])) stomp_prio[ATH_BTCOEX_STOMP_LOW] = cur_txprio; } static void ath_mci_set_concur_txprio(struct ath_softc *sc) { struct ath_btcoex *btcoex = &sc->btcoex; struct ath_mci_profile *mci = &btcoex->mci; u8 stomp_txprio[ATH_BTCOEX_STOMP_MAX]; memset(stomp_txprio, 0, sizeof(stomp_txprio)); if (mci->num_mgmt) { stomp_txprio[ATH_BTCOEX_STOMP_ALL] = ATH_MCI_INQUIRY_PRIO; if (!mci->num_pan && !mci->num_other_acl) stomp_txprio[ATH_BTCOEX_STOMP_NONE] = ATH_MCI_INQUIRY_PRIO; } else { u8 prof_prio[] = { 50, 90, 94, 
52 };/* RFCOMM, A2DP, HID, PAN */ stomp_txprio[ATH_BTCOEX_STOMP_LOW] = stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0xff; if (mci->num_sco) ath_mci_update_stomp_txprio(mci->voice_priority, stomp_txprio); if (mci->num_other_acl) ath_mci_update_stomp_txprio(prof_prio[0], stomp_txprio); if (mci->num_a2dp) ath_mci_update_stomp_txprio(prof_prio[1], stomp_txprio); if (mci->num_hid) ath_mci_update_stomp_txprio(prof_prio[2], stomp_txprio); if (mci->num_pan) ath_mci_update_stomp_txprio(prof_prio[3], stomp_txprio); if (stomp_txprio[ATH_BTCOEX_STOMP_NONE] == 0xff) stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0; if (stomp_txprio[ATH_BTCOEX_STOMP_LOW] == 0xff) stomp_txprio[ATH_BTCOEX_STOMP_LOW] = 0; } ath9k_hw_btcoex_set_concur_txprio(sc->sc_ah, stomp_txprio); } static u8 ath_mci_process_profile(struct ath_softc *sc, struct ath_mci_profile_info *info) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_btcoex *btcoex = &sc->btcoex; struct ath_mci_profile *mci = &btcoex->mci; struct ath_mci_profile_info *entry = NULL; entry = ath_mci_find_profile(mci, info); if (entry) { /* * Two MCI interrupts are generated while connecting to * headset and A2DP profile, but only one MCI interrupt * is generated with last added profile type while disconnecting * both profiles. * So while adding second profile type decrement * the first one. 
*/ if (entry->type != info->type) { DEC_PROF(mci, entry); INC_PROF(mci, info); } memcpy(entry, info, 10); } if (info->start) { if (!entry && !ath_mci_add_profile(common, mci, info)) return 0; } else ath_mci_del_profile(common, mci, entry); ath_mci_set_concur_txprio(sc); return 1; } static u8 ath_mci_process_status(struct ath_softc *sc, struct ath_mci_profile_status *status) { struct ath_btcoex *btcoex = &sc->btcoex; struct ath_mci_profile *mci = &btcoex->mci; struct ath_mci_profile_info info; int i = 0, old_num_mgmt = mci->num_mgmt; /* Link status type are not handled */ if (status->is_link) return 0; info.conn_handle = status->conn_handle; if (ath_mci_find_profile(mci, &info)) return 0; if (status->conn_handle >= ATH_MCI_MAX_PROFILE) return 0; if (status->is_critical) __set_bit(status->conn_handle, mci->status); else __clear_bit(status->conn_handle, mci->status); mci->num_mgmt = 0; do { if (test_bit(i, mci->status)) mci->num_mgmt++; } while (++i < ATH_MCI_MAX_PROFILE); ath_mci_set_concur_txprio(sc); if (old_num_mgmt != mci->num_mgmt) return 1; return 0; } static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) { struct ath_hw *ah = sc->sc_ah; struct ath_mci_profile_info profile_info; struct ath_mci_profile_status profile_status; struct ath_common *common = ath9k_hw_common(sc->sc_ah); u8 major, minor, update_scheme = 0; u32 seq_num; if (ar9003_mci_state(ah, MCI_STATE_NEED_FLUSH_BT_INFO) && ar9003_mci_state(ah, MCI_STATE_ENABLE)) { ath_dbg(common, MCI, "(MCI) Need to flush BT profiles\n"); ath_mci_flush_profile(&sc->btcoex.mci); ar9003_mci_state(ah, MCI_STATE_SEND_STATUS_QUERY); } switch (opcode) { case MCI_GPM_COEX_VERSION_QUERY: ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION); break; case MCI_GPM_COEX_VERSION_RESPONSE: major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION); minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION); ar9003_mci_set_bt_version(ah, major, minor); break; case MCI_GPM_COEX_STATUS_QUERY: 
ar9003_mci_send_wlan_channels(ah); break; case MCI_GPM_COEX_BT_PROFILE_INFO: memcpy(&profile_info, (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10); if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN) || (profile_info.type >= MCI_GPM_COEX_PROFILE_MAX)) { ath_dbg(common, MCI, "Illegal profile type = %d, state = %d\n", profile_info.type, profile_info.start); break; } update_scheme += ath_mci_process_profile(sc, &profile_info); break; case MCI_GPM_COEX_BT_STATUS_UPDATE: profile_status.is_link = *(rx_payload + MCI_GPM_COEX_B_STATUS_TYPE); profile_status.conn_handle = *(rx_payload + MCI_GPM_COEX_B_STATUS_LINKID); profile_status.is_critical = *(rx_payload + MCI_GPM_COEX_B_STATUS_STATE); seq_num = *((u32 *)(rx_payload + 12)); ath_dbg(common, MCI, "BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%u\n", profile_status.is_link, profile_status.conn_handle, profile_status.is_critical, seq_num); update_scheme += ath_mci_process_status(sc, &profile_status); break; default: ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n", opcode); break; } if (update_scheme) ieee80211_queue_work(sc->hw, &sc->mci_work); } int ath_mci_setup(struct ath_softc *sc) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_mci_coex *mci = &sc->mci_coex; struct ath_mci_buf *buf = &mci->sched_buf; int ret; buf->bf_addr = dmam_alloc_coherent(sc->dev, ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE, &buf->bf_paddr, GFP_KERNEL); if (buf->bf_addr == NULL) { ath_dbg(common, FATAL, "MCI buffer alloc failed\n"); return -ENOMEM; } memset(buf->bf_addr, MCI_GPM_RSVD_PATTERN, ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE); mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE; mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE; mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + mci->sched_buf.bf_len; mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len; ret = ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr, mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4), 
mci->sched_buf.bf_paddr); if (ret) { ath_err(common, "Failed to initialize MCI\n"); return ret; } INIT_WORK(&sc->mci_work, ath9k_mci_work); ath_dbg(common, MCI, "MCI Initialized\n"); return 0; } void ath_mci_cleanup(struct ath_softc *sc) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_hw *ah = sc->sc_ah; ar9003_mci_cleanup(ah); ath_dbg(common, MCI, "MCI De-Initialized\n"); } void ath_mci_intr(struct ath_softc *sc) { struct ath_mci_coex *mci = &sc->mci_coex; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; u32 mci_int, mci_int_rxmsg; u32 offset, subtype, opcode; u32 *pgpm; u32 more_data = MCI_GPM_MORE; bool skip_gpm = false; ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg); if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) { ar9003_mci_get_next_gpm_offset(ah, true, NULL); return; } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) { u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00}; /* * The following REMOTE_RESET and SYS_WAKING used to sent * only when BT wake up. Now they are always sent, as a * recovery method to reset BT MCI's RX alignment. 
*/ ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16, true, false); ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0, NULL, 0, true, false); mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE; ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE); /* * always do this for recovery and 2G/5G toggling and LNA_TRANS */ ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE); } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) { mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING; if ((mci_hw->bt_state == MCI_BT_SLEEP) && (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) != MCI_BT_SLEEP)) ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE); } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) { mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING; if ((mci_hw->bt_state == MCI_BT_AWAKE) && (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) != MCI_BT_AWAKE)) mci_hw->bt_state = MCI_BT_SLEEP; } if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) || (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) { ar9003_mci_state(ah, MCI_STATE_RECOVER_RX); skip_gpm = true; } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) { mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO; offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET); } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) { mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM; while (more_data == MCI_GPM_MORE) { if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) return; pgpm = mci->gpm_buf.bf_addr; offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data); if (offset == MCI_GPM_INVALID) break; pgpm += (offset >> 2); /* * The first dword is timer. * The real data starts from 2nd dword. 
*/ subtype = MCI_GPM_TYPE(pgpm); opcode = MCI_GPM_OPCODE(pgpm); if (skip_gpm) goto recycle; if (MCI_GPM_IS_CAL_TYPE(subtype)) { ath_mci_cal_msg(sc, subtype, (u8 *)pgpm); } else { switch (subtype) { case MCI_GPM_COEX_AGENT: ath_mci_msg(sc, opcode, (u8 *)pgpm); break; default: break; } } recycle: MCI_GPM_RECYCLE(pgpm); } } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) { if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL) mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL; if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO) mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO; if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) { int value_dbm = MS(mci_hw->cont_status, AR_MCI_CONT_RSSI_POWER); mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO; ath_dbg(common, MCI, "MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n", MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ? "tx" : "rx", MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY), value_dbm); } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK; if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST) mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST; } if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) || (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) { mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR | AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT); ath_mci_msg(sc, MCI_GPM_COEX_NOOP, NULL); } } void ath_mci_enable(struct ath_softc *sc) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); if (!common->btcoex_enabled) return; if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) sc->sc_ah->imask |= ATH9K_INT_MCI; } void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all) { struct ath_hw *ah = sc->sc_ah; struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; struct ath9k_channel *chan = ah->curchan; u32 channelmap[] = {0x00000000, 0xffff0000, 0xffffffff, 0x7fffffff}; int i; s16 chan_start, chan_end; u16 wlan_chan; if (!chan || !IS_CHAN_2GHZ(chan)) return; if (allow_all) 
goto send_wlan_chan; wlan_chan = chan->channel - 2402; chan_start = wlan_chan - 10; chan_end = wlan_chan + 10; if (IS_CHAN_HT40PLUS(chan)) chan_end += 20; else if (IS_CHAN_HT40MINUS(chan)) chan_start -= 20; /* adjust side band */ chan_start -= 7; chan_end += 7; if (chan_start <= 0) chan_start = 0; if (chan_end >= ATH_MCI_NUM_BT_CHANNELS) chan_end = ATH_MCI_NUM_BT_CHANNELS - 1; ath_dbg(ath9k_hw_common(ah), MCI, "WLAN current channel %d mask BT channel %d - %d\n", wlan_chan, chan_start, chan_end); for (i = chan_start; i < chan_end; i++) MCI_GPM_CLR_CHANNEL_BIT(&channelmap, i); send_wlan_chan: /* update and send wlan channels info to BT */ for (i = 0; i < 4; i++) mci->wlan_channels[i] = channelmap[i]; ar9003_mci_send_wlan_channels(ah); ar9003_mci_state(ah, MCI_STATE_SEND_VERSION_QUERY); } void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel, bool concur_tx) { struct ath_hw *ah = sc->sc_ah; struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci; bool old_concur_tx = mci_hw->concur_tx; if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX)) { mci_hw->concur_tx = false; return; } if (!IS_CHAN_2GHZ(ah->curchan)) return; if (setchannel) { struct ath9k_hw_cal_data *caldata = &sc->caldata; if (IS_CHAN_HT40PLUS(ah->curchan) && (ah->curchan->channel > caldata->channel) && (ah->curchan->channel <= caldata->channel + 20)) return; if (IS_CHAN_HT40MINUS(ah->curchan) && (ah->curchan->channel < caldata->channel) && (ah->curchan->channel >= caldata->channel - 20)) return; mci_hw->concur_tx = false; } else mci_hw->concur_tx = concur_tx; if (old_concur_tx != mci_hw->concur_tx) ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false); } static void ath9k_mci_stomp_audio(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; struct ath_btcoex *btcoex = &sc->btcoex; struct ath_mci_profile *mci = &btcoex->mci; if (!mci->num_sco && !mci->num_a2dp) return; if (ah->stats.avgbrssi > 25) { btcoex->stomp_audio = 0; return; } btcoex->stomp_audio++; } void ath9k_mci_update_rssi(struct 
ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; struct ath_btcoex *btcoex = &sc->btcoex; struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci; ath9k_mci_stomp_audio(sc); if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX)) return; if (ah->stats.avgbrssi >= 40) { if (btcoex->rssi_count < 0) btcoex->rssi_count = 0; if (++btcoex->rssi_count >= ATH_MCI_CONCUR_TX_SWITCH) { btcoex->rssi_count = 0; ath9k_mci_set_txpower(sc, false, true); } } else { if (btcoex->rssi_count > 0) btcoex->rssi_count = 0; if (--btcoex->rssi_count <= -ATH_MCI_CONCUR_TX_SWITCH) { btcoex->rssi_count = 0; ath9k_mci_set_txpower(sc, false, false); } } }
gpl-2.0
blakearnold/MPR
drivers/auxdisplay/ks0108.c
81
4634
/* * Filename: ks0108.c * Version: 0.1.0 * Description: ks0108 LCD Controller driver * License: GPLv2 * Depends: parport * * Author: Copyright (C) Miguel Ojeda Sandonis <maxextreme@gmail.com> * Date: 2006-10-31 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/io.h> #include <linux/parport.h> #include <linux/uaccess.h> #include <linux/ks0108.h> #define KS0108_NAME "ks0108" /* * Module Parameters */ static unsigned int ks0108_port = CONFIG_KS0108_PORT; module_param(ks0108_port, uint, S_IRUGO); MODULE_PARM_DESC(ks0108_port, "Parallel port where the LCD is connected"); static unsigned int ks0108_delay = CONFIG_KS0108_DELAY; module_param(ks0108_delay, uint, S_IRUGO); MODULE_PARM_DESC(ks0108_delay, "Delay between each control writing (microseconds)"); /* * Device */ static struct parport *ks0108_parport; static struct pardevice *ks0108_pardevice; /* * ks0108 Exported Commands (don't lock) * * You _should_ lock in the top driver: This functions _should not_ * get race conditions in any way. Locking for each byte here would be * so slow and useless. * * There are not bit definitions because they are not flags, * just arbitrary combinations defined by the documentation for each * function in the ks0108 LCD controller. 
If you want to know what means * a specific combination, look at the function's name. * * The ks0108_writecontrol bits need to be reverted ^(0,1,3) because * the parallel port also revert them using a "not" logic gate. */ #define bit(n) (((unsigned char)1)<<(n)) void ks0108_writedata(unsigned char byte) { parport_write_data(ks0108_parport, byte); } void ks0108_writecontrol(unsigned char byte) { udelay(ks0108_delay); parport_write_control(ks0108_parport, byte ^ (bit(0) | bit(1) | bit(3))); } void ks0108_displaystate(unsigned char state) { ks0108_writedata((state ? bit(0) : 0) | bit(1) | bit(2) | bit(3) | bit(4) | bit(5)); } void ks0108_startline(unsigned char startline) { ks0108_writedata(min(startline,(unsigned char)63) | bit(6) | bit(7)); } void ks0108_address(unsigned char address) { ks0108_writedata(min(address,(unsigned char)63) | bit(6)); } void ks0108_page(unsigned char page) { ks0108_writedata(min(page,(unsigned char)7) | bit(3) | bit(4) | bit(5) | bit(7)); } EXPORT_SYMBOL_GPL(ks0108_writedata); EXPORT_SYMBOL_GPL(ks0108_writecontrol); EXPORT_SYMBOL_GPL(ks0108_displaystate); EXPORT_SYMBOL_GPL(ks0108_startline); EXPORT_SYMBOL_GPL(ks0108_address); EXPORT_SYMBOL_GPL(ks0108_page); /* * Is the module inited? 
*/ static unsigned char ks0108_inited; unsigned char ks0108_isinited(void) { return ks0108_inited; } EXPORT_SYMBOL_GPL(ks0108_isinited); /* * Module Init & Exit */ static int __init ks0108_init(void) { int result; int ret = -EINVAL; ks0108_parport = parport_find_base(ks0108_port); if (ks0108_parport == NULL) { printk(KERN_ERR KS0108_NAME ": ERROR: " "parport didn't find %i port\n", ks0108_port); goto none; } ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME, NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL); if (ks0108_pardevice == NULL) { printk(KERN_ERR KS0108_NAME ": ERROR: " "parport didn't register new device\n"); goto none; } result = parport_claim(ks0108_pardevice); if (result != 0) { printk(KERN_ERR KS0108_NAME ": ERROR: " "can't claim %i parport, maybe in use\n", ks0108_port); ret = result; goto registered; } ks0108_inited = 1; return 0; registered: parport_unregister_device(ks0108_pardevice); none: return ret; } static void __exit ks0108_exit(void) { parport_release(ks0108_pardevice); parport_unregister_device(ks0108_pardevice); } module_init(ks0108_init); module_exit(ks0108_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Miguel Ojeda Sandonis <maxextreme@gmail.com>"); MODULE_DESCRIPTION("ks0108 LCD Controller driver");
gpl-2.0
JustAkan/jolla-kernel_bullhead
drivers/mtd/nand/nand_ids.c
337
8564
/* * drivers/mtd/nandids.c * * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/mtd/nand.h> #include <linux/sizes.h> #define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) #define SP_OPTIONS NAND_NEED_READRDY #define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16) /* * The chip ID list: * name, device ID, page size, chip size in MiB, eraseblock size, options * * If page size and eraseblock size are 0, the sizes are taken from the * extended chip ID. */ struct nand_flash_dev nand_flash_ids[] = { /* * Some incompatible NAND chips share device ID's and so must be * listed by full ID. We list them first so that we can easily identify * the most specific match. */ {"TC58NYG1S3H 2G 1.8V 8-bit", { .id = {0x98, 0xaa, 0x90, 0x15, 0x00, 0x00, 0x00, 0x00} }, SZ_2K, SZ_256, SZ_128K, 0, 4, 128, 8}, {"MT29F4G08ABBDAHC 4G 3.3V 8-bit", { .id = {0x2c, 0xac, 0x90, 0x15, 0x00, 0x00, 0x00, 0x00} }, SZ_2K, SZ_512, SZ_128K, 0, 4, 64, 4}, {"MT29F8G08ABBCAH4 8G 3.3V 8-bit", { .id = {0x2c, 0xa3, 0x90, 0x26, 0x00, 0x00, 0x00, 0x00} }, SZ_4K, SZ_1K, SZ_256K, 0, 4, 224, 8}, {"MT29RZ4B2DZZHGSK 4G 3.3V 8-bit", { .id = {0x2c, 0xac, 0x90, 0x26, 0x00, 0x00, 0x00, 0x00} }, SZ_4K, SZ_512, SZ_256K, 0, 4, 224, 8}, {"TC58NVG2S0F 4G 3.3V 8-bit", { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} }, SZ_4K, SZ_512, SZ_256K, 0, 8, 224}, {"TC58NVG3S0F 8G 3.3V 8-bit", { .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} }, SZ_4K, SZ_1K, SZ_256K, 0, 8, 232}, {"TC58NVG5D2 32G 3.3V 8-bit", { .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} }, SZ_8K, SZ_4K, SZ_1M, 0, 8, 640}, {"TC58NVG6D2 64G 3.3V 8-bit", { .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} }, SZ_8K, SZ_8K, SZ_2M, 0, 8, 640}, LEGACY_ID_NAND("NAND 4MiB 
5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS), LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS), LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS), LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS), LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS), LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit", 0x33, 16, SZ_16K, SP_OPTIONS), LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit", 0x73, 16, SZ_16K, SP_OPTIONS), LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16), LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16), LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit", 0x35, 32, SZ_16K, SP_OPTIONS), LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit", 0x75, 32, SZ_16K, SP_OPTIONS), LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16), LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16), LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit", 0x36, 64, SZ_16K, SP_OPTIONS), LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0x76, 64, SZ_16K, SP_OPTIONS), LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16), LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16), LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x78, 128, SZ_16K, SP_OPTIONS), LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x39, 128, SZ_16K, SP_OPTIONS), LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit", 0x79, 128, SZ_16K, SP_OPTIONS), LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16), LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16), LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16), LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16), LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS), /* * These are the new chips with large page size. Their page size and * eraseblock size are determined from the extended ID bytes. 
*/ /* 512 Megabit */ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA2, 64, LP_OPTIONS), EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA0, 64, LP_OPTIONS), EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF2, 64, LP_OPTIONS), EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xD0, 64, LP_OPTIONS), EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF0, 64, LP_OPTIONS), EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2, 64, LP_OPTIONS16), EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0, 64, LP_OPTIONS16), EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2, 64, LP_OPTIONS16), EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0, 64, LP_OPTIONS16), /* 1 Gigabit */ EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit", 0xA1, 128, LP_OPTIONS), EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xF1, 128, LP_OPTIONS), EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xD1, 128, LP_OPTIONS), EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16), EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16), EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16), /* 2 Gigabit */ EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit", 0xAA, 256, LP_OPTIONS), EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit", 0xDA, 256, LP_OPTIONS), EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16), EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16), /* 4 Gigabit */ EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit", 0xAC, 512, LP_OPTIONS), EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit", 0xDC, 512, LP_OPTIONS), EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16), EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16), /* 8 Gigabit */ EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit", 0xA3, 1024, LP_OPTIONS), EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit", 0xD3, 1024, LP_OPTIONS), EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16), EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16), /* 16 Gigabit */ EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit", 0xA5, 2048, LP_OPTIONS), 
EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit", 0xD5, 2048, LP_OPTIONS), EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16), EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16), /* 32 Gigabit */ EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit", 0xA7, 4096, LP_OPTIONS), EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit", 0xD7, 4096, LP_OPTIONS), EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16), EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16), /* 64 Gigabit */ EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit", 0xAE, 8192, LP_OPTIONS), EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit", 0xDE, 8192, LP_OPTIONS), EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16), EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16), /* 128 Gigabit */ EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit", 0x1A, 16384, LP_OPTIONS), EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit", 0x3A, 16384, LP_OPTIONS), EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16), EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16), /* 256 Gigabit */ EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit", 0x1C, 32768, LP_OPTIONS), EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit", 0x3C, 32768, LP_OPTIONS), EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16), EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16), /* 512 Gigabit */ EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit", 0x1E, 65536, LP_OPTIONS), EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit", 0x3E, 65536, LP_OPTIONS), EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16), EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16), {"NAND 4GiB 1,8V 8-bit", {{ 0xAC }} , 2048, 4096, 0x20000, 0 }, {NULL} }; /* Manufacturer IDs */ struct nand_manufacturers nand_manuf_ids[] = { {NAND_MFR_TOSHIBA, "Toshiba"}, {NAND_MFR_SAMSUNG, "Samsung"}, {NAND_MFR_FUJITSU, "Fujitsu"}, {NAND_MFR_NATIONAL, "National"}, {NAND_MFR_RENESAS, "Renesas"}, {NAND_MFR_STMICRO, "ST Micro"}, 
{NAND_MFR_HYNIX, "Hynix"}, {NAND_MFR_MICRON, "Micron"}, {NAND_MFR_AMD, "AMD/Spansion"}, {NAND_MFR_MACRONIX, "Macronix"}, {NAND_MFR_EON, "Eon"}, {NAND_MFR_ESMT, "Elite Semiconductor"}, {0x0, "Unknown"} }; EXPORT_SYMBOL(nand_manuf_ids); EXPORT_SYMBOL(nand_flash_ids); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); MODULE_DESCRIPTION("Nand device & manufacturer IDs");
gpl-2.0
SaberMod/binutils-saber
readline/examples/rlptytest.c
337
6531
/* * * Another test harness for the readline callback interface. * * Author: Bob Rossi <bob@brasko.net> */ #if defined (HAVE_CONFIG_H) #include <config.h> #endif #include <stdio.h> #include <sys/types.h> #include <errno.h> #include <curses.h> #include <stdlib.h> #include <unistd.h> #include <signal.h> #if 0 /* LINUX */ #include <pty.h> #else #include <util.h> #endif #ifdef READLINE_LIBRARY # include "readline.h" #else # include <readline/readline.h> #endif /** * Master/Slave PTY used to keep readline off of stdin/stdout. */ static int masterfd = -1; static int slavefd; void sigint (s) int s; { tty_reset (STDIN_FILENO); close (masterfd); close (slavefd); printf ("\n"); exit (0); } static int user_input() { int size; const int MAX = 1024; char *buf = (char *)malloc(MAX+1); size = read (STDIN_FILENO, buf, MAX); if (size == -1) return -1; size = write (masterfd, buf, size); if (size == -1) return -1; return 0; } static int readline_input() { const int MAX = 1024; char *buf = (char *)malloc(MAX+1); int size; size = read (masterfd, buf, MAX); if (size == -1) { free( buf ); buf = NULL; return -1; } buf[size] = 0; /* Display output from readline */ if ( size > 0 ) fprintf(stderr, "%s", buf); free( buf ); buf = NULL; return 0; } static void rlctx_send_user_command(char *line) { /* This happens when rl_callback_read_char gets EOF */ if ( line == NULL ) return; if (strcmp (line, "exit") == 0) { tty_reset (STDIN_FILENO); close (masterfd); close (slavefd); printf ("\n"); exit (0); } /* Don't add the enter command */ if ( line && *line != '\0' ) add_history(line); } static void custom_deprep_term_function () { } static int init_readline (int inputfd, int outputfd) { FILE *inputFILE, *outputFILE; inputFILE = fdopen (inputfd, "r"); if (!inputFILE) return -1; outputFILE = fdopen (outputfd, "w"); if (!outputFILE) return -1; rl_instream = inputFILE; rl_outstream = outputFILE; /* Tell readline what the prompt is if it needs to put it back */ rl_callback_handler_install("(rltest): ", 
rlctx_send_user_command); /* Set the terminal type to dumb so the output of readline can be * understood by tgdb */ if ( rl_reset_terminal("dumb") == -1 ) return -1; /* For some reason, readline can not deprep the terminal. * However, it doesn't matter because no other application is working on * the terminal besides readline */ rl_deprep_term_function = custom_deprep_term_function; using_history(); read_history(".history"); return 0; } static int main_loop(void) { fd_set rset; int max; max = (masterfd > STDIN_FILENO) ? masterfd : STDIN_FILENO; max = (max > slavefd) ? max : slavefd; for (;;) { /* Reset the fd_set, and watch for input from GDB or stdin */ FD_ZERO(&rset); FD_SET(STDIN_FILENO, &rset); FD_SET(slavefd, &rset); FD_SET(masterfd, &rset); /* Wait for input */ if (select(max + 1, &rset, NULL, NULL, NULL) == -1) { if (errno == EINTR) continue; else return -1; } /* Input received through the pty: Handle it * Wrote to masterfd, slave fd has that input, alert readline to read it. */ if (FD_ISSET(slavefd, &rset)) rl_callback_read_char(); /* Input received through the pty. * Readline read from slavefd, and it wrote to the masterfd. */ if (FD_ISSET(masterfd, &rset)) if ( readline_input() == -1 ) return -1; /* Input received: Handle it, write to masterfd (input to readline) */ if (FD_ISSET(STDIN_FILENO, &rset)) if ( user_input() == -1 ) return -1; } return 0; } /* The terminal attributes before calling tty_cbreak */ static struct termios save_termios; static struct winsize size; static enum { RESET, TCBREAK } ttystate = RESET; /* tty_cbreak: Sets terminal to cbreak mode. Also known as noncanonical mode. * 1. Signal handling is still turned on, so the user can still type those. * 2. echo is off * 3. Read in one char at a time. 
* * fd - The file descriptor of the terminal * * Returns: 0 on sucess, -1 on error */ int tty_cbreak(int fd){ struct termios buf; int ttysavefd = -1; if(tcgetattr(fd, &save_termios) < 0) return -1; buf = save_termios; buf.c_lflag &= ~(ECHO | ICANON); buf.c_iflag &= ~(ICRNL | INLCR); buf.c_cc[VMIN] = 1; buf.c_cc[VTIME] = 0; #if defined (VLNEXT) && defined (_POSIX_VDISABLE) buf.c_cc[VLNEXT] = _POSIX_VDISABLE; #endif #if defined (VDSUSP) && defined (_POSIX_VDISABLE) buf.c_cc[VDSUSP] = _POSIX_VDISABLE; #endif /* enable flow control; only stty start char can restart output */ #if 0 buf.c_iflag |= (IXON|IXOFF); #ifdef IXANY buf.c_iflag &= ~IXANY; #endif #endif /* disable flow control; let ^S and ^Q through to pty */ buf.c_iflag &= ~(IXON|IXOFF); #ifdef IXANY buf.c_iflag &= ~IXANY; #endif if(tcsetattr(fd, TCSAFLUSH, &buf) < 0) return -1; ttystate = TCBREAK; ttysavefd = fd; /* set size */ if(ioctl(fd, TIOCGWINSZ, (char *)&size) < 0) return -1; #ifdef DEBUG err_msg("%d rows and %d cols\n", size.ws_row, size.ws_col); #endif return (0); } int tty_off_xon_xoff (int fd) { struct termios buf; int ttysavefd = -1; if(tcgetattr(fd, &buf) < 0) return -1; buf.c_iflag &= ~(IXON|IXOFF); if(tcsetattr(fd, TCSAFLUSH, &buf) < 0) return -1; return 0; } /* tty_reset: Sets the terminal attributes back to their previous state. * PRE: tty_cbreak must have already been called. * * fd - The file descrioptor of the terminal to reset. 
* * Returns: 0 on success, -1 on error */ int tty_reset(int fd) { if(ttystate != TCBREAK) return (0); if(tcsetattr(fd, TCSAFLUSH, &save_termios) < 0) return (-1); ttystate = RESET; return 0; } int main() { int val; val = openpty (&masterfd, &slavefd, NULL, NULL, NULL); if (val == -1) return -1; val = tty_off_xon_xoff (masterfd); if (val == -1) return -1; val = init_readline (slavefd, slavefd); if (val == -1) return -1; val = tty_cbreak (STDIN_FILENO); if (val == -1) return -1; signal (SIGINT, sigint); val = main_loop (); tty_reset (STDIN_FILENO); if (val == -1) return -1; return 0; }
gpl-2.0
cyjia/linux
drivers/pcmcia/xxs1500_ss.c
1105
7980
/*
 * PCMCIA socket code for the MyCable XXS1500 system.
 *
 * Copyright (c) 2009 Manuel Lauss <manuel.lauss@gmail.com>
 *
 */

#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/resource.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>

#include <asm/irq.h>
#include <asm/mach-au1x00/au1000.h>

#define MEM_MAP_SIZE	0x400000
#define IO_MAP_SIZE	0x1000

/*
 * 3.3V cards only; all interfacing is done via gpios:
 *
 * 0/1: carddetect (00 = card present, xx = huh)
 * 4: card irq
 * 204: reset (high-act)
 * 205: buffer enable (low-act)
 * 208/209: card voltage key (00,01,10,11)
 * 210: battwarn
 * 211: batdead
 * 214: power (low-act)
 */
#define GPIO_CDA	0
#define GPIO_CDB	1
#define GPIO_CARDIRQ	4
#define GPIO_RESET	204
#define GPIO_OUTEN	205
#define GPIO_VSL	208
#define GPIO_VSH	209
#define GPIO_BATTDEAD	210
#define GPIO_BATTWARN	211
#define GPIO_POWER	214

/* Per-socket driver state; embeds the pcmcia core's socket object. */
struct xxs1500_pcmcia_sock {
	struct pcmcia_socket	socket;
	void		*virt_io;

	/* 36-bit physical bases of the three PCMCIA windows (from resources) */
	phys_addr_t	phys_io;
	phys_addr_t	phys_attr;
	phys_addr_t	phys_mem;

	/* previous flags for set_socket() */
	unsigned int old_flags;
};

#define to_xxs_socket(x) container_of(x, struct xxs1500_pcmcia_sock, socket)

/* Card-detect GPIO edge interrupt: report insertion/removal to the core. */
static irqreturn_t cdirq(int irq, void *data)
{
	struct xxs1500_pcmcia_sock *sock = data;

	pcmcia_parse_events(&sock->socket, SS_DETECT);

	return IRQ_HANDLED;
}

/* set_socket callback: apply Vcc and reset/buffer-enable state via GPIOs.
 * Only 3.3V (Vcc == 33) and off (Vcc == 0) are supported on this board.
 */
static int xxs1500_pcmcia_configure(struct pcmcia_socket *skt,
				    struct socket_state_t *state)
{
	struct xxs1500_pcmcia_sock *sock = to_xxs_socket(skt);
	unsigned int changed;

	/* power control */
	switch (state->Vcc) {
	case 0:
		gpio_set_value(GPIO_POWER, 1);	/* power off */
		break;
	case 33:
		gpio_set_value(GPIO_POWER, 0);	/* power on */
		break;
	case 50:
	default:
		return -EINVAL;
	}

	changed = state->flags ^ sock->old_flags;

	if (changed & SS_RESET) {
		if (state->flags & SS_RESET) {
			gpio_set_value(GPIO_RESET, 1);	/* assert reset */
			gpio_set_value(GPIO_OUTEN, 1);	/* buffers off */
		} else {
			gpio_set_value(GPIO_RESET, 0);	/* deassert reset */
			gpio_set_value(GPIO_OUTEN, 0);	/* buffers on */
			/* give the card time to come out of reset */
			msleep(500);
		}
	}

	sock->old_flags = state->flags;

	return 0;
}

/* get_status callback: assemble SS_* status bits from the GPIO lines. */
static int xxs1500_pcmcia_get_status(struct pcmcia_socket *skt,
				     unsigned int *value)
{
	unsigned int status;
	int i;

	status = 0;

	/* check carddetects: GPIO[0:1] must both be low */
	if (!gpio_get_value(GPIO_CDA) && !gpio_get_value(GPIO_CDB))
		status |= SS_DETECT;

	/* determine card voltage: GPIO[208:209] binary value */
	i = (!!gpio_get_value(GPIO_VSL)) | ((!!gpio_get_value(GPIO_VSH)) << 1);

	switch (i) {
	case 0:
	case 1:
	case 2:
		status |= SS_3VCARD;	/* 3V card */
		break;
	case 3:		/* 5V card, unsupported */
	default:
		status |= SS_XVCARD;	/* treated as unsupported in core */
	}

	/* GPIO214: low active power switch */
	status |= gpio_get_value(GPIO_POWER) ? 0 : SS_POWERON;

	/* GPIO204: high-active reset line */
	status |= gpio_get_value(GPIO_RESET) ? SS_RESET : SS_READY;

	/* other stuff */
	status |= gpio_get_value(GPIO_BATTDEAD) ? 0 : SS_BATDEAD;
	status |= gpio_get_value(GPIO_BATTWARN) ? 0 : SS_BATWARN;

	*value = status;

	return 0;
}

/* init callback: set GPIO directions and park the socket powered-off,
 * in reset, with the bus buffers disabled.
 */
static int xxs1500_pcmcia_sock_init(struct pcmcia_socket *skt)
{
	gpio_direction_input(GPIO_CDA);
	gpio_direction_input(GPIO_CDB);
	gpio_direction_input(GPIO_VSL);
	gpio_direction_input(GPIO_VSH);
	gpio_direction_input(GPIO_BATTDEAD);
	gpio_direction_input(GPIO_BATTWARN);
	gpio_direction_output(GPIO_RESET, 1);	/* assert reset */
	gpio_direction_output(GPIO_OUTEN, 1);	/* disable buffers */
	gpio_direction_output(GPIO_POWER, 1);	/* power off */

	return 0;
}

/* suspend callback: nothing to do on this hardware. */
static int xxs1500_pcmcia_sock_suspend(struct pcmcia_socket *skt)
{
	return 0;
}

/* set_io_map callback: static mapping, just report the fixed IO window. */
static int au1x00_pcmcia_set_io_map(struct pcmcia_socket *skt,
				    struct pccard_io_map *map)
{
	struct xxs1500_pcmcia_sock *sock = to_xxs_socket(skt);

	map->start = (u32)sock->virt_io;
	map->stop = map->start + IO_MAP_SIZE;

	return 0;
}

/* set_mem_map callback: pick attribute or common memory window base. */
static int au1x00_pcmcia_set_mem_map(struct pcmcia_socket *skt,
				     struct pccard_mem_map *map)
{
	struct xxs1500_pcmcia_sock *sock = to_xxs_socket(skt);

	if (map->flags & MAP_ATTRIB)
		map->static_start = sock->phys_attr + map->card_start;
	else
		map->static_start = sock->phys_mem + map->card_start;

	return 0;
}

static struct pccard_operations xxs1500_pcmcia_operations = {
	.init			= xxs1500_pcmcia_sock_init,
	.suspend		= xxs1500_pcmcia_sock_suspend,
	.get_status		= xxs1500_pcmcia_get_status,
	.set_socket		= xxs1500_pcmcia_configure,
	.set_io_map		= au1x00_pcmcia_set_io_map,
	.set_mem_map		= au1x00_pcmcia_set_mem_map,
};

/* Probe: gather the three memory resources, map the IO window, register
 * the card-detect interrupt and the socket with the pcmcia core.
 */
static int xxs1500_pcmcia_probe(struct platform_device *pdev)
{
	struct xxs1500_pcmcia_sock *sock;
	struct resource *r;
	int ret, irq;

	sock = kzalloc(sizeof(struct xxs1500_pcmcia_sock), GFP_KERNEL);
	if (!sock)
		return -ENOMEM;

	ret = -ENODEV;

	/* 36bit PCMCIA Attribute area address */
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr");
	if (!r) {
		dev_err(&pdev->dev, "missing 'pcmcia-attr' resource!\n");
		goto out0;
	}
	sock->phys_attr = r->start;

	/* 36bit PCMCIA Memory area address */
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem");
	if (!r) {
		dev_err(&pdev->dev, "missing 'pcmcia-mem' resource!\n");
		goto out0;
	}
	sock->phys_mem = r->start;

	/* 36bit PCMCIA IO area address */
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io");
	if (!r) {
		dev_err(&pdev->dev, "missing 'pcmcia-io' resource!\n");
		goto out0;
	}
	sock->phys_io = r->start;

	/*
	 * PCMCIA client drivers use the inb/outb macros to access
	 * the IO registers. Since mips_io_port_base is added
	 * to the access address of the mips implementation of
	 * inb/outb, we need to subtract it here because we want
	 * to access the I/O or MEM address directly, without
	 * going through this "mips_io_port_base" mechanism.
	 */
	sock->virt_io = (void *)(ioremap(sock->phys_io, IO_MAP_SIZE) -
				 mips_io_port_base);

	if (!sock->virt_io) {
		dev_err(&pdev->dev, "cannot remap IO area\n");
		ret = -ENOMEM;
		goto out0;
	}

	sock->socket.ops	= &xxs1500_pcmcia_operations;
	sock->socket.owner	= THIS_MODULE;
	sock->socket.pci_irq	= gpio_to_irq(GPIO_CARDIRQ);
	sock->socket.features	= SS_CAP_STATIC_MAP | SS_CAP_PCCARD;
	sock->socket.map_size	= MEM_MAP_SIZE;
	sock->socket.io_offset	= (unsigned long)sock->virt_io;
	sock->socket.dev.parent	= &pdev->dev;
	sock->socket.resource_ops = &pccard_static_ops;

	platform_set_drvdata(pdev, sock);

	/* setup carddetect irq: use one of the 2 GPIOs as an
	 * edge detector.
	 */
	irq = gpio_to_irq(GPIO_CDA);
	irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
	ret = request_irq(irq, cdirq, 0, "pcmcia_carddetect", sock);
	if (ret) {
		dev_err(&pdev->dev, "cannot setup cd irq\n");
		goto out1;
	}

	ret = pcmcia_register_socket(&sock->socket);
	if (ret) {
		dev_err(&pdev->dev, "failed to register\n");
		goto out2;
	}

	printk(KERN_INFO "MyCable XXS1500 PCMCIA socket services\n");

	return 0;

out2:
	free_irq(gpio_to_irq(GPIO_CDA), sock);
out1:
	/* undo the mips_io_port_base offset before unmapping */
	iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
out0:
	kfree(sock);
	return ret;
}

/* Remove: tear down in reverse order of probe. */
static int xxs1500_pcmcia_remove(struct platform_device *pdev)
{
	struct xxs1500_pcmcia_sock *sock = platform_get_drvdata(pdev);

	pcmcia_unregister_socket(&sock->socket);
	free_irq(gpio_to_irq(GPIO_CDA), sock);
	iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
	kfree(sock);

	return 0;
}

static struct platform_driver xxs1500_pcmcia_socket_driver = {
	.driver	= {
		.name	= "xxs1500_pcmcia",
	},
	.probe		= xxs1500_pcmcia_probe,
	.remove		= xxs1500_pcmcia_remove,
};

module_platform_driver(xxs1500_pcmcia_socket_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCMCIA Socket Services for MyCable XXS1500 systems");
MODULE_AUTHOR("Manuel Lauss");
gpl-2.0
rachitrawat/Vengeance-Kernel-U8500
security/keys/request_key_auth.c
2385
6648
/* Request key authorisation token key definition.
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/security/keys-request-key.txt
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "internal.h"

static int request_key_auth_instantiate(struct key *, const void *, size_t);
static void request_key_auth_describe(const struct key *, struct seq_file *);
static void request_key_auth_revoke(struct key *);
static void request_key_auth_destroy(struct key *);
static long request_key_auth_read(const struct key *, char __user *, size_t);

/*
 * The request-key authorisation key type definition.
 */
struct key_type key_type_request_key_auth = {
	.name		= ".request_key_auth",
	.def_datalen	= sizeof(struct request_key_auth),
	.instantiate	= request_key_auth_instantiate,
	.describe	= request_key_auth_describe,
	.revoke		= request_key_auth_revoke,
	.destroy	= request_key_auth_destroy,
	.read		= request_key_auth_read,
};

/*
 * Instantiate a request-key authorisation key.
 *
 * The payload is the request_key_auth record built by
 * request_key_auth_new(); the key takes ownership of it (it is freed by
 * request_key_auth_destroy()).
 */
static int request_key_auth_instantiate(struct key *key,
					const void *data,
					size_t datalen)
{
	key->payload.data = (struct request_key_auth *) data;
	return 0;
}

/*
 * Describe an authorisation token.
 *
 * Emits "key:<desc>" and, once instantiated, the requestor pid and the
 * callout-info length.
 */
static void request_key_auth_describe(const struct key *key,
				      struct seq_file *m)
{
	struct request_key_auth *rka = key->payload.data;

	seq_puts(m, "key:");
	seq_puts(m, key->description);
	if (key_is_instantiated(key))
		seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
}

/*
 * Read the callout_info data (retrieves the callout information).
 * - the key's semaphore is read-locked
 *
 * Returns the full callout_info length even when the supplied buffer is
 * smaller (the copy is truncated to buflen), or -EFAULT on copy failure.
 */
static long request_key_auth_read(const struct key *key,
				  char __user *buffer, size_t buflen)
{
	struct request_key_auth *rka = key->payload.data;
	size_t datalen;
	long ret;

	datalen = rka->callout_len;
	ret = datalen;

	/* we can return the data as is */
	if (buffer && buflen > 0) {
		if (buflen > datalen)
			buflen = datalen;

		if (copy_to_user(buffer, rka->callout_info, buflen) != 0)
			ret = -EFAULT;
	}

	return ret;
}

/*
 * Handle revocation of an authorisation token key.
 *
 * Called with the key sem write-locked.
 *
 * Drops the payload's credential reference early; destroy() handles the
 * same field, so it is NULLed here to avoid a double put.
 */
static void request_key_auth_revoke(struct key *key)
{
	struct request_key_auth *rka = key->payload.data;

	kenter("{%d}", key->serial);

	if (rka->cred) {
		put_cred(rka->cred);
		rka->cred = NULL;
	}
}

/*
 * Destroy an instantiation authorisation token key.
 *
 * Releases every reference held by the payload record and frees it.
 */
static void request_key_auth_destroy(struct key *key)
{
	struct request_key_auth *rka = key->payload.data;

	kenter("{%d}", key->serial);

	if (rka->cred) {
		put_cred(rka->cred);
		rka->cred = NULL;
	}

	key_put(rka->target_key);
	key_put(rka->dest_keyring);
	kfree(rka->callout_info);
	kfree(rka);
}

/*
 * Create an authorisation token for /sbin/request-key or whoever to gain
 * access to the caller's security data.
 *
 * target       - the key awaiting instantiation
 * callout_info - opaque data for the upcall; copied (callout_len bytes)
 * dest_keyring - keyring the instantiated key should be linked into
 *
 * Returns the new authorisation key, or an ERR_PTR (-ENOMEM,
 * -EKEYREVOKED, or a key_alloc/instantiate error).
 */
struct key *request_key_auth_new(struct key *target, const void *callout_info,
				 size_t callout_len, struct key *dest_keyring)
{
	struct request_key_auth *rka, *irka;
	const struct cred *cred = current->cred;
	struct key *authkey = NULL;
	char desc[20];
	int ret;

	kenter("%d,", target->serial);

	/* allocate a auth record */
	rka = kmalloc(sizeof(*rka), GFP_KERNEL);
	if (!rka) {
		kleave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}
	rka->callout_info = kmalloc(callout_len, GFP_KERNEL);
	if (!rka->callout_info) {
		kleave(" = -ENOMEM");
		kfree(rka);
		return ERR_PTR(-ENOMEM);
	}

	/* see if the calling process is already servicing the key request of
	 * another process */
	if (cred->request_key_auth) {
		/* it is - use that instantiation context here too */
		down_read(&cred->request_key_auth->sem);

		/* if the auth key has been revoked, then the key we're
		 * servicing is already instantiated */
		if (test_bit(KEY_FLAG_REVOKED, &cred->request_key_auth->flags))
			goto auth_key_revoked;

		irka = cred->request_key_auth->payload.data;
		rka->cred = get_cred(irka->cred);
		rka->pid = irka->pid;

		up_read(&cred->request_key_auth->sem);
	}
	else {
		/* it isn't - use this process as the context */
		rka->cred = get_cred(cred);
		rka->pid = current->pid;
	}

	rka->target_key = key_get(target);
	rka->dest_keyring = key_get(dest_keyring);
	memcpy(rka->callout_info, callout_info, callout_len);
	rka->callout_len = callout_len;

	/* allocate the auth key */
	sprintf(desc, "%x", target->serial);

	authkey = key_alloc(&key_type_request_key_auth, desc,
			    cred->fsuid, cred->fsgid, cred,
			    KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
			    KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(authkey)) {
		ret = PTR_ERR(authkey);
		goto error_alloc;
	}

	/* construct the auth key */
	ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL);
	if (ret < 0)
		goto error_inst;

	kleave(" = {%d,%d}", authkey->serial, atomic_read(&authkey->usage));
	return authkey;

auth_key_revoked:
	up_read(&cred->request_key_auth->sem);
	kfree(rka->callout_info);
	kfree(rka);
	kleave("= -EKEYREVOKED");
	return ERR_PTR(-EKEYREVOKED);

error_inst:
	key_revoke(authkey);
	key_put(authkey);
error_alloc:
	key_put(rka->target_key);
	key_put(rka->dest_keyring);
	kfree(rka->callout_info);
	kfree(rka);
	kleave("= %d", ret);
	return ERR_PTR(ret);
}

/*
 * See if an authorisation key is associated with a particular key.
 *
 * Match function for the keyring search below: compares the serial of the
 * key awaiting instantiation against the searched-for id.
 */
static int key_get_instantiation_authkey_match(const struct key *key,
					       const void *_id)
{
	struct request_key_auth *rka = key->payload.data;
	key_serial_t id = (key_serial_t)(unsigned long) _id;

	return rka->target_key->serial == id;
}

/*
 * Search the current process's keyrings for the authorisation key for
 * instantiation of a key.
 *
 * Returns the authorisation key with a reference held, or an ERR_PTR
 * (-EKEYREVOKED if the key was found but has been revoked).
 */
struct key *key_get_instantiation_authkey(key_serial_t target_id)
{
	const struct cred *cred = current_cred();
	struct key *authkey;
	key_ref_t authkey_ref;

	authkey_ref = search_process_keyrings(
		&key_type_request_key_auth,
		(void *) (unsigned long) target_id,
		key_get_instantiation_authkey_match,
		cred);

	if (IS_ERR(authkey_ref)) {
		authkey = ERR_CAST(authkey_ref);
		goto error;
	}

	authkey = key_ref_to_ptr(authkey_ref);
	if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) {
		key_put(authkey);
		authkey = ERR_PTR(-EKEYREVOKED);
	}

error:
	return authkey;
}
gpl-2.0
AOKP-SGS2/android_kernel_samsung_espresso
drivers/media/video/cx88/cx88-video.c
2385
57404
/* * * device driver for Conexant 2388x based TV cards * video4linux video interface * * (c) 2003-04 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs] * * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> * - Multituner support * - video_ioctl2 conversion * - PAL/M fixes * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/kmod.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/kthread.h> #include <asm/div64.h> #include "cx88.h" #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/wm8775.h> MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards"); MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); MODULE_LICENSE("GPL"); /* ------------------------------------------------------------------ */ static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; static unsigned int radio_nr[] = {[0 ... 
(CX88_MAXBOARDS - 1)] = UNSET }; module_param_array(video_nr, int, NULL, 0444); module_param_array(vbi_nr, int, NULL, 0444); module_param_array(radio_nr, int, NULL, 0444); MODULE_PARM_DESC(video_nr,"video device numbers"); MODULE_PARM_DESC(vbi_nr,"vbi device numbers"); MODULE_PARM_DESC(radio_nr,"radio device numbers"); static unsigned int video_debug; module_param(video_debug,int,0644); MODULE_PARM_DESC(video_debug,"enable debug messages [video]"); static unsigned int irq_debug; module_param(irq_debug,int,0644); MODULE_PARM_DESC(irq_debug,"enable debug messages [IRQ handler]"); static unsigned int vid_limit = 16; module_param(vid_limit,int,0644); MODULE_PARM_DESC(vid_limit,"capture memory limit in megabytes"); #define dprintk(level,fmt, arg...) if (video_debug >= level) \ printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg) /* ------------------------------------------------------------------- */ /* static data */ static const struct cx8800_fmt formats[] = { { .name = "8 bpp, gray", .fourcc = V4L2_PIX_FMT_GREY, .cxformat = ColorFormatY8, .depth = 8, .flags = FORMAT_FLAGS_PACKED, },{ .name = "15 bpp RGB, le", .fourcc = V4L2_PIX_FMT_RGB555, .cxformat = ColorFormatRGB15, .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "15 bpp RGB, be", .fourcc = V4L2_PIX_FMT_RGB555X, .cxformat = ColorFormatRGB15 | ColorFormatBSWAP, .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "16 bpp RGB, le", .fourcc = V4L2_PIX_FMT_RGB565, .cxformat = ColorFormatRGB16, .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "16 bpp RGB, be", .fourcc = V4L2_PIX_FMT_RGB565X, .cxformat = ColorFormatRGB16 | ColorFormatBSWAP, .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "24 bpp RGB, le", .fourcc = V4L2_PIX_FMT_BGR24, .cxformat = ColorFormatRGB24, .depth = 24, .flags = FORMAT_FLAGS_PACKED, },{ .name = "32 bpp RGB, le", .fourcc = V4L2_PIX_FMT_BGR32, .cxformat = ColorFormatRGB32, .depth = 32, .flags = FORMAT_FLAGS_PACKED, },{ .name = "32 bpp RGB, be", .fourcc = V4L2_PIX_FMT_RGB32, 
.cxformat = ColorFormatRGB32 | ColorFormatBSWAP | ColorFormatWSWAP, .depth = 32, .flags = FORMAT_FLAGS_PACKED, },{ .name = "4:2:2, packed, YUYV", .fourcc = V4L2_PIX_FMT_YUYV, .cxformat = ColorFormatYUY2, .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "4:2:2, packed, UYVY", .fourcc = V4L2_PIX_FMT_UYVY, .cxformat = ColorFormatYUY2 | ColorFormatBSWAP, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, }; static const struct cx8800_fmt* format_by_fourcc(unsigned int fourcc) { unsigned int i; for (i = 0; i < ARRAY_SIZE(formats); i++) if (formats[i].fourcc == fourcc) return formats+i; return NULL; } /* ------------------------------------------------------------------- */ static const struct v4l2_queryctrl no_ctl = { .name = "42", .flags = V4L2_CTRL_FLAG_DISABLED, }; static const struct cx88_ctrl cx8800_ctls[] = { /* --- video --- */ { .v = { .id = V4L2_CID_BRIGHTNESS, .name = "Brightness", .minimum = 0x00, .maximum = 0xff, .step = 1, .default_value = 0x7f, .type = V4L2_CTRL_TYPE_INTEGER, }, .off = 128, .reg = MO_CONTR_BRIGHT, .mask = 0x00ff, .shift = 0, },{ .v = { .id = V4L2_CID_CONTRAST, .name = "Contrast", .minimum = 0, .maximum = 0xff, .step = 1, .default_value = 0x3f, .type = V4L2_CTRL_TYPE_INTEGER, }, .off = 0, .reg = MO_CONTR_BRIGHT, .mask = 0xff00, .shift = 8, },{ .v = { .id = V4L2_CID_HUE, .name = "Hue", .minimum = 0, .maximum = 0xff, .step = 1, .default_value = 0x7f, .type = V4L2_CTRL_TYPE_INTEGER, }, .off = 128, .reg = MO_HUE, .mask = 0x00ff, .shift = 0, },{ /* strictly, this only describes only U saturation. * V saturation is handled specially through code. 
*/ .v = { .id = V4L2_CID_SATURATION, .name = "Saturation", .minimum = 0, .maximum = 0xff, .step = 1, .default_value = 0x7f, .type = V4L2_CTRL_TYPE_INTEGER, }, .off = 0, .reg = MO_UV_SATURATION, .mask = 0x00ff, .shift = 0, },{ .v = { .id = V4L2_CID_CHROMA_AGC, .name = "Chroma AGC", .minimum = 0, .maximum = 1, .default_value = 0x1, .type = V4L2_CTRL_TYPE_BOOLEAN, }, .reg = MO_INPUT_FORMAT, .mask = 1 << 10, .shift = 10, }, { .v = { .id = V4L2_CID_COLOR_KILLER, .name = "Color killer", .minimum = 0, .maximum = 1, .default_value = 0x1, .type = V4L2_CTRL_TYPE_BOOLEAN, }, .reg = MO_INPUT_FORMAT, .mask = 1 << 9, .shift = 9, }, { /* --- audio --- */ .v = { .id = V4L2_CID_AUDIO_MUTE, .name = "Mute", .minimum = 0, .maximum = 1, .default_value = 1, .type = V4L2_CTRL_TYPE_BOOLEAN, }, .reg = AUD_VOL_CTL, .sreg = SHADOW_AUD_VOL_CTL, .mask = (1 << 6), .shift = 6, },{ .v = { .id = V4L2_CID_AUDIO_VOLUME, .name = "Volume", .minimum = 0, .maximum = 0x3f, .step = 1, .default_value = 0x3f, .type = V4L2_CTRL_TYPE_INTEGER, }, .reg = AUD_VOL_CTL, .sreg = SHADOW_AUD_VOL_CTL, .mask = 0x3f, .shift = 0, },{ .v = { .id = V4L2_CID_AUDIO_BALANCE, .name = "Balance", .minimum = 0, .maximum = 0x7f, .step = 1, .default_value = 0x40, .type = V4L2_CTRL_TYPE_INTEGER, }, .reg = AUD_BAL_CTL, .sreg = SHADOW_AUD_BAL_CTL, .mask = 0x7f, .shift = 0, } }; enum { CX8800_CTLS = ARRAY_SIZE(cx8800_ctls) }; /* Must be sorted from low to high control ID! 
*/ const u32 cx88_user_ctrls[] = { V4L2_CID_USER_CLASS, V4L2_CID_BRIGHTNESS, V4L2_CID_CONTRAST, V4L2_CID_SATURATION, V4L2_CID_HUE, V4L2_CID_AUDIO_VOLUME, V4L2_CID_AUDIO_BALANCE, V4L2_CID_AUDIO_MUTE, V4L2_CID_CHROMA_AGC, V4L2_CID_COLOR_KILLER, 0 }; EXPORT_SYMBOL(cx88_user_ctrls); static const u32 * const ctrl_classes[] = { cx88_user_ctrls, NULL }; int cx8800_ctrl_query(struct cx88_core *core, struct v4l2_queryctrl *qctrl) { int i; if (qctrl->id < V4L2_CID_BASE || qctrl->id >= V4L2_CID_LASTP1) return -EINVAL; for (i = 0; i < CX8800_CTLS; i++) if (cx8800_ctls[i].v.id == qctrl->id) break; if (i == CX8800_CTLS) { *qctrl = no_ctl; return 0; } *qctrl = cx8800_ctls[i].v; /* Report chroma AGC as inactive when SECAM is selected */ if (cx8800_ctls[i].v.id == V4L2_CID_CHROMA_AGC && core->tvnorm & V4L2_STD_SECAM) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; } EXPORT_SYMBOL(cx8800_ctrl_query); /* ------------------------------------------------------------------- */ /* resource management */ static int res_get(struct cx8800_dev *dev, struct cx8800_fh *fh, unsigned int bit) { struct cx88_core *core = dev->core; if (fh->resources & bit) /* have it already allocated */ return 1; /* is it free? 
*/ mutex_lock(&core->lock); if (dev->resources & bit) { /* no, someone else uses it */ mutex_unlock(&core->lock); return 0; } /* it's free, grab it */ fh->resources |= bit; dev->resources |= bit; dprintk(1,"res: get %d\n",bit); mutex_unlock(&core->lock); return 1; } static int res_check(struct cx8800_fh *fh, unsigned int bit) { return (fh->resources & bit); } static int res_locked(struct cx8800_dev *dev, unsigned int bit) { return (dev->resources & bit); } static void res_free(struct cx8800_dev *dev, struct cx8800_fh *fh, unsigned int bits) { struct cx88_core *core = dev->core; BUG_ON((fh->resources & bits) != bits); mutex_lock(&core->lock); fh->resources &= ~bits; dev->resources &= ~bits; dprintk(1,"res: put %d\n",bits); mutex_unlock(&core->lock); } /* ------------------------------------------------------------------ */ int cx88_video_mux(struct cx88_core *core, unsigned int input) { /* struct cx88_core *core = dev->core; */ dprintk(1,"video_mux: %d [vmux=%d,gpio=0x%x,0x%x,0x%x,0x%x]\n", input, INPUT(input).vmux, INPUT(input).gpio0,INPUT(input).gpio1, INPUT(input).gpio2,INPUT(input).gpio3); core->input = input; cx_andor(MO_INPUT_FORMAT, 0x03 << 14, INPUT(input).vmux << 14); cx_write(MO_GP3_IO, INPUT(input).gpio3); cx_write(MO_GP0_IO, INPUT(input).gpio0); cx_write(MO_GP1_IO, INPUT(input).gpio1); cx_write(MO_GP2_IO, INPUT(input).gpio2); switch (INPUT(input).type) { case CX88_VMUX_SVIDEO: cx_set(MO_AFECFG_IO, 0x00000001); cx_set(MO_INPUT_FORMAT, 0x00010010); cx_set(MO_FILTER_EVEN, 0x00002020); cx_set(MO_FILTER_ODD, 0x00002020); break; default: cx_clear(MO_AFECFG_IO, 0x00000001); cx_clear(MO_INPUT_FORMAT, 0x00010010); cx_clear(MO_FILTER_EVEN, 0x00002020); cx_clear(MO_FILTER_ODD, 0x00002020); break; } /* if there are audioroutes defined, we have an external ADC to deal with audio */ if (INPUT(input).audioroute) { /* The wm8775 module has the "2" route hardwired into the initialization. Some boards may use different routes for different inputs. 
HVR-1300 surely does */ if (core->board.audio_chip && core->board.audio_chip == V4L2_IDENT_WM8775) { call_all(core, audio, s_routing, INPUT(input).audioroute, 0, 0); } /* cx2388's C-ADC is connected to the tuner only. When used with S-Video, that ADC is busy dealing with chroma, so an external must be used for baseband audio */ if (INPUT(input).type != CX88_VMUX_TELEVISION && INPUT(input).type != CX88_VMUX_CABLE) { /* "I2S ADC mode" */ core->tvaudio = WW_I2SADC; cx88_set_tvaudio(core); } else { /* Normal mode */ cx_write(AUD_I2SCNTL, 0x0); cx_clear(AUD_CTL, EN_I2SIN_ENABLE); } } return 0; } EXPORT_SYMBOL(cx88_video_mux); /* ------------------------------------------------------------------ */ static int start_video_dma(struct cx8800_dev *dev, struct cx88_dmaqueue *q, struct cx88_buffer *buf) { struct cx88_core *core = dev->core; /* setup fifo + format */ cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH21], buf->bpl, buf->risc.dma); cx88_set_scale(core, buf->vb.width, buf->vb.height, buf->vb.field); cx_write(MO_COLOR_CTRL, buf->fmt->cxformat | ColorFormatGamma); /* reset counter */ cx_write(MO_VIDY_GPCNTRL,GP_COUNT_CONTROL_RESET); q->count = 1; /* enable irqs */ cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_VIDINT); /* Enables corresponding bits at PCI_INT_STAT: bits 0 to 4: video, audio, transport stream, VIP, Host bit 7: timer bits 8 and 9: DMA complete for: SRC, DST bits 10 and 11: BERR signal asserted for RISC: RD, WR bits 12 to 15: BERR signal asserted for: BRDG, SRC, DST, IPB */ cx_set(MO_VID_INTMSK, 0x0f0011); /* enable capture */ cx_set(VID_CAPTURE_CONTROL,0x06); /* start dma */ cx_set(MO_DEV_CNTRL2, (1<<5)); cx_set(MO_VID_DMACNTRL, 0x11); /* Planar Y and packed FIFO and RISC enable */ return 0; } #ifdef CONFIG_PM static int stop_video_dma(struct cx8800_dev *dev) { struct cx88_core *core = dev->core; /* stop dma */ cx_clear(MO_VID_DMACNTRL, 0x11); /* disable capture */ cx_clear(VID_CAPTURE_CONTROL,0x06); /* disable irqs */ 
cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT); cx_clear(MO_VID_INTMSK, 0x0f0011); return 0; } #endif static int restart_video_queue(struct cx8800_dev *dev, struct cx88_dmaqueue *q) { struct cx88_core *core = dev->core; struct cx88_buffer *buf, *prev; if (!list_empty(&q->active)) { buf = list_entry(q->active.next, struct cx88_buffer, vb.queue); dprintk(2,"restart_queue [%p/%d]: restart dma\n", buf, buf->vb.i); start_video_dma(dev, q, buf); list_for_each_entry(buf, &q->active, vb.queue) buf->count = q->count++; mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT); return 0; } prev = NULL; for (;;) { if (list_empty(&q->queued)) return 0; buf = list_entry(q->queued.next, struct cx88_buffer, vb.queue); if (NULL == prev) { list_move_tail(&buf->vb.queue, &q->active); start_video_dma(dev, q, buf); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT); dprintk(2,"[%p/%d] restart_queue - first active\n", buf,buf->vb.i); } else if (prev->vb.width == buf->vb.width && prev->vb.height == buf->vb.height && prev->fmt == buf->fmt) { list_move_tail(&buf->vb.queue, &q->active); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); dprintk(2,"[%p/%d] restart_queue - move to active\n", buf,buf->vb.i); } else { return 0; } prev = buf; } } /* ------------------------------------------------------------------ */ static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size) { struct cx8800_fh *fh = q->priv_data; *size = fh->fmt->depth*fh->width*fh->height >> 3; if (0 == *count) *count = 32; if (*size * *count > vid_limit * 1024 * 1024) *count = (vid_limit * 1024 * 1024) / *size; return 0; } static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb, enum v4l2_field field) { struct cx8800_fh *fh = q->priv_data; struct cx8800_dev *dev = fh->dev; struct cx88_core *core = dev->core; struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb); struct 
videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); int rc, init_buffer = 0; BUG_ON(NULL == fh->fmt); if (fh->width < 48 || fh->width > norm_maxw(core->tvnorm) || fh->height < 32 || fh->height > norm_maxh(core->tvnorm)) return -EINVAL; buf->vb.size = (fh->width * fh->height * fh->fmt->depth) >> 3; if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size) return -EINVAL; if (buf->fmt != fh->fmt || buf->vb.width != fh->width || buf->vb.height != fh->height || buf->vb.field != field) { buf->fmt = fh->fmt; buf->vb.width = fh->width; buf->vb.height = fh->height; buf->vb.field = field; init_buffer = 1; } if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { init_buffer = 1; if (0 != (rc = videobuf_iolock(q,&buf->vb,NULL))) goto fail; } if (init_buffer) { buf->bpl = buf->vb.width * buf->fmt->depth >> 3; switch (buf->vb.field) { case V4L2_FIELD_TOP: cx88_risc_buffer(dev->pci, &buf->risc, dma->sglist, 0, UNSET, buf->bpl, 0, buf->vb.height); break; case V4L2_FIELD_BOTTOM: cx88_risc_buffer(dev->pci, &buf->risc, dma->sglist, UNSET, 0, buf->bpl, 0, buf->vb.height); break; case V4L2_FIELD_INTERLACED: cx88_risc_buffer(dev->pci, &buf->risc, dma->sglist, 0, buf->bpl, buf->bpl, buf->bpl, buf->vb.height >> 1); break; case V4L2_FIELD_SEQ_TB: cx88_risc_buffer(dev->pci, &buf->risc, dma->sglist, 0, buf->bpl * (buf->vb.height >> 1), buf->bpl, 0, buf->vb.height >> 1); break; case V4L2_FIELD_SEQ_BT: cx88_risc_buffer(dev->pci, &buf->risc, dma->sglist, buf->bpl * (buf->vb.height >> 1), 0, buf->bpl, 0, buf->vb.height >> 1); break; default: BUG(); } } dprintk(2,"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n", buf, buf->vb.i, fh->width, fh->height, fh->fmt->depth, fh->fmt->name, (unsigned long)buf->risc.dma); buf->vb.state = VIDEOBUF_PREPARED; return 0; fail: cx88_free_buffer(q,buf); return rc; } static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb); struct cx88_buffer *prev; struct cx8800_fh *fh = 
vq->priv_data; struct cx8800_dev *dev = fh->dev; struct cx88_core *core = dev->core; struct cx88_dmaqueue *q = &dev->vidq; /* add jump to stopper */ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC); buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma); if (!list_empty(&q->queued)) { list_add_tail(&buf->vb.queue,&q->queued); buf->vb.state = VIDEOBUF_QUEUED; dprintk(2,"[%p/%d] buffer_queue - append to queued\n", buf, buf->vb.i); } else if (list_empty(&q->active)) { list_add_tail(&buf->vb.queue,&q->active); start_video_dma(dev, q, buf); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT); dprintk(2,"[%p/%d] buffer_queue - first active\n", buf, buf->vb.i); } else { prev = list_entry(q->active.prev, struct cx88_buffer, vb.queue); if (prev->vb.width == buf->vb.width && prev->vb.height == buf->vb.height && prev->fmt == buf->fmt) { list_add_tail(&buf->vb.queue,&q->active); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); dprintk(2,"[%p/%d] buffer_queue - append to active\n", buf, buf->vb.i); } else { list_add_tail(&buf->vb.queue,&q->queued); buf->vb.state = VIDEOBUF_QUEUED; dprintk(2,"[%p/%d] buffer_queue - first queued\n", buf, buf->vb.i); } } } static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb) { struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb); cx88_free_buffer(q,buf); } static const struct videobuf_queue_ops cx8800_video_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, .buf_release = buffer_release, }; /* ------------------------------------------------------------------ */ /* ------------------------------------------------------------------ */ static struct videobuf_queue* get_queue(struct cx8800_fh *fh) { switch (fh->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: return &fh->vidq; case V4L2_BUF_TYPE_VBI_CAPTURE: return &fh->vbiq; default: BUG(); return NULL; } 
} static int get_ressource(struct cx8800_fh *fh) { switch (fh->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: return RESOURCE_VIDEO; case V4L2_BUF_TYPE_VBI_CAPTURE: return RESOURCE_VBI; default: BUG(); return 0; } } static int video_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; struct cx8800_fh *fh; enum v4l2_buf_type type = 0; int radio = 0; switch (vdev->vfl_type) { case VFL_TYPE_GRABBER: type = V4L2_BUF_TYPE_VIDEO_CAPTURE; break; case VFL_TYPE_VBI: type = V4L2_BUF_TYPE_VBI_CAPTURE; break; case VFL_TYPE_RADIO: radio = 1; break; } dprintk(1, "open dev=%s radio=%d type=%s\n", video_device_node_name(vdev), radio, v4l2_type_names[type]); /* allocate + initialize per filehandle data */ fh = kzalloc(sizeof(*fh),GFP_KERNEL); if (unlikely(!fh)) return -ENOMEM; file->private_data = fh; fh->dev = dev; fh->radio = radio; fh->type = type; fh->width = 320; fh->height = 240; fh->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24); mutex_lock(&core->lock); videobuf_queue_sg_init(&fh->vidq, &cx8800_video_qops, &dev->pci->dev, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED, sizeof(struct cx88_buffer), fh, NULL); videobuf_queue_sg_init(&fh->vbiq, &cx8800_vbi_qops, &dev->pci->dev, &dev->slock, V4L2_BUF_TYPE_VBI_CAPTURE, V4L2_FIELD_SEQ_TB, sizeof(struct cx88_buffer), fh, NULL); if (fh->radio) { dprintk(1,"video_open: setting radio device\n"); cx_write(MO_GP3_IO, core->board.radio.gpio3); cx_write(MO_GP0_IO, core->board.radio.gpio0); cx_write(MO_GP1_IO, core->board.radio.gpio1); cx_write(MO_GP2_IO, core->board.radio.gpio2); if (core->board.radio.audioroute) { if(core->board.audio_chip && core->board.audio_chip == V4L2_IDENT_WM8775) { call_all(core, audio, s_routing, core->board.radio.audioroute, 0, 0); } /* "I2S ADC mode" */ core->tvaudio = WW_I2SADC; cx88_set_tvaudio(core); } else { /* FM Mode */ core->tvaudio = WW_FM; cx88_set_tvaudio(core); 
cx88_set_stereo(core,V4L2_TUNER_MODE_STEREO,1); } call_all(core, tuner, s_radio); } core->users++; mutex_unlock(&core->lock); return 0; } static ssize_t video_read(struct file *file, char __user *data, size_t count, loff_t *ppos) { struct cx8800_fh *fh = file->private_data; switch (fh->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: if (res_locked(fh->dev,RESOURCE_VIDEO)) return -EBUSY; return videobuf_read_one(&fh->vidq, data, count, ppos, file->f_flags & O_NONBLOCK); case V4L2_BUF_TYPE_VBI_CAPTURE: if (!res_get(fh->dev,fh,RESOURCE_VBI)) return -EBUSY; return videobuf_read_stream(&fh->vbiq, data, count, ppos, 1, file->f_flags & O_NONBLOCK); default: BUG(); return 0; } } static unsigned int video_poll(struct file *file, struct poll_table_struct *wait) { struct cx8800_fh *fh = file->private_data; struct cx88_buffer *buf; unsigned int rc = POLLERR; if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) { if (!res_get(fh->dev,fh,RESOURCE_VBI)) return POLLERR; return videobuf_poll_stream(file, &fh->vbiq, wait); } mutex_lock(&fh->vidq.vb_lock); if (res_check(fh,RESOURCE_VIDEO)) { /* streaming capture */ if (list_empty(&fh->vidq.stream)) goto done; buf = list_entry(fh->vidq.stream.next,struct cx88_buffer,vb.stream); } else { /* read() capture */ buf = (struct cx88_buffer*)fh->vidq.read_buf; if (NULL == buf) goto done; } poll_wait(file, &buf->vb.done, wait); if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) rc = POLLIN|POLLRDNORM; else rc = 0; done: mutex_unlock(&fh->vidq.vb_lock); return rc; } static int video_release(struct file *file) { struct cx8800_fh *fh = file->private_data; struct cx8800_dev *dev = fh->dev; /* turn off overlay */ if (res_check(fh, RESOURCE_OVERLAY)) { /* FIXME */ res_free(dev,fh,RESOURCE_OVERLAY); } /* stop video capture */ if (res_check(fh, RESOURCE_VIDEO)) { videobuf_queue_cancel(&fh->vidq); res_free(dev,fh,RESOURCE_VIDEO); } if (fh->vidq.read_buf) { buffer_release(&fh->vidq,fh->vidq.read_buf); kfree(fh->vidq.read_buf); } /* stop vbi capture 
*/ if (res_check(fh, RESOURCE_VBI)) { videobuf_stop(&fh->vbiq); res_free(dev,fh,RESOURCE_VBI); } videobuf_mmap_free(&fh->vidq); videobuf_mmap_free(&fh->vbiq); mutex_lock(&dev->core->lock); file->private_data = NULL; kfree(fh); dev->core->users--; if (!dev->core->users) call_all(dev->core, core, s_power, 0); mutex_unlock(&dev->core->lock); return 0; } static int video_mmap(struct file *file, struct vm_area_struct * vma) { struct cx8800_fh *fh = file->private_data; return videobuf_mmap_mapper(get_queue(fh), vma); } /* ------------------------------------------------------------------ */ /* VIDEO CTRL IOCTLS */ int cx88_get_control (struct cx88_core *core, struct v4l2_control *ctl) { const struct cx88_ctrl *c = NULL; u32 value; int i; for (i = 0; i < CX8800_CTLS; i++) if (cx8800_ctls[i].v.id == ctl->id) c = &cx8800_ctls[i]; if (unlikely(NULL == c)) return -EINVAL; value = c->sreg ? cx_sread(c->sreg) : cx_read(c->reg); switch (ctl->id) { case V4L2_CID_AUDIO_BALANCE: ctl->value = ((value & 0x7f) < 0x40) ? ((value & 0x7f) + 0x40) : (0x7f - (value & 0x7f)); break; case V4L2_CID_AUDIO_VOLUME: ctl->value = 0x3f - (value & 0x3f); break; default: ctl->value = ((value + (c->off << c->shift)) & c->mask) >> c->shift; break; } dprintk(1,"get_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n", ctl->id, c->v.name, ctl->value, c->reg, value,c->mask, c->sreg ? 
" [shadowed]" : ""); return 0; } EXPORT_SYMBOL(cx88_get_control); int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl) { const struct cx88_ctrl *c = NULL; u32 value,mask; int i; for (i = 0; i < CX8800_CTLS; i++) { if (cx8800_ctls[i].v.id == ctl->id) { c = &cx8800_ctls[i]; } } if (unlikely(NULL == c)) return -EINVAL; if (ctl->value < c->v.minimum) ctl->value = c->v.minimum; if (ctl->value > c->v.maximum) ctl->value = c->v.maximum; /* Pass changes onto any WM8775 */ if (core->board.audio_chip == V4L2_IDENT_WM8775) { struct v4l2_control client_ctl; memset(&client_ctl, 0, sizeof(client_ctl)); client_ctl.id = ctl->id; switch (ctl->id) { case V4L2_CID_AUDIO_MUTE: client_ctl.value = ctl->value; break; case V4L2_CID_AUDIO_VOLUME: client_ctl.value = (ctl->value) ? (0x90 + ctl->value) << 8 : 0; break; case V4L2_CID_AUDIO_BALANCE: client_ctl.value = ctl->value << 9; break; default: client_ctl.id = 0; break; } if (client_ctl.id) call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl); } mask=c->mask; switch (ctl->id) { case V4L2_CID_AUDIO_BALANCE: value = (ctl->value < 0x40) ? (0x7f - ctl->value) : (ctl->value - 0x40); break; case V4L2_CID_AUDIO_VOLUME: value = 0x3f - (ctl->value & 0x3f); break; case V4L2_CID_SATURATION: /* special v_sat handling */ value = ((ctl->value - c->off) << c->shift) & c->mask; if (core->tvnorm & V4L2_STD_SECAM) { /* For SECAM, both U and V sat should be equal */ value=value<<8|value; } else { /* Keeps U Saturation proportional to V Sat */ value=(value*0x5a)/0x7f<<8|value; } mask=0xffff; break; case V4L2_CID_CHROMA_AGC: /* Do not allow chroma AGC to be enabled for SECAM */ value = ((ctl->value - c->off) << c->shift) & c->mask; if (core->tvnorm & V4L2_STD_SECAM && value) return -EINVAL; break; default: value = ((ctl->value - c->off) << c->shift) & c->mask; break; } dprintk(1,"set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n", ctl->id, c->v.name, ctl->value, c->reg, value, mask, c->sreg ? 
" [shadowed]" : ""); if (c->sreg) { cx_sandor(c->sreg, c->reg, mask, value); } else { cx_andor(c->reg, mask, value); } return 0; } EXPORT_SYMBOL(cx88_set_control); static void init_controls(struct cx88_core *core) { struct v4l2_control ctrl; int i; for (i = 0; i < CX8800_CTLS; i++) { ctrl.id=cx8800_ctls[i].v.id; ctrl.value=cx8800_ctls[i].v.default_value; cx88_set_control(core, &ctrl); } } /* ------------------------------------------------------------------ */ /* VIDEO IOCTLS */ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx8800_fh *fh = priv; f->fmt.pix.width = fh->width; f->fmt.pix.height = fh->height; f->fmt.pix.field = fh->vidq.field; f->fmt.pix.pixelformat = fh->fmt->fourcc; f->fmt.pix.bytesperline = (f->fmt.pix.width * fh->fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; const struct cx8800_fmt *fmt; enum v4l2_field field; unsigned int maxw, maxh; fmt = format_by_fourcc(f->fmt.pix.pixelformat); if (NULL == fmt) return -EINVAL; field = f->fmt.pix.field; maxw = norm_maxw(core->tvnorm); maxh = norm_maxh(core->tvnorm); if (V4L2_FIELD_ANY == field) { field = (f->fmt.pix.height > maxh/2) ? 
V4L2_FIELD_INTERLACED : V4L2_FIELD_BOTTOM; } switch (field) { case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: maxh = maxh / 2; break; case V4L2_FIELD_INTERLACED: break; default: return -EINVAL; } f->fmt.pix.field = field; v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2, &f->fmt.pix.height, 32, maxh, 0, 0); f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx8800_fh *fh = priv; int err = vidioc_try_fmt_vid_cap (file,priv,f); if (0 != err) return err; fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat); fh->width = f->fmt.pix.width; fh->height = f->fmt.pix.height; fh->vidq.field = f->fmt.pix.field; return 0; } static int vidioc_querycap (struct file *file, void *priv, struct v4l2_capability *cap) { struct cx8800_dev *dev = ((struct cx8800_fh *)priv)->dev; struct cx88_core *core = dev->core; strcpy(cap->driver, "cx8800"); strlcpy(cap->card, core->board.name, sizeof(cap->card)); sprintf(cap->bus_info,"PCI:%s",pci_name(dev->pci)); cap->version = CX88_VERSION_CODE; cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | V4L2_CAP_VBI_CAPTURE; if (UNSET != core->board.tuner_type) cap->capabilities |= V4L2_CAP_TUNER; return 0; } static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (unlikely(f->index >= ARRAY_SIZE(formats))) return -EINVAL; strlcpy(f->description,formats[f->index].name,sizeof(f->description)); f->pixelformat = formats[f->index].fourcc; return 0; } static int vidioc_reqbufs (struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct cx8800_fh *fh = priv; return (videobuf_reqbufs(get_queue(fh), p)); } static int vidioc_querybuf (struct file *file, void *priv, struct v4l2_buffer *p) { struct cx8800_fh *fh = priv; return (videobuf_querybuf(get_queue(fh), p)); } static int vidioc_qbuf (struct 
file *file, void *priv, struct v4l2_buffer *p) { struct cx8800_fh *fh = priv; return (videobuf_qbuf(get_queue(fh), p)); } static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p) { struct cx8800_fh *fh = priv; return (videobuf_dqbuf(get_queue(fh), p, file->f_flags & O_NONBLOCK)); } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct cx8800_fh *fh = priv; struct cx8800_dev *dev = fh->dev; /* We should remember that this driver also supports teletext, */ /* so we have to test if the v4l2_buf_type is VBI capture data. */ if (unlikely((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && (fh->type != V4L2_BUF_TYPE_VBI_CAPTURE))) return -EINVAL; if (unlikely(i != fh->type)) return -EINVAL; if (unlikely(!res_get(dev,fh,get_ressource(fh)))) return -EBUSY; return videobuf_streamon(get_queue(fh)); } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) { struct cx8800_fh *fh = priv; struct cx8800_dev *dev = fh->dev; int err, res; if ((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && (fh->type != V4L2_BUF_TYPE_VBI_CAPTURE)) return -EINVAL; if (i != fh->type) return -EINVAL; res = get_ressource(fh); err = videobuf_streamoff(get_queue(fh)); if (err < 0) return err; res_free(dev,fh,res); return 0; } static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *tvnorms) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; mutex_lock(&core->lock); cx88_set_tvnorm(core,*tvnorms); mutex_unlock(&core->lock); return 0; } /* only one input in this sample driver */ int cx88_enum_input (struct cx88_core *core,struct v4l2_input *i) { static const char * const iname[] = { [ CX88_VMUX_COMPOSITE1 ] = "Composite1", [ CX88_VMUX_COMPOSITE2 ] = "Composite2", [ CX88_VMUX_COMPOSITE3 ] = "Composite3", [ CX88_VMUX_COMPOSITE4 ] = "Composite4", [ CX88_VMUX_SVIDEO ] = "S-Video", [ CX88_VMUX_TELEVISION ] = "Television", [ CX88_VMUX_CABLE ] = "Cable TV", [ CX88_VMUX_DVB ] = "DVB", [ CX88_VMUX_DEBUG ] = "for debug 
only", }; unsigned int n = i->index; if (n >= 4) return -EINVAL; if (0 == INPUT(n).type) return -EINVAL; i->type = V4L2_INPUT_TYPE_CAMERA; strcpy(i->name,iname[INPUT(n).type]); if ((CX88_VMUX_TELEVISION == INPUT(n).type) || (CX88_VMUX_CABLE == INPUT(n).type)) { i->type = V4L2_INPUT_TYPE_TUNER; i->std = CX88_NORMS; } return 0; } EXPORT_SYMBOL(cx88_enum_input); static int vidioc_enum_input (struct file *file, void *priv, struct v4l2_input *i) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; return cx88_enum_input (core,i); } static int vidioc_g_input (struct file *file, void *priv, unsigned int *i) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; *i = core->input; return 0; } static int vidioc_s_input (struct file *file, void *priv, unsigned int i) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; if (i >= 4) return -EINVAL; mutex_lock(&core->lock); cx88_newstation(core); cx88_video_mux(core,i); mutex_unlock(&core->lock); return 0; } static int vidioc_queryctrl (struct file *file, void *priv, struct v4l2_queryctrl *qctrl) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id); if (unlikely(qctrl->id == 0)) return -EINVAL; return cx8800_ctrl_query(core, qctrl); } static int vidioc_g_ctrl (struct file *file, void *priv, struct v4l2_control *ctl) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; return cx88_get_control(core,ctl); } static int vidioc_s_ctrl (struct file *file, void *priv, struct v4l2_control *ctl) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; return cx88_set_control(core,ctl); } static int vidioc_g_tuner (struct file *file, void *priv, struct v4l2_tuner *t) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; u32 reg; if (unlikely(UNSET == core->board.tuner_type)) return -EINVAL; if (0 != t->index) return -EINVAL; strcpy(t->name, "Television"); t->type = V4L2_TUNER_ANALOG_TV; t->capability = 
V4L2_TUNER_CAP_NORM; t->rangehigh = 0xffffffffUL; cx88_get_stereo(core ,t); reg = cx_read(MO_DEVICE_STATUS); t->signal = (reg & (1<<5)) ? 0xffff : 0x0000; return 0; } static int vidioc_s_tuner (struct file *file, void *priv, struct v4l2_tuner *t) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; if (UNSET == core->board.tuner_type) return -EINVAL; if (0 != t->index) return -EINVAL; cx88_set_stereo(core, t->audmode, 1); return 0; } static int vidioc_g_frequency (struct file *file, void *priv, struct v4l2_frequency *f) { struct cx8800_fh *fh = priv; struct cx88_core *core = fh->dev->core; if (unlikely(UNSET == core->board.tuner_type)) return -EINVAL; /* f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; */ f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; f->frequency = core->freq; call_all(core, tuner, g_frequency, f); return 0; } int cx88_set_freq (struct cx88_core *core, struct v4l2_frequency *f) { if (unlikely(UNSET == core->board.tuner_type)) return -EINVAL; if (unlikely(f->tuner != 0)) return -EINVAL; mutex_lock(&core->lock); core->freq = f->frequency; cx88_newstation(core); call_all(core, tuner, s_frequency, f); /* When changing channels it is required to reset TVAUDIO */ msleep (10); cx88_set_tvaudio(core); mutex_unlock(&core->lock); return 0; } EXPORT_SYMBOL(cx88_set_freq); static int vidioc_s_frequency (struct file *file, void *priv, struct v4l2_frequency *f) { struct cx8800_fh *fh = priv; struct cx88_core *core = fh->dev->core; if (unlikely(0 == fh->radio && f->type != V4L2_TUNER_ANALOG_TV)) return -EINVAL; if (unlikely(1 == fh->radio && f->type != V4L2_TUNER_RADIO)) return -EINVAL; return cx88_set_freq (core,f); } #ifdef CONFIG_VIDEO_ADV_DEBUG static int vidioc_g_register (struct file *file, void *fh, struct v4l2_dbg_register *reg) { struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core; if (!v4l2_chip_match_host(&reg->match)) return -EINVAL; /* cx2388x has a 24-bit register space */ reg->val = 
cx_read(reg->reg & 0xffffff); reg->size = 4; return 0; } static int vidioc_s_register (struct file *file, void *fh, struct v4l2_dbg_register *reg) { struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core; if (!v4l2_chip_match_host(&reg->match)) return -EINVAL; cx_write(reg->reg & 0xffffff, reg->val); return 0; } #endif /* ----------------------------------------------------------- */ /* RADIO ESPECIFIC IOCTLS */ /* ----------------------------------------------------------- */ static int radio_querycap (struct file *file, void *priv, struct v4l2_capability *cap) { struct cx8800_dev *dev = ((struct cx8800_fh *)priv)->dev; struct cx88_core *core = dev->core; strcpy(cap->driver, "cx8800"); strlcpy(cap->card, core->board.name, sizeof(cap->card)); sprintf(cap->bus_info,"PCI:%s", pci_name(dev->pci)); cap->version = CX88_VERSION_CODE; cap->capabilities = V4L2_CAP_TUNER; return 0; } static int radio_g_tuner (struct file *file, void *priv, struct v4l2_tuner *t) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; if (unlikely(t->index > 0)) return -EINVAL; strcpy(t->name, "Radio"); t->type = V4L2_TUNER_RADIO; call_all(core, tuner, g_tuner, t); return 0; } static int radio_enum_input (struct file *file, void *priv, struct v4l2_input *i) { if (i->index != 0) return -EINVAL; strcpy(i->name,"Radio"); i->type = V4L2_INPUT_TYPE_TUNER; return 0; } static int radio_g_audio (struct file *file, void *priv, struct v4l2_audio *a) { if (unlikely(a->index)) return -EINVAL; strcpy(a->name,"Radio"); return 0; } /* FIXME: Should add a standard for radio */ static int radio_s_tuner (struct file *file, void *priv, struct v4l2_tuner *t) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; if (0 != t->index) return -EINVAL; call_all(core, tuner, s_tuner, t); return 0; } static int radio_s_audio (struct file *file, void *fh, struct v4l2_audio *a) { return 0; } static int radio_s_input (struct file *file, void *fh, unsigned int i) { return 0; } static int 
radio_queryctrl (struct file *file, void *priv, struct v4l2_queryctrl *c) { int i; if (c->id < V4L2_CID_BASE || c->id >= V4L2_CID_LASTP1) return -EINVAL; if (c->id == V4L2_CID_AUDIO_MUTE || c->id == V4L2_CID_AUDIO_VOLUME || c->id == V4L2_CID_AUDIO_BALANCE) { for (i = 0; i < CX8800_CTLS; i++) { if (cx8800_ctls[i].v.id == c->id) break; } if (i == CX8800_CTLS) return -EINVAL; *c = cx8800_ctls[i].v; } else *c = no_ctl; return 0; } /* ----------------------------------------------------------- */ static void cx8800_vid_timeout(unsigned long data) { struct cx8800_dev *dev = (struct cx8800_dev*)data; struct cx88_core *core = dev->core; struct cx88_dmaqueue *q = &dev->vidq; struct cx88_buffer *buf; unsigned long flags; cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH21]); cx_clear(MO_VID_DMACNTRL, 0x11); cx_clear(VID_CAPTURE_CONTROL, 0x06); spin_lock_irqsave(&dev->slock,flags); while (!list_empty(&q->active)) { buf = list_entry(q->active.next, struct cx88_buffer, vb.queue); list_del(&buf->vb.queue); buf->vb.state = VIDEOBUF_ERROR; wake_up(&buf->vb.done); printk("%s/0: [%p/%d] timeout - dma=0x%08lx\n", core->name, buf, buf->vb.i, (unsigned long)buf->risc.dma); } restart_video_queue(dev,q); spin_unlock_irqrestore(&dev->slock,flags); } static const char *cx88_vid_irqs[32] = { "y_risci1", "u_risci1", "v_risci1", "vbi_risc1", "y_risci2", "u_risci2", "v_risci2", "vbi_risc2", "y_oflow", "u_oflow", "v_oflow", "vbi_oflow", "y_sync", "u_sync", "v_sync", "vbi_sync", "opc_err", "par_err", "rip_err", "pci_abort", }; static void cx8800_vid_irq(struct cx8800_dev *dev) { struct cx88_core *core = dev->core; u32 status, mask, count; status = cx_read(MO_VID_INTSTAT); mask = cx_read(MO_VID_INTMSK); if (0 == (status & mask)) return; cx_write(MO_VID_INTSTAT, status); if (irq_debug || (status & mask & ~0xff)) cx88_print_irqbits(core->name, "irq vid", cx88_vid_irqs, ARRAY_SIZE(cx88_vid_irqs), status, mask); /* risc op code error */ if (status & (1 << 16)) { printk(KERN_WARNING "%s/0: 
video risc op code error\n",core->name); cx_clear(MO_VID_DMACNTRL, 0x11); cx_clear(VID_CAPTURE_CONTROL, 0x06); cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH21]); } /* risc1 y */ if (status & 0x01) { spin_lock(&dev->slock); count = cx_read(MO_VIDY_GPCNT); cx88_wakeup(core, &dev->vidq, count); spin_unlock(&dev->slock); } /* risc1 vbi */ if (status & 0x08) { spin_lock(&dev->slock); count = cx_read(MO_VBI_GPCNT); cx88_wakeup(core, &dev->vbiq, count); spin_unlock(&dev->slock); } /* risc2 y */ if (status & 0x10) { dprintk(2,"stopper video\n"); spin_lock(&dev->slock); restart_video_queue(dev,&dev->vidq); spin_unlock(&dev->slock); } /* risc2 vbi */ if (status & 0x80) { dprintk(2,"stopper vbi\n"); spin_lock(&dev->slock); cx8800_restart_vbi_queue(dev,&dev->vbiq); spin_unlock(&dev->slock); } } static irqreturn_t cx8800_irq(int irq, void *dev_id) { struct cx8800_dev *dev = dev_id; struct cx88_core *core = dev->core; u32 status; int loop, handled = 0; for (loop = 0; loop < 10; loop++) { status = cx_read(MO_PCI_INTSTAT) & (core->pci_irqmask | PCI_INT_VIDINT); if (0 == status) goto out; cx_write(MO_PCI_INTSTAT, status); handled = 1; if (status & core->pci_irqmask) cx88_core_irq(core,status); if (status & PCI_INT_VIDINT) cx8800_vid_irq(dev); }; if (10 == loop) { printk(KERN_WARNING "%s/0: irq loop -- clearing mask\n", core->name); cx_write(MO_PCI_INTMSK,0); } out: return IRQ_RETVAL(handled); } /* ----------------------------------------------------------- */ /* exported stuff */ static const struct v4l2_file_operations video_fops = { .owner = THIS_MODULE, .open = video_open, .release = video_release, .read = video_read, .poll = video_poll, .mmap = video_mmap, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = 
vidioc_s_fmt_vid_cap, .vidioc_g_fmt_vbi_cap = cx8800_vbi_fmt, .vidioc_try_fmt_vbi_cap = cx8800_vbi_fmt, .vidioc_s_fmt_vbi_cap = cx8800_vbi_fmt, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_s_std = vidioc_s_std, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, #endif }; static struct video_device cx8800_vbi_template; static const struct video_device cx8800_video_template = { .name = "cx8800-video", .fops = &video_fops, .ioctl_ops = &video_ioctl_ops, .tvnorms = CX88_NORMS, .current_norm = V4L2_STD_NTSC_M, }; static const struct v4l2_file_operations radio_fops = { .owner = THIS_MODULE, .open = video_open, .release = video_release, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops radio_ioctl_ops = { .vidioc_querycap = radio_querycap, .vidioc_g_tuner = radio_g_tuner, .vidioc_enum_input = radio_enum_input, .vidioc_g_audio = radio_g_audio, .vidioc_s_tuner = radio_s_tuner, .vidioc_s_audio = radio_s_audio, .vidioc_s_input = radio_s_input, .vidioc_queryctrl = radio_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, #endif }; static const struct video_device cx8800_radio_template = { .name = "cx8800-radio", .fops = &radio_fops, .ioctl_ops = &radio_ioctl_ops, }; /* 
----------------------------------------------------------- */ static void cx8800_unregister_video(struct cx8800_dev *dev) { if (dev->radio_dev) { if (video_is_registered(dev->radio_dev)) video_unregister_device(dev->radio_dev); else video_device_release(dev->radio_dev); dev->radio_dev = NULL; } if (dev->vbi_dev) { if (video_is_registered(dev->vbi_dev)) video_unregister_device(dev->vbi_dev); else video_device_release(dev->vbi_dev); dev->vbi_dev = NULL; } if (dev->video_dev) { if (video_is_registered(dev->video_dev)) video_unregister_device(dev->video_dev); else video_device_release(dev->video_dev); dev->video_dev = NULL; } } static int __devinit cx8800_initdev(struct pci_dev *pci_dev, const struct pci_device_id *pci_id) { struct cx8800_dev *dev; struct cx88_core *core; int err; dev = kzalloc(sizeof(*dev),GFP_KERNEL); if (NULL == dev) return -ENOMEM; /* pci init */ dev->pci = pci_dev; if (pci_enable_device(pci_dev)) { err = -EIO; goto fail_free; } core = cx88_core_get(dev->pci); if (NULL == core) { err = -EINVAL; goto fail_free; } dev->core = core; /* print pci info */ dev->pci_rev = pci_dev->revision; pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat); printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, " "latency: %d, mmio: 0x%llx\n", core->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq, dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); pci_set_master(pci_dev); if (!pci_dma_supported(pci_dev,DMA_BIT_MASK(32))) { printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name); err = -EIO; goto fail_core; } /* Initialize VBI template */ memcpy( &cx8800_vbi_template, &cx8800_video_template, sizeof(cx8800_vbi_template) ); strcpy(cx8800_vbi_template.name,"cx8800-vbi"); /* initialize driver struct */ spin_lock_init(&dev->slock); core->tvnorm = cx8800_video_template.current_norm; /* init video dma queues */ INIT_LIST_HEAD(&dev->vidq.active); INIT_LIST_HEAD(&dev->vidq.queued); dev->vidq.timeout.function = cx8800_vid_timeout; 
dev->vidq.timeout.data = (unsigned long)dev; init_timer(&dev->vidq.timeout); cx88_risc_stopper(dev->pci,&dev->vidq.stopper, MO_VID_DMACNTRL,0x11,0x00); /* init vbi dma queues */ INIT_LIST_HEAD(&dev->vbiq.active); INIT_LIST_HEAD(&dev->vbiq.queued); dev->vbiq.timeout.function = cx8800_vbi_timeout; dev->vbiq.timeout.data = (unsigned long)dev; init_timer(&dev->vbiq.timeout); cx88_risc_stopper(dev->pci,&dev->vbiq.stopper, MO_VID_DMACNTRL,0x88,0x00); /* get irq */ err = request_irq(pci_dev->irq, cx8800_irq, IRQF_SHARED | IRQF_DISABLED, core->name, dev); if (err < 0) { printk(KERN_ERR "%s/0: can't get IRQ %d\n", core->name,pci_dev->irq); goto fail_core; } cx_set(MO_PCI_INTMSK, core->pci_irqmask); /* load and configure helper modules */ if (core->board.audio_chip == V4L2_IDENT_WM8775) { struct i2c_board_info wm8775_info = { .type = "wm8775", .addr = 0x36 >> 1, .platform_data = &core->wm8775_data, }; struct v4l2_subdev *sd; if (core->boardnr == CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1) core->wm8775_data.is_nova_s = true; else core->wm8775_data.is_nova_s = false; sd = v4l2_i2c_new_subdev_board(&core->v4l2_dev, &core->i2c_adap, &wm8775_info, NULL); if (sd != NULL) sd->grp_id = WM8775_GID; } if (core->board.audio_chip == V4L2_IDENT_TVAUDIO) { /* This probes for a tda9874 as is used on some Pixelview Ultra boards. 
*/ v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, "tvaudio", 0, I2C_ADDRS(0xb0 >> 1)); } switch (core->boardnr) { case CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD: case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD: { static const struct i2c_board_info rtc_info = { I2C_BOARD_INFO("isl1208", 0x6f) }; request_module("rtc-isl1208"); core->i2c_rtc = i2c_new_device(&core->i2c_adap, &rtc_info); } /* break intentionally omitted */ case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: request_module("ir-kbd-i2c"); } /* Sets device info at pci_dev */ pci_set_drvdata(pci_dev, dev); /* initial device configuration */ mutex_lock(&core->lock); cx88_set_tvnorm(core, core->tvnorm); init_controls(core); cx88_video_mux(core, 0); /* register v4l devices */ dev->video_dev = cx88_vdev_init(core,dev->pci, &cx8800_video_template,"video"); video_set_drvdata(dev->video_dev, dev); err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER, video_nr[core->nr]); if (err < 0) { printk(KERN_ERR "%s/0: can't register video device\n", core->name); goto fail_unreg; } printk(KERN_INFO "%s/0: registered device %s [v4l2]\n", core->name, video_device_node_name(dev->video_dev)); dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi"); video_set_drvdata(dev->vbi_dev, dev); err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI, vbi_nr[core->nr]); if (err < 0) { printk(KERN_ERR "%s/0: can't register vbi device\n", core->name); goto fail_unreg; } printk(KERN_INFO "%s/0: registered device %s\n", core->name, video_device_node_name(dev->vbi_dev)); if (core->board.radio.type == CX88_RADIO) { dev->radio_dev = cx88_vdev_init(core,dev->pci, &cx8800_radio_template,"radio"); video_set_drvdata(dev->radio_dev, dev); err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO, radio_nr[core->nr]); if (err < 0) { printk(KERN_ERR "%s/0: can't register radio device\n", core->name); goto fail_unreg; } printk(KERN_INFO "%s/0: registered device %s\n", core->name, video_device_node_name(dev->radio_dev)); } /* start tvaudio thread 
*/ if (core->board.tuner_type != TUNER_ABSENT) { core->kthread = kthread_run(cx88_audio_thread, core, "cx88 tvaudio"); if (IS_ERR(core->kthread)) { err = PTR_ERR(core->kthread); printk(KERN_ERR "%s/0: failed to create cx88 audio thread, err=%d\n", core->name, err); } } mutex_unlock(&core->lock); return 0; fail_unreg: cx8800_unregister_video(dev); free_irq(pci_dev->irq, dev); mutex_unlock(&core->lock); fail_core: cx88_core_put(core,dev->pci); fail_free: kfree(dev); return err; } static void __devexit cx8800_finidev(struct pci_dev *pci_dev) { struct cx8800_dev *dev = pci_get_drvdata(pci_dev); struct cx88_core *core = dev->core; /* stop thread */ if (core->kthread) { kthread_stop(core->kthread); core->kthread = NULL; } if (core->ir) cx88_ir_stop(core); cx88_shutdown(core); /* FIXME */ pci_disable_device(pci_dev); /* unregister stuff */ free_irq(pci_dev->irq, dev); cx8800_unregister_video(dev); pci_set_drvdata(pci_dev, NULL); /* free memory */ btcx_riscmem_free(dev->pci,&dev->vidq.stopper); cx88_core_put(core,dev->pci); kfree(dev); } #ifdef CONFIG_PM static int cx8800_suspend(struct pci_dev *pci_dev, pm_message_t state) { struct cx8800_dev *dev = pci_get_drvdata(pci_dev); struct cx88_core *core = dev->core; /* stop video+vbi capture */ spin_lock(&dev->slock); if (!list_empty(&dev->vidq.active)) { printk("%s/0: suspend video\n", core->name); stop_video_dma(dev); del_timer(&dev->vidq.timeout); } if (!list_empty(&dev->vbiq.active)) { printk("%s/0: suspend vbi\n", core->name); cx8800_stop_vbi_dma(dev); del_timer(&dev->vbiq.timeout); } spin_unlock(&dev->slock); if (core->ir) cx88_ir_stop(core); /* FIXME -- shutdown device */ cx88_shutdown(core); pci_save_state(pci_dev); if (0 != pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state))) { pci_disable_device(pci_dev); dev->state.disabled = 1; } return 0; } static int cx8800_resume(struct pci_dev *pci_dev) { struct cx8800_dev *dev = pci_get_drvdata(pci_dev); struct cx88_core *core = dev->core; int err; if 
(dev->state.disabled) { err=pci_enable_device(pci_dev); if (err) { printk(KERN_ERR "%s/0: can't enable device\n", core->name); return err; } dev->state.disabled = 0; } err= pci_set_power_state(pci_dev, PCI_D0); if (err) { printk(KERN_ERR "%s/0: can't set power state\n", core->name); pci_disable_device(pci_dev); dev->state.disabled = 1; return err; } pci_restore_state(pci_dev); /* FIXME: re-initialize hardware */ cx88_reset(core); if (core->ir) cx88_ir_start(core); cx_set(MO_PCI_INTMSK, core->pci_irqmask); /* restart video+vbi capture */ spin_lock(&dev->slock); if (!list_empty(&dev->vidq.active)) { printk("%s/0: resume video\n", core->name); restart_video_queue(dev,&dev->vidq); } if (!list_empty(&dev->vbiq.active)) { printk("%s/0: resume vbi\n", core->name); cx8800_restart_vbi_queue(dev,&dev->vbiq); } spin_unlock(&dev->slock); return 0; } #endif /* ----------------------------------------------------------- */ static const struct pci_device_id cx8800_pci_tbl[] = { { .vendor = 0x14f1, .device = 0x8800, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, },{ /* --- end of list --- */ } }; MODULE_DEVICE_TABLE(pci, cx8800_pci_tbl); static struct pci_driver cx8800_pci_driver = { .name = "cx8800", .id_table = cx8800_pci_tbl, .probe = cx8800_initdev, .remove = __devexit_p(cx8800_finidev), #ifdef CONFIG_PM .suspend = cx8800_suspend, .resume = cx8800_resume, #endif }; static int __init cx8800_init(void) { printk(KERN_INFO "cx88/0: cx2388x v4l2 driver version %d.%d.%d loaded\n", (CX88_VERSION_CODE >> 16) & 0xff, (CX88_VERSION_CODE >> 8) & 0xff, CX88_VERSION_CODE & 0xff); #ifdef SNAPSHOT printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n", SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100); #endif return pci_register_driver(&cx8800_pci_driver); } static void __exit cx8800_fini(void) { pci_unregister_driver(&cx8800_pci_driver); } module_init(cx8800_init); module_exit(cx8800_fini); /* ----------------------------------------------------------- */ /* * Local variables: * 
c-basic-offset: 8 * End: * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off */
gpl-2.0
NooNameR/k3_bravo
drivers3/net/vxge/vxge-traffic.c
2385
68222
/****************************************************************************** * This software may be used and distributed according to the terms of * the GNU General Public License (GPL), incorporated herein by reference. * Drivers based on or derived from this code fall under the GPL and must * retain the authorship, copyright and license notice. This file is not * a complete program and may only be used when the entire operating * system is licensed under the GPL. * See the file COPYING in this distribution for more information. * * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O * Virtualized Server Adapter. * Copyright(c) 2002-2010 Exar Corp. ******************************************************************************/ #include <linux/etherdevice.h> #include <linux/prefetch.h> #include "vxge-traffic.h" #include "vxge-config.h" #include "vxge-main.h" /* * vxge_hw_vpath_intr_enable - Enable vpath interrupts. * @vp: Virtual Path handle. * * Enable vpath interrupts. The function is to be executed the last in * vpath initialization sequence. 
* * See also: vxge_hw_vpath_intr_disable() */ enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp) { u64 val64; struct __vxge_hw_virtualpath *vpath; struct vxge_hw_vpath_reg __iomem *vp_reg; enum vxge_hw_status status = VXGE_HW_OK; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } vpath = vp->vpath; if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { status = VXGE_HW_ERR_VPATH_NOT_OPEN; goto exit; } vp_reg = vpath->vp_reg; writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->general_errors_reg); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->pci_config_errors_reg); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->mrpcim_to_vpath_alarm_reg); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->srpcim_to_vpath_alarm_reg); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->vpath_ppif_int_status); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->srpcim_msg_to_vpath_reg); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->vpath_pcipif_int_status); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->prc_alarm_reg); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->wrdma_alarm_status); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->asic_ntwk_vp_err_reg); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->xgmac_vp_int_status); val64 = readq(&vp_reg->vpath_general_int_status); /* Mask unwanted interrupts */ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->vpath_pcipif_int_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->srpcim_msg_to_vpath_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->srpcim_to_vpath_alarm_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 
&vp_reg->mrpcim_to_vpath_alarm_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->pci_config_errors_mask); /* Unmask the individual interrupts */ writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW| VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW| VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ| VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32), &vp_reg->general_errors_mask); __vxge_hw_pio_mem_write32_upper( (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR| VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR| VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON| VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON| VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR| VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32), &vp_reg->kdfcctl_errors_mask); __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask); __vxge_hw_pio_mem_write32_upper( (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32), &vp_reg->prc_alarm_mask); __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask); __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask); if (vpath->hldev->first_vp_id != vpath->vp_id) __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->asic_ntwk_vp_err_mask); else __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(( VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT | VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32), &vp_reg->asic_ntwk_vp_err_mask); __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_general_int_mask); exit: return status; } /* * vxge_hw_vpath_intr_disable - Disable vpath interrupts. * @vp: Virtual Path handle. * * Disable vpath interrupts. The function is to be executed the last in * vpath initialization sequence. 
* * See also: vxge_hw_vpath_intr_enable() */ enum vxge_hw_status vxge_hw_vpath_intr_disable( struct __vxge_hw_vpath_handle *vp) { u64 val64; struct __vxge_hw_virtualpath *vpath; enum vxge_hw_status status = VXGE_HW_OK; struct vxge_hw_vpath_reg __iomem *vp_reg; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } vpath = vp->vpath; if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { status = VXGE_HW_ERR_VPATH_NOT_OPEN; goto exit; } vp_reg = vpath->vp_reg; __vxge_hw_pio_mem_write32_upper( (u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->vpath_general_int_mask); val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id)); writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->general_errors_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->pci_config_errors_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->mrpcim_to_vpath_alarm_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->srpcim_to_vpath_alarm_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->vpath_ppif_int_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->srpcim_msg_to_vpath_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->vpath_pcipif_int_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->wrdma_alarm_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->prc_alarm_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->xgmac_vp_int_mask); __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, &vp_reg->asic_ntwk_vp_err_mask); exit: return status; } void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo) { struct vxge_hw_vpath_reg __iomem *vp_reg; struct vxge_hw_vp_config *config; u64 val64; if (fifo->config->enable != VXGE_HW_FIFO_ENABLE) return; vp_reg = fifo->vp_reg; config = container_of(fifo->config, struct 
vxge_hw_vp_config, fifo); if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE; val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; fifo->tim_tti_cfg1_saved = val64; writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); } } void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring) { u64 val64 = ring->tim_rti_cfg1_saved; val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; ring->tim_rti_cfg1_saved = val64; writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); } void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo) { u64 val64 = fifo->tim_tti_cfg3_saved; u64 timer = (fifo->rtimer * 1000) / 272; val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff); if (timer) val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) | VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5); writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); /* tti_cfg3_saved is not updated again because it is * initialized at one place only - init time. */ } void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring) { u64 val64 = ring->tim_rti_cfg3_saved; u64 timer = (ring->rtimer * 1000) / 272; val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff); if (timer) val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) | VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4); writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); /* rti_cfg3_saved is not updated again because it is * initialized at one place only - init time. */ } /** * vxge_hw_channel_msix_mask - Mask MSIX Vector. 
* @channel: Channel for rx or tx handle * @msix_id: MSIX ID * * The function masks the msix interrupt for the given msix_id * * Returns: 0 */ void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id) { __vxge_hw_pio_mem_write32_upper( (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), &channel->common_reg->set_msix_mask_vect[msix_id%4]); } /** * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector. * @channel: Channel for rx or tx handle * @msix_id: MSIX ID * * The function unmasks the msix interrupt for the given msix_id * * Returns: 0 */ void vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id) { __vxge_hw_pio_mem_write32_upper( (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), &channel->common_reg->clear_msix_mask_vect[msix_id%4]); } /** * vxge_hw_channel_msix_clear - Clear the MSIX Vector. * @channel: Channel for rx or tx handle * @msix_id: MSIX ID * * The function unmasks the msix interrupt for the given msix_id * if configured in MSIX oneshot mode * * Returns: 0 */ void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id) { __vxge_hw_pio_mem_write32_upper( (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]); } /** * vxge_hw_device_set_intr_type - Updates the configuration * with new interrupt type. * @hldev: HW device handle. * @intr_mode: New interrupt type */ u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode) { if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && (intr_mode != VXGE_HW_INTR_MODE_MSIX) && (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && (intr_mode != VXGE_HW_INTR_MODE_DEF)) intr_mode = VXGE_HW_INTR_MODE_IRQLINE; hldev->config.intr_mode = intr_mode; return intr_mode; } /** * vxge_hw_device_intr_enable - Enable interrupts. * @hldev: HW device handle. * * Enable Titan interrupts.
The function is to be executed the last in * Titan initialization sequence. * * See also: vxge_hw_device_intr_disable() */ void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev) { u32 i; u64 val64; u32 val32; vxge_hw_device_mask_all(hldev); for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (!(hldev->vpaths_deployed & vxge_mBIT(i))) continue; vxge_hw_vpath_intr_enable( VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i])); } if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) { val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] | hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]; if (val64 != 0) { writeq(val64, &hldev->common_reg->tim_int_status0); writeq(~val64, &hldev->common_reg->tim_int_mask0); } val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] | hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]; if (val32 != 0) { __vxge_hw_pio_mem_write32_upper(val32, &hldev->common_reg->tim_int_status1); __vxge_hw_pio_mem_write32_upper(~val32, &hldev->common_reg->tim_int_mask1); } } val64 = readq(&hldev->common_reg->titan_general_int_status); vxge_hw_device_unmask_all(hldev); } /** * vxge_hw_device_intr_disable - Disable Titan interrupts. * @hldev: HW device handle. * @op: One of the enum vxge_hw_device_intr enumerated values specifying * the type(s) of interrupts to disable. * * Disable Titan interrupts. * * See also: vxge_hw_device_intr_enable() */ void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev) { u32 i; vxge_hw_device_mask_all(hldev); /* mask all the tim interrupts */ writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0); __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32, &hldev->common_reg->tim_int_mask1); for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (!(hldev->vpaths_deployed & vxge_mBIT(i))) continue; vxge_hw_vpath_intr_disable( VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i])); } } /** * vxge_hw_device_mask_all - Mask all device interrupts. * @hldev: HW device handle. * * Mask all device interrupts. 
* * See also: vxge_hw_device_unmask_all() */ void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev) { u64 val64; val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM | VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC; __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), &hldev->common_reg->titan_mask_all_int); } /** * vxge_hw_device_unmask_all - Unmask all device interrupts. * @hldev: HW device handle. * * Unmask all device interrupts. * * See also: vxge_hw_device_mask_all() */ void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev) { u64 val64 = 0; if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC; __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), &hldev->common_reg->titan_mask_all_int); } /** * vxge_hw_device_flush_io - Flush io writes. * @hldev: HW device handle. * * The function performs a read operation to flush io writes. * * Returns: void */ void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev) { u32 val32; val32 = readl(&hldev->common_reg->titan_general_int_status); } /** * __vxge_hw_device_handle_error - Handle error * @hldev: HW device * @vp_id: Vpath Id * @type: Error type. Please see enum vxge_hw_event{} * * Handle error. 
*/ static enum vxge_hw_status __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id, enum vxge_hw_event type) { switch (type) { case VXGE_HW_EVENT_UNKNOWN: break; case VXGE_HW_EVENT_RESET_START: case VXGE_HW_EVENT_RESET_COMPLETE: case VXGE_HW_EVENT_LINK_DOWN: case VXGE_HW_EVENT_LINK_UP: goto out; case VXGE_HW_EVENT_ALARM_CLEARED: goto out; case VXGE_HW_EVENT_ECCERR: case VXGE_HW_EVENT_MRPCIM_ECCERR: goto out; case VXGE_HW_EVENT_FIFO_ERR: case VXGE_HW_EVENT_VPATH_ERR: case VXGE_HW_EVENT_CRITICAL_ERR: case VXGE_HW_EVENT_SERR: break; case VXGE_HW_EVENT_SRPCIM_SERR: case VXGE_HW_EVENT_MRPCIM_SERR: goto out; case VXGE_HW_EVENT_SLOT_FREEZE: break; default: vxge_assert(0); goto out; } /* notify driver */ if (hldev->uld_callbacks.crit_err) hldev->uld_callbacks.crit_err( (struct __vxge_hw_device *)hldev, type, vp_id); out: return VXGE_HW_OK; } /* * __vxge_hw_device_handle_link_down_ind * @hldev: HW device handle. * * Link down indication handler. The function is invoked by HW when * Titan indicates that the link is down. */ static enum vxge_hw_status __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev) { /* * If the link state is already down, return. */ if (hldev->link_state == VXGE_HW_LINK_DOWN) goto exit; hldev->link_state = VXGE_HW_LINK_DOWN; /* notify driver */ if (hldev->uld_callbacks.link_down) hldev->uld_callbacks.link_down(hldev); exit: return VXGE_HW_OK; } /* * __vxge_hw_device_handle_link_up_ind * @hldev: HW device handle. * * Link up indication handler. The function is invoked by HW when * Titan indicates that the link is up for programmable amount of time. */ static enum vxge_hw_status __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev) { /* * If the link state is already up, return.
*/ if (hldev->link_state == VXGE_HW_LINK_UP) goto exit; hldev->link_state = VXGE_HW_LINK_UP; /* notify driver */ if (hldev->uld_callbacks.link_up) hldev->uld_callbacks.link_up(hldev); exit: return VXGE_HW_OK; } /* * __vxge_hw_vpath_alarm_process - Process Alarms. * @vpath: Virtual Path. * @skip_alarms: Do not clear the alarms * * Process vpath alarms. * */ static enum vxge_hw_status __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath, u32 skip_alarms) { u64 val64; u64 alarm_status; u64 pic_status; struct __vxge_hw_device *hldev = NULL; enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN; u64 mask64; struct vxge_hw_vpath_stats_sw_info *sw_stats; struct vxge_hw_vpath_reg __iomem *vp_reg; if (vpath == NULL) { alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, alarm_event); goto out2; } hldev = vpath->hldev; vp_reg = vpath->vp_reg; alarm_status = readq(&vp_reg->vpath_general_int_status); if (alarm_status == VXGE_HW_ALL_FOXES) { alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE, alarm_event); goto out; } sw_stats = vpath->sw_stats; if (alarm_status & ~( VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT | VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT | VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT | VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) { sw_stats->error_stats.unknown_alarms++; alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, alarm_event); goto out; } if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) { val64 = readq(&vp_reg->xgmac_vp_int_status); if (val64 & VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) { val64 = readq(&vp_reg->asic_ntwk_vp_err_reg); if (((val64 & VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) && (!(val64 & VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) || ((val64 & VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) && (!(val64 & VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) ))) { sw_stats->error_stats.network_sustained_fault++; writeq( VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT, &vp_reg->asic_ntwk_vp_err_mask); 
__vxge_hw_device_handle_link_down_ind(hldev); alarm_event = VXGE_HW_SET_LEVEL( VXGE_HW_EVENT_LINK_DOWN, alarm_event); } if (((val64 & VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) && (!(val64 & VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) || ((val64 & VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) && (!(val64 & VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) ))) { sw_stats->error_stats.network_sustained_ok++; writeq( VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK, &vp_reg->asic_ntwk_vp_err_mask); __vxge_hw_device_handle_link_up_ind(hldev); alarm_event = VXGE_HW_SET_LEVEL( VXGE_HW_EVENT_LINK_UP, alarm_event); } writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->asic_ntwk_vp_err_reg); alarm_event = VXGE_HW_SET_LEVEL( VXGE_HW_EVENT_ALARM_CLEARED, alarm_event); if (skip_alarms) return VXGE_HW_OK; } } if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) { pic_status = readq(&vp_reg->vpath_ppif_int_status); if (pic_status & VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) { val64 = readq(&vp_reg->general_errors_reg); mask64 = readq(&vp_reg->general_errors_mask); if ((val64 & VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) & ~mask64) { sw_stats->error_stats.ini_serr_det++; alarm_event = VXGE_HW_SET_LEVEL( VXGE_HW_EVENT_SERR, alarm_event); } if ((val64 & VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) & ~mask64) { sw_stats->error_stats.dblgen_fifo0_overflow++; alarm_event = VXGE_HW_SET_LEVEL( VXGE_HW_EVENT_FIFO_ERR, alarm_event); } if ((val64 & VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) & ~mask64) sw_stats->error_stats.statsb_pif_chain_error++; if ((val64 & VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) & ~mask64) sw_stats->error_stats.statsb_drop_timeout++; if ((val64 & VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) & ~mask64) sw_stats->error_stats.target_illegal_access++; if (!skip_alarms) { writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->general_errors_reg); alarm_event = VXGE_HW_SET_LEVEL( VXGE_HW_EVENT_ALARM_CLEARED, alarm_event); } } if (pic_status & 
VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) { val64 = readq(&vp_reg->kdfcctl_errors_reg); mask64 = readq(&vp_reg->kdfcctl_errors_mask); if ((val64 & VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) & ~mask64) { sw_stats->error_stats.kdfcctl_fifo0_overwrite++; alarm_event = VXGE_HW_SET_LEVEL( VXGE_HW_EVENT_FIFO_ERR, alarm_event); } if ((val64 & VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) & ~mask64) { sw_stats->error_stats.kdfcctl_fifo0_poison++; alarm_event = VXGE_HW_SET_LEVEL( VXGE_HW_EVENT_FIFO_ERR, alarm_event); } if ((val64 & VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) & ~mask64) { sw_stats->error_stats.kdfcctl_fifo0_dma_error++; alarm_event = VXGE_HW_SET_LEVEL( VXGE_HW_EVENT_FIFO_ERR, alarm_event); } if (!skip_alarms) { writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg); alarm_event = VXGE_HW_SET_LEVEL( VXGE_HW_EVENT_ALARM_CLEARED, alarm_event); } } } if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) { val64 = readq(&vp_reg->wrdma_alarm_status); if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) { val64 = readq(&vp_reg->prc_alarm_reg); mask64 = readq(&vp_reg->prc_alarm_mask); if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)& ~mask64) sw_stats->error_stats.prc_ring_bumps++; if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) & ~mask64) { sw_stats->error_stats.prc_rxdcm_sc_err++; alarm_event = VXGE_HW_SET_LEVEL( VXGE_HW_EVENT_VPATH_ERR, alarm_event); } if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT) & ~mask64) { sw_stats->error_stats.prc_rxdcm_sc_abort++; alarm_event = VXGE_HW_SET_LEVEL( VXGE_HW_EVENT_VPATH_ERR, alarm_event); } if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR) & ~mask64) { sw_stats->error_stats.prc_quanta_size_err++; alarm_event = VXGE_HW_SET_LEVEL( VXGE_HW_EVENT_VPATH_ERR, alarm_event); } if (!skip_alarms) { writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->prc_alarm_reg); alarm_event = VXGE_HW_SET_LEVEL( VXGE_HW_EVENT_ALARM_CLEARED, alarm_event); } } } out: 
hldev->stats.sw_dev_err_stats.vpath_alarms++; out2: if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) || (alarm_event == VXGE_HW_EVENT_UNKNOWN)) return VXGE_HW_OK; __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event); if (alarm_event == VXGE_HW_EVENT_SERR) return VXGE_HW_ERR_CRITICAL; return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ? VXGE_HW_ERR_SLOT_FREEZE : (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO : VXGE_HW_ERR_VPATH; } /** * vxge_hw_device_begin_irq - Begin IRQ processing. * @hldev: HW device handle. * @skip_alarms: Do not clear the alarms * @reason: "Reason" for the interrupt, the value of Titan's * general_int_status register. * * The function performs two actions, It first checks whether (shared IRQ) the * interrupt was raised by the device. Next, it masks the device interrupts. * * Note: * vxge_hw_device_begin_irq() does not flush MMIO writes through the * bridge. Therefore, two back-to-back interrupts are potentially possible. * * Returns: 0, if the interrupt is not "ours" (note that in this case the * device remain enabled). * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter * status. 
*/ enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev, u32 skip_alarms, u64 *reason) { u32 i; u64 val64; u64 adapter_status; u64 vpath_mask; enum vxge_hw_status ret = VXGE_HW_OK; val64 = readq(&hldev->common_reg->titan_general_int_status); if (unlikely(!val64)) { /* not Titan interrupt */ *reason = 0; ret = VXGE_HW_ERR_WRONG_IRQ; goto exit; } if (unlikely(val64 == VXGE_HW_ALL_FOXES)) { adapter_status = readq(&hldev->common_reg->adapter_status); if (adapter_status == VXGE_HW_ALL_FOXES) { __vxge_hw_device_handle_error(hldev, NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE); *reason = 0; ret = VXGE_HW_ERR_SLOT_FREEZE; goto exit; } } hldev->stats.sw_dev_info_stats.total_intr_cnt++; *reason = val64; vpath_mask = hldev->vpaths_deployed >> (64 - VXGE_HW_MAX_VIRTUAL_PATHS); if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) { hldev->stats.sw_dev_info_stats.traffic_intr_cnt++; return VXGE_HW_OK; } hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++; if (unlikely(val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) { enum vxge_hw_status error_level = VXGE_HW_OK; hldev->stats.sw_dev_err_stats.vpath_alarms++; for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (!(hldev->vpaths_deployed & vxge_mBIT(i))) continue; ret = __vxge_hw_vpath_alarm_process( &hldev->virtual_paths[i], skip_alarms); error_level = VXGE_HW_SET_LEVEL(ret, error_level); if (unlikely((ret == VXGE_HW_ERR_CRITICAL) || (ret == VXGE_HW_ERR_SLOT_FREEZE))) break; } ret = error_level; } exit: return ret; } /** * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the * condition that has caused the Tx and RX interrupt. * @hldev: HW device. * * Acknowledge (that is, clear) the condition that has caused * the Tx and Rx interrupt. * See also: vxge_hw_device_begin_irq(), * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx(). 
*/ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev) { if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) || (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) { writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] | hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]), &hldev->common_reg->tim_int_status0); } if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) || (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) { __vxge_hw_pio_mem_write32_upper( (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] | hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]), &hldev->common_reg->tim_int_status1); } } /* * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel * @channel: Channel * @dtrh: Buffer to return the DTR pointer * * Allocates a dtr from the reserve array. If the reserve array is empty, * it swaps the reserve and free arrays. * */ static enum vxge_hw_status vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh) { void **tmp_arr; if (channel->reserve_ptr - channel->reserve_top > 0) { _alloc_after_swap: *dtrh = channel->reserve_arr[--channel->reserve_ptr]; return VXGE_HW_OK; } /* switch between empty and full arrays */ /* the idea behind such a design is that by having free and reserved * arrays separated we basically separated irq and non-irq parts. * i.e. no additional lock need to be done when we free a resource */ if (channel->length - channel->free_ptr > 0) { tmp_arr = channel->reserve_arr; channel->reserve_arr = channel->free_arr; channel->free_arr = tmp_arr; channel->reserve_ptr = channel->length; channel->reserve_top = channel->free_ptr; channel->free_ptr = channel->length; channel->stats->reserve_free_swaps_cnt++; goto _alloc_after_swap; } channel->stats->full_cnt++; *dtrh = NULL; return VXGE_HW_INF_OUT_OF_DESCRIPTORS; } /* * vxge_hw_channel_dtr_post - Post a dtr to the channel * @channelh: Channel * @dtrh: DTR pointer * * Posts a dtr to work array. 
* */ static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh) { vxge_assert(channel->work_arr[channel->post_index] == NULL); channel->work_arr[channel->post_index++] = dtrh; /* wrap-around */ if (channel->post_index == channel->length) channel->post_index = 0; } /* * vxge_hw_channel_dtr_try_complete - Returns next completed dtr * @channel: Channel * @dtr: Buffer to return the next completed DTR pointer * * Returns the next completed dtr without removing it from work array * */ void vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh) { vxge_assert(channel->compl_index < channel->length); *dtrh = channel->work_arr[channel->compl_index]; prefetch(*dtrh); } /* * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array * @channel: Channel handle * * Removes the next completed dtr from work array * */ void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel) { channel->work_arr[channel->compl_index] = NULL; /* wrap-around */ if (++channel->compl_index == channel->length) channel->compl_index = 0; channel->stats->total_compl_cnt++; } /* * vxge_hw_channel_dtr_free - Frees a dtr * @channel: Channel handle * @dtr: DTR pointer * * Returns the dtr to free array * */ void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh) { channel->free_arr[--channel->free_ptr] = dtrh; } /* * vxge_hw_channel_dtr_count * @channel: Channel handle. Obtained via vxge_hw_channel_open(). * * Retrieve number of DTRs available. This function cannot be called * from data path. ring_initial_replenish() is the only user. */ int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel) { return (channel->reserve_ptr - channel->reserve_top) + (channel->length - channel->free_ptr); } /** * vxge_hw_ring_rxd_reserve - Reserve ring descriptor. * @ring: Handle to the ring object used for receive * @rxdh: Reserved descriptor. On success HW fills this "out" parameter * with a valid handle.
* * Reserve Rx descriptor for the subsequent filling-in driver * and posting on the corresponding channel (@channelh) * via vxge_hw_ring_rxd_post(). * * Returns: VXGE_HW_OK - success. * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available. * */ enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring, void **rxdh) { enum vxge_hw_status status; struct __vxge_hw_channel *channel; channel = &ring->channel; status = vxge_hw_channel_dtr_alloc(channel, rxdh); if (status == VXGE_HW_OK) { struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh; rxdp->control_0 = rxdp->control_1 = 0; } return status; } /** * vxge_hw_ring_rxd_free - Free descriptor. * @ring: Handle to the ring object used for receive * @rxdh: Descriptor handle. * * Free the reserved descriptor. This operation is "symmetrical" to * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's * lifecycle. * * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can * be: * * - reserved (vxge_hw_ring_rxd_reserve); * * - posted (vxge_hw_ring_rxd_post); * * - completed (vxge_hw_ring_rxd_next_completed); * * - and recycled again (vxge_hw_ring_rxd_free). * * For alternative state transitions and more details please refer to * the design doc. * */ void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh) { struct __vxge_hw_channel *channel; channel = &ring->channel; vxge_hw_channel_dtr_free(channel, rxdh); } /** * vxge_hw_ring_rxd_pre_post - Prepare rxd and post * @ring: Handle to the ring object used for receive * @rxdh: Descriptor handle. * * This routine prepares a rxd and posts */ void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh) { struct __vxge_hw_channel *channel; channel = &ring->channel; vxge_hw_channel_dtr_post(channel, rxdh); } /** * vxge_hw_ring_rxd_post_post - Process rxd after post. * @ring: Handle to the ring object used for receive * @rxdh: Descriptor handle. 
* * Processes rxd after post */ void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh) { struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; struct __vxge_hw_channel *channel; channel = &ring->channel; rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; if (ring->stats->common_stats.usage_cnt > 0) ring->stats->common_stats.usage_cnt--; } /** * vxge_hw_ring_rxd_post - Post descriptor on the ring. * @ring: Handle to the ring object used for receive * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve(). * * Post descriptor on the ring. * Prior to posting the descriptor should be filled in accordance with * Host/Titan interface specification for a given service (LL, etc.). * */ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh) { struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; struct __vxge_hw_channel *channel; channel = &ring->channel; wmb(); rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; vxge_hw_channel_dtr_post(channel, rxdh); if (ring->stats->common_stats.usage_cnt > 0) ring->stats->common_stats.usage_cnt--; } /** * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier. * @ring: Handle to the ring object used for receive * @rxdh: Descriptor handle. * * Processes rxd after post with memory barrier. */ void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh) { wmb(); vxge_hw_ring_rxd_post_post(ring, rxdh); } /** * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor. * @ring: Handle to the ring object used for receive * @rxdh: Descriptor handle. Returned by HW. * @t_code: Transfer code, as per Titan User Guide, * Receive Descriptor Format. Returned by HW. * * Retrieve the _next_ completed descriptor. * HW uses ring callback (*vxge_hw_ring_callback_f) to notify * driver of new completed descriptors.
After that * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest * completions (the very first completion is passed by HW via * vxge_hw_ring_callback_f). * * Implementation-wise, the driver is free to call * vxge_hw_ring_rxd_next_completed either immediately from inside the * ring callback, or in a deferred fashion and separate (from HW) * context. * * Non-zero @t_code means failure to fill-in receive buffer(s) * of the descriptor. * For instance, parity error detected during the data transfer. * In this case Titan will complete the descriptor and indicate * for the host that the received data is not to be used. * For details please refer to Titan User Guide. * * Returns: VXGE_HW_OK - success. * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors * are currently available for processing. * * See also: vxge_hw_ring_callback_f{}, * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}. */ enum vxge_hw_status vxge_hw_ring_rxd_next_completed( struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code) { struct __vxge_hw_channel *channel; struct vxge_hw_ring_rxd_1 *rxdp; enum vxge_hw_status status = VXGE_HW_OK; u64 control_0, own; channel = &ring->channel; vxge_hw_channel_dtr_try_complete(channel, rxdh); rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh; if (rxdp == NULL) { status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS; goto exit; } control_0 = rxdp->control_0; own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0); /* check whether it is not the end */ if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) { vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control != 0); ++ring->cmpl_cnt; vxge_hw_channel_dtr_complete(channel); vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED); ring->stats->common_stats.usage_cnt++; if (ring->stats->common_stats.usage_max < ring->stats->common_stats.usage_cnt) ring->stats->common_stats.usage_max = ring->stats->common_stats.usage_cnt; status = 
VXGE_HW_OK; goto exit; } /* reset it. since we don't want to return * garbage to the driver */ *rxdh = NULL; status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS; exit: return status; } /** * vxge_hw_ring_handle_tcode - Handle transfer code. * @ring: Handle to the ring object used for receive * @rxdh: Descriptor handle. * @t_code: One of the enumerated (and documented in the Titan user guide) * "transfer codes". * * Handle descriptor's transfer code. The latter comes with each completed * descriptor. * * Returns: one of the enum vxge_hw_status{} enumerated types. * VXGE_HW_OK - for success. * VXGE_HW_ERR_CRITICAL - when encounters critical error. */ enum vxge_hw_status vxge_hw_ring_handle_tcode( struct __vxge_hw_ring *ring, void *rxdh, u8 t_code) { struct __vxge_hw_channel *channel; enum vxge_hw_status status = VXGE_HW_OK; channel = &ring->channel; /* If the t_code is not supported and if the * t_code is other than 0x5 (unparseable packet * such as unknown UPV6 header), Drop it !!! */ if (t_code == VXGE_HW_RING_T_CODE_OK || t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) { status = VXGE_HW_OK; goto exit; } if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) { status = VXGE_HW_ERR_INVALID_TCODE; goto exit; } ring->stats->rxd_t_code_err_cnt[t_code]++; exit: return status; } /** * __vxge_hw_non_offload_db_post - Post non offload doorbell * * @fifo: fifohandle * @txdl_ptr: The starting location of the TxDL in host memory * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256) * @no_snoop: No snoop flags * * This function posts a non-offload doorbell to doorbell FIFO * */ static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo, u64 txdl_ptr, u32 num_txds, u32 no_snoop) { struct __vxge_hw_channel *channel; channel = &fifo->channel; writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) | VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) | VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop), &fifo->nofl_db->control_0); mmiowb(); writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr); mmiowb(); } /** * 
vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in * the fifo * @fifoh: Handle to the fifo object used for non offload send */ u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh) { return vxge_hw_channel_dtr_count(&fifoh->channel); } /** * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor. * @fifoh: Handle to the fifo object used for non offload send * @txdlh: Reserved descriptor. On success HW fills this "out" parameter * with a valid handle. * @txdl_priv: Buffer to return the pointer to per txdl space * * Reserve a single TxDL (that is, fifo descriptor) * for the subsequent filling-in by driver) * and posting on the corresponding channel (@channelh) * via vxge_hw_fifo_txdl_post(). * * Note: it is the responsibility of driver to reserve multiple descriptors * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor * carries up to configured number (fifo.max_frags) of contiguous buffers. * * Returns: VXGE_HW_OK - success; * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available * */ enum vxge_hw_status vxge_hw_fifo_txdl_reserve( struct __vxge_hw_fifo *fifo, void **txdlh, void **txdl_priv) { struct __vxge_hw_channel *channel; enum vxge_hw_status status; int i; channel = &fifo->channel; status = vxge_hw_channel_dtr_alloc(channel, txdlh); if (status == VXGE_HW_OK) { struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)*txdlh; struct __vxge_hw_fifo_txdl_priv *priv; priv = __vxge_hw_fifo_txdl_priv(fifo, txdp); /* reset the TxDL's private */ priv->align_dma_offset = 0; priv->align_vaddr_start = priv->align_vaddr; priv->align_used_frags = 0; priv->frags = 0; priv->alloc_frags = fifo->config->max_frags; priv->next_txdl_priv = NULL; *txdl_priv = (void *)(size_t)txdp->host_control; for (i = 0; i < fifo->config->max_frags; i++) { txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i; txdp->control_0 = txdp->control_1 = 0; } } return status; } /** * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in 
the * descriptor. * @fifo: Handle to the fifo object used for non offload send * @txdlh: Descriptor handle. * @frag_idx: Index of the data buffer in the caller's scatter-gather list * (of buffers). * @dma_pointer: DMA address of the data buffer referenced by @frag_idx. * @size: Size of the data buffer (in bytes). * * This API is part of the preparation of the transmit descriptor for posting * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits(). * All three APIs fill in the fields of the fifo descriptor, * in accordance with the Titan specification. * */ void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo, void *txdlh, u32 frag_idx, dma_addr_t dma_pointer, u32 size) { struct __vxge_hw_fifo_txdl_priv *txdl_priv; struct vxge_hw_fifo_txd *txdp, *txdp_last; struct __vxge_hw_channel *channel; channel = &fifo->channel; txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh); txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags; if (frag_idx != 0) txdp->control_0 = txdp->control_1 = 0; else { txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE( VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST); txdp->control_1 |= fifo->interrupt_type; txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER( fifo->tx_intr_num); if (txdl_priv->frags) { txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1); txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE( VXGE_HW_FIFO_TXD_GATHER_CODE_LAST); } } vxge_assert(frag_idx < txdl_priv->alloc_frags); txdp->buffer_pointer = (u64)dma_pointer; txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size); fifo->stats->total_buffers++; txdl_priv->frags++; } /** * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel. * @fifo: Handle to the fifo object used for non offload send * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve() * @frags: Number of contiguous buffers that are part of a single * transmit operation. 
* * Post descriptor on the 'fifo' type channel for transmission. * Prior to posting the descriptor should be filled in accordance with * Host/Titan interface specification for a given service (LL, etc.). * */ void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh) { struct __vxge_hw_fifo_txdl_priv *txdl_priv; struct vxge_hw_fifo_txd *txdp_last; struct vxge_hw_fifo_txd *txdp_first; struct __vxge_hw_channel *channel; channel = &fifo->channel; txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh); txdp_first = (struct vxge_hw_fifo_txd *)txdlh; txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1); txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST); txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER; vxge_hw_channel_dtr_post(&fifo->channel, txdlh); __vxge_hw_non_offload_db_post(fifo, (u64)txdl_priv->dma_addr, txdl_priv->frags - 1, fifo->no_snoop_bits); fifo->stats->total_posts++; fifo->stats->common_stats.usage_cnt++; if (fifo->stats->common_stats.usage_max < fifo->stats->common_stats.usage_cnt) fifo->stats->common_stats.usage_max = fifo->stats->common_stats.usage_cnt; } /** * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor. * @fifo: Handle to the fifo object used for non offload send * @txdlh: Descriptor handle. Returned by HW. * @t_code: Transfer code, as per Titan User Guide, * Transmit Descriptor Format. * Returned by HW. * * Retrieve the _next_ completed descriptor. * HW uses channel callback (*vxge_hw_channel_callback_f) to notifiy * driver of new completed descriptors. After that * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest * completions (the very first completion is passed by HW via * vxge_hw_channel_callback_f). * * Implementation-wise, the driver is free to call * vxge_hw_fifo_txdl_next_completed either immediately from inside the * channel callback, or in a deferred fashion and separate (from HW) * context. 
* * Non-zero @t_code means failure to process the descriptor. * The failure could happen, for instance, when the link is * down, in which case Titan completes the descriptor because it * is not able to send the data out. * * For details please refer to Titan User Guide. * * Returns: VXGE_HW_OK - success. * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors * are currently available for processing. * */ enum vxge_hw_status vxge_hw_fifo_txdl_next_completed( struct __vxge_hw_fifo *fifo, void **txdlh, enum vxge_hw_fifo_tcode *t_code) { struct __vxge_hw_channel *channel; struct vxge_hw_fifo_txd *txdp; enum vxge_hw_status status = VXGE_HW_OK; channel = &fifo->channel; vxge_hw_channel_dtr_try_complete(channel, txdlh); txdp = (struct vxge_hw_fifo_txd *)*txdlh; if (txdp == NULL) { status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS; goto exit; } /* check whether host owns it */ if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) { vxge_assert(txdp->host_control != 0); vxge_hw_channel_dtr_complete(channel); *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0); if (fifo->stats->common_stats.usage_cnt > 0) fifo->stats->common_stats.usage_cnt--; status = VXGE_HW_OK; goto exit; } /* no more completions */ *txdlh = NULL; status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS; exit: return status; } /** * vxge_hw_fifo_handle_tcode - Handle transfer code. * @fifo: Handle to the fifo object used for non offload send * @txdlh: Descriptor handle. * @t_code: One of the enumerated (and documented in the Titan user guide) * "transfer codes". * * Handle descriptor's transfer code. The latter comes with each completed * descriptor. * * Returns: one of the enum vxge_hw_status{} enumerated types. * VXGE_HW_OK - for success. * VXGE_HW_ERR_CRITICAL - when encounters critical error. 
*/ enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo, void *txdlh, enum vxge_hw_fifo_tcode t_code) { struct __vxge_hw_channel *channel; enum vxge_hw_status status = VXGE_HW_OK; channel = &fifo->channel; if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) { status = VXGE_HW_ERR_INVALID_TCODE; goto exit; } fifo->stats->txd_t_code_err_cnt[t_code]++; exit: return status; } /** * vxge_hw_fifo_txdl_free - Free descriptor. * @fifo: Handle to the fifo object used for non offload send * @txdlh: Descriptor handle. * * Free the reserved descriptor. This operation is "symmetrical" to * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's * lifecycle. * * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can * be: * * - reserved (vxge_hw_fifo_txdl_reserve); * * - posted (vxge_hw_fifo_txdl_post); * * - completed (vxge_hw_fifo_txdl_next_completed); * * - and recycled again (vxge_hw_fifo_txdl_free). * * For alternative state transitions and more details please refer to * the design doc. * */ void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh) { struct __vxge_hw_fifo_txdl_priv *txdl_priv; u32 max_frags; struct __vxge_hw_channel *channel; channel = &fifo->channel; txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, (struct vxge_hw_fifo_txd *)txdlh); max_frags = fifo->config->max_frags; vxge_hw_channel_dtr_free(channel, txdlh); } /** * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath * to MAC address table. * @vp: Vpath handle. * @macaddr: MAC address to be added for this vpath into the list * @macaddr_mask: MAC address mask for macaddr * @duplicate_mode: Duplicate MAC address add mode. Please see * enum vxge_hw_vpath_mac_addr_add_mode{} * * Adds the given mac address and mac address mask into the list for this * vpath. 
* see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and * vxge_hw_vpath_mac_addr_get_next * */ enum vxge_hw_status vxge_hw_vpath_mac_addr_add( struct __vxge_hw_vpath_handle *vp, u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN], enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode) { u32 i; u64 data1 = 0ULL; u64 data2 = 0ULL; enum vxge_hw_status status = VXGE_HW_OK; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } for (i = 0; i < ETH_ALEN; i++) { data1 <<= 8; data1 |= (u8)macaddr[i]; data2 <<= 8; data2 |= (u8)macaddr_mask[i]; } switch (duplicate_mode) { case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE: i = 0; break; case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE: i = 1; break; case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE: i = 2; break; default: i = 0; break; } status = __vxge_hw_vpath_rts_table_set(vp, VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1), VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)| VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i)); exit: return status; } /** * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath * from MAC address table. * @vp: Vpath handle. * @macaddr: First MAC address entry for this vpath in the list * @macaddr_mask: MAC address mask for macaddr * * Returns the first mac address and mac address mask in the list for this * vpath. 
* see also: vxge_hw_vpath_mac_addr_get_next * */ enum vxge_hw_status vxge_hw_vpath_mac_addr_get( struct __vxge_hw_vpath_handle *vp, u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]) { u32 i; u64 data1 = 0ULL; u64 data2 = 0ULL; enum vxge_hw_status status = VXGE_HW_OK; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } status = __vxge_hw_vpath_rts_table_get(vp, VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, 0, &data1, &data2); if (status != VXGE_HW_OK) goto exit; data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1); data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2); for (i = ETH_ALEN; i > 0; i--) { macaddr[i-1] = (u8)(data1 & 0xFF); data1 >>= 8; macaddr_mask[i-1] = (u8)(data2 & 0xFF); data2 >>= 8; } exit: return status; } /** * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this * vpath * from MAC address table. * @vp: Vpath handle. * @macaddr: Next MAC address entry for this vpath in the list * @macaddr_mask: MAC address mask for macaddr * * Returns the next mac address and mac address mask in the list for this * vpath. 
* see also: vxge_hw_vpath_mac_addr_get * */ enum vxge_hw_status vxge_hw_vpath_mac_addr_get_next( struct __vxge_hw_vpath_handle *vp, u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]) { u32 i; u64 data1 = 0ULL; u64 data2 = 0ULL; enum vxge_hw_status status = VXGE_HW_OK; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } status = __vxge_hw_vpath_rts_table_get(vp, VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, 0, &data1, &data2); if (status != VXGE_HW_OK) goto exit; data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1); data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2); for (i = ETH_ALEN; i > 0; i--) { macaddr[i-1] = (u8)(data1 & 0xFF); data1 >>= 8; macaddr_mask[i-1] = (u8)(data2 & 0xFF); data2 >>= 8; } exit: return status; } /** * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath * to MAC address table. * @vp: Vpath handle. * @macaddr: MAC address to be added for this vpath into the list * @macaddr_mask: MAC address mask for macaddr * * Delete the given mac address and mac address mask into the list for this * vpath. 
* see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and * vxge_hw_vpath_mac_addr_get_next * */ enum vxge_hw_status vxge_hw_vpath_mac_addr_delete( struct __vxge_hw_vpath_handle *vp, u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]) { u32 i; u64 data1 = 0ULL; u64 data2 = 0ULL; enum vxge_hw_status status = VXGE_HW_OK; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } for (i = 0; i < ETH_ALEN; i++) { data1 <<= 8; data1 |= (u8)macaddr[i]; data2 <<= 8; data2 |= (u8)macaddr_mask[i]; } status = __vxge_hw_vpath_rts_table_set(vp, VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1), VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)); exit: return status; } /** * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath * to vlan id table. * @vp: Vpath handle. * @vid: vlan id to be added for this vpath into the list * * Adds the given vlan id into the list for this vpath. * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and * vxge_hw_vpath_vid_get_next * */ enum vxge_hw_status vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid) { enum vxge_hw_status status = VXGE_HW_OK; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } status = __vxge_hw_vpath_rts_table_set(vp, VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID, 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0); exit: return status; } /** * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath * from vlan id table. * @vp: Vpath handle. * @vid: Buffer to return vlan id * * Returns the first vlan id in the list for this vpath. 
* see also: vxge_hw_vpath_vid_get_next * */ enum vxge_hw_status vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid) { u64 data; enum vxge_hw_status status = VXGE_HW_OK; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } status = __vxge_hw_vpath_rts_table_get(vp, VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID, 0, vid, &data); *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid); exit: return status; } /** * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath * to vlan id table. * @vp: Vpath handle. * @vid: vlan id to be added for this vpath into the list * * Adds the given vlan id into the list for this vpath. * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and * vxge_hw_vpath_vid_get_next * */ enum vxge_hw_status vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid) { enum vxge_hw_status status = VXGE_HW_OK; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } status = __vxge_hw_vpath_rts_table_set(vp, VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID, 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0); exit: return status; } /** * vxge_hw_vpath_promisc_enable - Enable promiscuous mode. * @vp: Vpath handle. * * Enable promiscuous mode of Titan-e operation. * * See also: vxge_hw_vpath_promisc_disable(). 
*/ enum vxge_hw_status vxge_hw_vpath_promisc_enable( struct __vxge_hw_vpath_handle *vp) { u64 val64; struct __vxge_hw_virtualpath *vpath; enum vxge_hw_status status = VXGE_HW_OK; if ((vp == NULL) || (vp->vpath->ringh == NULL)) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } vpath = vp->vpath; /* Enable promiscuous mode for function 0 only */ if (!(vpath->hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) return VXGE_HW_OK; val64 = readq(&vpath->vp_reg->rxmac_vcfg0); if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) { val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN | VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN | VXGE_HW_RXMAC_VCFG0_BCAST_EN | VXGE_HW_RXMAC_VCFG0_ALL_VID_EN; writeq(val64, &vpath->vp_reg->rxmac_vcfg0); } exit: return status; } /** * vxge_hw_vpath_promisc_disable - Disable promiscuous mode. * @vp: Vpath handle. * * Disable promiscuous mode of Titan-e operation. * * See also: vxge_hw_vpath_promisc_enable(). */ enum vxge_hw_status vxge_hw_vpath_promisc_disable( struct __vxge_hw_vpath_handle *vp) { u64 val64; struct __vxge_hw_virtualpath *vpath; enum vxge_hw_status status = VXGE_HW_OK; if ((vp == NULL) || (vp->vpath->ringh == NULL)) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } vpath = vp->vpath; val64 = readq(&vpath->vp_reg->rxmac_vcfg0); if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) { val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN | VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN | VXGE_HW_RXMAC_VCFG0_ALL_VID_EN); writeq(val64, &vpath->vp_reg->rxmac_vcfg0); } exit: return status; } /* * vxge_hw_vpath_bcast_enable - Enable broadcast * @vp: Vpath handle. * * Enable receiving broadcasts. 
*/ enum vxge_hw_status vxge_hw_vpath_bcast_enable( struct __vxge_hw_vpath_handle *vp) { u64 val64; struct __vxge_hw_virtualpath *vpath; enum vxge_hw_status status = VXGE_HW_OK; if ((vp == NULL) || (vp->vpath->ringh == NULL)) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } vpath = vp->vpath; val64 = readq(&vpath->vp_reg->rxmac_vcfg0); if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) { val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN; writeq(val64, &vpath->vp_reg->rxmac_vcfg0); } exit: return status; } /** * vxge_hw_vpath_mcast_enable - Enable multicast addresses. * @vp: Vpath handle. * * Enable Titan-e multicast addresses. * Returns: VXGE_HW_OK on success. * */ enum vxge_hw_status vxge_hw_vpath_mcast_enable( struct __vxge_hw_vpath_handle *vp) { u64 val64; struct __vxge_hw_virtualpath *vpath; enum vxge_hw_status status = VXGE_HW_OK; if ((vp == NULL) || (vp->vpath->ringh == NULL)) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } vpath = vp->vpath; val64 = readq(&vpath->vp_reg->rxmac_vcfg0); if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) { val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN; writeq(val64, &vpath->vp_reg->rxmac_vcfg0); } exit: return status; } /** * vxge_hw_vpath_mcast_disable - Disable multicast addresses. * @vp: Vpath handle. * * Disable Titan-e multicast addresses. * Returns: VXGE_HW_OK - success. * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle * */ enum vxge_hw_status vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp) { u64 val64; struct __vxge_hw_virtualpath *vpath; enum vxge_hw_status status = VXGE_HW_OK; if ((vp == NULL) || (vp->vpath->ringh == NULL)) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } vpath = vp->vpath; val64 = readq(&vpath->vp_reg->rxmac_vcfg0); if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) { val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN; writeq(val64, &vpath->vp_reg->rxmac_vcfg0); } exit: return status; } /* * vxge_hw_vpath_alarm_process - Process Alarms. * @vpath: Virtual Path. 
* @skip_alarms: Do not clear the alarms * * Process vpath alarms. * */ enum vxge_hw_status vxge_hw_vpath_alarm_process( struct __vxge_hw_vpath_handle *vp, u32 skip_alarms) { enum vxge_hw_status status = VXGE_HW_OK; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms); exit: return status; } /** * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and * alrms * @vp: Virtual Path handle. * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of * interrupts(Can be repeated). If fifo or ring are not enabled * the MSIX vector for that should be set to 0 * @alarm_msix_id: MSIX vector for alarm. * * This API will associate a given MSIX vector numbers with the four TIM * interrupts and alarm interrupt. */ void vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id, int alarm_msix_id) { u64 val64; struct __vxge_hw_virtualpath *vpath = vp->vpath; struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; u32 vp_id = vp->vpath->vp_id; val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI( (vp_id * 4) + tim_msix_id[0]) | VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI( (vp_id * 4) + tim_msix_id[1]); writeq(val64, &vp_reg->interrupt_cfg0); writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG( (vpath->hldev->first_vp_id * 4) + alarm_msix_id), &vp_reg->interrupt_cfg2); if (vpath->hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN, 0, 32), &vp_reg->one_shot_vect0_en); __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN, 0, 32), &vp_reg->one_shot_vect1_en); __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN, 0, 32), &vp_reg->one_shot_vect2_en); } } /** * vxge_hw_vpath_msix_mask - Mask MSIX Vector. * @vp: Virtual Path handle. 
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 *
 * NOTE(review): the vector index appears to be encoded as bit
 * (msix_id >> 2) within register (msix_id % 4) of a 4-register bank —
 * confirm against the Titan register layout.
 */
void vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	/* Set the mask bit for this vector; write goes to the upper 32 bits
	 * of the 64-bit register via the pio helper. */
	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSI ID
 *
 * The function clears (acknowledges) the msix interrupt for the given
 * msix_id. In one-shot interrupt mode the one-shot clear register is
 * used instead of the plain mask-clear register.
 */
void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	/* One-shot MSI-X vectors auto-mask on delivery; clearing them must
	 * go through the dedicated one-shot clear register. */
	if ((hldev->config.intr_mode ==
			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
		__vxge_hw_pio_mem_write32_upper(
			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
	else
		__vxge_hw_pio_mem_write32_upper(
			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSI ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 * by clearing its mask bit.
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
}

/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
* * See also: vxge_hw_vpath_inta_mask_tx_rx() */ void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp) { u64 tim_int_mask0[4] = {[0 ...3] = 0}; u32 tim_int_mask1[4] = {[0 ...3] = 0}; u64 val64; struct __vxge_hw_device *hldev = vp->vpath->hldev; VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0, tim_int_mask1, vp->vpath->vp_id); val64 = readq(&hldev->common_reg->tim_int_mask0); if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) || (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) { writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] | tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64), &hldev->common_reg->tim_int_mask0); } val64 = readl(&hldev->common_reg->tim_int_mask1); if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) || (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) { __vxge_hw_pio_mem_write32_upper( (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] | tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64), &hldev->common_reg->tim_int_mask1); } } /** * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts. * @vp: Virtual Path handle. * * Unmask Tx and Rx vpath interrupts. 
* * See also: vxge_hw_vpath_inta_mask_tx_rx() */ void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp) { u64 tim_int_mask0[4] = {[0 ...3] = 0}; u32 tim_int_mask1[4] = {[0 ...3] = 0}; u64 val64; struct __vxge_hw_device *hldev = vp->vpath->hldev; VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0, tim_int_mask1, vp->vpath->vp_id); val64 = readq(&hldev->common_reg->tim_int_mask0); if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) || (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) { writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] | tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64, &hldev->common_reg->tim_int_mask0); } if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) || (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) { __vxge_hw_pio_mem_write32_upper( (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] | tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64, &hldev->common_reg->tim_int_mask1); } } /** * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed * descriptors and process the same. * @ring: Handle to the ring object used for receive * * The function polls the Rx for the completed descriptors and calls * the driver via supplied completion callback. * * Returns: VXGE_HW_OK, if the polling is completed successful. * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed * descriptors available which are yet to be processed. 
* * See also: vxge_hw_vpath_poll_rx() */ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring) { u8 t_code; enum vxge_hw_status status = VXGE_HW_OK; void *first_rxdh; u64 val64 = 0; int new_count = 0; ring->cmpl_cnt = 0; status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code); if (status == VXGE_HW_OK) ring->callback(ring, first_rxdh, t_code, ring->channel.userdata); if (ring->cmpl_cnt != 0) { ring->doorbell_cnt += ring->cmpl_cnt; if (ring->doorbell_cnt >= ring->rxds_limit) { /* * Each RxD is of 4 qwords, update the number of * qwords replenished */ new_count = (ring->doorbell_cnt * 4); /* For each block add 4 more qwords */ ring->total_db_cnt += ring->doorbell_cnt; if (ring->total_db_cnt >= ring->rxds_per_block) { new_count += 4; /* Reset total count */ ring->total_db_cnt %= ring->rxds_per_block; } writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count), &ring->vp_reg->prc_rxd_doorbell); val64 = readl(&ring->common_reg->titan_general_int_status); ring->doorbell_cnt = 0; } } return status; } /** * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process * the same. * @fifo: Handle to the fifo object used for non offload send * * The function polls the Tx for the completed descriptors and calls * the driver via supplied completion callback. * * Returns: VXGE_HW_OK, if the polling is completed successful. * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed * descriptors available which are yet to be processed. 
*/ enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo, struct sk_buff ***skb_ptr, int nr_skb, int *more) { enum vxge_hw_fifo_tcode t_code; void *first_txdlh; enum vxge_hw_status status = VXGE_HW_OK; struct __vxge_hw_channel *channel; channel = &fifo->channel; status = vxge_hw_fifo_txdl_next_completed(fifo, &first_txdlh, &t_code); if (status == VXGE_HW_OK) if (fifo->callback(fifo, first_txdlh, t_code, channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK) status = VXGE_HW_COMPLETIONS_REMAIN; return status; }
gpl-2.0
MaxiCM/Samsung_STE_Kernel
net/mac80211/ht.c
2385
9690
/* * HT handling * * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2007-2010, Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/ieee80211.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "rate.h" void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, struct ieee80211_ht_cap *ht_cap_ie, struct ieee80211_sta_ht_cap *ht_cap) { u8 ampdu_info, tx_mcs_set_cap; int i, max_tx_streams; BUG_ON(!ht_cap); memset(ht_cap, 0, sizeof(*ht_cap)); if (!ht_cap_ie || !sband->ht_cap.ht_supported) return; ht_cap->ht_supported = true; /* * The bits listed in this expression should be * the same for the peer and us, if the station * advertises more then we can't use those thus * we mask them out. */ ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) & (sband->ht_cap.cap | ~(IEEE80211_HT_CAP_LDPC_CODING | IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_DSSSCCK40)); /* * The STBC bits are asymmetric -- if we don't have * TX then mask out the peer's RX and vice versa. */ if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)) ht_cap->cap &= ~IEEE80211_HT_CAP_RX_STBC; if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)) ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC; ampdu_info = ht_cap_ie->ampdu_params_info; ht_cap->ampdu_factor = ampdu_info & IEEE80211_HT_AMPDU_PARM_FACTOR; ht_cap->ampdu_density = (ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2; /* own MCS TX capabilities */ tx_mcs_set_cap = sband->ht_cap.mcs.tx_params; /* Copy peer MCS TX capabilities, the driver might need them. 
*/ ht_cap->mcs.tx_params = ht_cap_ie->mcs.tx_params; /* can we TX with MCS rates? */ if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED)) return; /* Counting from 0, therefore +1 */ if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_RX_DIFF) max_tx_streams = ((tx_mcs_set_cap & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1; else max_tx_streams = IEEE80211_HT_MCS_TX_MAX_STREAMS; /* * 802.11n-2009 20.3.5 / 20.6 says: * - indices 0 to 7 and 32 are single spatial stream * - 8 to 31 are multiple spatial streams using equal modulation * [8..15 for two streams, 16..23 for three and 24..31 for four] * - remainder are multiple spatial streams using unequal modulation */ for (i = 0; i < max_tx_streams; i++) ht_cap->mcs.rx_mask[i] = sband->ht_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i]; if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION) for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE; i < IEEE80211_HT_MCS_MASK_LEN; i++) ht_cap->mcs.rx_mask[i] = sband->ht_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i]; /* handle MCS rate 32 too */ if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1) ht_cap->mcs.rx_mask[32/8] |= 1; } void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, bool tx) { int i; cancel_work_sync(&sta->ampdu_mlme.work); for (i = 0; i < STA_TID_NUM; i++) { __ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR, tx); __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, WLAN_REASON_QSTA_LEAVE_QBSS, tx); } } void ieee80211_ba_session_work(struct work_struct *work) { struct sta_info *sta = container_of(work, struct sta_info, ampdu_mlme.work); struct tid_ampdu_tx *tid_tx; int tid; /* * When this flag is set, new sessions should be * blocked, and existing sessions will be torn * down by the code that set the flag, so this * need not run. 
*/ if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) return; mutex_lock(&sta->ampdu_mlme.mtx); for (tid = 0; tid < STA_TID_NUM; tid++) { if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired)) ___ieee80211_stop_rx_ba_session( sta, tid, WLAN_BACK_RECIPIENT, WLAN_REASON_QSTA_TIMEOUT, true); tid_tx = sta->ampdu_mlme.tid_start_tx[tid]; if (tid_tx) { /* * Assign it over to the normal tid_tx array * where it "goes live". */ spin_lock_bh(&sta->lock); sta->ampdu_mlme.tid_start_tx[tid] = NULL; /* could there be a race? */ if (sta->ampdu_mlme.tid_tx[tid]) kfree(tid_tx); else ieee80211_assign_tid_tx(sta, tid, tid_tx); spin_unlock_bh(&sta->lock); ieee80211_tx_ba_session_handle_start(sta, tid); continue; } tid_tx = rcu_dereference_protected_tid_tx(sta, tid); if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state)) ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR, true); } mutex_unlock(&sta->ampdu_mlme.mtx); } void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, const u8 *da, u16 tid, u16 initiator, u16 reason_code) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; u16 params; skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); if (!skb) { printk(KERN_ERR "%s: failed to allocate buffer " "for delba frame\n", sdata->name); return; } skb_reserve(skb, local->hw.extra_tx_headroom); mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); memset(mgmt, 0, 24); memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_STATION) memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba)); mgmt->u.action.category = WLAN_CATEGORY_BACK; mgmt->u.action.u.delba.action_code = 
WLAN_ACTION_DELBA; params = (u16)(initiator << 11); /* bit 11 initiator */ params |= (u16)(tid << 12); /* bit 15:12 TID number */ mgmt->u.action.u.delba.params = cpu_to_le16(params); mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); ieee80211_tx_skb(sdata, skb); } void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_mgmt *mgmt, size_t len) { u16 tid, params; u16 initiator; params = le16_to_cpu(mgmt->u.action.u.delba.params); tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11; #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_DEBUG "delba from %pM (%s) tid %d reason code %d\n", mgmt->sa, initiator ? "initiator" : "recipient", tid, le16_to_cpu(mgmt->u.action.u.delba.reason_code)); #endif /* CONFIG_MAC80211_HT_DEBUG */ if (initiator == WLAN_BACK_INITIATOR) __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0, true); else __ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, true); } int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps, const u8 *da, const u8 *bssid) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *action_frame; /* 27 = header + category + action + smps mode */ skb = dev_alloc_skb(27 + local->hw.extra_tx_headroom); if (!skb) return -ENOMEM; skb_reserve(skb, local->hw.extra_tx_headroom); action_frame = (void *)skb_put(skb, 27); memcpy(action_frame->da, da, ETH_ALEN); memcpy(action_frame->sa, sdata->dev->dev_addr, ETH_ALEN); memcpy(action_frame->bssid, bssid, ETH_ALEN); action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); action_frame->u.action.category = WLAN_CATEGORY_HT; action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS; switch (smps) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); case IEEE80211_SMPS_OFF: 
action_frame->u.action.u.ht_smps.smps_control = WLAN_HT_SMPS_CONTROL_DISABLED; break; case IEEE80211_SMPS_STATIC: action_frame->u.action.u.ht_smps.smps_control = WLAN_HT_SMPS_CONTROL_STATIC; break; case IEEE80211_SMPS_DYNAMIC: action_frame->u.action.u.ht_smps.smps_control = WLAN_HT_SMPS_CONTROL_DYNAMIC; break; } /* we'll do more on status of this frame */ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; ieee80211_tx_skb(sdata, skb); return 0; } void ieee80211_request_smps_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.request_smps_work); mutex_lock(&sdata->u.mgd.mtx); __ieee80211_request_smps(sdata, sdata->u.mgd.driver_smps_mode); mutex_unlock(&sdata->u.mgd.mtx); } void ieee80211_request_smps(struct ieee80211_vif *vif, enum ieee80211_smps_mode smps_mode) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) return; if (WARN_ON(smps_mode == IEEE80211_SMPS_OFF)) smps_mode = IEEE80211_SMPS_AUTOMATIC; sdata->u.mgd.driver_smps_mode = smps_mode; ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.request_smps_work); } /* this might change ... don't want non-open drivers using it */ EXPORT_SYMBOL_GPL(ieee80211_request_smps);
gpl-2.0
charlie-wong/itop-zImage
drivers/gpu/drm/radeon/rs690.c
2641
26532
/* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher * Jerome Glisse */ #include "drmP.h" #include "radeon.h" #include "radeon_asic.h" #include "atom.h" #include "rs690d.h" static int rs690_mc_wait_for_idle(struct radeon_device *rdev) { unsigned i; uint32_t tmp; for (i = 0; i < rdev->usec_timeout; i++) { /* read MC_STATUS */ tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS); if (G_000090_MC_SYSTEM_IDLE(tmp)) return 0; udelay(1); } return -1; } static void rs690_gpu_init(struct radeon_device *rdev) { /* FIXME: is this correct ? */ r420_pipes_init(rdev); if (rs690_mc_wait_for_idle(rdev)) { printk(KERN_WARNING "Failed to wait MC idle while " "programming pipes. 
Bad things might happen.\n"); } } union igp_info { struct _ATOM_INTEGRATED_SYSTEM_INFO info; struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2; }; void rs690_pm_info(struct radeon_device *rdev) { int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); union igp_info *info; uint16_t data_offset; uint8_t frev, crev; fixed20_12 tmp; if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, &frev, &crev, &data_offset)) { info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset); /* Get various system informations from bios */ switch (crev) { case 1: tmp.full = dfixed_const(100); rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock)); rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); if (le16_to_cpu(info->info.usK8MemoryClock)) rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); else if (rdev->clock.default_mclk) { rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); } else rdev->pm.igp_system_mclk.full = dfixed_const(400); rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); break; case 2: tmp.full = dfixed_const(100); rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock)); rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); if (le32_to_cpu(info->info_v2.ulBootUpUMAClock)) rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock)); else if (rdev->clock.default_mclk) rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); else rdev->pm.igp_system_mclk.full = dfixed_const(66700); rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); rdev->pm.igp_ht_link_clk.full = 
dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq)); rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); break; default: /* We assume the slower possible clock ie worst case */ rdev->pm.igp_sideport_mclk.full = dfixed_const(200); rdev->pm.igp_system_mclk.full = dfixed_const(200); rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); rdev->pm.igp_ht_link_width.full = dfixed_const(8); DRM_ERROR("No integrated system info for your GPU, using safe default\n"); break; } } else { /* We assume the slower possible clock ie worst case */ rdev->pm.igp_sideport_mclk.full = dfixed_const(200); rdev->pm.igp_system_mclk.full = dfixed_const(200); rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); rdev->pm.igp_ht_link_width.full = dfixed_const(8); DRM_ERROR("No integrated system info for your GPU, using safe default\n"); } /* Compute various bandwidth */ /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ tmp.full = dfixed_const(4); rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp); /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 * = ht_clk * ht_width / 5 */ tmp.full = dfixed_const(5); rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk, rdev->pm.igp_ht_link_width); rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp); if (tmp.full < rdev->pm.max_bandwidth.full) { /* HT link is a limiting factor */ rdev->pm.max_bandwidth.full = tmp.full; } /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 * = (sideport_clk * 14) / 10 */ tmp.full = dfixed_const(14); rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp); tmp.full = dfixed_const(10); rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp); } void rs690_mc_init(struct radeon_device *rdev) { u64 base; rs400_gart_adjust_size(rdev); rdev->mc.vram_is_ddr = true; rdev->mc.vram_width = 128; 
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); rdev->mc.mc_vram_size = rdev->mc.real_vram_size; rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); rdev->mc.visible_vram_size = rdev->mc.aper_size; base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); base = G_000100_MC_FB_START(base) << 16; rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); rs690_pm_info(rdev); radeon_vram_location(rdev, &rdev->mc, base); rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1; radeon_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); } void rs690_line_buffer_adjust(struct radeon_device *rdev, struct drm_display_mode *mode1, struct drm_display_mode *mode2) { u32 tmp; /* * Line Buffer Setup * There is a single line buffer shared by both display controllers. * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between * the display controllers. The paritioning can either be done * manually or via one of four preset allocations specified in bits 1:0: * 0 - line buffer is divided in half and shared between crtc * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 * 2 - D1 gets the whole buffer * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT controls switches to manual * allocation mode. In manual allocation mode, D1 always starts at 0, * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. 
*/ tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT; tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE; /* auto */ if (mode1 && mode2) { if (mode1->hdisplay > mode2->hdisplay) { if (mode1->hdisplay > 2560) tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; else tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; } else if (mode2->hdisplay > mode1->hdisplay) { if (mode2->hdisplay > 2560) tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; else tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; } else tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; } else if (mode1) { tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY; } else if (mode2) { tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; } WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); } struct rs690_watermark { u32 lb_request_fifo_depth; fixed20_12 num_line_pair; fixed20_12 estimated_width; fixed20_12 worst_case_latency; fixed20_12 consumption_rate; fixed20_12 active_time; fixed20_12 dbpp; fixed20_12 priority_mark_max; fixed20_12 priority_mark; fixed20_12 sclk; }; void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, struct radeon_crtc *crtc, struct rs690_watermark *wm) { struct drm_display_mode *mode = &crtc->base.mode; fixed20_12 a, b, c; fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; if (!crtc->base.enabled) { /* FIXME: wouldn't it better to set priority mark to maximum */ wm->lb_request_fifo_depth = 4; return; } if (crtc->vsc.full > dfixed_const(2)) wm->num_line_pair.full = dfixed_const(2); else wm->num_line_pair.full = dfixed_const(1); b.full = dfixed_const(mode->crtc_hdisplay); c.full = dfixed_const(256); a.full = dfixed_div(b, c); request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair); request_fifo_depth.full = dfixed_ceil(request_fifo_depth); if (a.full < dfixed_const(4)) { wm->lb_request_fifo_depth = 4; } else { wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth); } /* Determine consumption rate * 
pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000) * vtaps = number of vertical taps, * vsc = vertical scaling ratio, defined as source/destination * hsc = horizontal scaling ration, defined as source/destination */ a.full = dfixed_const(mode->clock); b.full = dfixed_const(1000); a.full = dfixed_div(a, b); pclk.full = dfixed_div(b, a); if (crtc->rmx_type != RMX_OFF) { b.full = dfixed_const(2); if (crtc->vsc.full > b.full) b.full = crtc->vsc.full; b.full = dfixed_mul(b, crtc->hsc); c.full = dfixed_const(2); b.full = dfixed_div(b, c); consumption_time.full = dfixed_div(pclk, b); } else { consumption_time.full = pclk.full; } a.full = dfixed_const(1); wm->consumption_rate.full = dfixed_div(a, consumption_time); /* Determine line time * LineTime = total time for one line of displayhtotal * LineTime = total number of horizontal pixels * pclk = pixel clock period(ns) */ a.full = dfixed_const(crtc->base.mode.crtc_htotal); line_time.full = dfixed_mul(a, pclk); /* Determine active time * ActiveTime = time of active region of display within one line, * hactive = total number of horizontal active pixels * htotal = total number of horizontal pixels */ a.full = dfixed_const(crtc->base.mode.crtc_htotal); b.full = dfixed_const(crtc->base.mode.crtc_hdisplay); wm->active_time.full = dfixed_mul(line_time, b); wm->active_time.full = dfixed_div(wm->active_time, a); /* Maximun bandwidth is the minimun bandwidth of all component */ rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; if (rdev->mc.igp_sideport_enabled) { if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && rdev->pm.sideport_bandwidth.full) rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; read_delay_latency.full = dfixed_const(370 * 800 * 1000); read_delay_latency.full = dfixed_div(read_delay_latency, rdev->pm.igp_sideport_mclk); } else { if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && rdev->pm.k8_bandwidth.full) rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth; if 
(rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && rdev->pm.ht_bandwidth.full) rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; read_delay_latency.full = dfixed_const(5000); } /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ a.full = dfixed_const(16); rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a); a.full = dfixed_const(1000); rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk); /* Determine chunk time * ChunkTime = the time it takes the DCP to send one chunk of data * to the LB which consists of pipeline delay and inter chunk gap * sclk = system clock(ns) */ a.full = dfixed_const(256 * 13); chunk_time.full = dfixed_mul(rdev->pm.sclk, a); a.full = dfixed_const(10); chunk_time.full = dfixed_div(chunk_time, a); /* Determine the worst case latency * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) * WorstCaseLatency = worst case time from urgent to when the MC starts * to return data * READ_DELAY_IDLE_MAX = constant of 1us * ChunkTime = time it takes the DCP to send one chunk of data to the LB * which consists of pipeline delay and inter chunk gap */ if (dfixed_trunc(wm->num_line_pair) > 1) { a.full = dfixed_const(3); wm->worst_case_latency.full = dfixed_mul(a, chunk_time); wm->worst_case_latency.full += read_delay_latency.full; } else { a.full = dfixed_const(2); wm->worst_case_latency.full = dfixed_mul(a, chunk_time); wm->worst_case_latency.full += read_delay_latency.full; } /* Determine the tolerable latency * TolerableLatency = Any given request has only 1 line time * for the data to be returned * LBRequestFifoDepth = Number of chunk requests the LB can * put into the request FIFO for a display * LineTime = total time for one line of display * ChunkTime = the time it takes the DCP to send one chunk * of data to the LB which consists of * pipeline delay and inter chunk gap */ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) { tolerable_latency.full = line_time.full; } else { tolerable_latency.full 
= dfixed_const(wm->lb_request_fifo_depth - 2); tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time); tolerable_latency.full = line_time.full - tolerable_latency.full; } /* We assume worst case 32bits (4 bytes) */ wm->dbpp.full = dfixed_const(4 * 8); /* Determine the maximum priority mark * width = viewport width in pixels */ a.full = dfixed_const(16); wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay); wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a); wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max); /* Determine estimated width */ estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; estimated_width.full = dfixed_div(estimated_width, consumption_time); if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { wm->priority_mark.full = dfixed_const(10); } else { a.full = dfixed_const(16); wm->priority_mark.full = dfixed_div(estimated_width, a); wm->priority_mark.full = dfixed_ceil(wm->priority_mark); wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; } } void rs690_bandwidth_update(struct radeon_device *rdev) { struct drm_display_mode *mode0 = NULL; struct drm_display_mode *mode1 = NULL; struct rs690_watermark wm0; struct rs690_watermark wm1; u32 tmp; u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1); u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1); fixed20_12 priority_mark02, priority_mark12, fill_rate; fixed20_12 a, b; radeon_update_display_priority(rdev); if (rdev->mode_info.crtcs[0]->base.enabled) mode0 = &rdev->mode_info.crtcs[0]->base.mode; if (rdev->mode_info.crtcs[1]->base.enabled) mode1 = &rdev->mode_info.crtcs[1]->base.mode; /* * Set display0/1 priority up in the memory controller for * modes if the user specifies HIGH for displaypriority * option. 
*/ if ((rdev->disp_priority == 2) && ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) { tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER); tmp &= C_000104_MC_DISP0R_INIT_LAT; tmp &= C_000104_MC_DISP1R_INIT_LAT; if (mode0) tmp |= S_000104_MC_DISP0R_INIT_LAT(1); if (mode1) tmp |= S_000104_MC_DISP1R_INIT_LAT(1); WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp); } rs690_line_buffer_adjust(rdev, mode0, mode1); if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) WREG32(R_006C9C_DCP_CONTROL, 0); if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) WREG32(R_006C9C_DCP_CONTROL, 2); rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); tmp = (wm0.lb_request_fifo_depth - 1); tmp |= (wm1.lb_request_fifo_depth - 1) << 16; WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp); if (mode0 && mode1) { if (dfixed_trunc(wm0.dbpp) > 64) a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); else a.full = wm0.num_line_pair.full; if (dfixed_trunc(wm1.dbpp) > 64) b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); else b.full = wm1.num_line_pair.full; a.full += b.full; fill_rate.full = dfixed_div(wm0.sclk, a); if (wm0.consumption_rate.full > fill_rate.full) { b.full = wm0.consumption_rate.full - fill_rate.full; b.full = dfixed_mul(b, wm0.active_time); a.full = dfixed_mul(wm0.worst_case_latency, wm0.consumption_rate); a.full = a.full + b.full; b.full = dfixed_const(16 * 1000); priority_mark02.full = dfixed_div(a, b); } else { a.full = dfixed_mul(wm0.worst_case_latency, wm0.consumption_rate); b.full = dfixed_const(16 * 1000); priority_mark02.full = dfixed_div(a, b); } if (wm1.consumption_rate.full > fill_rate.full) { b.full = wm1.consumption_rate.full - fill_rate.full; b.full = dfixed_mul(b, wm1.active_time); a.full = dfixed_mul(wm1.worst_case_latency, wm1.consumption_rate); a.full = a.full + b.full; b.full = dfixed_const(16 * 1000); priority_mark12.full = dfixed_div(a, b); } else { 
a.full = dfixed_mul(wm1.worst_case_latency, wm1.consumption_rate); b.full = dfixed_const(16 * 1000); priority_mark12.full = dfixed_div(a, b); } if (wm0.priority_mark.full > priority_mark02.full) priority_mark02.full = wm0.priority_mark.full; if (dfixed_trunc(priority_mark02) < 0) priority_mark02.full = 0; if (wm0.priority_mark_max.full > priority_mark02.full) priority_mark02.full = wm0.priority_mark_max.full; if (wm1.priority_mark.full > priority_mark12.full) priority_mark12.full = wm1.priority_mark.full; if (dfixed_trunc(priority_mark12) < 0) priority_mark12.full = 0; if (wm1.priority_mark_max.full > priority_mark12.full) priority_mark12.full = wm1.priority_mark_max.full; d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); if (rdev->disp_priority == 2) { d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); } } else if (mode0) { if (dfixed_trunc(wm0.dbpp) > 64) a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); else a.full = wm0.num_line_pair.full; fill_rate.full = dfixed_div(wm0.sclk, a); if (wm0.consumption_rate.full > fill_rate.full) { b.full = wm0.consumption_rate.full - fill_rate.full; b.full = dfixed_mul(b, wm0.active_time); a.full = dfixed_mul(wm0.worst_case_latency, wm0.consumption_rate); a.full = a.full + b.full; b.full = dfixed_const(16 * 1000); priority_mark02.full = dfixed_div(a, b); } else { a.full = dfixed_mul(wm0.worst_case_latency, wm0.consumption_rate); b.full = dfixed_const(16 * 1000); priority_mark02.full = dfixed_div(a, b); } if (wm0.priority_mark.full > priority_mark02.full) priority_mark02.full = wm0.priority_mark.full; if (dfixed_trunc(priority_mark02) < 0) priority_mark02.full = 0; if (wm0.priority_mark_max.full > priority_mark02.full) priority_mark02.full = wm0.priority_mark_max.full; d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); if (rdev->disp_priority == 2) d1mode_priority_a_cnt |= 
S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); } else if (mode1) { if (dfixed_trunc(wm1.dbpp) > 64) a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); else a.full = wm1.num_line_pair.full; fill_rate.full = dfixed_div(wm1.sclk, a); if (wm1.consumption_rate.full > fill_rate.full) { b.full = wm1.consumption_rate.full - fill_rate.full; b.full = dfixed_mul(b, wm1.active_time); a.full = dfixed_mul(wm1.worst_case_latency, wm1.consumption_rate); a.full = a.full + b.full; b.full = dfixed_const(16 * 1000); priority_mark12.full = dfixed_div(a, b); } else { a.full = dfixed_mul(wm1.worst_case_latency, wm1.consumption_rate); b.full = dfixed_const(16 * 1000); priority_mark12.full = dfixed_div(a, b); } if (wm1.priority_mark.full > priority_mark12.full) priority_mark12.full = wm1.priority_mark.full; if (dfixed_trunc(priority_mark12) < 0) priority_mark12.full = 0; if (wm1.priority_mark_max.full > priority_mark12.full) priority_mark12.full = wm1.priority_mark_max.full; d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); if (rdev->disp_priority == 2) d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); } WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) { uint32_t r; WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); r = RREG32(R_00007C_MC_DATA); WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); return r; } void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | S_000078_MC_IND_WR_EN(1)); WREG32(R_00007C_MC_DATA, v); WREG32(R_000078_MC_INDEX, 0x7F); } void rs690_mc_program(struct radeon_device *rdev) { struct rv515_mc_save save; /* Stops all mc clients */ rv515_mc_stop(rdev, &save); /* Wait for mc idle */ if 
(rs690_mc_wait_for_idle(rdev)) dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); /* Program MC, should be a 32bits limited address space */ WREG32_MC(R_000100_MCCFG_FB_LOCATION, S_000100_MC_FB_START(rdev->mc.vram_start >> 16) | S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16)); WREG32(R_000134_HDP_FB_LOCATION, S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); rv515_mc_resume(rdev, &save); } static int rs690_startup(struct radeon_device *rdev) { int r; rs690_mc_program(rdev); /* Resume clock */ rv515_clock_startup(rdev); /* Initialize GPU configuration (# pipes, ...) */ rs690_gpu_init(rdev); /* Initialize GART (initialize after TTM so we can allocate * memory through TTM but finalize after TTM) */ r = rs400_gart_enable(rdev); if (r) return r; /* allocate wb buffer */ r = radeon_wb_init(rdev); if (r) return r; /* Enable IRQ */ rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { dev_err(rdev->dev, "failed initializing CP (%d).\n", r); return r; } r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failed initializing IB (%d).\n", r); return r; } r = r600_audio_init(rdev); if (r) { dev_err(rdev->dev, "failed initializing audio\n"); return r; } return 0; } int rs690_resume(struct radeon_device *rdev) { /* Make sur GART are not working */ rs400_gart_disable(rdev); /* Resume clock before doing reset */ rv515_clock_startup(rdev); /* Reset gpu before posting otherwise ATOM will enter infinite loop */ if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), RREG32(R_0007C0_CP_STAT)); } /* post */ atom_asic_init(rdev->mode_info.atom_context); /* Resume clock after posting */ rv515_clock_startup(rdev); /* Initialize surface registers */ radeon_surface_init(rdev); return rs690_startup(rdev); } int rs690_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); r100_cp_disable(rdev); radeon_wb_disable(rdev); rs600_irq_disable(rdev); rs400_gart_disable(rdev); return 0; } void rs690_fini(struct radeon_device *rdev) { r600_audio_fini(rdev); r100_cp_fini(rdev); radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); rs400_gart_fini(rdev); radeon_irq_kms_fini(rdev); radeon_fence_driver_fini(rdev); radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; } int rs690_init(struct radeon_device *rdev) { int r; /* Disable VGA */ rv515_vga_render_disable(rdev); /* Initialize scratch registers */ radeon_scratch_init(rdev); /* Initialize surface registers */ radeon_surface_init(rdev); /* restore some register to sane defaults */ r100_restore_sanity(rdev); /* TODO: disable VGA need to use VGA request */ /* BIOS*/ if (!radeon_get_bios(rdev)) { if (ASIC_IS_AVIVO(rdev)) return -EINVAL; } if (rdev->is_atom_bios) { r = radeon_atombios_init(rdev); if (r) return r; } else { dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n"); return -EINVAL; } /* Reset gpu before posting otherwise ATOM will enter infinite loop */ if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), RREG32(R_0007C0_CP_STAT)); } /* check if cards are posted or not */ if (radeon_boot_test_post_card(rdev) == false) return -EINVAL; /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); /* initialize memory controller */ rs690_mc_init(rdev); rv515_debugfs(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); if (r) return r; r = radeon_irq_kms_init(rdev); if (r) return r; /* Memory manager */ r = radeon_bo_init(rdev); if (r) return r; r = rs400_gart_init(rdev); if (r) return r; rs600_set_safe_registers(rdev); rdev->accel_working = true; r = rs690_startup(rdev); if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); radeon_wb_fini(rdev); r100_ib_fini(rdev); rs400_gart_fini(rdev); radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; }
gpl-2.0
poondog/M8
arch/powerpc/platforms/cell/iommu.c
4433
35389
/* * IOMMU implementation for Cell Broadband Processor Architecture * * (C) Copyright IBM Corporation 2006-2008 * * Author: Jeremy Kerr <jk@ozlabs.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #undef DEBUG #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/memblock.h> #include <asm/prom.h> #include <asm/iommu.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/udbg.h> #include <asm/firmware.h> #include <asm/cell-regs.h> #include "interrupt.h" /* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages * instead of leaving them mapped to some dummy page. This can be * enabled once the appropriate workarounds for spider bugs have * been enabled */ #define CELL_IOMMU_REAL_UNMAP /* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of * IO PTEs based on the transfer direction. 
That can be enabled * once spider-net has been fixed to pass the correct direction * to the DMA mapping functions */ #define CELL_IOMMU_STRICT_PROTECTION #define NR_IOMMUS 2 /* IOC mmap registers */ #define IOC_Reg_Size 0x2000 #define IOC_IOPT_CacheInvd 0x908 #define IOC_IOPT_CacheInvd_NE_Mask 0xffe0000000000000ul #define IOC_IOPT_CacheInvd_IOPTE_Mask 0x000003fffffffff8ul #define IOC_IOPT_CacheInvd_Busy 0x0000000000000001ul #define IOC_IOST_Origin 0x918 #define IOC_IOST_Origin_E 0x8000000000000000ul #define IOC_IOST_Origin_HW 0x0000000000000800ul #define IOC_IOST_Origin_HL 0x0000000000000400ul #define IOC_IO_ExcpStat 0x920 #define IOC_IO_ExcpStat_V 0x8000000000000000ul #define IOC_IO_ExcpStat_SPF_Mask 0x6000000000000000ul #define IOC_IO_ExcpStat_SPF_S 0x6000000000000000ul #define IOC_IO_ExcpStat_SPF_P 0x2000000000000000ul #define IOC_IO_ExcpStat_ADDR_Mask 0x00000007fffff000ul #define IOC_IO_ExcpStat_RW_Mask 0x0000000000000800ul #define IOC_IO_ExcpStat_IOID_Mask 0x00000000000007fful #define IOC_IO_ExcpMask 0x928 #define IOC_IO_ExcpMask_SFE 0x4000000000000000ul #define IOC_IO_ExcpMask_PFE 0x2000000000000000ul #define IOC_IOCmd_Offset 0x1000 #define IOC_IOCmd_Cfg 0xc00 #define IOC_IOCmd_Cfg_TE 0x0000800000000000ul /* Segment table entries */ #define IOSTE_V 0x8000000000000000ul /* valid */ #define IOSTE_H 0x4000000000000000ul /* cache hint */ #define IOSTE_PT_Base_RPN_Mask 0x3ffffffffffff000ul /* base RPN of IOPT */ #define IOSTE_NPPT_Mask 0x0000000000000fe0ul /* no. 
pages in IOPT */ #define IOSTE_PS_Mask 0x0000000000000007ul /* page size */ #define IOSTE_PS_4K 0x0000000000000001ul /* - 4kB */ #define IOSTE_PS_64K 0x0000000000000003ul /* - 64kB */ #define IOSTE_PS_1M 0x0000000000000005ul /* - 1MB */ #define IOSTE_PS_16M 0x0000000000000007ul /* - 16MB */ /* IOMMU sizing */ #define IO_SEGMENT_SHIFT 28 #define IO_PAGENO_BITS(shift) (IO_SEGMENT_SHIFT - (shift)) /* The high bit needs to be set on every DMA address */ #define SPIDER_DMA_OFFSET 0x80000000ul struct iommu_window { struct list_head list; struct cbe_iommu *iommu; unsigned long offset; unsigned long size; unsigned int ioid; struct iommu_table table; }; #define NAMESIZE 8 struct cbe_iommu { int nid; char name[NAMESIZE]; void __iomem *xlate_regs; void __iomem *cmd_regs; unsigned long *stab; unsigned long *ptab; void *pad_page; struct list_head windows; }; /* Static array of iommus, one per node * each contains a list of windows, keyed from dma_window property * - on bus setup, look for a matching window, or create one * - on dev setup, assign iommu_table ptr */ static struct cbe_iommu iommus[NR_IOMMUS]; static int cbe_nr_iommus; static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte, long n_ptes) { u64 __iomem *reg; u64 val; long n; reg = iommu->xlate_regs + IOC_IOPT_CacheInvd; while (n_ptes > 0) { /* we can invalidate up to 1 << 11 PTEs at once */ n = min(n_ptes, 1l << 11); val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask) | (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask) | IOC_IOPT_CacheInvd_Busy; out_be64(reg, val); while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy) ; n_ptes -= n; pte += n; } } static int tce_build_cell(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, struct dma_attrs *attrs) { int i; unsigned long *io_pte, base_pte; struct iommu_window *window = container_of(tbl, struct iommu_window, table); /* implementing proper protection causes problems with the spidernet * driver - check 
mapping directions later, but allow read & write by * default for now.*/ #ifdef CELL_IOMMU_STRICT_PROTECTION /* to avoid referencing a global, we use a trick here to setup the * protection bit. "prot" is setup to be 3 fields of 4 bits apprended * together for each of the 3 supported direction values. It is then * shifted left so that the fields matching the desired direction * lands on the appropriate bits, and other bits are masked out. */ const unsigned long prot = 0xc48; base_pte = ((prot << (52 + 4 * direction)) & (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) | CBE_IOPTE_M | CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); #else base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); #endif if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))) base_pte &= ~CBE_IOPTE_SO_RW; io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE) io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask); mb(); invalidate_tce_cache(window->iommu, io_pte, npages); pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n", index, npages, direction, base_pte); return 0; } static void tce_free_cell(struct iommu_table *tbl, long index, long npages) { int i; unsigned long *io_pte, pte; struct iommu_window *window = container_of(tbl, struct iommu_window, table); pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages); #ifdef CELL_IOMMU_REAL_UNMAP pte = 0; #else /* spider bridge does PCI reads after freeing - insert a mapping * to a scratch page instead of an invalid entry */ pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW | __pa(window->iommu->pad_page) | (window->ioid & CBE_IOPTE_IOID_Mask); #endif io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); for (i = 0; i < npages; i++) io_pte[i] = pte; mb(); invalidate_tce_cache(window->iommu, io_pte, npages); } static irqreturn_t ioc_interrupt(int irq, void *data) { unsigned long stat, 
spf; struct cbe_iommu *iommu = data; stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); spf = stat & IOC_IO_ExcpStat_SPF_Mask; /* Might want to rate limit it */ printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat); printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n", !!(stat & IOC_IO_ExcpStat_V), (spf == IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ', (spf == IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ', (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write", (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask)); printk(KERN_ERR " page=0x%016lx\n", stat & IOC_IO_ExcpStat_ADDR_Mask); /* clear interrupt */ stat &= ~IOC_IO_ExcpStat_V; out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat); return IRQ_HANDLED; } static int cell_iommu_find_ioc(int nid, unsigned long *base) { struct device_node *np; struct resource r; *base = 0; /* First look for new style /be nodes */ for_each_node_by_name(np, "ioc") { if (of_node_to_nid(np) != nid) continue; if (of_address_to_resource(np, 0, &r)) { printk(KERN_ERR "iommu: can't get address for %s\n", np->full_name); continue; } *base = r.start; of_node_put(np); return 0; } /* Ok, let's try the old way */ for_each_node_by_type(np, "cpu") { const unsigned int *nidp; const unsigned long *tmp; nidp = of_get_property(np, "node-id", NULL); if (nidp && *nidp == nid) { tmp = of_get_property(np, "ioc-translation", NULL); if (tmp) { *base = *tmp; of_node_put(np); return 0; } } } return -ENODEV; } static void cell_iommu_setup_stab(struct cbe_iommu *iommu, unsigned long dbase, unsigned long dsize, unsigned long fbase, unsigned long fsize) { struct page *page; unsigned long segments, stab_size; segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT; pr_debug("%s: iommu[%d]: segments: %lu\n", __func__, iommu->nid, segments); /* set up the segment table */ stab_size = segments * sizeof(unsigned long); page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size)); BUG_ON(!page); iommu->stab = page_address(page); memset(iommu->stab, 0, stab_size); } 
static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu, unsigned long base, unsigned long size, unsigned long gap_base, unsigned long gap_size, unsigned long page_shift) { struct page *page; int i; unsigned long reg, segments, pages_per_segment, ptab_size, n_pte_pages, start_seg, *ptab; start_seg = base >> IO_SEGMENT_SHIFT; segments = size >> IO_SEGMENT_SHIFT; pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift); /* PTEs for each segment must start on a 4K bounday */ pages_per_segment = max(pages_per_segment, (1 << 12) / sizeof(unsigned long)); ptab_size = segments * pages_per_segment * sizeof(unsigned long); pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__, iommu->nid, ptab_size, get_order(ptab_size)); page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size)); BUG_ON(!page); ptab = page_address(page); memset(ptab, 0, ptab_size); /* number of 4K pages needed for a page table */ n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12; pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n", __func__, iommu->nid, iommu->stab, ptab, n_pte_pages); /* initialise the STEs */ reg = IOSTE_V | ((n_pte_pages - 1) << 5); switch (page_shift) { case 12: reg |= IOSTE_PS_4K; break; case 16: reg |= IOSTE_PS_64K; break; case 20: reg |= IOSTE_PS_1M; break; case 24: reg |= IOSTE_PS_16M; break; default: BUG(); } gap_base = gap_base >> IO_SEGMENT_SHIFT; gap_size = gap_size >> IO_SEGMENT_SHIFT; pr_debug("Setting up IOMMU stab:\n"); for (i = start_seg; i < (start_seg + segments); i++) { if (i >= gap_base && i < (gap_base + gap_size)) { pr_debug("\toverlap at %d, skipping\n", i); continue; } iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) * (i - start_seg)); pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]); } return ptab; } static void cell_iommu_enable_hardware(struct cbe_iommu *iommu) { int ret; unsigned long reg, xlate_base; unsigned int virq; if (cell_iommu_find_ioc(iommu->nid, &xlate_base)) panic("%s: missing IOC 
register mappings for node %d\n", __func__, iommu->nid); iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size); iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset; /* ensure that the STEs have updated */ mb(); /* setup interrupts for the iommu. */ reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, reg & ~IOC_IO_ExcpStat_V); out_be64(iommu->xlate_regs + IOC_IO_ExcpMask, IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE); virq = irq_create_mapping(NULL, IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT)); BUG_ON(virq == NO_IRQ); ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu); BUG_ON(ret); /* set the IOC segment table origin register (and turn on the iommu) */ reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW; out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg); in_be64(iommu->xlate_regs + IOC_IOST_Origin); /* turn on IO translation */ reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE; out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg); } static void cell_iommu_setup_hardware(struct cbe_iommu *iommu, unsigned long base, unsigned long size) { cell_iommu_setup_stab(iommu, base, size, 0, 0); iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0, IOMMU_PAGE_SHIFT); cell_iommu_enable_hardware(iommu); } #if 0/* Unused for now */ static struct iommu_window *find_window(struct cbe_iommu *iommu, unsigned long offset, unsigned long size) { struct iommu_window *window; /* todo: check for overlapping (but not equal) windows) */ list_for_each_entry(window, &(iommu->windows), list) { if (window->offset == offset && window->size == size) return window; } return NULL; } #endif static inline u32 cell_iommu_get_ioid(struct device_node *np) { const u32 *ioid; ioid = of_get_property(np, "ioid", NULL); if (ioid == NULL) { printk(KERN_WARNING "iommu: missing ioid for %s using 0\n", np->full_name); return 0; } return *ioid; } static struct iommu_window * __init cell_iommu_setup_window(struct 
cbe_iommu *iommu, struct device_node *np, unsigned long offset, unsigned long size, unsigned long pte_offset) { struct iommu_window *window; struct page *page; u32 ioid; ioid = cell_iommu_get_ioid(np); window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid); BUG_ON(window == NULL); window->offset = offset; window->size = size; window->ioid = ioid; window->iommu = iommu; window->table.it_blocksize = 16; window->table.it_base = (unsigned long)iommu->ptab; window->table.it_index = iommu->nid; window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset; window->table.it_size = size >> IOMMU_PAGE_SHIFT; iommu_init_table(&window->table, iommu->nid); pr_debug("\tioid %d\n", window->ioid); pr_debug("\tblocksize %ld\n", window->table.it_blocksize); pr_debug("\tbase 0x%016lx\n", window->table.it_base); pr_debug("\toffset 0x%lx\n", window->table.it_offset); pr_debug("\tsize %ld\n", window->table.it_size); list_add(&window->list, &iommu->windows); if (offset != 0) return window; /* We need to map and reserve the first IOMMU page since it's used * by the spider workaround. In theory, we only need to do that when * running on spider but it doesn't really matter. * * This code also assumes that we have a window that starts at 0, * which is the case on all spider based blades. 
*/ page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0); BUG_ON(!page); iommu->pad_page = page_address(page); clear_page(iommu->pad_page); __set_bit(0, window->table.it_map); tce_build_cell(&window->table, window->table.it_offset, 1, (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL); window->table.it_hint = window->table.it_blocksize; return window; } static struct cbe_iommu *cell_iommu_for_node(int nid) { int i; for (i = 0; i < cbe_nr_iommus; i++) if (iommus[i].nid == nid) return &iommus[i]; return NULL; } static unsigned long cell_dma_direct_offset; static unsigned long dma_iommu_fixed_base; /* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */ static int iommu_fixed_is_weak; static struct iommu_table *cell_get_iommu_table(struct device *dev) { struct iommu_window *window; struct cbe_iommu *iommu; /* Current implementation uses the first window available in that * node's iommu. We -might- do something smarter later though it may * never be necessary */ iommu = cell_iommu_for_node(dev_to_node(dev)); if (iommu == NULL || list_empty(&iommu->windows)) { printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n", dev->of_node ? 
dev->of_node->full_name : "?", dev_to_node(dev)); return NULL; } window = list_entry(iommu->windows.next, struct iommu_window, list); return &window->table; } /* A coherent allocation implies strong ordering */ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) { if (iommu_fixed_is_weak) return iommu_alloc_coherent(dev, cell_get_iommu_table(dev), size, dma_handle, device_to_mask(dev), flag, dev_to_node(dev)); else return dma_direct_ops.alloc(dev, size, dma_handle, flag, attrs); } static void dma_fixed_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { if (iommu_fixed_is_weak) iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, dma_handle); else dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs); } static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) return dma_direct_ops.map_page(dev, page, offset, size, direction, attrs); else return iommu_map_page(dev, cell_get_iommu_table(dev), page, offset, size, device_to_mask(dev), direction, attrs); } static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) dma_direct_ops.unmap_page(dev, dma_addr, size, direction, attrs); else iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size, direction, attrs); } static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, struct dma_attrs *attrs) { if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs); else return iommu_map_sg(dev, 
cell_get_iommu_table(dev), sg, nents, device_to_mask(dev), direction, attrs); } static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, struct dma_attrs *attrs) { if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs); else iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, direction, attrs); } static int dma_fixed_dma_supported(struct device *dev, u64 mask) { return mask == DMA_BIT_MASK(64); } static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask); struct dma_map_ops dma_iommu_fixed_ops = { .alloc = dma_fixed_alloc_coherent, .free = dma_fixed_free_coherent, .map_sg = dma_fixed_map_sg, .unmap_sg = dma_fixed_unmap_sg, .dma_supported = dma_fixed_dma_supported, .set_dma_mask = dma_set_mask_and_switch, .map_page = dma_fixed_map_page, .unmap_page = dma_fixed_unmap_page, }; static void cell_dma_dev_setup_fixed(struct device *dev); static void cell_dma_dev_setup(struct device *dev) { /* Order is important here, these are not mutually exclusive */ if (get_dma_ops(dev) == &dma_iommu_fixed_ops) cell_dma_dev_setup_fixed(dev); else if (get_pci_dma_ops() == &dma_iommu_ops) set_iommu_table_base(dev, cell_get_iommu_table(dev)); else if (get_pci_dma_ops() == &dma_direct_ops) set_dma_offset(dev, cell_dma_direct_offset); else BUG(); } static void cell_pci_dma_dev_setup(struct pci_dev *dev) { cell_dma_dev_setup(&dev->dev); } static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; /* We are only intereted in device addition */ if (action != BUS_NOTIFY_ADD_DEVICE) return 0; /* We use the PCI DMA ops */ dev->archdata.dma_ops = get_pci_dma_ops(); cell_dma_dev_setup(dev); return 0; } static struct notifier_block cell_of_bus_notifier = { .notifier_call = cell_of_bus_notify }; static int __init cell_iommu_get_window(struct device_node *np, unsigned long *base, unsigned long 
*size) { const void *dma_window; unsigned long index; /* Use ibm,dma-window if available, else, hard code ! */ dma_window = of_get_property(np, "ibm,dma-window", NULL); if (dma_window == NULL) { *base = 0; *size = 0x80000000u; return -ENODEV; } of_parse_dma_window(np, dma_window, &index, base, size); return 0; } static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np) { struct cbe_iommu *iommu; int nid, i; /* Get node ID */ nid = of_node_to_nid(np); if (nid < 0) { printk(KERN_ERR "iommu: failed to get node for %s\n", np->full_name); return NULL; } pr_debug("iommu: setting up iommu for node %d (%s)\n", nid, np->full_name); /* XXX todo: If we can have multiple windows on the same IOMMU, which * isn't the case today, we probably want here to check wether the * iommu for that node is already setup. * However, there might be issue with getting the size right so let's * ignore that for now. We might want to completely get rid of the * multiple window support since the cell iommu supports per-page ioids */ if (cbe_nr_iommus >= NR_IOMMUS) { printk(KERN_ERR "iommu: too many IOMMUs detected ! 
(%s)\n", np->full_name); return NULL; } /* Init base fields */ i = cbe_nr_iommus++; iommu = &iommus[i]; iommu->stab = NULL; iommu->nid = nid; snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i); INIT_LIST_HEAD(&iommu->windows); return iommu; } static void __init cell_iommu_init_one(struct device_node *np, unsigned long offset) { struct cbe_iommu *iommu; unsigned long base, size; iommu = cell_iommu_alloc(np); if (!iommu) return; /* Obtain a window for it */ cell_iommu_get_window(np, &base, &size); pr_debug("\ttranslating window 0x%lx...0x%lx\n", base, base + size - 1); /* Initialize the hardware */ cell_iommu_setup_hardware(iommu, base, size); /* Setup the iommu_table */ cell_iommu_setup_window(iommu, np, base, size, offset >> IOMMU_PAGE_SHIFT); } static void __init cell_disable_iommus(void) { int node; unsigned long base, val; void __iomem *xregs, *cregs; /* Make sure IOC translation is disabled on all nodes */ for_each_online_node(node) { if (cell_iommu_find_ioc(node, &base)) continue; xregs = ioremap(base, IOC_Reg_Size); if (xregs == NULL) continue; cregs = xregs + IOC_IOCmd_Offset; pr_debug("iommu: cleaning up iommu on node %d\n", node); out_be64(xregs + IOC_IOST_Origin, 0); (void)in_be64(xregs + IOC_IOST_Origin); val = in_be64(cregs + IOC_IOCmd_Cfg); val &= ~IOC_IOCmd_Cfg_TE; out_be64(cregs + IOC_IOCmd_Cfg, val); (void)in_be64(cregs + IOC_IOCmd_Cfg); iounmap(xregs); } } static int __init cell_iommu_init_disabled(void) { struct device_node *np = NULL; unsigned long base = 0, size; /* When no iommu is present, we use direct DMA ops */ set_pci_dma_ops(&dma_direct_ops); /* First make sure all IOC translation is turned off */ cell_disable_iommus(); /* If we have no Axon, we set up the spider DMA magic offset */ if (of_find_node_by_name(NULL, "axon") == NULL) cell_dma_direct_offset = SPIDER_DMA_OFFSET; /* Now we need to check to see where the memory is mapped * in PCI space. 
We assume that all busses use the same dma * window which is always the case so far on Cell, thus we * pick up the first pci-internal node we can find and check * the DMA window from there. */ for_each_node_by_name(np, "axon") { if (np->parent == NULL || np->parent->parent != NULL) continue; if (cell_iommu_get_window(np, &base, &size) == 0) break; } if (np == NULL) { for_each_node_by_name(np, "pci-internal") { if (np->parent == NULL || np->parent->parent != NULL) continue; if (cell_iommu_get_window(np, &base, &size) == 0) break; } } of_node_put(np); /* If we found a DMA window, we check if it's big enough to enclose * all of physical memory. If not, we force enable IOMMU */ if (np && size < memblock_end_of_DRAM()) { printk(KERN_WARNING "iommu: force-enabled, dma window" " (%ldMB) smaller than total memory (%lldMB)\n", size >> 20, memblock_end_of_DRAM() >> 20); return -ENODEV; } cell_dma_direct_offset += base; if (cell_dma_direct_offset != 0) ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup; printk("iommu: disabled, direct DMA offset is 0x%lx\n", cell_dma_direct_offset); return 0; } /* * Fixed IOMMU mapping support * * This code adds support for setting up a fixed IOMMU mapping on certain * cell machines. For 64-bit devices this avoids the performance overhead of * mapping and unmapping pages at runtime. 32-bit devices are unable to use * the fixed mapping. * * The fixed mapping is established at boot, and maps all of physical memory * 1:1 into device space at some offset. On machines with < 30 GB of memory * we setup the fixed mapping immediately above the normal IOMMU window. * * For example a machine with 4GB of memory would end up with the normal * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In * this case a 64-bit device wishing to DMA to 1GB would be told to DMA to * 3GB, plus any offset required by firmware. The firmware offset is encoded * in the "dma-ranges" property. 
* * On machines with 30GB or more of memory, we are unable to place the fixed * mapping above the normal IOMMU window as we would run out of address space. * Instead we move the normal IOMMU window to coincide with the hash page * table, this region does not need to be part of the fixed mapping as no * device should ever be DMA'ing to it. We then setup the fixed mapping * from 0 to 32GB. */ static u64 cell_iommu_get_fixed_address(struct device *dev) { u64 cpu_addr, size, best_size, dev_addr = OF_BAD_ADDR; struct device_node *np; const u32 *ranges = NULL; int i, len, best, naddr, nsize, pna, range_size; np = of_node_get(dev->of_node); while (1) { naddr = of_n_addr_cells(np); nsize = of_n_size_cells(np); np = of_get_next_parent(np); if (!np) break; ranges = of_get_property(np, "dma-ranges", &len); /* Ignore empty ranges, they imply no translation required */ if (ranges && len > 0) break; } if (!ranges) { dev_dbg(dev, "iommu: no dma-ranges found\n"); goto out; } len /= sizeof(u32); pna = of_n_addr_cells(np); range_size = naddr + nsize + pna; /* dma-ranges format: * child addr : naddr cells * parent addr : pna cells * size : nsize cells */ for (i = 0, best = -1, best_size = 0; i < len; i += range_size) { cpu_addr = of_translate_dma_address(np, ranges + i + naddr); size = of_read_number(ranges + i + naddr + pna, nsize); if (cpu_addr == 0 && size > best_size) { best = i; best_size = size; } } if (best >= 0) { dev_addr = of_read_number(ranges + best, naddr); } else dev_dbg(dev, "iommu: no suitable range found!\n"); out: of_node_put(np); return dev_addr; } static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask) { if (!dev->dma_mask || !dma_supported(dev, dma_mask)) return -EIO; if (dma_mask == DMA_BIT_MASK(64) && cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) { dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n"); set_dma_ops(dev, &dma_iommu_fixed_ops); } else { dev_dbg(dev, "iommu: not 64-bit, using default ops\n"); set_dma_ops(dev, get_pci_dma_ops()); } 
cell_dma_dev_setup(dev); *dev->dma_mask = dma_mask; return 0; } static void cell_dma_dev_setup_fixed(struct device *dev) { u64 addr; addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base; set_dma_offset(dev, addr); dev_dbg(dev, "iommu: fixed addr = %llx\n", addr); } static void insert_16M_pte(unsigned long addr, unsigned long *ptab, unsigned long base_pte) { unsigned long segment, offset; segment = addr >> IO_SEGMENT_SHIFT; offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24)); ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long)); pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n", addr, ptab, segment, offset); ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask); } static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu, struct device_node *np, unsigned long dbase, unsigned long dsize, unsigned long fbase, unsigned long fsize) { unsigned long base_pte, uaddr, ioaddr, *ptab; ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24); dma_iommu_fixed_base = fbase; pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase); base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | (cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask); if (iommu_fixed_is_weak) pr_info("IOMMU: Using weak ordering for fixed mapping\n"); else { pr_info("IOMMU: Using strong ordering for fixed mapping\n"); base_pte |= CBE_IOPTE_SO_RW; } for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) { /* Don't touch the dynamic region */ ioaddr = uaddr + fbase; if (ioaddr >= dbase && ioaddr < (dbase + dsize)) { pr_debug("iommu: fixed/dynamic overlap, skipping\n"); continue; } insert_16M_pte(uaddr, ptab, base_pte); } mb(); } static int __init cell_iommu_fixed_mapping_init(void) { unsigned long dbase, dsize, fbase, fsize, hbase, hend; struct cbe_iommu *iommu; struct device_node *np; /* The fixed mapping is only supported on axon machines */ np = of_find_node_by_name(NULL, "axon"); of_node_put(np); if (!np) { pr_debug("iommu: fixed mapping disabled, no 
axons found\n"); return -1; } /* We must have dma-ranges properties for fixed mapping to work */ np = of_find_node_with_property(NULL, "dma-ranges"); of_node_put(np); if (!np) { pr_debug("iommu: no dma-ranges found, no fixed mapping\n"); return -1; } /* The default setup is to have the fixed mapping sit after the * dynamic region, so find the top of the largest IOMMU window * on any axon, then add the size of RAM and that's our max value. * If that is > 32GB we have to do other shennanigans. */ fbase = 0; for_each_node_by_name(np, "axon") { cell_iommu_get_window(np, &dbase, &dsize); fbase = max(fbase, dbase + dsize); } fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT); fsize = memblock_phys_mem_size(); if ((fbase + fsize) <= 0x800000000ul) hbase = 0; /* use the device tree window */ else { /* If we're over 32 GB we need to cheat. We can't map all of * RAM with the fixed mapping, and also fit the dynamic * region. So try to place the dynamic region where the hash * table sits, drivers never need to DMA to it, we don't * need a fixed mapping for that area. */ if (!htab_address) { pr_debug("iommu: htab is NULL, on LPAR? 
Huh?\n"); return -1; } hbase = __pa(htab_address); hend = hbase + htab_size_bytes; /* The window must start and end on a segment boundary */ if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) || (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) { pr_debug("iommu: hash window not segment aligned\n"); return -1; } /* Check the hash window fits inside the real DMA window */ for_each_node_by_name(np, "axon") { cell_iommu_get_window(np, &dbase, &dsize); if (hbase < dbase || (hend > (dbase + dsize))) { pr_debug("iommu: hash window doesn't fit in" "real DMA window\n"); return -1; } } fbase = 0; } /* Setup the dynamic regions */ for_each_node_by_name(np, "axon") { iommu = cell_iommu_alloc(np); BUG_ON(!iommu); if (hbase == 0) cell_iommu_get_window(np, &dbase, &dsize); else { dbase = hbase; dsize = htab_size_bytes; } printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx " "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase, dbase + dsize, fbase, fbase + fsize); cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize); iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0, IOMMU_PAGE_SHIFT); cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize, fbase, fsize); cell_iommu_enable_hardware(iommu); cell_iommu_setup_window(iommu, np, dbase, dsize, 0); } dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch; set_pci_dma_ops(&dma_iommu_ops); return 0; } static int iommu_fixed_disabled; static int __init setup_iommu_fixed(char *str) { struct device_node *pciep; if (strcmp(str, "off") == 0) iommu_fixed_disabled = 1; /* If we can find a pcie-endpoint in the device tree assume that * we're on a triblade or a CAB so by default the fixed mapping * should be set to be weakly ordered; but only if the boot * option WASN'T set for strong ordering */ pciep = of_find_node_by_type(NULL, "pcie-endpoint"); if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) iommu_fixed_is_weak = 1; of_node_put(pciep); return 1; } __setup("iommu_fixed=", setup_iommu_fixed); static 
u64 cell_dma_get_required_mask(struct device *dev) { struct dma_map_ops *dma_ops; if (!dev->dma_mask) return 0; if (!iommu_fixed_disabled && cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) return DMA_BIT_MASK(64); dma_ops = get_dma_ops(dev); if (dma_ops->get_required_mask) return dma_ops->get_required_mask(dev); WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops); return DMA_BIT_MASK(64); } static int __init cell_iommu_init(void) { struct device_node *np; /* If IOMMU is disabled or we have little enough RAM to not need * to enable it, we setup a direct mapping. * * Note: should we make sure we have the IOMMU actually disabled ? */ if (iommu_is_off || (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull)) if (cell_iommu_init_disabled() == 0) goto bail; /* Setup various ppc_md. callbacks */ ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup; ppc_md.dma_get_required_mask = cell_dma_get_required_mask; ppc_md.tce_build = tce_build_cell; ppc_md.tce_free = tce_free_cell; if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0) goto bail; /* Create an iommu for each /axon node. */ for_each_node_by_name(np, "axon") { if (np->parent == NULL || np->parent->parent != NULL) continue; cell_iommu_init_one(np, 0); } /* Create an iommu for each toplevel /pci-internal node for * old hardware/firmware */ for_each_node_by_name(np, "pci-internal") { if (np->parent == NULL || np->parent->parent != NULL) continue; cell_iommu_init_one(np, SPIDER_DMA_OFFSET); } /* Setup default PCI iommu ops */ set_pci_dma_ops(&dma_iommu_ops); bail: /* Register callbacks on OF platform device addition/removal * to handle linking them to the right DMA operations */ bus_register_notifier(&platform_bus_type, &cell_of_bus_notifier); return 0; } machine_arch_initcall(cell, cell_iommu_init); machine_arch_initcall(celleb_native, cell_iommu_init);
gpl-2.0
ProtouProject/android_kernel_msm
arch/x86/kernel/cpu/mcheck/mce-apei.c
4945
4329
/*
 * Bridge between MCE and APEI
 *
 * On some machine, corrected memory errors are reported via APEI
 * generic hardware error source (GHES) instead of corrected Machine
 * Check. These corrected memory errors can be reported to user space
 * through /dev/mcelog via faking a corrected Machine Check, so that
 * the error memory page can be offlined by /sbin/mcelog if the error
 * count for one page is beyond the threshold.
 *
 * For fatal MCE, save MCE record into persistent storage via ERST, so
 * that the MCE record can be logged after reboot via ERST.
 *
 * Copyright 2010 Intel Corp.
 * Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/cper.h>
#include <acpi/apei.h>
#include <asm/mce.h>

#include "mce-internal.h"

/*
 * Fake a corrected Machine Check from an APEI-reported memory error so
 * it flows through the normal MCE logging path (/dev/mcelog).
 * Uncorrected errors are deliberately ignored here.
 */
void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
{
	struct mce m;

	/* Only corrected MC is reported */
	if (!corrected)
		return;

	mce_setup(&m);
	m.bank = 1;
	/* Fake a memory read corrected error with unknown channel */
	m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
	m.addr = mem_err->physical_addr;
	mce_log(&m);
	mce_notify_irq();
}
EXPORT_SYMBOL_GPL(apei_mce_report_mem_error);

/* UUIDs identifying our records inside the ERST store */
#define CPER_CREATOR_MCE						\
	UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c,	\
		0x64, 0x90, 0xb8, 0x9d)
#define CPER_SECTION_TYPE_MCE						\
	UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96,	\
		0x04, 0x4a, 0x38, 0xfc)

/*
 * CPER specification (in UEFI specification 2.3 appendix N) requires
 * byte-packed.
 */
struct cper_mce_record {
	struct cper_record_header hdr;
	struct cper_section_descriptor sec_hdr;
	struct mce mce;
} __packed;

/*
 * Wrap a fatal MCE record in a single-section CPER record and persist
 * it through ERST so it survives the subsequent reboot.
 * Returns the erst_write() result (0 on success, negative errno).
 */
int apei_write_mce(struct mce *m)
{
	struct cper_mce_record rcd;

	memset(&rcd, 0, sizeof(rcd));
	memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
	rcd.hdr.revision = CPER_RECORD_REV;
	rcd.hdr.signature_end = CPER_SIG_END;
	rcd.hdr.section_count = 1;
	rcd.hdr.error_severity = CPER_SEV_FATAL;
	/* timestamp, platform_id, partition_id are all invalid */
	rcd.hdr.validation_bits = 0;
	rcd.hdr.record_length = sizeof(rcd);
	rcd.hdr.creator_id = CPER_CREATOR_MCE;
	rcd.hdr.notification_type = CPER_NOTIFY_MCE;
	rcd.hdr.record_id = cper_next_record_id();
	rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;

	rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd;
	rcd.sec_hdr.section_length = sizeof(rcd.mce);
	rcd.sec_hdr.revision = CPER_SEC_REV;
	/* fru_id and fru_text are invalid */
	rcd.sec_hdr.validation_bits = 0;
	rcd.sec_hdr.flags = CPER_SEC_PRIMARY;
	rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
	rcd.sec_hdr.section_severity = CPER_SEV_FATAL;

	memcpy(&rcd.mce, m, sizeof(*m));

	return erst_write(&rcd.hdr);
}

/*
 * Read back one MCE record previously saved via apei_write_mce().
 * Iterates the ERST record-id space, skipping records that were
 * concurrently cleared or that belong to other creators.
 * On success *record_id identifies the record (for apei_clear_mce())
 * and sizeof(*m) is returned; otherwise a negative errno.
 */
ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
	struct cper_mce_record rcd;
	int rc, pos;

	rc = erst_get_record_id_begin(&pos);
	if (rc)
		return rc;
retry:
	rc = erst_get_record_id_next(&pos, record_id);
	if (rc)
		goto out;
	/* no more record */
	if (*record_id == APEI_ERST_INVALID_RECORD_ID)
		goto out;
	rc = erst_read(*record_id, &rcd.hdr, sizeof(rcd));
	/* someone else has cleared the record, try next one */
	if (rc == -ENOENT)
		goto retry;
	else if (rc < 0)
		goto out;
	/* try to skip other type records in storage */
	else if (rc != sizeof(rcd) ||
		 uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE))
		goto retry;
	memcpy(m, &rcd.mce, sizeof(*m));
	rc = sizeof(*m);
out:
	erst_get_record_id_end();
	return rc;
}

/* Check whether there is record in ERST */
int apei_check_mce(void)
{
	return erst_get_record_count();
}

/* Drop a record (by id from apei_read_mce()) from persistent storage */
int apei_clear_mce(u64 record_id)
{
	return erst_clear(record_id);
}
gpl-2.0
shskyinfo/android_kernel_lge_e610
drivers/usb/host/fhci-hcd.c
4945
19534
/*
 * Freescale QUICC Engine USB Host Controller Driver
 *
 * Copyright (c) Freescale Semicondutor, Inc. 2006.
 *               Shlomi Gridish <gridish@freescale.com>
 *               Jerry Huang <Chang-Ming.Huang@freescale.com>
 * Copyright (c) Logic Product Development, Inc. 2007
 *               Peter Barada <peterb@logicpd.com>
 * Copyright (c) MontaVista Software, Inc. 2008.
 *               Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <asm/qe.h>
#include <asm/fsl_gtm.h>
#include "fhci.h"

/* Reset the frame number and (re)arm the SOF timer in the controller */
void fhci_start_sof_timer(struct fhci_hcd *fhci)
{
	fhci_dbg(fhci, "-> %s\n", __func__);

	/* clear frame_n */
	out_be16(&fhci->pram->frame_num, 0);

	out_be16(&fhci->regs->usb_sof_tmr, 0);
	setbits8(&fhci->regs->usb_mod, USB_MODE_SFTE);

	fhci_dbg(fhci, "<- %s\n", __func__);
}

/* Disable SOF generation and stop the backing GTM timer */
void fhci_stop_sof_timer(struct fhci_hcd *fhci)
{
	fhci_dbg(fhci, "-> %s\n", __func__);

	clrbits8(&fhci->regs->usb_mod, USB_MODE_SFTE);
	gtm_stop_timer16(fhci->timer);

	fhci_dbg(fhci, "<- %s\n", __func__);
}

/*
 * Current SOF timer count scaled to microseconds-ish units (/12).
 * NOTE(review): be16_to_cpu() is applied to the value returned by
 * in_be16() (already CPU order) divided by 12 — the swap/divide order
 * looks suspicious; verify endianness handling on this platform.
 */
u16 fhci_get_sof_timer_count(struct fhci_usb *usb)
{
	return be16_to_cpu(in_be16(&usb->fhci->regs->usb_sof_tmr) / 12);
}

/* initialize the endpoint zero */
static u32 endpoint_zero_init(struct fhci_usb *usb,
			      enum fhci_mem_alloc data_mem,
			      u32 ring_len)
{
	u32 rc;

	rc = fhci_create_ep(usb, data_mem, ring_len);
	if (rc)
		return rc;

	/* initialize endpoint registers */
	fhci_init_ep_registers(usb, usb->ep0, data_mem);

	return 0;
}

/*
 * Enable the USB interrupts.
 * Paired with fhci_usb_disable_interrupt(); intr_nesting_cnt counts
 * outstanding disables, so hardware is only re-enabled when the count
 * drops back to the outermost level (== 1 on entry).
 */
void fhci_usb_enable_interrupt(struct fhci_usb *usb)
{
	struct fhci_hcd *fhci = usb->fhci;

	if (usb->intr_nesting_cnt == 1) {
		/* initialize the USB interrupt */
		enable_irq(fhci_to_hcd(fhci)->irq);

		/* initialize the event register and mask register */
		out_be16(&usb->fhci->regs->usb_event, 0xffff);
		out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);

		/* enable the timer interrupts */
		enable_irq(fhci->timer->irq);
	} else if (usb->intr_nesting_cnt > 1)
		fhci_info(fhci, "unbalanced USB interrupts nesting\n");
	usb->intr_nesting_cnt--;
}

/* disable the usb interrupt; nestable, see fhci_usb_enable_interrupt() */
void fhci_usb_disable_interrupt(struct fhci_usb *usb)
{
	struct fhci_hcd *fhci = usb->fhci;

	if (usb->intr_nesting_cnt == 0) {
		/* disable the timer interrupt */
		disable_irq_nosync(fhci->timer->irq);

		/* disable the usb interrupt */
		disable_irq_nosync(fhci_to_hcd(fhci)->irq);
		out_be16(&usb->fhci->regs->usb_mask, 0);
	}
	usb->intr_nesting_cnt++;
}

/* enable the USB controller */
static u32 fhci_usb_enable(struct fhci_hcd *fhci)
{
	struct fhci_usb *usb = fhci->usb_lld;

	out_be16(&usb->fhci->regs->usb_event, 0xffff);
	out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
	setbits8(&usb->fhci->regs->usb_mod, USB_MODE_EN);

	/* settle time after enabling the controller */
	mdelay(100);

	return 0;
}

/* disable the USB controller */
static u32 fhci_usb_disable(struct fhci_hcd *fhci)
{
	struct fhci_usb *usb = fhci->usb_lld;

	fhci_usb_disable_interrupt(usb);
	fhci_port_disable(fhci);

	/* disable the usb controller */
	if (usb->port_status == FHCI_PORT_FULL ||
			usb->port_status == FHCI_PORT_LOW)
		fhci_device_disconnected_interrupt(fhci);

	clrbits8(&usb->fhci->regs->usb_mod, USB_MODE_EN);

	return 0;
}

/*
 * Check the bus state by polling the QE bit on the IO ports.
 * Returns -1 if currently transmitting (USBOE low), otherwise a 2-bit
 * value with USBRP in bit 1 and USBRN in bit 0.
 */
int fhci_ioports_check_bus_state(struct fhci_hcd *fhci)
{
	u8 bits = 0;

	/* check USBOE,if transmitting,exit */
	if (!gpio_get_value(fhci->gpios[GPIO_USBOE]))
		return -1;

	/* check USBRP */
	if (gpio_get_value(fhci->gpios[GPIO_USBRP]))
		bits |= 0x2;

	/* check USBRN */
	if (gpio_get_value(fhci->gpios[GPIO_USBRN]))
		bits |= 0x1;

	return bits;
}
/* Free all driver bookkeeping: cached ed/td pools, virtual root hub, hc lists */
static void fhci_mem_free(struct fhci_hcd *fhci)
{
	struct ed *ed;
	struct ed *next_ed;
	struct td *td;
	struct td *next_td;

	list_for_each_entry_safe(ed, next_ed, &fhci->empty_eds, node) {
		list_del(&ed->node);
		kfree(ed);
	}

	list_for_each_entry_safe(td, next_td, &fhci->empty_tds, node) {
		list_del(&td->node);
		kfree(td);
	}

	kfree(fhci->vroot_hub);
	fhci->vroot_hub = NULL;

	kfree(fhci->hc_list);
	fhci->hc_list = NULL;
}

/*
 * Allocate the hc lists, the virtual root hub, and pre-populate the
 * empty td/ed pools. On any failure everything allocated so far is
 * released via fhci_mem_free() and -ENOMEM is returned.
 */
static int fhci_mem_init(struct fhci_hcd *fhci)
{
	int i;

	fhci->hc_list = kzalloc(sizeof(*fhci->hc_list), GFP_KERNEL);
	if (!fhci->hc_list)
		goto err;

	INIT_LIST_HEAD(&fhci->hc_list->ctrl_list);
	INIT_LIST_HEAD(&fhci->hc_list->bulk_list);
	INIT_LIST_HEAD(&fhci->hc_list->iso_list);
	INIT_LIST_HEAD(&fhci->hc_list->intr_list);
	INIT_LIST_HEAD(&fhci->hc_list->done_list);

	fhci->vroot_hub = kzalloc(sizeof(*fhci->vroot_hub), GFP_KERNEL);
	if (!fhci->vroot_hub)
		goto err;

	INIT_LIST_HEAD(&fhci->empty_eds);
	INIT_LIST_HEAD(&fhci->empty_tds);

	/* initialize work queue to handle done list */
	fhci_tasklet.data = (unsigned long)fhci;
	fhci->process_done_task = &fhci_tasklet;

	for (i = 0; i < MAX_TDS; i++) {
		struct td *td;

		td = kmalloc(sizeof(*td), GFP_KERNEL);
		if (!td)
			goto err;
		fhci_recycle_empty_td(fhci, td);
	}
	for (i = 0; i < MAX_EDS; i++) {
		struct ed *ed;

		ed = kmalloc(sizeof(*ed), GFP_KERNEL);
		if (!ed)
			goto err;
		fhci_recycle_empty_ed(fhci, ed);
	}

	fhci->active_urbs = 0;
	return 0;
err:
	fhci_mem_free(fhci);
	return -ENOMEM;
}

/* destroy the fhci_usb structure */
static void fhci_usb_free(void *lld)
{
	struct fhci_usb *usb = lld;
	struct fhci_hcd *fhci;

	if (usb) {
		fhci = usb->fhci;
		fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
		fhci_ep0_free(usb);
		kfree(usb->actual_frame);
		kfree(usb);
	}
}

/*
 * Initialize the USB: clear parameter RAM, program mode/event/mask
 * registers and set up endpoint zero. Frees the lld on failure.
 */
static int fhci_usb_init(struct fhci_hcd *fhci)
{
	struct fhci_usb *usb = fhci->usb_lld;

	memset_io(usb->fhci->pram, 0, FHCI_PRAM_SIZE);

	usb->port_status = FHCI_PORT_DISABLED;
	usb->max_frame_usage = FRAME_TIME_USAGE;
	usb->sw_transaction_time = SW_FIX_TIME_BETWEEN_TRANSACTION;

	usb->actual_frame = kzalloc(sizeof(*usb->actual_frame), GFP_KERNEL);
	if (!usb->actual_frame) {
		fhci_usb_free(usb);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&usb->actual_frame->tds_list);

	/* initializing registers on chip, clear frame number */
	out_be16(&fhci->pram->frame_num, 0);

	/* clear rx state */
	out_be32(&fhci->pram->rx_state, 0);

	/* set mask register */
	usb->saved_msk = (USB_E_TXB_MASK |
			  USB_E_TXE1_MASK |
			  USB_E_IDLE_MASK |
			  USB_E_RESET_MASK | USB_E_SFT_MASK | USB_E_MSF_MASK);

	out_8(&usb->fhci->regs->usb_mod, USB_MODE_HOST | USB_MODE_EN);

	/* clearing the mask register */
	out_be16(&usb->fhci->regs->usb_mask, 0);

	/* initializing the event register */
	out_be16(&usb->fhci->regs->usb_event, 0xffff);

	if (endpoint_zero_init(usb, DEFAULT_DATA_MEM, DEFAULT_RING_LEN) != 0) {
		fhci_usb_free(usb);
		return -EINVAL;
	}

	return 0;
}

/* initialize the fhci_usb struct and the corresponding data struct */
static struct fhci_usb *fhci_create_lld(struct fhci_hcd *fhci)
{
	struct fhci_usb *usb;

	/* allocate memory for SCC data structure */
	usb = kzalloc(sizeof(*usb), GFP_KERNEL);
	if (!usb) {
		fhci_err(fhci, "no memory for SCC data struct\n");
		return NULL;
	}

	usb->fhci = fhci;
	usb->hc_list = fhci->hc_list;
	usb->vroot_hub = fhci->vroot_hub;

	usb->transfer_confirm = fhci_transfer_confirm_callback;

	return usb;
}

/* hc_driver .start: allocate pools, create the lld, init HW, go RUNNING */
static int fhci_start(struct usb_hcd *hcd)
{
	int ret;
	struct fhci_hcd *fhci = hcd_to_fhci(hcd);

	ret = fhci_mem_init(fhci);
	if (ret) {
		fhci_err(fhci, "failed to allocate memory\n");
		goto err;
	}

	fhci->usb_lld = fhci_create_lld(fhci);
	if (!fhci->usb_lld) {
		fhci_err(fhci, "low level driver config failed\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = fhci_usb_init(fhci);
	if (ret) {
		fhci_err(fhci, "low level driver initialize failed\n");
		goto err;
	}

	spin_lock_init(&fhci->lock);

	/* connect the virtual root hub */
	fhci->vroot_hub->dev_num = 1;	/* this field may be needed to fix */
	fhci->vroot_hub->hub.wHubStatus = 0;
	fhci->vroot_hub->hub.wHubChange = 0;
	fhci->vroot_hub->port.wPortStatus = 0;
	fhci->vroot_hub->port.wPortChange = 0;

	hcd->state = HC_STATE_RUNNING;

	/*
	 * From here on, khubd concurrently accesses the root
	 * hub; drivers will be talking to enumerated devices.
	 * (On restart paths, khubd already knows about the root
	 * hub and could find work as soon as we wrote FLAG_CF.)
	 *
	 * Before this point the HC was idle/ready. After, khubd
	 * and device drivers may start it running.
	 */
	fhci_usb_enable(fhci);

	return 0;
err:
	fhci_mem_free(fhci);
	return ret;
}

/* hc_driver .stop: quiesce interrupts, disable HW, release everything */
static void fhci_stop(struct usb_hcd *hcd)
{
	struct fhci_hcd *fhci = hcd_to_fhci(hcd);

	fhci_usb_disable_interrupt(fhci->usb_lld);
	fhci_usb_disable(fhci);

	fhci_usb_free(fhci->usb_lld);
	fhci->usb_lld = NULL;
	fhci_mem_free(fhci);
}

/*
 * hc_driver .urb_enqueue: size the per-URB td array by pipe type,
 * allocate the private part and hand the URB to the scheduler.
 */
static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
			    gfp_t mem_flags)
{
	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
	u32 pipe = urb->pipe;
	int ret;
	int i;
	int size = 0;
	struct urb_priv *urb_priv;
	unsigned long flags;

	switch (usb_pipetype(pipe)) {
	case PIPE_CONTROL:
		/* 1 td for setup, 1 for ack */
		size = 2;
		/* fallthrough: control data stage is sized like bulk */
	case PIPE_BULK:
		/* one td for every 4096 bytes(can be up to 8k) */
		size += urb->transfer_buffer_length / 4096;
		/* ...add for any remaining bytes... */
		if ((urb->transfer_buffer_length % 4096) != 0)
			size++;
		/* ..and maybe a zero length packet to wrap it up */
		if (size == 0)
			size++;
		else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
			 && (urb->transfer_buffer_length
			     % usb_maxpacket(urb->dev, pipe,
					     usb_pipeout(pipe))) != 0)
			size++;
		break;
	case PIPE_ISOCHRONOUS:
		size = urb->number_of_packets;
		if (size <= 0)
			return -EINVAL;
		for (i = 0; i < urb->number_of_packets; i++) {
			urb->iso_frame_desc[i].actual_length = 0;
			urb->iso_frame_desc[i].status = (u32) (-EXDEV);
		}
		break;
	case PIPE_INTERRUPT:
		size = 1;
	}

	/* allocate the private part of the URB */
	urb_priv = kzalloc(sizeof(*urb_priv), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	/* allocate the td array holding this URB's transfer descriptors */
	urb_priv->tds = kcalloc(size, sizeof(*urb_priv->tds), mem_flags);
	if (!urb_priv->tds) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	spin_lock_irqsave(&fhci->lock, flags);

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto err;

	/* fill the private part of the URB */
	urb_priv->num_of_tds = size;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;

	fhci_queue_urb(fhci, urb);
err:
	if (ret) {
		kfree(urb_priv->tds);
		kfree(urb_priv);
	}
	spin_unlock_irqrestore(&fhci->lock, flags);
	return ret;
}

/* dequeue FHCI URB */
static int fhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
	struct fhci_usb *usb = fhci->usb_lld;
	int ret = -EINVAL;
	unsigned long flags;

	if (!urb || !urb->dev || !urb->dev->bus)
		goto out;

	spin_lock_irqsave(&fhci->lock, flags);

	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto out2;

	if (usb->port_status != FHCI_PORT_DISABLED) {
		struct urb_priv *urb_priv;

		/*
		 * flag the urb's data for deletion in some upcoming
		 * SF interrupt's delete list processing
		 */
		urb_priv = urb->hcpriv;

		if (!urb_priv || (urb_priv->state == URB_DEL))
			goto out2;

		urb_priv->state = URB_DEL;

		/* already pending? */
		urb_priv->ed->state = FHCI_ED_URB_DEL;
	} else {
		fhci_urb_complete_free(fhci, urb);
	}

out2:
	spin_unlock_irqrestore(&fhci->lock, flags);
out:
	return ret;
}

/* hc_driver .endpoint_disable: complete/free any queued tds, recycle the ed */
static void fhci_endpoint_disable(struct usb_hcd *hcd,
				  struct usb_host_endpoint *ep)
{
	struct fhci_hcd *fhci;
	struct ed *ed;
	unsigned long flags;

	fhci = hcd_to_fhci(hcd);
	spin_lock_irqsave(&fhci->lock, flags);
	ed = ep->hcpriv;
	if (ed) {
		while (ed->td_head != NULL) {
			struct td *td = fhci_remove_td_from_ed(ed);
			fhci_urb_complete_free(fhci, td->urb);
		}
		fhci_recycle_empty_ed(fhci, ed);
		ep->hcpriv = NULL;
	}
	spin_unlock_irqrestore(&fhci->lock, flags);
}

/* hc_driver .get_frame_number */
static int fhci_get_frame_number(struct usb_hcd *hcd)
{
	struct fhci_hcd *fhci = hcd_to_fhci(hcd);

	return get_frame_num(fhci);
}

static const struct hc_driver fhci_driver = {
	.description = "fsl,usb-fhci",
	.product_desc = "FHCI HOST Controller",
	.hcd_priv_size = sizeof(struct fhci_hcd),

	/* generic hardware linkage */
	.irq = fhci_irq,
	.flags = HCD_USB11 | HCD_MEMORY,

	/* basic lifecycle operation */
	.start = fhci_start,
	.stop = fhci_stop,

	/* managing i/o requests and associated device resources */
	.urb_enqueue = fhci_urb_enqueue,
	.urb_dequeue = fhci_urb_dequeue,
	.endpoint_disable = fhci_endpoint_disable,

	/* scheduling support */
	.get_frame_number = fhci_get_frame_number,

	/* root hub support */
	.hub_status_data = fhci_hub_status_data,
	.hub_control = fhci_hub_control,
};

/*
 * OF probe: create the hcd, map registers, allocate QE parameter RAM,
 * claim GPIOs/pins/timer/IRQs/clocks from the device tree, then
 * register the hcd. Resources are unwound in reverse on failure.
 */
static int __devinit of_fhci_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *node = dev->of_node;
	struct usb_hcd *hcd;
	struct fhci_hcd *fhci;
	struct resource usb_regs;
	unsigned long pram_addr;
	unsigned int usb_irq;
	const char *sprop;
	const u32 *iprop;
	int size;
	int ret;
	int i;
	int j;

	if (usb_disabled())
		return -ENODEV;

	sprop = of_get_property(node, "mode", NULL);
	if (sprop && strcmp(sprop, "host"))
		return -ENODEV;

	hcd = usb_create_hcd(&fhci_driver, dev, dev_name(dev));
	if (!hcd) {
		dev_err(dev, "could not create hcd\n");
		return -ENOMEM;
	}

	fhci = hcd_to_fhci(hcd);
	hcd->self.controller = dev;
	dev_set_drvdata(dev, hcd);

	iprop = of_get_property(node, "hub-power-budget", &size);
	if (iprop && size == sizeof(*iprop))
		hcd->power_budget = *iprop;

	/* FHCI registers. */
	ret = of_address_to_resource(node, 0, &usb_regs);
	if (ret) {
		dev_err(dev, "could not get regs\n");
		goto err_regs;
	}

	hcd->regs = ioremap(usb_regs.start, resource_size(&usb_regs));
	if (!hcd->regs) {
		dev_err(dev, "could not ioremap regs\n");
		ret = -ENOMEM;
		goto err_regs;
	}
	fhci->regs = hcd->regs;

	/* Parameter RAM. */
	iprop = of_get_property(node, "reg", &size);
	if (!iprop || size < sizeof(*iprop) * 4) {
		dev_err(dev, "can't get pram offset\n");
		ret = -EINVAL;
		goto err_pram;
	}

	pram_addr = cpm_muram_alloc(FHCI_PRAM_SIZE, 64);
	if (IS_ERR_VALUE(pram_addr)) {
		dev_err(dev, "failed to allocate usb pram\n");
		ret = -ENOMEM;
		goto err_pram;
	}

	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, QE_CR_SUBBLOCK_USB,
		     QE_CR_PROTOCOL_UNSPECIFIED, pram_addr);
	fhci->pram = cpm_muram_addr(pram_addr);

	/* GPIOs and pins */
	for (i = 0; i < NUM_GPIOS; i++) {
		int gpio;
		enum of_gpio_flags flags;

		gpio = of_get_gpio_flags(node, i, &flags);

		fhci->gpios[i] = gpio;
		fhci->alow_gpios[i] = flags & OF_GPIO_ACTIVE_LOW;

		if (!gpio_is_valid(gpio)) {
			if (i < GPIO_SPEED) {
				dev_err(dev, "incorrect GPIO%d: %d\n",
					i, gpio);
				/*
				 * NOTE(review): ret is not assigned an error
				 * code on this path, so probe may return the
				 * last (successful) value — verify upstream.
				 */
				goto err_gpios;
			} else {
				dev_info(dev, "assuming board doesn't have "
					"%s gpio\n", i == GPIO_SPEED ?
					"speed" : "power");
				continue;
			}
		}

		ret = gpio_request(gpio, dev_name(dev));
		if (ret) {
			dev_err(dev, "failed to request gpio %d", i);
			goto err_gpios;
		}

		if (i >= GPIO_SPEED) {
			ret = gpio_direction_output(gpio, 0);
			if (ret) {
				dev_err(dev, "failed to set gpio %d as "
					"an output\n", i);
				/* i++ so the unwind loop frees this gpio too */
				i++;
				goto err_gpios;
			}
		}
	}

	for (j = 0; j < NUM_PINS; j++) {
		fhci->pins[j] = qe_pin_request(node, j);
		if (IS_ERR(fhci->pins[j])) {
			ret = PTR_ERR(fhci->pins[j]);
			dev_err(dev, "can't get pin %d: %d\n", j, ret);
			goto err_pins;
		}
	}

	/* Frame limit timer and its interrupt. */
	fhci->timer = gtm_get_timer16();
	if (IS_ERR(fhci->timer)) {
		ret = PTR_ERR(fhci->timer);
		dev_err(dev, "failed to request qe timer: %i", ret);
		goto err_get_timer;
	}

	ret = request_irq(fhci->timer->irq, fhci_frame_limit_timer_irq,
			  0, "qe timer (usb)", hcd);
	if (ret) {
		dev_err(dev, "failed to request timer irq");
		goto err_timer_irq;
	}

	/* USB Host interrupt. */
	usb_irq = irq_of_parse_and_map(node, 0);
	if (usb_irq == NO_IRQ) {
		dev_err(dev, "could not get usb irq\n");
		ret = -EINVAL;
		goto err_usb_irq;
	}

	/* Clocks. */
	sprop = of_get_property(node, "fsl,fullspeed-clock", NULL);
	if (sprop) {
		fhci->fullspeed_clk = qe_clock_source(sprop);
		if (fhci->fullspeed_clk == QE_CLK_DUMMY) {
			dev_err(dev, "wrong fullspeed-clock\n");
			ret = -EINVAL;
			goto err_clocks;
		}
	}

	sprop = of_get_property(node, "fsl,lowspeed-clock", NULL);
	if (sprop) {
		fhci->lowspeed_clk = qe_clock_source(sprop);
		if (fhci->lowspeed_clk == QE_CLK_DUMMY) {
			dev_err(dev, "wrong lowspeed-clock\n");
			ret = -EINVAL;
			goto err_clocks;
		}
	}

	if (fhci->fullspeed_clk == QE_CLK_NONE &&
			fhci->lowspeed_clk == QE_CLK_NONE) {
		dev_err(dev, "no clocks specified\n");
		ret = -EINVAL;
		goto err_clocks;
	}

	dev_info(dev, "at 0x%p, irq %d\n", hcd->regs, usb_irq);

	fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);

	/* Start with full-speed, if possible. */
	if (fhci->fullspeed_clk != QE_CLK_NONE) {
		fhci_config_transceiver(fhci, FHCI_PORT_FULL);
		qe_usb_clock_set(fhci->fullspeed_clk, USB_CLOCK);
	} else {
		fhci_config_transceiver(fhci, FHCI_PORT_LOW);
		qe_usb_clock_set(fhci->lowspeed_clk, USB_CLOCK >> 3);
	}

	/* Clear and disable any pending interrupts. */
	out_be16(&fhci->regs->usb_event, 0xffff);
	out_be16(&fhci->regs->usb_mask, 0);

	ret = usb_add_hcd(hcd, usb_irq, 0);
	if (ret < 0)
		goto err_add_hcd;

	fhci_dfs_create(fhci);

	return 0;

err_add_hcd:
err_clocks:
	irq_dispose_mapping(usb_irq);
err_usb_irq:
	free_irq(fhci->timer->irq, hcd);
err_timer_irq:
	gtm_put_timer16(fhci->timer);
err_get_timer:
err_pins:
	while (--j >= 0)
		qe_pin_free(fhci->pins[j]);
err_gpios:
	while (--i >= 0) {
		if (gpio_is_valid(fhci->gpios[i]))
			gpio_free(fhci->gpios[i]);
	}
	cpm_muram_free(pram_addr);
err_pram:
	iounmap(hcd->regs);
err_regs:
	usb_put_hcd(hcd);
	return ret;
}

/* Tear down everything acquired by of_fhci_probe(), in reverse order */
static int __devexit fhci_remove(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
	int i;
	int j;

	usb_remove_hcd(hcd);
	free_irq(fhci->timer->irq, hcd);
	gtm_put_timer16(fhci->timer);
	cpm_muram_free(cpm_muram_offset(fhci->pram));
	for (i = 0; i < NUM_GPIOS; i++) {
		if (!gpio_is_valid(fhci->gpios[i]))
			continue;
		gpio_free(fhci->gpios[i]);
	}
	for (j = 0; j < NUM_PINS; j++)
		qe_pin_free(fhci->pins[j]);
	fhci_dfs_destroy(fhci);
	usb_put_hcd(hcd);
	return 0;
}

static int __devexit of_fhci_remove(struct platform_device *ofdev)
{
	return fhci_remove(&ofdev->dev);
}

static const struct of_device_id of_fhci_match[] = {
	{ .compatible = "fsl,mpc8323-qe-usb", },
	{},
};
MODULE_DEVICE_TABLE(of, of_fhci_match);

static struct platform_driver of_fhci_driver = {
	.driver = {
		.name = "fsl,usb-fhci",
		.owner = THIS_MODULE,
		.of_match_table = of_fhci_match,
	},
	.probe		= of_fhci_probe,
	.remove		= __devexit_p(of_fhci_remove),
};

module_platform_driver(of_fhci_driver);

MODULE_DESCRIPTION("USB Freescale Host Controller Interface Driver");
MODULE_AUTHOR("Shlomi Gridish <gridish@freescale.com>, "
	      "Jerry Huang <Chang-Ming.Huang@freescale.com>, "
	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL");
gpl-2.0
droidroidz/USCC_R970_kernel
sound/soc/davinci/davinci-pcm.c
4945
25941
/* * ALSA PCM interface for the TI DAVINCI processor * * Author: Vladimir Barinov, <vbarinov@embeddedalley.com> * Copyright: (C) 2007 MontaVista Software, Inc., <source@mvista.com> * added SRAM ping/pong (C) 2008 Troy Kisky <troy.kisky@boundarydevices.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <asm/dma.h> #include <mach/edma.h> #include <mach/sram.h> #include "davinci-pcm.h" #ifdef DEBUG static void print_buf_info(int slot, char *name) { struct edmacc_param p; if (slot < 0) return; edma_read_slot(slot, &p); printk(KERN_DEBUG "%s: 0x%x, opt=%x, src=%x, a_b_cnt=%x dst=%x\n", name, slot, p.opt, p.src, p.a_b_cnt, p.dst); printk(KERN_DEBUG " src_dst_bidx=%x link_bcntrld=%x src_dst_cidx=%x ccnt=%x\n", p.src_dst_bidx, p.link_bcntrld, p.src_dst_cidx, p.ccnt); } #else static void print_buf_info(int slot, char *name) { } #endif #define DAVINCI_PCM_FMTBITS (\ SNDRV_PCM_FMTBIT_S8 |\ SNDRV_PCM_FMTBIT_U8 |\ SNDRV_PCM_FMTBIT_S16_LE |\ SNDRV_PCM_FMTBIT_S16_BE |\ SNDRV_PCM_FMTBIT_U16_LE |\ SNDRV_PCM_FMTBIT_U16_BE |\ SNDRV_PCM_FMTBIT_S24_LE |\ SNDRV_PCM_FMTBIT_S24_BE |\ SNDRV_PCM_FMTBIT_U24_LE |\ SNDRV_PCM_FMTBIT_U24_BE |\ SNDRV_PCM_FMTBIT_S32_LE |\ SNDRV_PCM_FMTBIT_S32_BE |\ SNDRV_PCM_FMTBIT_U32_LE |\ SNDRV_PCM_FMTBIT_U32_BE) static struct snd_pcm_hardware pcm_hardware_playback = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME| SNDRV_PCM_INFO_BATCH), .formats = DAVINCI_PCM_FMTBITS, .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | 
SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_KNOT), .rate_min = 8000, .rate_max = 96000, .channels_min = 2, .channels_max = 384, .buffer_bytes_max = 128 * 1024, .period_bytes_min = 32, .period_bytes_max = 8 * 1024, .periods_min = 16, .periods_max = 255, .fifo_size = 0, }; static struct snd_pcm_hardware pcm_hardware_capture = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_BATCH), .formats = DAVINCI_PCM_FMTBITS, .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_KNOT), .rate_min = 8000, .rate_max = 96000, .channels_min = 2, .channels_max = 384, .buffer_bytes_max = 128 * 1024, .period_bytes_min = 32, .period_bytes_max = 8 * 1024, .periods_min = 16, .periods_max = 255, .fifo_size = 0, }; /* * How ping/pong works.... 
* * Playback: * ram_params - copys 2*ping_size from start of SDRAM to iram, * links to ram_link2 * ram_link2 - copys rest of SDRAM to iram in ping_size units, * links to ram_link * ram_link - copys entire SDRAM to iram in ping_size uints, * links to self * * asp_params - same as asp_link[0] * asp_link[0] - copys from lower half of iram to asp port * links to asp_link[1], triggers iram copy event on completion * asp_link[1] - copys from upper half of iram to asp port * links to asp_link[0], triggers iram copy event on completion * triggers interrupt only needed to let upper SOC levels update position * in stream on completion * * When playback is started: * ram_params started * asp_params started * * Capture: * ram_params - same as ram_link, * links to ram_link * ram_link - same as playback * links to self * * asp_params - same as playback * asp_link[0] - same as playback * asp_link[1] - same as playback * * When capture is started: * asp_params started */ struct davinci_runtime_data { spinlock_t lock; int period; /* current DMA period */ int asp_channel; /* Master DMA channel */ int asp_link[2]; /* asp parameter link channel, ping/pong */ struct davinci_pcm_dma_params *params; /* DMA params */ int ram_channel; int ram_link; int ram_link2; struct edmacc_param asp_params; struct edmacc_param ram_params; }; static void davinci_pcm_period_elapsed(struct snd_pcm_substream *substream) { struct davinci_runtime_data *prtd = substream->runtime->private_data; struct snd_pcm_runtime *runtime = substream->runtime; prtd->period++; if (unlikely(prtd->period >= runtime->periods)) prtd->period = 0; } static void davinci_pcm_period_reset(struct snd_pcm_substream *substream) { struct davinci_runtime_data *prtd = substream->runtime->private_data; prtd->period = 0; } /* * Not used with ping/pong */ static void davinci_pcm_enqueue_dma(struct snd_pcm_substream *substream) { struct davinci_runtime_data *prtd = substream->runtime->private_data; struct snd_pcm_runtime *runtime = 
substream->runtime; unsigned int period_size; unsigned int dma_offset; dma_addr_t dma_pos; dma_addr_t src, dst; unsigned short src_bidx, dst_bidx; unsigned short src_cidx, dst_cidx; unsigned int data_type; unsigned short acnt; unsigned int count; unsigned int fifo_level; period_size = snd_pcm_lib_period_bytes(substream); dma_offset = prtd->period * period_size; dma_pos = runtime->dma_addr + dma_offset; fifo_level = prtd->params->fifo_level; pr_debug("davinci_pcm: audio_set_dma_params_play channel = %d " "dma_ptr = %x period_size=%x\n", prtd->asp_link[0], dma_pos, period_size); data_type = prtd->params->data_type; count = period_size / data_type; if (fifo_level) count /= fifo_level; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { src = dma_pos; dst = prtd->params->dma_addr; src_bidx = data_type; dst_bidx = 0; src_cidx = data_type * fifo_level; dst_cidx = 0; } else { src = prtd->params->dma_addr; dst = dma_pos; src_bidx = 0; dst_bidx = data_type; src_cidx = 0; dst_cidx = data_type * fifo_level; } acnt = prtd->params->acnt; edma_set_src(prtd->asp_link[0], src, INCR, W8BIT); edma_set_dest(prtd->asp_link[0], dst, INCR, W8BIT); edma_set_src_index(prtd->asp_link[0], src_bidx, src_cidx); edma_set_dest_index(prtd->asp_link[0], dst_bidx, dst_cidx); if (!fifo_level) edma_set_transfer_params(prtd->asp_link[0], acnt, count, 1, 0, ASYNC); else edma_set_transfer_params(prtd->asp_link[0], acnt, fifo_level, count, fifo_level, ABSYNC); } static void davinci_pcm_dma_irq(unsigned link, u16 ch_status, void *data) { struct snd_pcm_substream *substream = data; struct davinci_runtime_data *prtd = substream->runtime->private_data; print_buf_info(prtd->ram_channel, "i ram_channel"); pr_debug("davinci_pcm: link=%d, status=0x%x\n", link, ch_status); if (unlikely(ch_status != DMA_COMPLETE)) return; if (snd_pcm_running(substream)) { spin_lock(&prtd->lock); if (prtd->ram_channel < 0) { /* No ping/pong must fix up link dma data*/ davinci_pcm_enqueue_dma(substream); } 
davinci_pcm_period_elapsed(substream); spin_unlock(&prtd->lock); snd_pcm_period_elapsed(substream); } } static int allocate_sram(struct snd_pcm_substream *substream, unsigned size, struct snd_pcm_hardware *ppcm) { struct snd_dma_buffer *buf = &substream->dma_buffer; struct snd_dma_buffer *iram_dma = NULL; dma_addr_t iram_phys = 0; void *iram_virt = NULL; if (buf->private_data || !size) return 0; ppcm->period_bytes_max = size; iram_virt = sram_alloc(size, &iram_phys); if (!iram_virt) goto exit1; iram_dma = kzalloc(sizeof(*iram_dma), GFP_KERNEL); if (!iram_dma) goto exit2; iram_dma->area = iram_virt; iram_dma->addr = iram_phys; memset(iram_dma->area, 0, size); iram_dma->bytes = size; buf->private_data = iram_dma; return 0; exit2: if (iram_virt) sram_free(iram_virt, size); exit1: return -ENOMEM; } /* * Only used with ping/pong. * This is called after runtime->dma_addr, period_bytes and data_type are valid */ static int ping_pong_dma_setup(struct snd_pcm_substream *substream) { unsigned short ram_src_cidx, ram_dst_cidx; struct snd_pcm_runtime *runtime = substream->runtime; struct davinci_runtime_data *prtd = runtime->private_data; struct snd_dma_buffer *iram_dma = (struct snd_dma_buffer *)substream->dma_buffer.private_data; struct davinci_pcm_dma_params *params = prtd->params; unsigned int data_type = params->data_type; unsigned int acnt = params->acnt; /* divide by 2 for ping/pong */ unsigned int ping_size = snd_pcm_lib_period_bytes(substream) >> 1; unsigned int fifo_level = prtd->params->fifo_level; unsigned int count; if ((data_type == 0) || (data_type > 4)) { printk(KERN_ERR "%s: data_type=%i\n", __func__, data_type); return -EINVAL; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { dma_addr_t asp_src_pong = iram_dma->addr + ping_size; ram_src_cidx = ping_size; ram_dst_cidx = -ping_size; edma_set_src(prtd->asp_link[1], asp_src_pong, INCR, W8BIT); edma_set_src_index(prtd->asp_link[0], data_type, data_type * fifo_level); edma_set_src_index(prtd->asp_link[1], 
data_type, data_type * fifo_level); edma_set_src(prtd->ram_link, runtime->dma_addr, INCR, W32BIT); } else { dma_addr_t asp_dst_pong = iram_dma->addr + ping_size; ram_src_cidx = -ping_size; ram_dst_cidx = ping_size; edma_set_dest(prtd->asp_link[1], asp_dst_pong, INCR, W8BIT); edma_set_dest_index(prtd->asp_link[0], data_type, data_type * fifo_level); edma_set_dest_index(prtd->asp_link[1], data_type, data_type * fifo_level); edma_set_dest(prtd->ram_link, runtime->dma_addr, INCR, W32BIT); } if (!fifo_level) { count = ping_size / data_type; edma_set_transfer_params(prtd->asp_link[0], acnt, count, 1, 0, ASYNC); edma_set_transfer_params(prtd->asp_link[1], acnt, count, 1, 0, ASYNC); } else { count = ping_size / (data_type * fifo_level); edma_set_transfer_params(prtd->asp_link[0], acnt, fifo_level, count, fifo_level, ABSYNC); edma_set_transfer_params(prtd->asp_link[1], acnt, fifo_level, count, fifo_level, ABSYNC); } edma_set_src_index(prtd->ram_link, ping_size, ram_src_cidx); edma_set_dest_index(prtd->ram_link, ping_size, ram_dst_cidx); edma_set_transfer_params(prtd->ram_link, ping_size, 2, runtime->periods, 2, ASYNC); /* init master params */ edma_read_slot(prtd->asp_link[0], &prtd->asp_params); edma_read_slot(prtd->ram_link, &prtd->ram_params); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { struct edmacc_param p_ram; /* Copy entire iram buffer before playback started */ prtd->ram_params.a_b_cnt = (1 << 16) | (ping_size << 1); /* 0 dst_bidx */ prtd->ram_params.src_dst_bidx = (ping_size << 1); /* 0 dst_cidx */ prtd->ram_params.src_dst_cidx = (ping_size << 1); prtd->ram_params.ccnt = 1; /* Skip 1st period */ edma_read_slot(prtd->ram_link, &p_ram); p_ram.src += (ping_size << 1); p_ram.ccnt -= 1; edma_write_slot(prtd->ram_link2, &p_ram); /* * When 1st started, ram -> iram dma channel will fill the * entire iram. Then, whenever a ping/pong asp buffer finishes, * 1/2 iram will be filled. 
*/ prtd->ram_params.link_bcntrld = EDMA_CHAN_SLOT(prtd->ram_link2) << 5; } return 0; } /* 1 asp tx or rx channel using 2 parameter channels * 1 ram to/from iram channel using 1 parameter channel * * Playback * ram copy channel kicks off first, * 1st ram copy of entire iram buffer completion kicks off asp channel * asp tcc always kicks off ram copy of 1/2 iram buffer * * Record * asp channel starts, tcc kicks off ram copy */ static int request_ping_pong(struct snd_pcm_substream *substream, struct davinci_runtime_data *prtd, struct snd_dma_buffer *iram_dma) { dma_addr_t asp_src_ping; dma_addr_t asp_dst_ping; int ret; struct davinci_pcm_dma_params *params = prtd->params; /* Request ram master channel */ ret = prtd->ram_channel = edma_alloc_channel(EDMA_CHANNEL_ANY, davinci_pcm_dma_irq, substream, prtd->params->ram_chan_q); if (ret < 0) goto exit1; /* Request ram link channel */ ret = prtd->ram_link = edma_alloc_slot( EDMA_CTLR(prtd->ram_channel), EDMA_SLOT_ANY); if (ret < 0) goto exit2; ret = prtd->asp_link[1] = edma_alloc_slot( EDMA_CTLR(prtd->asp_channel), EDMA_SLOT_ANY); if (ret < 0) goto exit3; prtd->ram_link2 = -1; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ret = prtd->ram_link2 = edma_alloc_slot( EDMA_CTLR(prtd->ram_channel), EDMA_SLOT_ANY); if (ret < 0) goto exit4; } /* circle ping-pong buffers */ edma_link(prtd->asp_link[0], prtd->asp_link[1]); edma_link(prtd->asp_link[1], prtd->asp_link[0]); /* circle ram buffers */ edma_link(prtd->ram_link, prtd->ram_link); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { asp_src_ping = iram_dma->addr; asp_dst_ping = params->dma_addr; /* fifo */ } else { asp_src_ping = params->dma_addr; /* fifo */ asp_dst_ping = iram_dma->addr; } /* ping */ edma_set_src(prtd->asp_link[0], asp_src_ping, INCR, W16BIT); edma_set_dest(prtd->asp_link[0], asp_dst_ping, INCR, W16BIT); edma_set_src_index(prtd->asp_link[0], 0, 0); edma_set_dest_index(prtd->asp_link[0], 0, 0); edma_read_slot(prtd->asp_link[0], &prtd->asp_params); 
prtd->asp_params.opt &= ~(TCCMODE | EDMA_TCC(0x3f) | TCINTEN); prtd->asp_params.opt |= TCCHEN | EDMA_TCC(prtd->ram_channel & 0x3f); edma_write_slot(prtd->asp_link[0], &prtd->asp_params); /* pong */ edma_set_src(prtd->asp_link[1], asp_src_ping, INCR, W16BIT); edma_set_dest(prtd->asp_link[1], asp_dst_ping, INCR, W16BIT); edma_set_src_index(prtd->asp_link[1], 0, 0); edma_set_dest_index(prtd->asp_link[1], 0, 0); edma_read_slot(prtd->asp_link[1], &prtd->asp_params); prtd->asp_params.opt &= ~(TCCMODE | EDMA_TCC(0x3f)); /* interrupt after every pong completion */ prtd->asp_params.opt |= TCINTEN | TCCHEN | EDMA_TCC(prtd->ram_channel & 0x3f); edma_write_slot(prtd->asp_link[1], &prtd->asp_params); /* ram */ edma_set_src(prtd->ram_link, iram_dma->addr, INCR, W32BIT); edma_set_dest(prtd->ram_link, iram_dma->addr, INCR, W32BIT); pr_debug("%s: audio dma channels/slots in use for ram:%u %u %u," "for asp:%u %u %u\n", __func__, prtd->ram_channel, prtd->ram_link, prtd->ram_link2, prtd->asp_channel, prtd->asp_link[0], prtd->asp_link[1]); return 0; exit4: edma_free_channel(prtd->asp_link[1]); prtd->asp_link[1] = -1; exit3: edma_free_channel(prtd->ram_link); prtd->ram_link = -1; exit2: edma_free_channel(prtd->ram_channel); prtd->ram_channel = -1; exit1: return ret; } static int davinci_pcm_dma_request(struct snd_pcm_substream *substream) { struct snd_dma_buffer *iram_dma; struct davinci_runtime_data *prtd = substream->runtime->private_data; struct davinci_pcm_dma_params *params = prtd->params; int ret; if (!params) return -ENODEV; /* Request asp master DMA channel */ ret = prtd->asp_channel = edma_alloc_channel(params->channel, davinci_pcm_dma_irq, substream, prtd->params->asp_chan_q); if (ret < 0) goto exit1; /* Request asp link channels */ ret = prtd->asp_link[0] = edma_alloc_slot( EDMA_CTLR(prtd->asp_channel), EDMA_SLOT_ANY); if (ret < 0) goto exit2; iram_dma = (struct snd_dma_buffer *)substream->dma_buffer.private_data; if (iram_dma) { if (request_ping_pong(substream, prtd, 
iram_dma) == 0) return 0; printk(KERN_WARNING "%s: dma channel allocation failed," "not using sram\n", __func__); } /* Issue transfer completion IRQ when the channel completes a * transfer, then always reload from the same slot (by a kind * of loopback link). The completion IRQ handler will update * the reload slot with a new buffer. * * REVISIT save p_ram here after setting up everything except * the buffer and its length (ccnt) ... use it as a template * so davinci_pcm_enqueue_dma() takes less time in IRQ. */ edma_read_slot(prtd->asp_link[0], &prtd->asp_params); prtd->asp_params.opt |= TCINTEN | EDMA_TCC(EDMA_CHAN_SLOT(prtd->asp_channel)); prtd->asp_params.link_bcntrld = EDMA_CHAN_SLOT(prtd->asp_link[0]) << 5; edma_write_slot(prtd->asp_link[0], &prtd->asp_params); return 0; exit2: edma_free_channel(prtd->asp_channel); prtd->asp_channel = -1; exit1: return ret; } static int davinci_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct davinci_runtime_data *prtd = substream->runtime->private_data; int ret = 0; spin_lock(&prtd->lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: edma_start(prtd->asp_channel); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && prtd->ram_channel >= 0) { /* copy 1st iram buffer */ edma_start(prtd->ram_channel); } break; case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: edma_resume(prtd->asp_channel); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: edma_pause(prtd->asp_channel); break; default: ret = -EINVAL; break; } spin_unlock(&prtd->lock); return ret; } static int davinci_pcm_prepare(struct snd_pcm_substream *substream) { struct davinci_runtime_data *prtd = substream->runtime->private_data; davinci_pcm_period_reset(substream); if (prtd->ram_channel >= 0) { int ret = ping_pong_dma_setup(substream); if (ret < 0) return ret; edma_write_slot(prtd->ram_channel, &prtd->ram_params); edma_write_slot(prtd->asp_channel, &prtd->asp_params); 
print_buf_info(prtd->ram_channel, "ram_channel"); print_buf_info(prtd->ram_link, "ram_link"); print_buf_info(prtd->ram_link2, "ram_link2"); print_buf_info(prtd->asp_channel, "asp_channel"); print_buf_info(prtd->asp_link[0], "asp_link[0]"); print_buf_info(prtd->asp_link[1], "asp_link[1]"); /* * There is a phase offset of 2 periods between the position * used by dma setup and the position reported in the pointer * function. * * The phase offset, when not using ping-pong buffers, is due to * the two consecutive calls to davinci_pcm_enqueue_dma() below. * * Whereas here, with ping-pong buffers, the phase is due to * there being an entire buffer transfer complete before the * first dma completion event triggers davinci_pcm_dma_irq(). */ davinci_pcm_period_elapsed(substream); davinci_pcm_period_elapsed(substream); return 0; } davinci_pcm_enqueue_dma(substream); davinci_pcm_period_elapsed(substream); /* Copy self-linked parameter RAM entry into master channel */ edma_read_slot(prtd->asp_link[0], &prtd->asp_params); edma_write_slot(prtd->asp_channel, &prtd->asp_params); davinci_pcm_enqueue_dma(substream); davinci_pcm_period_elapsed(substream); return 0; } static snd_pcm_uframes_t davinci_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct davinci_runtime_data *prtd = runtime->private_data; unsigned int offset; int asp_count; unsigned int period_size = snd_pcm_lib_period_bytes(substream); /* * There is a phase offset of 2 periods between the position used by dma * setup and the position reported in the pointer function. Either +2 in * the dma setup or -2 here in the pointer function (with wrapping, * both) accounts for this offset -- choose the latter since it makes * the first-time setup clearer. 
*/ spin_lock(&prtd->lock); asp_count = prtd->period - 2; spin_unlock(&prtd->lock); if (asp_count < 0) asp_count += runtime->periods; asp_count *= period_size; offset = bytes_to_frames(runtime, asp_count); if (offset >= runtime->buffer_size) offset = 0; return offset; } static int davinci_pcm_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct davinci_runtime_data *prtd; struct snd_pcm_hardware *ppcm; int ret = 0; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct davinci_pcm_dma_params *pa; struct davinci_pcm_dma_params *params; pa = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); if (!pa) return -ENODEV; params = &pa[substream->stream]; ppcm = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? &pcm_hardware_playback : &pcm_hardware_capture; allocate_sram(substream, params->sram_size, ppcm); snd_soc_set_runtime_hwparams(substream, ppcm); /* ensure that buffer size is a multiple of period size */ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) return ret; prtd = kzalloc(sizeof(struct davinci_runtime_data), GFP_KERNEL); if (prtd == NULL) return -ENOMEM; spin_lock_init(&prtd->lock); prtd->params = params; prtd->asp_channel = -1; prtd->asp_link[0] = prtd->asp_link[1] = -1; prtd->ram_channel = -1; prtd->ram_link = -1; prtd->ram_link2 = -1; runtime->private_data = prtd; ret = davinci_pcm_dma_request(substream); if (ret) { printk(KERN_ERR "davinci_pcm: Failed to get dma channels\n"); kfree(prtd); } return ret; } static int davinci_pcm_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct davinci_runtime_data *prtd = runtime->private_data; if (prtd->ram_channel >= 0) edma_stop(prtd->ram_channel); if (prtd->asp_channel >= 0) edma_stop(prtd->asp_channel); if (prtd->asp_link[0] >= 0) edma_unlink(prtd->asp_link[0]); if (prtd->asp_link[1] >= 0) edma_unlink(prtd->asp_link[1]); if (prtd->ram_link >= 0) 
edma_unlink(prtd->ram_link); if (prtd->asp_link[0] >= 0) edma_free_slot(prtd->asp_link[0]); if (prtd->asp_link[1] >= 0) edma_free_slot(prtd->asp_link[1]); if (prtd->asp_channel >= 0) edma_free_channel(prtd->asp_channel); if (prtd->ram_link >= 0) edma_free_slot(prtd->ram_link); if (prtd->ram_link2 >= 0) edma_free_slot(prtd->ram_link2); if (prtd->ram_channel >= 0) edma_free_channel(prtd->ram_channel); kfree(prtd); return 0; } static int davinci_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int davinci_pcm_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static int davinci_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { struct snd_pcm_runtime *runtime = substream->runtime; return dma_mmap_writecombine(substream->pcm->card->dev, vma, runtime->dma_area, runtime->dma_addr, runtime->dma_bytes); } static struct snd_pcm_ops davinci_pcm_ops = { .open = davinci_pcm_open, .close = davinci_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = davinci_pcm_hw_params, .hw_free = davinci_pcm_hw_free, .prepare = davinci_pcm_prepare, .trigger = davinci_pcm_trigger, .pointer = davinci_pcm_pointer, .mmap = davinci_pcm_mmap, }; static int davinci_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream, size_t size) { struct snd_pcm_substream *substream = pcm->streams[stream].substream; struct snd_dma_buffer *buf = &substream->dma_buffer; buf->dev.type = SNDRV_DMA_TYPE_DEV; buf->dev.dev = pcm->card->dev; buf->private_data = NULL; buf->area = dma_alloc_writecombine(pcm->card->dev, size, &buf->addr, GFP_KERNEL); pr_debug("davinci_pcm: preallocate_dma_buffer: area=%p, addr=%p, " "size=%d\n", (void *) buf->area, (void *) buf->addr, size); if (!buf->area) return -ENOMEM; buf->bytes = size; return 0; } static void davinci_pcm_free(struct snd_pcm *pcm) { struct snd_pcm_substream *substream; struct 
snd_dma_buffer *buf; int stream; for (stream = 0; stream < 2; stream++) { struct snd_dma_buffer *iram_dma; substream = pcm->streams[stream].substream; if (!substream) continue; buf = &substream->dma_buffer; if (!buf->area) continue; dma_free_writecombine(pcm->card->dev, buf->bytes, buf->area, buf->addr); buf->area = NULL; iram_dma = buf->private_data; if (iram_dma) { sram_free(iram_dma->area, iram_dma->bytes); kfree(iram_dma); } } } static u64 davinci_pcm_dmamask = DMA_BIT_MASK(32); static int davinci_pcm_new(struct snd_soc_pcm_runtime *rtd) { struct snd_card *card = rtd->card->snd_card; struct snd_pcm *pcm = rtd->pcm; int ret; if (!card->dev->dma_mask) card->dev->dma_mask = &davinci_pcm_dmamask; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = DMA_BIT_MASK(32); if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { ret = davinci_pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_PLAYBACK, pcm_hardware_playback.buffer_bytes_max); if (ret) return ret; } if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) { ret = davinci_pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_CAPTURE, pcm_hardware_capture.buffer_bytes_max); if (ret) return ret; } return 0; } static struct snd_soc_platform_driver davinci_soc_platform = { .ops = &davinci_pcm_ops, .pcm_new = davinci_pcm_new, .pcm_free = davinci_pcm_free, }; static int __devinit davinci_soc_platform_probe(struct platform_device *pdev) { return snd_soc_register_platform(&pdev->dev, &davinci_soc_platform); } static int __devexit davinci_soc_platform_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static struct platform_driver davinci_pcm_driver = { .driver = { .name = "davinci-pcm-audio", .owner = THIS_MODULE, }, .probe = davinci_soc_platform_probe, .remove = __devexit_p(davinci_soc_platform_remove), }; module_platform_driver(davinci_pcm_driver); MODULE_AUTHOR("Vladimir Barinov"); MODULE_DESCRIPTION("TI DAVINCI PCM DMA module"); MODULE_LICENSE("GPL");
gpl-2.0
kjjjnob/android_kernel_lge_g3
arch/mips/jz4740/gpio.c
4945
13857
/*
 *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
 *  JZ4740 platform GPIO support
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/mach-jz4740/base.h>

#include "irq.h"

/* First GPIO number of each 32-pin bank. */
#define JZ4740_GPIO_BASE_A (32*0)
#define JZ4740_GPIO_BASE_B (32*1)
#define JZ4740_GPIO_BASE_C (32*2)
#define JZ4740_GPIO_BASE_D (32*3)

/* Number of usable pins per bank (bank C exposes only 31). */
#define JZ4740_GPIO_NUM_A 32
#define JZ4740_GPIO_NUM_B 32
#define JZ4740_GPIO_NUM_C 31
#define JZ4740_GPIO_NUM_D 32

/* First virtual IRQ number of each bank. */
#define JZ4740_IRQ_GPIO_BASE_A (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_A)
#define JZ4740_IRQ_GPIO_BASE_B (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_B)
#define JZ4740_IRQ_GPIO_BASE_C (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_C)
#define JZ4740_IRQ_GPIO_BASE_D (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_D)

/*
 * Per-bank register offsets.  Most registers come in value/SET/CLEAR
 * triplets so individual bits can be changed without a read-modify-write.
 */
#define JZ_REG_GPIO_PIN 0x00
#define JZ_REG_GPIO_DATA 0x10
#define JZ_REG_GPIO_DATA_SET 0x14
#define JZ_REG_GPIO_DATA_CLEAR 0x18
#define JZ_REG_GPIO_MASK 0x20
#define JZ_REG_GPIO_MASK_SET 0x24
#define JZ_REG_GPIO_MASK_CLEAR 0x28
#define JZ_REG_GPIO_PULL 0x30
#define JZ_REG_GPIO_PULL_SET 0x34
#define JZ_REG_GPIO_PULL_CLEAR 0x38
#define JZ_REG_GPIO_FUNC 0x40
#define JZ_REG_GPIO_FUNC_SET 0x44
#define JZ_REG_GPIO_FUNC_CLEAR 0x48
#define JZ_REG_GPIO_SELECT 0x50
#define JZ_REG_GPIO_SELECT_SET 0x54
#define JZ_REG_GPIO_SELECT_CLEAR 0x58
#define JZ_REG_GPIO_DIRECTION 0x60
#define JZ_REG_GPIO_DIRECTION_SET 0x64
#define JZ_REG_GPIO_DIRECTION_CLEAR 0x68
#define JZ_REG_GPIO_TRIGGER 0x70
#define JZ_REG_GPIO_TRIGGER_SET 0x74
#define JZ_REG_GPIO_TRIGGER_CLEAR 0x78
#define JZ_REG_GPIO_FLAG 0x80
/* Writing a bit to offset 0x14 clears the corresponding irq flag. */
#define JZ_REG_GPIO_FLAG_CLEAR 0x14

/* NOTE(review): macro argument 'gpio' is unparenthesized in the BIT()
 * expansion; callers only pass plain identifiers, but beware expressions. */
#define GPIO_TO_BIT(gpio) BIT(gpio & 0x1f)
#define GPIO_TO_REG(gpio, reg) (gpio_to_jz_gpio_chip(gpio)->base + (reg))
#define CHIP_TO_REG(chip, reg) (gpio_chip_to_jz_gpio_chip(chip)->base + (reg))

struct jz_gpio_chip {
	unsigned int irq;		/* parent (chained) interrupt line */
	unsigned int irq_base;		/* first virtual irq of this bank */
	uint32_t edge_trigger_both;	/* pins configured IRQ_TYPE_EDGE_BOTH */

	void __iomem *base;		/* MMIO base, set up in chip_init */

	struct gpio_chip gpio_chip;
};

static struct jz_gpio_chip jz4740_gpio_chips[];

/* Map a global GPIO number to its owning 32-pin bank. */
static inline struct jz_gpio_chip *gpio_to_jz_gpio_chip(unsigned int gpio)
{
	return &jz4740_gpio_chips[gpio >> 5];
}

static inline struct jz_gpio_chip *gpio_chip_to_jz_gpio_chip(struct gpio_chip *gpio_chip)
{
	return container_of(gpio_chip, struct jz_gpio_chip, gpio_chip);
}

/* Recover the bank from irq chip data (stashed in gc->private below). */
static inline struct jz_gpio_chip *irq_to_jz_gpio_chip(struct irq_data *data)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
	return gc->private;
}

/* Write the pin's bit to one of the SET/CLEAR registers of its bank. */
static inline void jz_gpio_write_bit(unsigned int gpio, unsigned int reg)
{
	writel(GPIO_TO_BIT(gpio), GPIO_TO_REG(gpio, reg));
}

/*
 * Select the pin function: plain GPIO (JZ_GPIO_FUNC_NONE) or one of the
 * alternate functions 1-3, encoded via the FUNC/SELECT/TRIGGER bits.
 */
int jz_gpio_set_function(int gpio, enum jz_gpio_function function)
{
	if (function == JZ_GPIO_FUNC_NONE) {
		jz_gpio_write_bit(gpio, JZ_REG_GPIO_FUNC_CLEAR);
		jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_CLEAR);
		jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_CLEAR);
	} else {
		jz_gpio_write_bit(gpio, JZ_REG_GPIO_FUNC_SET);
		jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_CLEAR);
		switch (function) {
		case JZ_GPIO_FUNC1:
			jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_CLEAR);
			break;
		case JZ_GPIO_FUNC3:
			jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_SET);
			/* fall through: FUNC3 also needs the SELECT bit */
		case JZ_GPIO_FUNC2:
			jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_SET);
			break;
		default:
			BUG();
			break;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(jz_gpio_set_function);

/*
 * Request a list of GPIOs and apply their functions.  On failure, all
 * previously acquired entries are released again (in reverse order).
 */
int jz_gpio_bulk_request(const struct jz_gpio_bulk_request *request, size_t num)
{
	size_t i;
	int ret;

	for (i = 0; i < num; ++i, ++request) {
		ret = gpio_request(request->gpio, request->name);
		if (ret)
			goto err;
		jz_gpio_set_function(request->gpio, request->function);
	}

	return 0;

err:
	/* unwind the i entries that were successfully requested */
	for (--request; i > 0; --i, --request) {
		gpio_free(request->gpio);
		jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_request);

/* Release a list of GPIOs and put them back into plain-GPIO mode. */
void jz_gpio_bulk_free(const struct jz_gpio_bulk_request *request, size_t num)
{
	size_t i;

	for (i = 0; i < num; ++i, ++request) {
		gpio_free(request->gpio);
		jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
	}

}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_free);

/*
 * Park a list of GPIOs for suspend: plain-GPIO function, input direction,
 * pull disabled (PULL_SET disables the pull-up here, cf. the helpers below).
 */
void jz_gpio_bulk_suspend(const struct jz_gpio_bulk_request *request, size_t num)
{
	size_t i;

	for (i = 0; i < num; ++i, ++request) {
		jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
		jz_gpio_write_bit(request->gpio, JZ_REG_GPIO_DIRECTION_CLEAR);
		jz_gpio_write_bit(request->gpio, JZ_REG_GPIO_PULL_SET);
	}
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_suspend);

/* Restore the requested pin functions after resume. */
void jz_gpio_bulk_resume(const struct jz_gpio_bulk_request *request, size_t num)
{
	size_t i;

	for (i = 0; i < num; ++i, ++request)
		jz_gpio_set_function(request->gpio, request->function);
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_resume);

/* A cleared PULL bit enables the pull-up, a set bit disables it. */
void jz_gpio_enable_pullup(unsigned gpio)
{
	jz_gpio_write_bit(gpio, JZ_REG_GPIO_PULL_CLEAR);
}
EXPORT_SYMBOL_GPL(jz_gpio_enable_pullup);

void jz_gpio_disable_pullup(unsigned gpio)
{
	jz_gpio_write_bit(gpio, JZ_REG_GPIO_PULL_SET);
}
EXPORT_SYMBOL_GPL(jz_gpio_disable_pullup);

/* gpiolib .get: read the pin level ('gpio' is bank-relative here). */
static int jz_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
	return !!(readl(CHIP_TO_REG(chip, JZ_REG_GPIO_PIN)) & BIT(gpio));
}

/* gpiolib .set: write via DATA_SET/DATA_CLEAR without read-modify-write. */
static void jz_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value)
{
	uint32_t __iomem *reg = CHIP_TO_REG(chip, JZ_REG_GPIO_DATA_SET);
	/* reg is uint32_t*, so +1 advances 4 bytes: 0x14 SET -> 0x18 CLEAR */
	reg += !value;
	writel(BIT(gpio), reg);
}

static int jz_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
	int value)
{
	writel(BIT(gpio), CHIP_TO_REG(chip, JZ_REG_GPIO_DIRECTION_SET));
	jz_gpio_set_value(chip, gpio, value);

	return 0;
}

static int jz_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
	writel(BIT(gpio), CHIP_TO_REG(chip, JZ_REG_GPIO_DIRECTION_CLEAR));

	return 0;
}

/* Whole-port helpers: operate on a bank-wide bit mask at once. */
int jz_gpio_port_direction_input(int port, uint32_t mask)
{
	writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_CLEAR));

	return 0;
}
EXPORT_SYMBOL(jz_gpio_port_direction_input);

int jz_gpio_port_direction_output(int port, uint32_t mask)
{
	writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_SET));

	return 0;
}
EXPORT_SYMBOL(jz_gpio_port_direction_output);

void jz_gpio_port_set_value(int port, uint32_t value, uint32_t mask)
{
	/* clear the masked zero bits first, then set the masked one bits */
	writel(~value & mask, GPIO_TO_REG(port, JZ_REG_GPIO_DATA_CLEAR));
	writel(value & mask, GPIO_TO_REG(port, JZ_REG_GPIO_DATA_SET));
}
EXPORT_SYMBOL(jz_gpio_port_set_value);

uint32_t jz_gpio_port_get_value(int port, uint32_t mask)
{
	uint32_t value = readl(GPIO_TO_REG(port, JZ_REG_GPIO_PIN));

	return value & mask;
}
EXPORT_SYMBOL(jz_gpio_port_get_value);

/* GPIO numbers map 1:1 onto the JZ4740_IRQ_GPIO(0)-based irq range. */
int gpio_to_irq(unsigned gpio)
{
	return JZ4740_IRQ_GPIO(0) + gpio;
}
EXPORT_SYMBOL_GPL(gpio_to_irq);

int irq_to_gpio(unsigned irq)
{
	return irq - JZ4740_IRQ_GPIO(0);
}
EXPORT_SYMBOL_GPL(irq_to_gpio);

#define IRQ_TO_BIT(irq) BIT(irq_to_gpio(irq) & 0x1f)

/*
 * For pins configured as IRQ_TYPE_EDGE_BOTH, flip the armed edge to the
 * opposite of the current pin level.  In irq mode this driver (re)uses the
 * DIRECTION bits as the edge selector: SET arms rising, CLEAR arms falling
 * (see jz_gpio_irq_set_type below).
 */
static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq)
{
	uint32_t value;
	void __iomem *reg;
	uint32_t mask = IRQ_TO_BIT(irq);

	if (!(chip->edge_trigger_both & mask))
		return;

	reg = chip->base;

	value = readl(chip->base + JZ_REG_GPIO_PIN);

	if (value & mask)
		reg += JZ_REG_GPIO_DIRECTION_CLEAR;
	else
		reg += JZ_REG_GPIO_DIRECTION_SET;

	writel(mask, reg);
}

/*
 * Chained handler for the bank's parent interrupt: dispatch the highest
 * pending flag bit (one pin per invocation) to its virtual irq.
 */
static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	uint32_t flag;
	unsigned int gpio_irq;
	struct jz_gpio_chip *chip = irq_desc_get_handler_data(desc);

	flag = readl(chip->base + JZ_REG_GPIO_FLAG);
	if (!flag)
		return;

	gpio_irq = chip->irq_base + __fls(flag);

	jz_gpio_check_trigger_both(chip, gpio_irq);

	generic_handle_irq(gpio_irq);
};

static inline void jz_gpio_set_irq_bit(struct irq_data *data, unsigned int reg)
{
	struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
	writel(IRQ_TO_BIT(data->irq), chip->base + reg);
}

static void jz_gpio_irq_unmask(struct irq_data *data)
{
	struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);

	/* re-arm the correct edge first for EDGE_BOTH pins */
	jz_gpio_check_trigger_both(chip, data->irq);

	irq_gc_unmask_enable_reg(data);
};

/* TODO: Check if function is gpio */
static unsigned int jz_gpio_irq_startup(struct irq_data *data)
{
	/* SELECT_SET switches the pin into interrupt mode */
	jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_SET);
	jz_gpio_irq_unmask(data);
	return 0;
}

static void jz_gpio_irq_shutdown(struct irq_data *data)
{
	irq_gc_mask_disable_reg(data);

	/* Set direction to input */
	jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
	jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_CLEAR);
}

/*
 * Program the trigger condition.  IRQ_TYPE_EDGE_BOTH is emulated: the pin
 * is recorded in edge_trigger_both and armed for the edge opposite to its
 * current level; jz_gpio_check_trigger_both re-arms it after each hit.
 */
static int jz_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
	struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
	unsigned int irq = data->irq;

	if (flow_type == IRQ_TYPE_EDGE_BOTH) {
		uint32_t value = readl(chip->base + JZ_REG_GPIO_PIN);
		if (value & IRQ_TO_BIT(irq))
			flow_type = IRQ_TYPE_EDGE_FALLING;
		else
			flow_type = IRQ_TYPE_EDGE_RISING;
		chip->edge_trigger_both |= IRQ_TO_BIT(irq);
	} else {
		chip->edge_trigger_both &= ~IRQ_TO_BIT(irq);
	}

	/* DIRECTION selects rising/high vs falling/low, TRIGGER edge vs level */
	switch (flow_type) {
	case IRQ_TYPE_EDGE_RISING:
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_SET);
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_SET);
		break;
	case IRQ_TYPE_EDGE_FALLING:
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_SET);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_SET);
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_CLEAR);
		break;
	case IRQ_TYPE_LEVEL_LOW:
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_CLEAR);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Propagate per-pin wake state to the bank's parent interrupt. */
static int jz_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);

	irq_gc_set_wake(data, on);
	irq_set_irq_wake(chip->irq, on);

	return 0;
}

#define JZ4740_GPIO_CHIP(_bank) { \
	.irq_base = JZ4740_IRQ_GPIO_BASE_ ## _bank, \
	.gpio_chip = { \
		.label = "Bank " # _bank, \
		.owner = THIS_MODULE, \
		.set = jz_gpio_set_value, \
		.get = jz_gpio_get_value, \
		.direction_output = jz_gpio_direction_output, \
		.direction_input = jz_gpio_direction_input, \
		.base = JZ4740_GPIO_BASE_ ## _bank, \
		.ngpio = JZ4740_GPIO_NUM_ ## _bank, \
	}, \
}

static struct jz_gpio_chip jz4740_gpio_chips[] = {
	JZ4740_GPIO_CHIP(A),
	JZ4740_GPIO_CHIP(B),
	JZ4740_GPIO_CHIP(C),
	JZ4740_GPIO_CHIP(D),
};

/*
 * Map one bank's registers, install the chained demux handler and set up
 * a generic irq chip for its 32 pin irqs, then register with gpiolib.
 * NOTE(review): return values of ioremap/irq_alloc_generic_chip/gpiochip_add
 * are not checked here.
 */
static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	chip->base = ioremap(JZ4740_GPIO_BASE_ADDR + (id * 0x100), 0x100);

	chip->irq = JZ4740_IRQ_INTC_GPIO(id);
	irq_set_handler_data(chip->irq, chip);
	irq_set_chained_handler(chip->irq, jz_gpio_irq_demux_handler);

	gc = irq_alloc_generic_chip(chip->gpio_chip.label, 1, chip->irq_base,
		chip->base, handle_level_irq);

	gc->wake_enabled = IRQ_MSK(chip->gpio_chip.ngpio);
	gc->private = chip;

	ct = gc->chip_types;
	ct->regs.enable = JZ_REG_GPIO_MASK_CLEAR;
	ct->regs.disable = JZ_REG_GPIO_MASK_SET;
	ct->regs.ack = JZ_REG_GPIO_FLAG_CLEAR;

	ct->chip.name = "GPIO";
	ct->chip.irq_mask = irq_gc_mask_disable_reg;
	ct->chip.irq_unmask = jz_gpio_irq_unmask;
	ct->chip.irq_ack = irq_gc_ack_set_bit;
	ct->chip.irq_suspend = jz4740_irq_suspend;
	ct->chip.irq_resume = jz4740_irq_resume;
	ct->chip.irq_startup = jz_gpio_irq_startup;
	ct->chip.irq_shutdown = jz_gpio_irq_shutdown;
	ct->chip.irq_set_type = jz_gpio_irq_set_type;
	ct->chip.irq_set_wake = jz_gpio_irq_set_wake;
	ct->chip.flags = IRQCHIP_SET_TYPE_MASKED;

	irq_setup_generic_chip(gc, IRQ_MSK(chip->gpio_chip.ngpio),
		IRQ_GC_INIT_NESTED_LOCK, 0, IRQ_NOPROBE | IRQ_LEVEL);

	gpiochip_add(&chip->gpio_chip);
}

static int __init jz4740_gpio_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i)
		jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i);

	printk(KERN_INFO "JZ4740 GPIO initialized\n");

	return 0;
}
arch_initcall(jz4740_gpio_init);

#ifdef CONFIG_DEBUG_FS

static inline void gpio_seq_reg(struct seq_file *s, struct jz_gpio_chip *chip,
	const char *name, unsigned int reg)
{
	seq_printf(s, "\t%s: %08x\n", name, readl(chip->base + reg));
}

/* Dump every bank's register set to the debugfs file. */
static int gpio_regs_show(struct seq_file *s, void *unused)
{
	struct jz_gpio_chip *chip = jz4740_gpio_chips;
	int i;

	for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i, ++chip) {
		seq_printf(s, "==GPIO %d==\n", i);
		gpio_seq_reg(s, chip, "Pin", JZ_REG_GPIO_PIN);
		gpio_seq_reg(s, chip, "Data", JZ_REG_GPIO_DATA);
		gpio_seq_reg(s, chip, "Mask", JZ_REG_GPIO_MASK);
		gpio_seq_reg(s, chip, "Pull", JZ_REG_GPIO_PULL);
		gpio_seq_reg(s, chip, "Func", JZ_REG_GPIO_FUNC);
		gpio_seq_reg(s, chip, "Select", JZ_REG_GPIO_SELECT);
		gpio_seq_reg(s, chip, "Direction", JZ_REG_GPIO_DIRECTION);
		gpio_seq_reg(s, chip, "Trigger", JZ_REG_GPIO_TRIGGER);
		gpio_seq_reg(s, chip, "Flag", JZ_REG_GPIO_FLAG);
	}

	return 0;
}

static int gpio_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, gpio_regs_show, NULL);
}

static const struct file_operations gpio_regs_operations = {
	.open		= gpio_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init gpio_debugfs_init(void)
{
	(void) debugfs_create_file("jz_regs_gpio", S_IFREG | S_IRUGO,
				NULL, NULL, &gpio_regs_operations);
	return 0;
}
subsys_initcall(gpio_debugfs_init);

#endif
gpl-2.0
juldiadia/kernel_stock_g3815
drivers/staging/comedi/drivers/adv_pci1723.c
5457
14321
/******************************************************************************* comedi/drivers/pci1723.c COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *******************************************************************************/ /* Driver: adv_pci1723 Description: Advantech PCI-1723 Author: yonggang <rsmgnu@gmail.com>, Ian Abbott <abbotti@mev.co.uk> Devices: [Advantech] PCI-1723 (adv_pci1723) Updated: Mon, 14 Apr 2008 15:12:56 +0100 Status: works Configuration Options: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, the first supported PCI device found will be used. Subdevice 0 is 8-channel AO, 16-bit, range +/- 10 V. Subdevice 1 is 16-channel DIO. The channels are configurable as input or output in 2 groups (0 to 7, 8 to 15). Configuring any channel implicitly configures all channels in the same group. TODO: 1. Add the two milliamp ranges to the AO subdevice (0 to 20 mA, 4 to 20 mA). 2. Read the initial ranges and values of the AO subdevice at start-up instead of reinitializing them. 3. Implement calibration. 
*/ #include "../comedidev.h" #include "comedi_pci.h" #define PCI_VENDOR_ID_ADVANTECH 0x13fe /* Advantech PCI vendor ID */ /* hardware types of the cards */ #define TYPE_PCI1723 0 #define IORANGE_1723 0x2A /* all the registers for the pci1723 board */ #define PCI1723_DA(N) ((N)<<1) /* W: D/A register N (0 to 7) */ #define PCI1723_SYN_SET 0x12 /* synchronized set register */ #define PCI1723_ALL_CHNNELE_SYN_STROBE 0x12 /* synchronized status register */ #define PCI1723_RANGE_CALIBRATION_MODE 0x14 /* range and calibration mode */ #define PCI1723_RANGE_CALIBRATION_STATUS 0x14 /* range and calibration status */ #define PCI1723_CONTROL_CMD_CALIBRATION_FUN 0x16 /* * SADC control command for * calibration function */ #define PCI1723_STATUS_CMD_CALIBRATION_FUN 0x16 /* * SADC control status for * calibration function */ #define PCI1723_CALIBRATION_PARA_STROBE 0x18 /* Calibration parameter strobe */ #define PCI1723_DIGITAL_IO_PORT_SET 0x1A /* Digital I/O port setting */ #define PCI1723_DIGITAL_IO_PORT_MODE 0x1A /* Digital I/O port mode */ #define PCI1723_WRITE_DIGITAL_OUTPUT_CMD 0x1C /* Write digital output command */ #define PCI1723_READ_DIGITAL_INPUT_DATA 0x1C /* Read digital input data */ #define PCI1723_WRITE_CAL_CMD 0x1E /* Write calibration command */ #define PCI1723_READ_CAL_STATUS 0x1E /* Read calibration status */ #define PCI1723_SYN_STROBE 0x20 /* Synchronized strobe */ #define PCI1723_RESET_ALL_CHN_STROBE 0x22 /* Reset all D/A channels strobe */ #define PCI1723_RESET_CAL_CONTROL_STROBE 0x24 /* * Reset the calibration * controller strobe */ #define PCI1723_CHANGE_CHA_OUTPUT_TYPE_STROBE 0x26 /* * Change D/A channels output * type strobe */ #define PCI1723_SELECT_CALIBRATION 0x28 /* Select the calibration Ref_V */ /* static unsigned short pci_list_builded=0; =1 list of card is know */ static const struct comedi_lrange range_pci1723 = { 1, { BIP_RANGE(10) } }; /* * Board descriptions for pci1723 boards. 
 */
struct pci1723_board {
	const char *name;	/* driver/board name */
	int vendor_id;		/* PCI vendor and device ID of card */
	int device_id;
	int iorange;		/* size of the I/O port region */
	char cardtype;
	int n_aochan;		/* num of D/A chans */
	int n_diochan;		/* num of DIO chans */
	int ao_maxdata;		/* resolution of D/A */
	const struct comedi_lrange *rangelist_ao;	/* rangelist for D/A */
};

/* Only one board type is known to this driver. */
static const struct pci1723_board boardtypes[] = {
	{
	 .name = "pci1723",
	 .vendor_id = PCI_VENDOR_ID_ADVANTECH,
	 .device_id = 0x1723,
	 .iorange = IORANGE_1723,
	 .cardtype = TYPE_PCI1723,
	 .n_aochan = 8,
	 .n_diochan = 16,
	 .ao_maxdata = 0xffff,
	 .rangelist_ao = &range_pci1723,
	 },
};

/*
 * This is used by modprobe to translate PCI IDs to drivers.
 * Should only be used for PCI and ISA-PnP devices
 */
static DEFINE_PCI_DEVICE_TABLE(pci1723_pci_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1723) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pci1723_pci_table);

/*
 * The struct comedi_driver structure tells the Comedi core module
 * which functions to call to configure/deconfigure (attach/detach)
 * the board, and also about the kernel module that contains
 * the device code.
 */
static int pci1723_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it);
static int pci1723_detach(struct comedi_device *dev);

#define n_boardtypes (sizeof(boardtypes)/sizeof(struct pci1723_board))

static struct comedi_driver driver_pci1723 = {
	.driver_name = "adv_pci1723",
	.module = THIS_MODULE,
	.attach = pci1723_attach,
	.detach = pci1723_detach,
};

/* This structure is for data unique to this hardware driver. */
struct pci1723_private {
	int valid;		/* card is usable; set once attach succeeds */
	struct pci_dev *pcidev;	/* PCI device claimed in attach */
	unsigned char da_range[8];	/* D/A output range for each channel */

	short ao_data[8];	/* data output buffer (last written values) */
};

/*
 * The following macro to make it easy to access the private structure.
*/ #define devpriv ((struct pci1723_private *)dev->private) #define this_board boardtypes /* * The pci1723 card reset; */ static int pci1723_reset(struct comedi_device *dev) { int i; DPRINTK("adv_pci1723 EDBG: BGN: pci1723_reset(...)\n"); outw(0x01, dev->iobase + PCI1723_SYN_SET); /* set synchronous output mode */ for (i = 0; i < 8; i++) { /* set all outputs to 0V */ devpriv->ao_data[i] = 0x8000; outw(devpriv->ao_data[i], dev->iobase + PCI1723_DA(i)); /* set all ranges to +/- 10V */ devpriv->da_range[i] = 0; outw(((devpriv->da_range[i] << 4) | i), PCI1723_RANGE_CALIBRATION_MODE); } outw(0, dev->iobase + PCI1723_CHANGE_CHA_OUTPUT_TYPE_STROBE); /* update ranges */ outw(0, dev->iobase + PCI1723_SYN_STROBE); /* update outputs */ /* set asynchronous output mode */ outw(0, dev->iobase + PCI1723_SYN_SET); DPRINTK("adv_pci1723 EDBG: END: pci1723_reset(...)\n"); return 0; } static int pci1723_insn_read_ao(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n, chan; chan = CR_CHAN(insn->chanspec); DPRINTK(" adv_PCI1723 DEBUG: pci1723_insn_read_ao() -----\n"); for (n = 0; n < insn->n; n++) data[n] = devpriv->ao_data[chan]; return n; } /* analog data output; */ static int pci1723_ao_write_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n, chan; chan = CR_CHAN(insn->chanspec); DPRINTK("PCI1723: the pci1723_ao_write_winsn() ------\n"); for (n = 0; n < insn->n; n++) { devpriv->ao_data[chan] = data[n]; outw(data[n], dev->iobase + PCI1723_DA(chan)); } return n; } /* digital i/o config/query */ static int pci1723_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int mask; unsigned int bits; unsigned short dio_mode; mask = 1 << CR_CHAN(insn->chanspec); if (mask & 0x00FF) bits = 0x00FF; else bits = 0xFF00; switch (data[0]) { case INSN_CONFIG_DIO_INPUT: s->io_bits &= ~bits; break; case 
INSN_CONFIG_DIO_OUTPUT: s->io_bits |= bits; break; case INSN_CONFIG_DIO_QUERY: data[1] = (s->io_bits & bits) ? COMEDI_OUTPUT : COMEDI_INPUT; return insn->n; default: return -EINVAL; } /* update hardware DIO mode */ dio_mode = 0x0000; /* low byte output, high byte output */ if ((s->io_bits & 0x00FF) == 0) dio_mode |= 0x0001; /* low byte input */ if ((s->io_bits & 0xFF00) == 0) dio_mode |= 0x0002; /* high byte input */ outw(dio_mode, dev->iobase + PCI1723_DIGITAL_IO_PORT_SET); return 1; } /* digital i/o bits read/write */ static int pci1723_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (data[0]) { s->state &= ~data[0]; s->state |= (data[0] & data[1]); outw(s->state, dev->iobase + PCI1723_WRITE_DIGITAL_OUTPUT_CMD); } data[1] = inw(dev->iobase + PCI1723_READ_DIGITAL_INPUT_DATA); return 2; } /* * Attach is called by the Comedi core to configure the driver * for a pci1723 board. */ static int pci1723_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int ret, subdev, n_subdevices; struct pci_dev *pcidev; unsigned int iobase; unsigned char pci_bus, pci_slot, pci_func; int opt_bus, opt_slot; const char *errstr; printk(KERN_ERR "comedi%d: adv_pci1723: board=%s", dev->minor, this_board->name); opt_bus = it->options[0]; opt_slot = it->options[1]; ret = alloc_private(dev, sizeof(struct pci1723_private)); if (ret < 0) { printk(" - Allocation failed!\n"); return -ENOMEM; } /* Look for matching PCI device */ errstr = "not found!"; pcidev = NULL; while (NULL != (pcidev = pci_get_device(PCI_VENDOR_ID_ADVANTECH, this_board->device_id, pcidev))) { /* Found matching vendor/device. */ if (opt_bus || opt_slot) { /* Check bus/slot. */ if (opt_bus != pcidev->bus->number || opt_slot != PCI_SLOT(pcidev->devfn)) continue; /* no match */ } /* * Look for device that isn't in use. * Enable PCI device and request regions. 
*/ if (comedi_pci_enable(pcidev, "adv_pci1723")) { errstr = "failed to enable PCI device and request regions!"; continue; } break; } if (!pcidev) { if (opt_bus || opt_slot) { printk(KERN_ERR " - Card at b:s %d:%d %s\n", opt_bus, opt_slot, errstr); } else { printk(KERN_ERR " - Card %s\n", errstr); } return -EIO; } pci_bus = pcidev->bus->number; pci_slot = PCI_SLOT(pcidev->devfn); pci_func = PCI_FUNC(pcidev->devfn); iobase = pci_resource_start(pcidev, 2); printk(KERN_ERR ", b:s:f=%d:%d:%d, io=0x%4x", pci_bus, pci_slot, pci_func, iobase); dev->iobase = iobase; dev->board_name = this_board->name; devpriv->pcidev = pcidev; n_subdevices = 0; if (this_board->n_aochan) n_subdevices++; if (this_board->n_diochan) n_subdevices++; ret = alloc_subdevices(dev, n_subdevices); if (ret < 0) { printk(" - Allocation failed!\n"); return ret; } pci1723_reset(dev); subdev = 0; if (this_board->n_aochan) { s = dev->subdevices + subdev; dev->write_subdev = s; s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON; s->n_chan = this_board->n_aochan; s->maxdata = this_board->ao_maxdata; s->len_chanlist = this_board->n_aochan; s->range_table = this_board->rangelist_ao; s->insn_write = pci1723_ao_write_winsn; s->insn_read = pci1723_insn_read_ao; /* read DIO config */ switch (inw(dev->iobase + PCI1723_DIGITAL_IO_PORT_MODE) & 0x03) { case 0x00: /* low byte output, high byte output */ s->io_bits = 0xFFFF; break; case 0x01: /* low byte input, high byte output */ s->io_bits = 0xFF00; break; case 0x02: /* low byte output, high byte input */ s->io_bits = 0x00FF; break; case 0x03: /* low byte input, high byte input */ s->io_bits = 0x0000; break; } /* read DIO port state */ s->state = inw(dev->iobase + PCI1723_READ_DIGITAL_INPUT_DATA); subdev++; } if (this_board->n_diochan) { s = dev->subdevices + subdev; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND | SDF_COMMON; s->n_chan = this_board->n_diochan; s->maxdata = 1; s->len_chanlist = 
this_board->n_diochan; s->range_table = &range_digital; s->insn_config = pci1723_dio_insn_config; s->insn_bits = pci1723_dio_insn_bits; subdev++; } devpriv->valid = 1; pci1723_reset(dev); return 0; } /* * _detach is called to deconfigure a device. It should deallocate * resources. * This function is also called when _attach() fails, so it should be * careful not to release resources that were not necessarily * allocated by _attach(). dev->private and dev->subdevices are * deallocated automatically by the core. */ static int pci1723_detach(struct comedi_device *dev) { printk(KERN_ERR "comedi%d: pci1723: remove\n", dev->minor); if (dev->private) { if (devpriv->valid) pci1723_reset(dev); if (devpriv->pcidev) { if (dev->iobase) comedi_pci_disable(devpriv->pcidev); pci_dev_put(devpriv->pcidev); } } return 0; } /* * A convenient macro that defines init_module() and cleanup_module(), * as necessary. */ static int __devinit driver_pci1723_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_pci1723.driver_name); } static void __devexit driver_pci1723_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_pci1723_pci_driver = { .id_table = pci1723_pci_table, .probe = &driver_pci1723_pci_probe, .remove = __devexit_p(&driver_pci1723_pci_remove) }; static int __init driver_pci1723_init_module(void) { int retval; retval = comedi_driver_register(&driver_pci1723); if (retval < 0) return retval; driver_pci1723_pci_driver.name = (char *)driver_pci1723.driver_name; return pci_register_driver(&driver_pci1723_pci_driver); } static void __exit driver_pci1723_cleanup_module(void) { pci_unregister_driver(&driver_pci1723_pci_driver); comedi_driver_unregister(&driver_pci1723); } module_init(driver_pci1723_init_module); module_exit(driver_pci1723_cleanup_module); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
samno1607/Xperia-Z-Source-Differences-JB
arch/sh/kernel/cpu/sh2a/setup-mxg.c
7505
6585
/*
 * Renesas MX-G (R8A03022BG) Setup
 *
 * Copyright (C) 2008, 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>

/* Interrupt source identifiers for the on-chip interrupt controller. */
enum {
	UNUSED = 0,

	/* interrupt sources */
	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
	IRQ8, IRQ9, IRQ10, IRQ11, IRQ12, IRQ13, IRQ14, IRQ15,

	PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,

	SINT8, SINT7, SINT6, SINT5, SINT4, SINT3, SINT2, SINT1,

	SCIF0, SCIF1,

	MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3, MTU2_GROUP4, MTU2_GROUP5,
	MTU2_TGI3B, MTU2_TGI3C,

	/* interrupt groups */
	PINT,
};

/*
 * Vector-number to source mapping.  Several hardware vectors map onto
 * one logical source (e.g. four vectors per SCIF, several per MTU2
 * group).
 */
static struct intc_vect vectors[] __initdata = {
	INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
	INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
	INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
	INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
	INTC_IRQ(IRQ8, 72), INTC_IRQ(IRQ9, 73),
	INTC_IRQ(IRQ10, 74), INTC_IRQ(IRQ11, 75),
	INTC_IRQ(IRQ12, 76), INTC_IRQ(IRQ13, 77),
	INTC_IRQ(IRQ14, 78), INTC_IRQ(IRQ15, 79),

	INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
	INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
	INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
	INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),

	INTC_IRQ(SINT8, 94), INTC_IRQ(SINT7, 95),
	INTC_IRQ(SINT6, 96), INTC_IRQ(SINT5, 97),
	INTC_IRQ(SINT4, 98), INTC_IRQ(SINT3, 99),
	INTC_IRQ(SINT2, 100), INTC_IRQ(SINT1, 101),

	INTC_IRQ(SCIF0, 220), INTC_IRQ(SCIF0, 221),
	INTC_IRQ(SCIF0, 222), INTC_IRQ(SCIF0, 223),
	INTC_IRQ(SCIF1, 224), INTC_IRQ(SCIF1, 225),
	INTC_IRQ(SCIF1, 226), INTC_IRQ(SCIF1, 227),

	INTC_IRQ(MTU2_GROUP1, 228), INTC_IRQ(MTU2_GROUP1, 229),
	INTC_IRQ(MTU2_GROUP1, 230), INTC_IRQ(MTU2_GROUP1, 231),
	INTC_IRQ(MTU2_GROUP1, 232), INTC_IRQ(MTU2_GROUP1, 233),

	INTC_IRQ(MTU2_GROUP2, 234), INTC_IRQ(MTU2_GROUP2, 235),
	INTC_IRQ(MTU2_GROUP2, 236), INTC_IRQ(MTU2_GROUP2, 237),
	INTC_IRQ(MTU2_GROUP2, 238), INTC_IRQ(MTU2_GROUP2, 239),

	INTC_IRQ(MTU2_GROUP3, 240), INTC_IRQ(MTU2_GROUP3, 241),
	INTC_IRQ(MTU2_GROUP3, 242), INTC_IRQ(MTU2_GROUP3, 243),

	INTC_IRQ(MTU2_TGI3B, 244),
	INTC_IRQ(MTU2_TGI3C, 245),

	INTC_IRQ(MTU2_GROUP4, 246), INTC_IRQ(MTU2_GROUP4, 247),
	INTC_IRQ(MTU2_GROUP4, 248), INTC_IRQ(MTU2_GROUP4, 249),
	INTC_IRQ(MTU2_GROUP4, 250), INTC_IRQ(MTU2_GROUP4, 251),

	INTC_IRQ(MTU2_GROUP5, 252), INTC_IRQ(MTU2_GROUP5, 253),
	INTC_IRQ(MTU2_GROUP5, 254), INTC_IRQ(MTU2_GROUP5, 255),
};

/* All eight PINTs are masked/prioritised together as one group. */
static struct intc_group groups[] __initdata = {
	INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
		   PINT4, PINT5, PINT6, PINT7),
};

/* Priority registers: { address, set_reg, reg_width, field_width, fields } */
static struct intc_prio_reg prio_registers[] __initdata = {
	{ 0xfffd9418, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
	{ 0xfffd941a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
	{ 0xfffd941c, 0, 16, 4, /* IPR03 */ { IRQ8, IRQ9, IRQ10, IRQ11 } },
	{ 0xfffd941e, 0, 16, 4, /* IPR04 */ { IRQ12, IRQ13, IRQ14, IRQ15 } },
	{ 0xfffd9420, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
	{ 0xfffd9800, 0, 16, 4, /* IPR06 */ { } },
	{ 0xfffd9802, 0, 16, 4, /* IPR07 */ { } },
	{ 0xfffd9804, 0, 16, 4, /* IPR08 */ { } },
	{ 0xfffd9806, 0, 16, 4, /* IPR09 */ { } },
	{ 0xfffd9808, 0, 16, 4, /* IPR10 */ { } },
	{ 0xfffd980a, 0, 16, 4, /* IPR11 */ { } },
	{ 0xfffd980c, 0, 16, 4, /* IPR12 */ { } },
	{ 0xfffd980e, 0, 16, 4, /* IPR13 */ { } },
	{ 0xfffd9810, 0, 16, 4, /* IPR14 */ { 0, 0, 0, SCIF0 } },
	{ 0xfffd9812, 0, 16, 4, /* IPR15 */
		{ SCIF1, MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3 } },
	{ 0xfffd9814, 0, 16, 4, /* IPR16 */
		{ MTU2_TGI3B, MTU2_TGI3C, MTU2_GROUP4, MTU2_GROUP5 } },
};

static struct intc_mask_reg mask_registers[] __initdata = {
	{ 0xfffd9408, 0, 16, /* PINTER */
	  { 0, 0, 0, 0, 0, 0, 0, 0,
	    PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
};

static DECLARE_INTC_DESC(intc_desc, "mxg", vectors, groups,
			 mask_registers, prio_registers, NULL);

/* MTU2 timer channel 0, used as a clock event device. */
static struct sh_timer_config mtu2_0_platform_data = {
	.channel_offset = -0x80,
	.timer_bit = 0,
	.clockevent_rating = 200,
};

static struct resource mtu2_0_resources[] = {
	[0] = {
		.start	= 0xff801300,
		.end	= 0xff801326,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 228,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device mtu2_0_device = {
	.name		= "sh_mtu2",
	.id		= 0,
	.dev = {
		.platform_data	= &mtu2_0_platform_data,
	},
	.resource	= mtu2_0_resources,
	.num_resources	= ARRAY_SIZE(mtu2_0_resources),
};

/* MTU2 timer channel 1. */
static struct sh_timer_config mtu2_1_platform_data = {
	.channel_offset = -0x100,
	.timer_bit = 1,
	.clockevent_rating = 200,
};

static struct resource mtu2_1_resources[] = {
	[0] = {
		.start	= 0xff801380,
		.end	= 0xff801390,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 234,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device mtu2_1_device = {
	.name		= "sh_mtu2",
	.id		= 1,
	.dev = {
		.platform_data	= &mtu2_1_platform_data,
	},
	.resource	= mtu2_1_resources,
	.num_resources	= ARRAY_SIZE(mtu2_1_resources),
};

/* MTU2 timer channel 2. */
static struct sh_timer_config mtu2_2_platform_data = {
	.channel_offset = 0x80,
	.timer_bit = 2,
	.clockevent_rating = 200,
};

static struct resource mtu2_2_resources[] = {
	[0] = {
		.start	= 0xff801000,
		.end	= 0xff80100a,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 240,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device mtu2_2_device = {
	.name		= "sh_mtu2",
	.id		= 2,
	.dev = {
		.platform_data	= &mtu2_2_platform_data,
	},
	.resource	= mtu2_2_resources,
	.num_resources	= ARRAY_SIZE(mtu2_2_resources),
};

/* On-chip SCIF serial port. */
static struct plat_sci_port scif0_platform_data = {
	.mapbase	= 0xff804000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 220, 220, 220, 220 },
};

static struct platform_device scif0_device = {
	.name		= "sh-sci",
	.id		= 0,
	.dev		= {
		.platform_data	= &scif0_platform_data,
	},
};

static struct platform_device *mxg_devices[] __initdata = {
	&scif0_device,
	&mtu2_0_device,
	&mtu2_1_device,
	&mtu2_2_device,
};

/* Register all on-chip platform devices. */
static int __init mxg_devices_setup(void)
{
	return platform_add_devices(mxg_devices,
				    ARRAY_SIZE(mxg_devices));
}
arch_initcall(mxg_devices_setup);

/* Hook for the SH interrupt setup path: register our controller. */
void __init plat_irq_setup(void)
{
	register_intc_controller(&intc_desc);
}

/* Same devices again for the early platform (pre-initcall) pass. */
static struct platform_device *mxg_early_devices[] __initdata = {
	&scif0_device,
	&mtu2_0_device,
	&mtu2_1_device,
	&mtu2_2_device,
};

void __init plat_early_device_setup(void)
{
	early_platform_add_devices(mxg_early_devices,
				   ARRAY_SIZE(mxg_early_devices));
}
gpl-2.0
schqiushui/android_kernel_htc_msm8974
arch/sh/kernel/cpu/sh4a/setup-sh7770.c
7505
19247
/*
 * SH7770 Setup
 *
 * Copyright (C) 2006 - 2008 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/io.h>

/*
 * Ten on-chip SCIF serial ports (scif0..scif9).  The entries differ
 * only in register base address (0xff923000 + id * 0x1000) and IRQ
 * (61 + id); everything else is identical.
 */
static struct plat_sci_port scif0_platform_data = {
	.mapbase	= 0xff923000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 61, 61, 61, 61 },
};

static struct platform_device scif0_device = {
	.name		= "sh-sci",
	.id		= 0,
	.dev		= {
		.platform_data	= &scif0_platform_data,
	},
};

static struct plat_sci_port scif1_platform_data = {
	.mapbase	= 0xff924000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 62, 62, 62, 62 },
};

static struct platform_device scif1_device = {
	.name		= "sh-sci",
	.id		= 1,
	.dev		= {
		.platform_data	= &scif1_platform_data,
	},
};

static struct plat_sci_port scif2_platform_data = {
	.mapbase	= 0xff925000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 63, 63, 63, 63 },
};

static struct platform_device scif2_device = {
	.name		= "sh-sci",
	.id		= 2,
	.dev		= {
		.platform_data	= &scif2_platform_data,
	},
};

static struct plat_sci_port scif3_platform_data = {
	.mapbase	= 0xff926000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 64, 64, 64, 64 },
};

static struct platform_device scif3_device = {
	.name		= "sh-sci",
	.id		= 3,
	.dev		= {
		.platform_data	= &scif3_platform_data,
	},
};

static struct plat_sci_port scif4_platform_data = {
	.mapbase	= 0xff927000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 65, 65, 65, 65 },
};

static struct platform_device scif4_device = {
	.name		= "sh-sci",
	.id		= 4,
	.dev		= {
		.platform_data	= &scif4_platform_data,
	},
};

static struct plat_sci_port scif5_platform_data = {
	.mapbase	= 0xff928000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 66, 66, 66, 66 },
};

static struct platform_device scif5_device = {
	.name		= "sh-sci",
	.id		= 5,
	.dev		= {
		.platform_data	= &scif5_platform_data,
	},
};

static struct plat_sci_port scif6_platform_data = {
	.mapbase	= 0xff929000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 67, 67, 67, 67 },
};

static struct platform_device scif6_device = {
	.name		= "sh-sci",
	.id		= 6,
	.dev		= {
		.platform_data	= &scif6_platform_data,
	},
};

static struct plat_sci_port scif7_platform_data = {
	.mapbase	= 0xff92a000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 68, 68, 68, 68 },
};

static struct platform_device scif7_device = {
	.name		= "sh-sci",
	.id		= 7,
	.dev		= {
		.platform_data	= &scif7_platform_data,
	},
};

static struct plat_sci_port scif8_platform_data = {
	.mapbase	= 0xff92b000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 69, 69, 69, 69 },
};

static struct platform_device scif8_device = {
	.name		= "sh-sci",
	.id		= 8,
	.dev		= {
		.platform_data	= &scif8_platform_data,
	},
};

static struct plat_sci_port scif9_platform_data = {
	.mapbase	= 0xff92c000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 70, 70, 70, 70 },
};

static struct platform_device scif9_device = {
	.name		= "sh-sci",
	.id		= 9,
	.dev		= {
		.platform_data	= &scif9_platform_data,
	},
};

/*
 * Nine TMU timer channels (three units of three channels each).
 * tmu0 is rated as a clock event source, tmu1 as a clock source; the
 * remaining channels carry no rating here.
 */
static struct sh_timer_config tmu0_platform_data = {
	.channel_offset = 0x04,
	.timer_bit = 0,
	.clockevent_rating = 200,
};

static struct resource tmu0_resources[] = {
	[0] = {
		.start	= 0xffd80008,
		.end	= 0xffd80013,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 16,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device tmu0_device = {
	.name		= "sh_tmu",
	.id		= 0,
	.dev = {
		.platform_data	= &tmu0_platform_data,
	},
	.resource	= tmu0_resources,
	.num_resources	= ARRAY_SIZE(tmu0_resources),
};

static struct sh_timer_config tmu1_platform_data = {
	.channel_offset = 0x10,
	.timer_bit = 1,
	.clocksource_rating = 200,
};

static struct resource tmu1_resources[] = {
	[0] = {
		.start	= 0xffd80014,
		.end	= 0xffd8001f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 17,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device tmu1_device = {
	.name		= "sh_tmu",
	.id		= 1,
	.dev = {
		.platform_data	= &tmu1_platform_data,
	},
	.resource	= tmu1_resources,
	.num_resources	= ARRAY_SIZE(tmu1_resources),
};

static struct sh_timer_config tmu2_platform_data = {
	.channel_offset = 0x1c,
	.timer_bit = 2,
};

static struct resource tmu2_resources[] = {
	[0] = {
		.start	= 0xffd80020,
		.end	= 0xffd8002f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 18,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device tmu2_device = {
	.name		= "sh_tmu",
	.id		= 2,
	.dev = {
		.platform_data	= &tmu2_platform_data,
	},
	.resource	= tmu2_resources,
	.num_resources	= ARRAY_SIZE(tmu2_resources),
};

static struct sh_timer_config tmu3_platform_data = {
	.channel_offset = 0x04,
	.timer_bit = 0,
};

static struct resource tmu3_resources[] = {
	[0] = {
		.start	= 0xffd81008,
		.end	= 0xffd81013,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 19,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device tmu3_device = {
	.name		= "sh_tmu",
	.id		= 3,
	.dev = {
		.platform_data	= &tmu3_platform_data,
	},
	.resource	= tmu3_resources,
	.num_resources	= ARRAY_SIZE(tmu3_resources),
};

static struct sh_timer_config tmu4_platform_data = {
	.channel_offset = 0x10,
	.timer_bit = 1,
};

static struct resource tmu4_resources[] = {
	[0] = {
		.start	= 0xffd81014,
		.end	= 0xffd8101f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 20,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device tmu4_device = {
	.name		= "sh_tmu",
	.id		= 4,
	.dev = {
		.platform_data	= &tmu4_platform_data,
	},
	.resource	= tmu4_resources,
	.num_resources	= ARRAY_SIZE(tmu4_resources),
};

static struct sh_timer_config tmu5_platform_data = {
	.channel_offset = 0x1c,
	.timer_bit = 2,
};

static struct resource tmu5_resources[] = {
	[0] = {
		.start	= 0xffd81020,
		.end	= 0xffd8102f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 21,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device tmu5_device = {
	.name		= "sh_tmu",
	.id		= 5,
	.dev = {
		.platform_data	= &tmu5_platform_data,
	},
	.resource	= tmu5_resources,
	.num_resources	= ARRAY_SIZE(tmu5_resources),
};

static struct sh_timer_config tmu6_platform_data = {
	.channel_offset = 0x04,
	.timer_bit = 0,
};

static struct resource tmu6_resources[] = {
	[0] = {
		.start	= 0xffd82008,
		.end	= 0xffd82013,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 22,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device tmu6_device = {
	.name		= "sh_tmu",
	.id		= 6,
	.dev = {
		.platform_data	= &tmu6_platform_data,
	},
	.resource	= tmu6_resources,
	.num_resources	= ARRAY_SIZE(tmu6_resources),
};

static struct sh_timer_config tmu7_platform_data = {
	.channel_offset = 0x10,
	.timer_bit = 1,
};

static struct resource tmu7_resources[] = {
	[0] = {
		.start	= 0xffd82014,
		.end	= 0xffd8201f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 23,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device tmu7_device = {
	.name		= "sh_tmu",
	.id		= 7,
	.dev = {
		.platform_data	= &tmu7_platform_data,
	},
	.resource	= tmu7_resources,
	.num_resources	= ARRAY_SIZE(tmu7_resources),
};

static struct sh_timer_config tmu8_platform_data = {
	.channel_offset = 0x1c,
	.timer_bit = 2,
};
static struct resource tmu8_resources[] = { [0] = { .start = 0xffd82020, .end = 0xffd8202b, .flags = IORESOURCE_MEM, }, [1] = { .start = 24, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu8_device = { .name = "sh_tmu", .id = 8, .dev = { .platform_data = &tmu8_platform_data, }, .resource = tmu8_resources, .num_resources = ARRAY_SIZE(tmu8_resources), }; static struct platform_device *sh7770_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &scif3_device, &scif4_device, &scif5_device, &scif6_device, &scif7_device, &scif8_device, &scif9_device, &tmu0_device, &tmu1_device, &tmu2_device, &tmu3_device, &tmu4_device, &tmu5_device, &tmu6_device, &tmu7_device, &tmu8_device, }; static int __init sh7770_devices_setup(void) { return platform_add_devices(sh7770_devices, ARRAY_SIZE(sh7770_devices)); } arch_initcall(sh7770_devices_setup); static struct platform_device *sh7770_early_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &scif3_device, &scif4_device, &scif5_device, &scif6_device, &scif7_device, &scif8_device, &scif9_device, &tmu0_device, &tmu1_device, &tmu2_device, &tmu3_device, &tmu4_device, &tmu5_device, &tmu6_device, &tmu7_device, &tmu8_device, }; void __init plat_early_device_setup(void) { early_platform_add_devices(sh7770_early_devices, ARRAY_SIZE(sh7770_early_devices)); } enum { UNUSED = 0, /* interrupt sources */ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, IRL_HHLL, IRL_HHLH, IRL_HHHL, IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, GPIO, TMU0, TMU1, TMU2, TMU2_TICPI, TMU3, TMU4, TMU5, TMU5_TICPI, TMU6, TMU7, TMU8, HAC, IPI, SPDIF, HUDI, I2C, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, I2S0, I2S1, I2S2, I2S3, SRC_RX, SRC_TX, SRC_SPDIF, DU, VIDEO_IN, REMOTE, YUV, USB, ATAPI, CAN, GPS, GFX2D, GFX3D_MBX, GFX3D_DMAC, EXBUS_ATA, SPI0, SPI1, SCIF089, SCIF1234, SCIF567, ADC, BBDMAC_0_3, BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14, BBDMAC_15_18, BBDMAC_19_22, 
BBDMAC_23_26, BBDMAC_27, BBDMAC_28, BBDMAC_29, BBDMAC_30, BBDMAC_31, /* interrupt groups */ TMU, DMAC, I2S, SRC, GFX3D, SPI, SCIF, BBDMAC, }; static struct intc_vect vectors[] __initdata = { INTC_VECT(GPIO, 0x3e0), INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2_TICPI, 0x460), INTC_VECT(TMU3, 0x480), INTC_VECT(TMU4, 0x4a0), INTC_VECT(TMU5, 0x4c0), INTC_VECT(TMU5_TICPI, 0x4e0), INTC_VECT(TMU6, 0x500), INTC_VECT(TMU7, 0x520), INTC_VECT(TMU8, 0x540), INTC_VECT(HAC, 0x580), INTC_VECT(IPI, 0x5c0), INTC_VECT(SPDIF, 0x5e0), INTC_VECT(HUDI, 0x600), INTC_VECT(I2C, 0x620), INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660), INTC_VECT(DMAC0_DMINT2, 0x680), INTC_VECT(I2S0, 0x6a0), INTC_VECT(I2S1, 0x6c0), INTC_VECT(I2S2, 0x6e0), INTC_VECT(I2S3, 0x700), INTC_VECT(SRC_RX, 0x720), INTC_VECT(SRC_TX, 0x740), INTC_VECT(SRC_SPDIF, 0x760), INTC_VECT(DU, 0x780), INTC_VECT(VIDEO_IN, 0x7a0), INTC_VECT(REMOTE, 0x7c0), INTC_VECT(YUV, 0x7e0), INTC_VECT(USB, 0x840), INTC_VECT(ATAPI, 0x860), INTC_VECT(CAN, 0x880), INTC_VECT(GPS, 0x8a0), INTC_VECT(GFX2D, 0x8c0), INTC_VECT(GFX3D_MBX, 0x900), INTC_VECT(GFX3D_DMAC, 0x920), INTC_VECT(EXBUS_ATA, 0x940), INTC_VECT(SPI0, 0x960), INTC_VECT(SPI1, 0x980), INTC_VECT(SCIF089, 0x9a0), INTC_VECT(SCIF1234, 0x9c0), INTC_VECT(SCIF1234, 0x9e0), INTC_VECT(SCIF1234, 0xa00), INTC_VECT(SCIF1234, 0xa20), INTC_VECT(SCIF567, 0xa40), INTC_VECT(SCIF567, 0xa60), INTC_VECT(SCIF567, 0xa80), INTC_VECT(SCIF089, 0xaa0), INTC_VECT(SCIF089, 0xac0), INTC_VECT(ADC, 0xb20), INTC_VECT(BBDMAC_0_3, 0xba0), INTC_VECT(BBDMAC_0_3, 0xbc0), INTC_VECT(BBDMAC_0_3, 0xbe0), INTC_VECT(BBDMAC_0_3, 0xc00), INTC_VECT(BBDMAC_4_7, 0xc20), INTC_VECT(BBDMAC_4_7, 0xc40), INTC_VECT(BBDMAC_4_7, 0xc60), INTC_VECT(BBDMAC_4_7, 0xc80), INTC_VECT(BBDMAC_8_10, 0xca0), INTC_VECT(BBDMAC_8_10, 0xcc0), INTC_VECT(BBDMAC_8_10, 0xce0), INTC_VECT(BBDMAC_11_14, 0xd00), INTC_VECT(BBDMAC_11_14, 0xd20), INTC_VECT(BBDMAC_11_14, 0xd40), INTC_VECT(BBDMAC_11_14, 0xd60), 
INTC_VECT(BBDMAC_15_18, 0xd80), INTC_VECT(BBDMAC_15_18, 0xda0), INTC_VECT(BBDMAC_15_18, 0xdc0), INTC_VECT(BBDMAC_15_18, 0xde0), INTC_VECT(BBDMAC_19_22, 0xe00), INTC_VECT(BBDMAC_19_22, 0xe20), INTC_VECT(BBDMAC_19_22, 0xe40), INTC_VECT(BBDMAC_19_22, 0xe60), INTC_VECT(BBDMAC_23_26, 0xe80), INTC_VECT(BBDMAC_23_26, 0xea0), INTC_VECT(BBDMAC_23_26, 0xec0), INTC_VECT(BBDMAC_23_26, 0xee0), INTC_VECT(BBDMAC_27, 0xf00), INTC_VECT(BBDMAC_28, 0xf20), INTC_VECT(BBDMAC_29, 0xf40), INTC_VECT(BBDMAC_30, 0xf60), INTC_VECT(BBDMAC_31, 0xf80), }; static struct intc_group groups[] __initdata = { INTC_GROUP(TMU, TMU0, TMU1, TMU2, TMU2_TICPI, TMU3, TMU4, TMU5, TMU5_TICPI, TMU6, TMU7, TMU8), INTC_GROUP(DMAC, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2), INTC_GROUP(I2S, I2S0, I2S1, I2S2, I2S3), INTC_GROUP(SRC, SRC_RX, SRC_TX, SRC_SPDIF), INTC_GROUP(GFX3D, GFX3D_MBX, GFX3D_DMAC), INTC_GROUP(SPI, SPI0, SPI1), INTC_GROUP(SCIF, SCIF089, SCIF1234, SCIF567), INTC_GROUP(BBDMAC, BBDMAC_0_3, BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14, BBDMAC_15_18, BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27, BBDMAC_28, BBDMAC_29, BBDMAC_30, BBDMAC_31), }; static struct intc_mask_reg mask_registers[] __initdata = { { 0xffe00040, 0xffe00044, 32, /* INT2MSKR / INT2MSKCR */ { 0, BBDMAC, ADC, SCIF, SPI, EXBUS_ATA, GFX3D, GFX2D, GPS, CAN, ATAPI, USB, YUV, REMOTE, VIDEO_IN, DU, SRC, I2S, DMAC, I2C, HUDI, SPDIF, IPI, HAC, TMU, GPIO } }, }; static struct intc_prio_reg prio_registers[] __initdata = { { 0xffe00000, 0, 32, 8, /* INT2PRI0 */ { GPIO, TMU0, 0, HAC } }, { 0xffe00004, 0, 32, 8, /* INT2PRI1 */ { IPI, SPDIF, HUDI, I2C } }, { 0xffe00008, 0, 32, 8, /* INT2PRI2 */ { DMAC, I2S, SRC, DU } }, { 0xffe0000c, 0, 32, 8, /* INT2PRI3 */ { VIDEO_IN, REMOTE, YUV, USB } }, { 0xffe00010, 0, 32, 8, /* INT2PRI4 */ { ATAPI, CAN, GPS, GFX2D } }, { 0xffe00014, 0, 32, 8, /* INT2PRI5 */ { 0, GFX3D, EXBUS_ATA, SPI } }, { 0xffe00018, 0, 32, 8, /* INT2PRI6 */ { SCIF1234, SCIF567, SCIF089 } }, { 0xffe0001c, 0, 32, 8, /* INT2PRI7 */ { ADC, 0, 0, BBDMAC_0_3 } 
}, { 0xffe00020, 0, 32, 8, /* INT2PRI8 */ { BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14, BBDMAC_15_18 } }, { 0xffe00024, 0, 32, 8, /* INT2PRI9 */ { BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27, BBDMAC_28 } }, { 0xffe00028, 0, 32, 8, /* INT2PRI10 */ { BBDMAC_29, BBDMAC_30, BBDMAC_31 } }, { 0xffe0002c, 0, 32, 8, /* INT2PRI11 */ { TMU1, TMU2, TMU2_TICPI, TMU3 } }, { 0xffe00030, 0, 32, 8, /* INT2PRI12 */ { TMU4, TMU5, TMU5_TICPI, TMU6 } }, { 0xffe00034, 0, 32, 8, /* INT2PRI13 */ { TMU7, TMU8 } }, }; static DECLARE_INTC_DESC(intc_desc, "sh7770", vectors, groups, mask_registers, prio_registers, NULL); /* Support for external interrupt pins in IRQ mode */ static struct intc_vect irq_vectors[] __initdata = { INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280), INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300), INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380), }; static struct intc_mask_reg irq_mask_registers[] __initdata = { { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, } }, }; static struct intc_prio_reg irq_prio_registers[] __initdata = { { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, } }, }; static struct intc_sense_reg irq_sense_registers[] __initdata = { { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, } }, }; static DECLARE_INTC_DESC(intc_irq_desc, "sh7770-irq", irq_vectors, NULL, irq_mask_registers, irq_prio_registers, irq_sense_registers); /* External interrupt pins in IRL mode */ static struct intc_vect irl_vectors[] __initdata = { INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220), INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260), INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0), INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0), INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320), INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360), INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0), INTC_VECT(IRL_HHHL, 0x3c0), }; static struct intc_mask_reg 
irl3210_mask_registers[] __initdata = { { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */ { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, IRL_HHLL, IRL_HHLH, IRL_HHHL, } }, }; static struct intc_mask_reg irl7654_mask_registers[] __initdata = { { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, IRL_HHLL, IRL_HHLH, IRL_HHHL, } }, }; static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors, NULL, irl7654_mask_registers, NULL, NULL); static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors, NULL, irl3210_mask_registers, NULL, NULL); #define INTC_ICR0 0xffd00000 #define INTC_INTMSK0 0xffd00044 #define INTC_INTMSK1 0xffd00048 #define INTC_INTMSK2 0xffd40080 #define INTC_INTMSKCLR1 0xffd00068 #define INTC_INTMSKCLR2 0xffd40084 void __init plat_irq_setup(void) { /* disable IRQ7-0 */ __raw_writel(0xff000000, INTC_INTMSK0); /* disable IRL3-0 + IRL7-4 */ __raw_writel(0xc0000000, INTC_INTMSK1); __raw_writel(0xfffefffe, INTC_INTMSK2); /* select IRL mode for IRL3-0 + IRL7-4 */ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); /* disable holding function, ie enable "SH-4 Mode" */ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0); register_intc_controller(&intc_desc); } void __init plat_irq_setup_pins(int mode) { switch (mode) { case IRQ_MODE_IRQ: /* select IRQ mode for IRL3-0 + IRL7-4 */ __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0); register_intc_controller(&intc_irq_desc); break; case IRQ_MODE_IRL7654: /* enable IRL7-4 but don't provide any masking */ __raw_writel(0x40000000, INTC_INTMSKCLR1); __raw_writel(0x0000fffe, INTC_INTMSKCLR2); break; case IRQ_MODE_IRL3210: /* enable IRL0-3 but don't provide any masking */ __raw_writel(0x80000000, 
INTC_INTMSKCLR1);
		/*
		 * Unmask IRL3-0; per irl3210_mask_registers these sit in the
		 * upper half of INTMSK2/INTMSKCLR2.
		 */
		__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
		break;
	case IRQ_MODE_IRL7654_MASK:
		/* enable IRL7-4 and mask using cpu intc controller */
		__raw_writel(0x40000000, INTC_INTMSKCLR1);
		register_intc_controller(&intc_irl7654_desc);
		break;
	case IRQ_MODE_IRL3210_MASK:
		/* enable IRL0-3 and mask using cpu intc controller */
		__raw_writel(0x80000000, INTC_INTMSKCLR1);
		register_intc_controller(&intc_irl3210_desc);
		break;
	default:
		/* unsupported pin mode requested by board code — hard stop */
		BUG();
	}
}
gpl-2.0
lolhi/ef52-kernel
drivers/net/can/softing/softing_cs.c
8017
9896
/* * Copyright (C) 2008-2010 * * - Kurt Van Dijck, EIA Electronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the version 2 of the GNU General Public License * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include "softing_platform.h" static int softingcs_index; static spinlock_t softingcs_index_lock; static int softingcs_reset(struct platform_device *pdev, int v); static int softingcs_enable_irq(struct platform_device *pdev, int v); /* * platform_data descriptions */ #define MHZ (1000*1000) static const struct softing_platform_data softingcs_platform_data[] = { { .name = "CANcard", .manf = 0x0168, .prod = 0x001, .generation = 1, .nbus = 2, .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4, .dpram_size = 0x0800, .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, .reset = softingcs_reset, .enable_irq = softingcs_enable_irq, }, { .name = "CANcard-NEC", .manf = 0x0168, .prod = 0x002, .generation = 1, .nbus = 2, .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4, .dpram_size = 0x0800, .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, .reset = softingcs_reset, .enable_irq = softingcs_enable_irq, }, { .name = "CANcard-SJA", .manf = 0x0168, .prod = 0x004, .generation 
= 1, .nbus = 2, .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4, .dpram_size = 0x0800, .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",}, .reset = softingcs_reset, .enable_irq = softingcs_enable_irq, }, { .name = "CANcard-2", .manf = 0x0168, .prod = 0x005, .generation = 2, .nbus = 2, .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4, .dpram_size = 0x1000, .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",}, .reset = softingcs_reset, .enable_irq = NULL, }, { .name = "Vector-CANcard", .manf = 0x0168, .prod = 0x081, .generation = 1, .nbus = 2, .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4, .dpram_size = 0x0800, .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, .reset = softingcs_reset, .enable_irq = softingcs_enable_irq, }, { .name = "Vector-CANcard-SJA", .manf = 0x0168, .prod = 0x084, .generation = 1, .nbus = 2, .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4, .dpram_size = 0x0800, .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",}, .reset = softingcs_reset, .enable_irq = softingcs_enable_irq, }, { .name = "Vector-CANcard-2", .manf = 0x0168, .prod = 0x085, .generation = 2, .nbus = 2, .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4, .dpram_size = 0x1000, .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",}, .reset = softingcs_reset, .enable_irq = NULL, }, { .name = "EDICcard-NEC", .manf = 0x0168, .prod = 0x102, .generation = 1, .nbus = 2, .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4, .dpram_size = 0x0800, .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, .load = {0x0120, 0x00f600, fw_dir 
"ldcard.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, .reset = softingcs_reset, .enable_irq = softingcs_enable_irq, }, { .name = "EDICcard-2", .manf = 0x0168, .prod = 0x105, .generation = 2, .nbus = 2, .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4, .dpram_size = 0x1000, .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",}, .reset = softingcs_reset, .enable_irq = NULL, }, { 0, 0, }, }; MODULE_FIRMWARE(fw_dir "bcard.bin"); MODULE_FIRMWARE(fw_dir "ldcard.bin"); MODULE_FIRMWARE(fw_dir "cancard.bin"); MODULE_FIRMWARE(fw_dir "cansja.bin"); MODULE_FIRMWARE(fw_dir "bcard2.bin"); MODULE_FIRMWARE(fw_dir "ldcard2.bin"); MODULE_FIRMWARE(fw_dir "cancrd2.bin"); static __devinit const struct softing_platform_data *softingcs_find_platform_data(unsigned int manf, unsigned int prod) { const struct softing_platform_data *lp; for (lp = softingcs_platform_data; lp->manf; ++lp) { if ((lp->manf == manf) && (lp->prod == prod)) return lp; } return NULL; } /* * platformdata callbacks */ static int softingcs_reset(struct platform_device *pdev, int v) { struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent); dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20); return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20); } static int softingcs_enable_irq(struct platform_device *pdev, int v) { struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent); dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0); return pcmcia_write_config_byte(pcmcia, 0, v ? 
0x60 : 0); } /* * pcmcia check */ static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia, void *priv_data) { struct softing_platform_data *pdat = priv_data; struct resource *pres; int memspeed = 0; WARN_ON(!pdat); pres = pcmcia->resource[PCMCIA_IOMEM_0]; if (resource_size(pres) < 0x1000) return -ERANGE; pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE; if (pdat->generation < 2) { pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8; memspeed = 3; } else { pres->flags |= WIN_DATA_WIDTH_16; } return pcmcia_request_window(pcmcia, pres, memspeed); } static __devexit void softingcs_remove(struct pcmcia_device *pcmcia) { struct platform_device *pdev = pcmcia->priv; /* free bits */ platform_device_unregister(pdev); /* release pcmcia stuff */ pcmcia_disable_device(pcmcia); } /* * platform_device wrapper * pdev->resource has 2 entries: io & irq */ static void softingcs_pdev_release(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); kfree(pdev); } static __devinit int softingcs_probe(struct pcmcia_device *pcmcia) { int ret; struct platform_device *pdev; const struct softing_platform_data *pdat; struct resource *pres; struct dev { struct platform_device pdev; struct resource res[2]; } *dev; /* find matching platform_data */ pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id); if (!pdat) return -ENOTTY; /* setup pcmcia device */ pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM | CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC; ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat); if (ret) goto pcmcia_failed; ret = pcmcia_enable_device(pcmcia); if (ret < 0) goto pcmcia_failed; pres = pcmcia->resource[PCMCIA_IOMEM_0]; if (!pres) { ret = -EBADF; goto pcmcia_bad; } /* create softing platform device */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto mem_failed; } dev->pdev.resource = dev->res; dev->pdev.num_resources = ARRAY_SIZE(dev->res); dev->pdev.dev.release = 
softingcs_pdev_release; pdev = &dev->pdev; pdev->dev.platform_data = (void *)pdat; pdev->dev.parent = &pcmcia->dev; pcmcia->priv = pdev; /* platform device resources */ pdev->resource[0].flags = IORESOURCE_MEM; pdev->resource[0].start = pres->start; pdev->resource[0].end = pres->end; pdev->resource[1].flags = IORESOURCE_IRQ; pdev->resource[1].start = pcmcia->irq; pdev->resource[1].end = pdev->resource[1].start; /* platform device setup */ spin_lock(&softingcs_index_lock); pdev->id = softingcs_index++; spin_unlock(&softingcs_index_lock); pdev->name = "softing"; dev_set_name(&pdev->dev, "softingcs.%i", pdev->id); ret = platform_device_register(pdev); if (ret < 0) goto platform_failed; dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev)); return 0; platform_failed: kfree(dev); mem_failed: pcmcia_bad: pcmcia_failed: pcmcia_disable_device(pcmcia); pcmcia->priv = NULL; return ret ?: -ENODEV; } static const struct pcmcia_device_id softingcs_ids[] = { /* softing */ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001), PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002), PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004), PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005), /* vector, manufacturer? 
*/
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085),
	/* EDIC */
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105),
	PCMCIA_DEVICE_NULL,
};

MODULE_DEVICE_TABLE(pcmcia, softingcs_ids);

/* PCMCIA driver glue; probe/remove are defined earlier in this file */
static struct pcmcia_driver softingcs_driver = {
	.owner = THIS_MODULE,
	.name = "softingcs",
	.id_table = softingcs_ids,
	.probe = softingcs_probe,
	.remove = __devexit_p(softingcs_remove),
};

/*
 * Module init: initialize the spinlock guarding softingcs_index (the
 * per-card platform-device id counter), then register with the pcmcia core.
 */
static int __init softingcs_start(void)
{
	spin_lock_init(&softingcs_index_lock);
	return pcmcia_register_driver(&softingcs_driver);
}

/* Module exit: unregister from the pcmcia core */
static void __exit softingcs_stop(void)
{
	pcmcia_unregister_driver(&softingcs_driver);
}

module_init(softingcs_start);
module_exit(softingcs_stop);

MODULE_DESCRIPTION("softing CANcard driver"
	", links PCMCIA card to softing driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
talnoah/m8
drivers/w1/masters/omap_hdq.c
8017
18222
/* * drivers/w1/masters/omap_hdq.c * * Copyright (C) 2007 Texas Instruments, Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/sched.h> #include <asm/irq.h> #include <mach/hardware.h> #include "../w1.h" #include "../w1_int.h" #define MOD_NAME "OMAP_HDQ:" #define OMAP_HDQ_REVISION 0x00 #define OMAP_HDQ_TX_DATA 0x04 #define OMAP_HDQ_RX_DATA 0x08 #define OMAP_HDQ_CTRL_STATUS 0x0c #define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK (1<<6) #define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE (1<<5) #define OMAP_HDQ_CTRL_STATUS_GO (1<<4) #define OMAP_HDQ_CTRL_STATUS_INITIALIZATION (1<<2) #define OMAP_HDQ_CTRL_STATUS_DIR (1<<1) #define OMAP_HDQ_CTRL_STATUS_MODE (1<<0) #define OMAP_HDQ_INT_STATUS 0x10 #define OMAP_HDQ_INT_STATUS_TXCOMPLETE (1<<2) #define OMAP_HDQ_INT_STATUS_RXCOMPLETE (1<<1) #define OMAP_HDQ_INT_STATUS_TIMEOUT (1<<0) #define OMAP_HDQ_SYSCONFIG 0x14 #define OMAP_HDQ_SYSCONFIG_SOFTRESET (1<<1) #define OMAP_HDQ_SYSCONFIG_AUTOIDLE (1<<0) #define OMAP_HDQ_SYSSTATUS 0x18 #define OMAP_HDQ_SYSSTATUS_RESETDONE (1<<0) #define OMAP_HDQ_FLAG_CLEAR 0 #define OMAP_HDQ_FLAG_SET 1 #define OMAP_HDQ_TIMEOUT (HZ/5) #define OMAP_HDQ_MAX_USER 4 static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue); static int w1_id; struct hdq_data { struct device *dev; void __iomem *hdq_base; /* lock status update */ struct mutex hdq_mutex; int hdq_usecount; struct clk *hdq_ick; struct clk *hdq_fck; u8 hdq_irqstatus; /* device lock */ spinlock_t hdq_spinlock; /* * Used to control the call to omap_hdq_get and omap_hdq_put. * HDQ Protocol: Write the CMD|REG_address first, followed by * the data wrire or read. 
*/ int init_trans; }; static int __devinit omap_hdq_probe(struct platform_device *pdev); static int omap_hdq_remove(struct platform_device *pdev); static struct platform_driver omap_hdq_driver = { .probe = omap_hdq_probe, .remove = omap_hdq_remove, .driver = { .name = "omap_hdq", }, }; static u8 omap_w1_read_byte(void *_hdq); static void omap_w1_write_byte(void *_hdq, u8 byte); static u8 omap_w1_reset_bus(void *_hdq); static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev, u8 search_type, w1_slave_found_callback slave_found); static struct w1_bus_master omap_w1_master = { .read_byte = omap_w1_read_byte, .write_byte = omap_w1_write_byte, .reset_bus = omap_w1_reset_bus, .search = omap_w1_search_bus, }; /* HDQ register I/O routines */ static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset) { return __raw_readb(hdq_data->hdq_base + offset); } static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val) { __raw_writeb(val, hdq_data->hdq_base + offset); } static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset, u8 val, u8 mask) { u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask) | (val & mask); __raw_writeb(new_val, hdq_data->hdq_base + offset); return new_val; } /* * Wait for one or more bits in flag change. * HDQ_FLAG_SET: wait until any bit in the flag is set. * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared. * return 0 on success and -ETIMEDOUT in the case of timeout. 
*/ static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset, u8 flag, u8 flag_set, u8 *status) { int ret = 0; unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT; if (flag_set == OMAP_HDQ_FLAG_CLEAR) { /* wait for the flag clear */ while (((*status = hdq_reg_in(hdq_data, offset)) & flag) && time_before(jiffies, timeout)) { schedule_timeout_uninterruptible(1); } if (*status & flag) ret = -ETIMEDOUT; } else if (flag_set == OMAP_HDQ_FLAG_SET) { /* wait for the flag set */ while (!((*status = hdq_reg_in(hdq_data, offset)) & flag) && time_before(jiffies, timeout)) { schedule_timeout_uninterruptible(1); } if (!(*status & flag)) ret = -ETIMEDOUT; } else return -EINVAL; return ret; } /* write out a byte and fill *status with HDQ_INT_STATUS */ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) { int ret; u8 tmp_status; unsigned long irqflags; *status = 0; spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); /* clear interrupt flags via a dummy read */ hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); /* ISR loads it with new INT_STATUS */ hdq_data->hdq_irqstatus = 0; spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val); /* set the GO bit */ hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO); /* wait for the TXCOMPLETE bit */ ret = wait_event_timeout(hdq_wait_queue, hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT); if (ret == 0) { dev_dbg(hdq_data->dev, "TX wait elapsed\n"); goto out; } *status = hdq_data->hdq_irqstatus; /* check irqstatus */ if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) { dev_dbg(hdq_data->dev, "timeout waiting for" "TXCOMPLETE/RXCOMPLETE, %x", *status); ret = -ETIMEDOUT; goto out; } /* wait for the GO bit return to zero */ ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR, &tmp_status); if (ret) { dev_dbg(hdq_data->dev, "timeout waiting GO bit" "return to zero, 
%x", tmp_status); } out: return ret; } /* HDQ Interrupt service routine */ static irqreturn_t hdq_isr(int irq, void *_hdq) { struct hdq_data *hdq_data = _hdq; unsigned long irqflags; spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus); if (hdq_data->hdq_irqstatus & (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE | OMAP_HDQ_INT_STATUS_TIMEOUT)) { /* wake up sleeping process */ wake_up(&hdq_wait_queue); } return IRQ_HANDLED; } /* HDQ Mode: always return success */ static u8 omap_w1_reset_bus(void *_hdq) { return 0; } /* W1 search callback function */ static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev, u8 search_type, w1_slave_found_callback slave_found) { u64 module_id, rn_le, cs, id; if (w1_id) module_id = w1_id; else module_id = 0x1; rn_le = cpu_to_le64(module_id); /* * HDQ might not obey truly the 1-wire spec. * So calculate CRC based on module parameter. */ cs = w1_calc_crc8((u8 *)&rn_le, 7); id = (cs << 56) | module_id; slave_found(master_dev, id); } static int _omap_hdq_reset(struct hdq_data *hdq_data) { int ret; u8 tmp_status; hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET); /* * Select HDQ mode & enable clocks. * It is observed that INT flags can't be cleared via a read and GO/INIT * won't return to zero if interrupt is disabled. So we always enable * interrupt. 
*/ hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_CLOCKENABLE | OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); /* wait for reset to complete */ ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS, OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status); if (ret) dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x", tmp_status); else { hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_CLOCKENABLE | OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_AUTOIDLE); } return ret; } /* Issue break pulse to the device */ static int omap_hdq_break(struct hdq_data *hdq_data) { int ret = 0; u8 tmp_status; unsigned long irqflags; ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); ret = -EINTR; goto rtn; } spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); /* clear interrupt flags via a dummy read */ hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); /* ISR loads it with new INT_STATUS */ hdq_data->hdq_irqstatus = 0; spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); /* set the INIT and GO bit */ hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO); /* wait for the TIMEOUT bit */ ret = wait_event_timeout(hdq_wait_queue, hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT); if (ret == 0) { dev_dbg(hdq_data->dev, "break wait elapsed\n"); ret = -EINTR; goto out; } tmp_status = hdq_data->hdq_irqstatus; /* check irqstatus */ if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) { dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x", tmp_status); ret = -ETIMEDOUT; goto out; } /* * wait for both INIT and GO bits rerurn to zero. * zero wait time expected for interrupt mode. 
*/ ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR, &tmp_status); if (ret) dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits" "return to zero, %x", tmp_status); out: mutex_unlock(&hdq_data->hdq_mutex); rtn: return ret; } static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val) { int ret = 0; u8 status; unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT; ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { ret = -EINTR; goto rtn; } if (!hdq_data->hdq_usecount) { ret = -EINVAL; goto out; } if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) { hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO); /* * The RX comes immediately after TX. It * triggers another interrupt before we * sleep. So we have to wait for RXCOMPLETE bit. */ while (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE) && time_before(jiffies, timeout)) { schedule_timeout_uninterruptible(1); } hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0, OMAP_HDQ_CTRL_STATUS_DIR); status = hdq_data->hdq_irqstatus; /* check irqstatus */ if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) { dev_dbg(hdq_data->dev, "timeout waiting for" "RXCOMPLETE, %x", status); ret = -ETIMEDOUT; goto out; } } /* the data is ready. Read it in! 
*/ *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA); out: mutex_unlock(&hdq_data->hdq_mutex); rtn: return 0; } /* Enable clocks and set the controller to HDQ mode */ static int omap_hdq_get(struct hdq_data *hdq_data) { int ret = 0; ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { ret = -EINTR; goto rtn; } if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) { dev_dbg(hdq_data->dev, "attempt to exceed the max use count"); ret = -EINVAL; goto out; } else { hdq_data->hdq_usecount++; try_module_get(THIS_MODULE); if (1 == hdq_data->hdq_usecount) { if (clk_enable(hdq_data->hdq_ick)) { dev_dbg(hdq_data->dev, "Can not enable ick\n"); ret = -ENODEV; goto clk_err; } if (clk_enable(hdq_data->hdq_fck)) { dev_dbg(hdq_data->dev, "Can not enable fck\n"); clk_disable(hdq_data->hdq_ick); ret = -ENODEV; goto clk_err; } /* make sure HDQ is out of reset */ if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) & OMAP_HDQ_SYSSTATUS_RESETDONE)) { ret = _omap_hdq_reset(hdq_data); if (ret) /* back up the count */ hdq_data->hdq_usecount--; } else { /* select HDQ mode & enable clocks */ hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_CLOCKENABLE | OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_AUTOIDLE); hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); } } } clk_err: clk_put(hdq_data->hdq_ick); clk_put(hdq_data->hdq_fck); out: mutex_unlock(&hdq_data->hdq_mutex); rtn: return ret; } /* Disable clocks to the module */ static int omap_hdq_put(struct hdq_data *hdq_data) { int ret = 0; ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) return -EINTR; if (0 == hdq_data->hdq_usecount) { dev_dbg(hdq_data->dev, "attempt to decrement use count" "when it is zero"); ret = -EINVAL; } else { hdq_data->hdq_usecount--; module_put(THIS_MODULE); if (0 == hdq_data->hdq_usecount) { clk_disable(hdq_data->hdq_ick); clk_disable(hdq_data->hdq_fck); } } mutex_unlock(&hdq_data->hdq_mutex); return ret; } /* Read a byte of data from the 
device */ static u8 omap_w1_read_byte(void *_hdq) { struct hdq_data *hdq_data = _hdq; u8 val = 0; int ret; ret = hdq_read_byte(hdq_data, &val); if (ret) { ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); return -EINTR; } hdq_data->init_trans = 0; mutex_unlock(&hdq_data->hdq_mutex); omap_hdq_put(hdq_data); return -1; } /* Write followed by a read, release the module */ if (hdq_data->init_trans) { ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); return -EINTR; } hdq_data->init_trans = 0; mutex_unlock(&hdq_data->hdq_mutex); omap_hdq_put(hdq_data); } return val; } /* Write a byte of data to the device */ static void omap_w1_write_byte(void *_hdq, u8 byte) { struct hdq_data *hdq_data = _hdq; int ret; u8 status; /* First write to initialize the transfer */ if (hdq_data->init_trans == 0) omap_hdq_get(hdq_data); ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); return; } hdq_data->init_trans++; mutex_unlock(&hdq_data->hdq_mutex); ret = hdq_write_byte(hdq_data, byte, &status); if (ret == 0) { dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status); return; } /* Second write, data transferred. 
Release the module */ if (hdq_data->init_trans > 1) { omap_hdq_put(hdq_data); ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); return; } hdq_data->init_trans = 0; mutex_unlock(&hdq_data->hdq_mutex); } return; } static int __devinit omap_hdq_probe(struct platform_device *pdev) { struct hdq_data *hdq_data; struct resource *res; int ret, irq; u8 rev; hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL); if (!hdq_data) { dev_dbg(&pdev->dev, "unable to allocate memory\n"); ret = -ENOMEM; goto err_kmalloc; } hdq_data->dev = &pdev->dev; platform_set_drvdata(pdev, hdq_data); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_dbg(&pdev->dev, "unable to get resource\n"); ret = -ENXIO; goto err_resource; } hdq_data->hdq_base = ioremap(res->start, SZ_4K); if (!hdq_data->hdq_base) { dev_dbg(&pdev->dev, "ioremap failed\n"); ret = -EINVAL; goto err_ioremap; } /* get interface & functional clock objects */ hdq_data->hdq_ick = clk_get(&pdev->dev, "ick"); if (IS_ERR(hdq_data->hdq_ick)) { dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n"); ret = PTR_ERR(hdq_data->hdq_ick); goto err_ick; } hdq_data->hdq_fck = clk_get(&pdev->dev, "fck"); if (IS_ERR(hdq_data->hdq_fck)) { dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n"); ret = PTR_ERR(hdq_data->hdq_fck); goto err_fck; } hdq_data->hdq_usecount = 0; mutex_init(&hdq_data->hdq_mutex); if (clk_enable(hdq_data->hdq_ick)) { dev_dbg(&pdev->dev, "Can not enable ick\n"); ret = -ENODEV; goto err_intfclk; } if (clk_enable(hdq_data->hdq_fck)) { dev_dbg(&pdev->dev, "Can not enable fck\n"); ret = -ENODEV; goto err_fnclk; } rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION); dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. 
Driver in %s mode\n", (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt"); spin_lock_init(&hdq_data->hdq_spinlock); irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = -ENXIO; goto err_irq; } ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data); if (ret < 0) { dev_dbg(&pdev->dev, "could not request irq\n"); goto err_irq; } omap_hdq_break(hdq_data); /* don't clock the HDQ until it is needed */ clk_disable(hdq_data->hdq_ick); clk_disable(hdq_data->hdq_fck); omap_w1_master.data = hdq_data; ret = w1_add_master_device(&omap_w1_master); if (ret) { dev_dbg(&pdev->dev, "Failure in registering w1 master\n"); goto err_w1; } return 0; err_w1: err_irq: clk_disable(hdq_data->hdq_fck); err_fnclk: clk_disable(hdq_data->hdq_ick); err_intfclk: clk_put(hdq_data->hdq_fck); err_fck: clk_put(hdq_data->hdq_ick); err_ick: iounmap(hdq_data->hdq_base); err_ioremap: err_resource: platform_set_drvdata(pdev, NULL); kfree(hdq_data); err_kmalloc: return ret; } static int omap_hdq_remove(struct platform_device *pdev) { struct hdq_data *hdq_data = platform_get_drvdata(pdev); mutex_lock(&hdq_data->hdq_mutex); if (hdq_data->hdq_usecount) { dev_dbg(&pdev->dev, "removed when use count is not zero\n"); mutex_unlock(&hdq_data->hdq_mutex); return -EBUSY; } mutex_unlock(&hdq_data->hdq_mutex); /* remove module dependency */ clk_put(hdq_data->hdq_ick); clk_put(hdq_data->hdq_fck); free_irq(INT_24XX_HDQ_IRQ, hdq_data); platform_set_drvdata(pdev, NULL); iounmap(hdq_data->hdq_base); kfree(hdq_data); return 0; } static int __init omap_hdq_init(void) { return platform_driver_register(&omap_hdq_driver); } module_init(omap_hdq_init); static void __exit omap_hdq_exit(void) { platform_driver_unregister(&omap_hdq_driver); } module_exit(omap_hdq_exit); module_param(w1_id, int, S_IRUSR); MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection"); MODULE_AUTHOR("Texas Instruments"); MODULE_DESCRIPTION("HDQ driver Library"); MODULE_LICENSE("GPL");
gpl-2.0
schmatzler/zte-kernel-tureis
drivers/staging/rtl8192u/r819xU_cmdpkt.c
9041
22690
/****************************************************************************** (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved. Module: r819xusb_cmdpkt.c (RTL8190 TX/RX command packet handler Source C File) Note: The module is responsible for handling TX and RX command packet. 1. TX : Send set and query configuration command packet. 2. RX : Receive tx feedback, beacon state, query configuration command packet. Function: Export: Abbrev: History: Data Who Remark 05/06/2008 amy Create initial version porting from windows driver. ******************************************************************************/ #include "r8192U.h" #include "r819xU_cmdpkt.h" /*---------------------------Define Local Constant---------------------------*/ /* Debug constant*/ #define CMPK_DEBOUNCE_CNT 1 /* 2007/10/24 MH Add for printing a range of data. */ #define CMPK_PRINT(Address)\ {\ unsigned char i;\ u32 temp[10];\ \ memcpy(temp, Address, 40);\ for (i = 0; i <40; i+=4)\ printk("\r\n %08x", temp[i]);\ }\ /*---------------------------Define functions---------------------------------*/ rt_status SendTxCommandPacket( struct net_device *dev, void* pData, u32 DataLen ) { rt_status rtStatus = RT_STATUS_SUCCESS; struct r8192_priv *priv = ieee80211_priv(dev); struct sk_buff *skb; cb_desc *tcb_desc; unsigned char *ptr_buf; //bool bLastInitPacket = false; //PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK); //Get TCB and local buffer from common pool. 
(It is shared by CmdQ, MgntQ, and USB coalesce DataQ) skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4); memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev)); tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->queue_index = TXCMD_QUEUE; tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_NORMAL; tcb_desc->bLastIniPkt = 0; skb_reserve(skb, USB_HWDESC_HEADER_LEN); ptr_buf = skb_put(skb, DataLen); memcpy(ptr_buf,pData,DataLen); tcb_desc->txbuf_size= (u16)DataLen; if(!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)|| (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\ (priv->ieee80211->queue_stop) ) { RT_TRACE(COMP_FIRMWARE,"===================NULL packet==================================> tx full!\n"); skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb); } else { priv->ieee80211->softmac_hard_start_xmit(skb,dev); } //PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK); return rtStatus; } /*----------------------------------------------------------------------------- * Function: cmpk_message_handle_tx() * * Overview: Driver internal module can call the API to send message to * firmware side. For example, you can send a debug command packet. * Or you can send a request for FW to modify RLX4181 LBUS HW bank. * Otherwise, you can change MAC/PHT/RF register by firmware at * run time. We do not support message more than one segment now. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/06/2008 amy porting from windows code. 
* *---------------------------------------------------------------------------*/ extern rt_status cmpk_message_handle_tx( struct net_device *dev, u8* codevirtualaddress, u32 packettype, u32 buffer_len) { bool rt_status = true; #ifdef RTL8192U return rt_status; #else struct r8192_priv *priv = ieee80211_priv(dev); u16 frag_threshold; u16 frag_length, frag_offset = 0; //u16 total_size; //int i; rt_firmware *pfirmware = priv->pFirmware; struct sk_buff *skb; unsigned char *seg_ptr; cb_desc *tcb_desc; u8 bLastIniPkt; firmware_init_param(dev); //Fragmentation might be required frag_threshold = pfirmware->cmdpacket_frag_thresold; do { if((buffer_len - frag_offset) > frag_threshold) { frag_length = frag_threshold ; bLastIniPkt = 0; } else { frag_length = buffer_len - frag_offset; bLastIniPkt = 1; } /* Allocate skb buffer to contain firmware info and tx descriptor info * add 4 to avoid packet appending overflow. * */ #ifdef RTL8192U skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4); #else skb = dev_alloc_skb(frag_length + 4); #endif memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev)); tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->queue_index = TXCMD_QUEUE; tcb_desc->bCmdOrInit = packettype; tcb_desc->bLastIniPkt = bLastIniPkt; #ifdef RTL8192U skb_reserve(skb, USB_HWDESC_HEADER_LEN); #endif seg_ptr = skb_put(skb, buffer_len); /* * Transform from little endian to big endian * and pending zero */ memcpy(seg_ptr,codevirtualaddress,buffer_len); tcb_desc->txbuf_size= (u16)buffer_len; if(!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)|| (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\ (priv->ieee80211->queue_stop) ) { RT_TRACE(COMP_FIRMWARE,"=====================================================> tx full!\n"); skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb); } else { priv->ieee80211->softmac_hard_start_xmit(skb,dev); } codevirtualaddress += frag_length; frag_offset += frag_length; 
}while(frag_offset < buffer_len); return rt_status; #endif } /* CMPK_Message_Handle_Tx */ /*----------------------------------------------------------------------------- * Function: cmpk_counttxstatistic() * * Overview: * * Input: PADAPTER pAdapter - . * CMPK_TXFB_T *psTx_FB - . * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/12/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ static void cmpk_count_txstatistic( struct net_device *dev, cmpk_txfb_t *pstx_fb) { struct r8192_priv *priv = ieee80211_priv(dev); #ifdef ENABLE_PS RT_RF_POWER_STATE rtState; pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState)); // When RF is off, we should not count the packet for hw/sw synchronize // reason, ie. there may be a duration while sw switch is changed and hw // switch is being changed. 2006.12.04, by shien chang. if (rtState == eRfOff) { return; } #endif #ifdef TODO if(pAdapter->bInHctTest) return; #endif /* We can not know the packet length and transmit type: broadcast or uni or multicast. So the relative statistics must be collected in tx feedback info. */ if (pstx_fb->tok) { priv->stats.txfeedbackok++; priv->stats.txoktotal++; priv->stats.txokbytestotal += pstx_fb->pkt_length; priv->stats.txokinperiod++; /* We can not make sure broadcast/multicast or unicast mode. */ if (pstx_fb->pkt_type == PACKET_MULTICAST) { priv->stats.txmulticast++; priv->stats.txbytesmulticast += pstx_fb->pkt_length; } else if (pstx_fb->pkt_type == PACKET_BROADCAST) { priv->stats.txbroadcast++; priv->stats.txbytesbroadcast += pstx_fb->pkt_length; } else { priv->stats.txunicast++; priv->stats.txbytesunicast += pstx_fb->pkt_length; } } else { priv->stats.txfeedbackfail++; priv->stats.txerrtotal++; priv->stats.txerrbytestotal += pstx_fb->pkt_length; /* We can not make sure broadcast/multicast or unicast mode. 
*/ if (pstx_fb->pkt_type == PACKET_MULTICAST) { priv->stats.txerrmulticast++; } else if (pstx_fb->pkt_type == PACKET_BROADCAST) { priv->stats.txerrbroadcast++; } else { priv->stats.txerrunicast++; } } priv->stats.txretrycount += pstx_fb->retry_cnt; priv->stats.txfeedbackretry += pstx_fb->retry_cnt; } /* cmpk_CountTxStatistic */ /*----------------------------------------------------------------------------- * Function: cmpk_handle_tx_feedback() * * Overview: The function is responsible for extract the message inside TX * feedbck message from firmware. It will contain dedicated info in * ws-06-0063-rtl8190-command-packet-specification. Please * refer to chapter "TX Feedback Element". We have to read 20 bytes * in the command packet. * * Input: struct net_device * dev * u8 * pmsg - Msg Ptr of the command packet. * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/08/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ static void cmpk_handle_tx_feedback( struct net_device *dev, u8 * pmsg) { struct r8192_priv *priv = ieee80211_priv(dev); cmpk_txfb_t rx_tx_fb; /* */ priv->stats.txfeedback++; /* 0. Display received message. */ //cmpk_Display_Message(CMPK_RX_TX_FB_SIZE, pMsg); /* 1. Extract TX feedback info from RFD to temp structure buffer. */ /* It seems that FW use big endian(MIPS) and DRV use little endian in windows OS. So we have to read the content byte by byte or transfer endian type before copy the message copy. */ /* 2007/07/05 MH Use pointer to transfer structure memory. */ //memcpy((UINT8 *)&rx_tx_fb, pMsg, sizeof(CMPK_TXFB_T)); memcpy((u8*)&rx_tx_fb, pmsg, sizeof(cmpk_txfb_t)); /* 2. Use tx feedback info to count TX statistics. */ cmpk_count_txstatistic(dev, &rx_tx_fb); /* 2007/01/17 MH Comment previous method for TX statistic function. */ /* Collect info TX feedback packet to fill TCB. 
*/ /* We can not know the packet length and transmit type: broadcast or uni or multicast. */ //CountTxStatistics( pAdapter, &tcb ); } /* cmpk_Handle_Tx_Feedback */ void cmdpkt_beacontimerinterrupt_819xusb( struct net_device *dev ) { struct r8192_priv *priv = ieee80211_priv(dev); u16 tx_rate; { // // 070117, rcnjko: 87B have to S/W beacon for DTM encryption_cmn. // if(priv->ieee80211->current_network.mode == IEEE_A || priv->ieee80211->current_network.mode == IEEE_N_5G || (priv->ieee80211->current_network.mode == IEEE_N_24G && (!priv->ieee80211->pHTInfo->bCurSuppCCK))) { tx_rate = 60; DMESG("send beacon frame tx rate is 6Mbpm\n"); } else { tx_rate =10; DMESG("send beacon frame tx rate is 1Mbpm\n"); } rtl819xusb_beacon_tx(dev,tx_rate); // HW Beacon } } /*----------------------------------------------------------------------------- * Function: cmpk_handle_interrupt_status() * * Overview: The function is responsible for extract the message from * firmware. It will contain dedicated info in * ws-07-0063-v06-rtl819x-command-packet-specification-070315.doc. * Please refer to chapter "Interrupt Status Element". * * Input: struct net_device *dev, * u8* pmsg - Message Pointer of the command packet. * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/12/2008 amy Add this for rtl8192 porting from windows code. * *---------------------------------------------------------------------------*/ static void cmpk_handle_interrupt_status( struct net_device *dev, u8* pmsg) { cmpk_intr_sta_t rx_intr_status; /* */ struct r8192_priv *priv = ieee80211_priv(dev); DMESG("---> cmpk_Handle_Interrupt_Status()\n"); /* 0. Display received message. */ //cmpk_Display_Message(CMPK_RX_BEACON_STATE_SIZE, pMsg); /* 1. Extract TX feedback info from RFD to temp structure buffer. */ /* It seems that FW use big endian(MIPS) and DRV use little endian in windows OS. So we have to read the content byte by byte or transfer endian type before copy the message copy. 
*/ //rx_bcn_state.Element_ID = pMsg[0]; //rx_bcn_state.Length = pMsg[1]; rx_intr_status.length = pmsg[1]; if (rx_intr_status.length != (sizeof(cmpk_intr_sta_t) - 2)) { DMESG("cmpk_Handle_Interrupt_Status: wrong length!\n"); return; } // Statistics of beacon for ad-hoc mode. if( priv->ieee80211->iw_mode == IW_MODE_ADHOC) { //2 maybe need endian transform? rx_intr_status.interrupt_status = *((u32 *)(pmsg + 4)); //rx_intr_status.InterruptStatus = N2H4BYTE(*((UINT32 *)(pMsg + 4))); DMESG("interrupt status = 0x%x\n", rx_intr_status.interrupt_status); if (rx_intr_status.interrupt_status & ISR_TxBcnOk) { priv->ieee80211->bibsscoordinator = true; priv->stats.txbeaconokint++; } else if (rx_intr_status.interrupt_status & ISR_TxBcnErr) { priv->ieee80211->bibsscoordinator = false; priv->stats.txbeaconerr++; } if (rx_intr_status.interrupt_status & ISR_BcnTimerIntr) { cmdpkt_beacontimerinterrupt_819xusb(dev); } } // Other informations in interrupt status we need? DMESG("<---- cmpk_handle_interrupt_status()\n"); } /* cmpk_handle_interrupt_status */ /*----------------------------------------------------------------------------- * Function: cmpk_handle_query_config_rx() * * Overview: The function is responsible for extract the message from * firmware. It will contain dedicated info in * ws-06-0063-rtl8190-command-packet-specification. Please * refer to chapter "Beacon State Element". * * Input: u8 * pmsg - Message Pointer of the command packet. * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/12/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ static void cmpk_handle_query_config_rx( struct net_device *dev, u8* pmsg) { cmpk_query_cfg_t rx_query_cfg; /* */ /* 0. Display received message. */ //cmpk_Display_Message(CMPK_RX_BEACON_STATE_SIZE, pMsg); /* 1. Extract TX feedback info from RFD to temp structure buffer. 
*/ /* It seems that FW use big endian(MIPS) and DRV use little endian in windows OS. So we have to read the content byte by byte or transfer endian type before copy the message copy. */ //rx_query_cfg.Element_ID = pMsg[0]; //rx_query_cfg.Length = pMsg[1]; rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000)>>31; rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5; rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3; rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0; rx_query_cfg.cfg_offset = pmsg[7]; rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) | (pmsg[10] << 8) | (pmsg[11] << 0); rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) | (pmsg[14] << 8) | (pmsg[15] << 0); } /* cmpk_Handle_Query_Config_Rx */ /*----------------------------------------------------------------------------- * Function: cmpk_count_tx_status() * * Overview: Count aggregated tx status from firmwar of one type rx command * packet element id = RX_TX_STATUS. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/12/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ static void cmpk_count_tx_status( struct net_device *dev, cmpk_tx_status_t *pstx_status) { struct r8192_priv *priv = ieee80211_priv(dev); #ifdef ENABLE_PS RT_RF_POWER_STATE rtstate; pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState)); // When RF is off, we should not count the packet for hw/sw synchronize // reason, ie. there may be a duration while sw switch is changed and hw // switch is being changed. 2006.12.04, by shien chang. 
if (rtState == eRfOff) { return; } #endif priv->stats.txfeedbackok += pstx_status->txok; priv->stats.txoktotal += pstx_status->txok; priv->stats.txfeedbackfail += pstx_status->txfail; priv->stats.txerrtotal += pstx_status->txfail; priv->stats.txretrycount += pstx_status->txretry; priv->stats.txfeedbackretry += pstx_status->txretry; //pAdapter->TxStats.NumTxOkBytesTotal += psTx_FB->pkt_length; //pAdapter->TxStats.NumTxErrBytesTotal += psTx_FB->pkt_length; //pAdapter->MgntInfo.LinkDetectInfo.NumTxOkInPeriod++; priv->stats.txmulticast += pstx_status->txmcok; priv->stats.txbroadcast += pstx_status->txbcok; priv->stats.txunicast += pstx_status->txucok; priv->stats.txerrmulticast += pstx_status->txmcfail; priv->stats.txerrbroadcast += pstx_status->txbcfail; priv->stats.txerrunicast += pstx_status->txucfail; priv->stats.txbytesmulticast += pstx_status->txmclength; priv->stats.txbytesbroadcast += pstx_status->txbclength; priv->stats.txbytesunicast += pstx_status->txuclength; priv->stats.last_packet_rate = pstx_status->rate; } /* cmpk_CountTxStatus */ /*----------------------------------------------------------------------------- * Function: cmpk_handle_tx_status() * * Overview: Firmware add a new tx feedback status to reduce rx command * packet buffer operation load. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/12/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ static void cmpk_handle_tx_status( struct net_device *dev, u8* pmsg) { cmpk_tx_status_t rx_tx_sts; /* */ memcpy((void*)&rx_tx_sts, (void*)pmsg, sizeof(cmpk_tx_status_t)); /* 2. Use tx feedback info to count TX statistics. 
*/ cmpk_count_tx_status(dev, &rx_tx_sts); } /* cmpk_Handle_Tx_Status */ /*----------------------------------------------------------------------------- * Function: cmpk_handle_tx_rate_history() * * Overview: Firmware add a new tx rate history * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/12/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ static void cmpk_handle_tx_rate_history( struct net_device *dev, u8* pmsg) { cmpk_tx_rahis_t *ptxrate; // RT_RF_POWER_STATE rtState; u8 i, j; u16 length = sizeof(cmpk_tx_rahis_t); u32 *ptemp; struct r8192_priv *priv = ieee80211_priv(dev); #ifdef ENABLE_PS pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState)); // When RF is off, we should not count the packet for hw/sw synchronize // reason, ie. there may be a duration while sw switch is changed and hw // switch is being changed. 2006.12.04, by shien chang. if (rtState == eRfOff) { return; } #endif ptemp = (u32 *)pmsg; // // Do endian transfer to word alignment(16 bits) for windows system. // You must do different endian transfer for linux and MAC OS // for (i = 0; i < (length/4); i++) { u16 temp1, temp2; temp1 = ptemp[i]&0x0000FFFF; temp2 = ptemp[i]>>16; ptemp[i] = (temp1<<16)|temp2; } ptxrate = (cmpk_tx_rahis_t *)pmsg; if (ptxrate == NULL ) { return; } for (i = 0; i < 16; i++) { // Collect CCK rate packet num if (i < 4) priv->stats.txrate.cck[i] += ptxrate->cck[i]; // Collect OFDM rate packet num if (i< 8) priv->stats.txrate.ofdm[i] += ptxrate->ofdm[i]; for (j = 0; j < 4; j++) priv->stats.txrate.ht_mcs[j][i] += ptxrate->ht_mcs[j][i]; } } /* cmpk_Handle_Tx_Rate_History */ /*----------------------------------------------------------------------------- * Function: cmpk_message_handle_rx() * * Overview: In the function, we will capture different RX command packet * info. 
Every RX command packet element has different message * length and meaning in content. We only support three type of RX * command packet now. Please refer to document * ws-06-0063-rtl8190-command-packet-specification. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/06/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ extern u32 cmpk_message_handle_rx( struct net_device *dev, struct ieee80211_rx_stats *pstats) { // u32 debug_level = DBG_LOUD; struct r8192_priv *priv = ieee80211_priv(dev); int total_length; u8 cmd_length, exe_cnt = 0; u8 element_id; u8 *pcmd_buff; /* 0. Check inpt arguments. If is is a command queue message or pointer is null. */ if (/*(prfd->queue_id != CMPK_RX_QUEUE_ID) || */(pstats== NULL)) { /* Print error message. */ /*RT_TRACE(COMP_SEND, DebugLevel, ("\n\r[CMPK]-->Err queue id or pointer"));*/ return 0; /* This is not a command packet. */ } /* 1. Read received command packet message length from RFD. */ total_length = pstats->Length; /* 2. Read virtual address from RFD. */ pcmd_buff = pstats->virtual_address; /* 3. Read command pakcet element id and length. */ element_id = pcmd_buff[0]; /*RT_TRACE(COMP_SEND, DebugLevel, ("\n\r[CMPK]-->element ID=%d Len=%d", element_id, total_length));*/ /* 4. Check every received command packet conent according to different element type. Because FW may aggregate RX command packet to minimize transmit time between DRV and FW.*/ // Add a counter to prevent to locked in the loop too long while (total_length > 0 || exe_cnt++ >100) { /* 2007/01/17 MH We support aggregation of different cmd in the same packet. 
*/ element_id = pcmd_buff[0]; switch(element_id) { case RX_TX_FEEDBACK: cmpk_handle_tx_feedback (dev, pcmd_buff); cmd_length = CMPK_RX_TX_FB_SIZE; break; case RX_INTERRUPT_STATUS: cmpk_handle_interrupt_status(dev, pcmd_buff); cmd_length = sizeof(cmpk_intr_sta_t); break; case BOTH_QUERY_CONFIG: cmpk_handle_query_config_rx(dev, pcmd_buff); cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE; break; case RX_TX_STATUS: cmpk_handle_tx_status(dev, pcmd_buff); cmd_length = CMPK_RX_TX_STS_SIZE; break; case RX_TX_PER_PKT_FEEDBACK: // You must at lease add a switch case element here, // Otherwise, we will jump to default case. //DbgPrint("CCX Test\r\n"); cmd_length = CMPK_RX_TX_FB_SIZE; break; case RX_TX_RATE_HISTORY: //DbgPrint(" rx tx rate history\r\n"); cmpk_handle_tx_rate_history(dev, pcmd_buff); cmd_length = CMPK_TX_RAHIS_SIZE; break; default: RT_TRACE(COMP_ERR, "---->cmpk_message_handle_rx():unknow CMD Element\n"); return 1; /* This is a command packet. */ } // 2007/01/22 MH Display received rx command packet info. //cmpk_Display_Message(cmd_length, pcmd_buff); // 2007/01/22 MH Add to display tx statistic. //cmpk_DisplayTxStatistic(pAdapter); /* 2007/03/09 MH Collect sidderent cmd element pkt num. */ priv->stats.rxcmdpkt[element_id]++; total_length -= cmd_length; pcmd_buff += cmd_length; } /* while (total_length > 0) */ return 1; /* This is a command packet. */ } /* CMPK_Message_Handle_Rx */
gpl-2.0
knone1/Shamu
drivers/isdn/hisax/l3dss1.c
9553
78840
/* $Id: l3dss1.c,v 2.32.2.3 2004/01/13 14:31:25 keil Exp $ * * EURO/DSS1 D-channel protocol * * German 1TR6 D-channel protocol * * Author Karsten Keil * based on the teles driver from Jan den Ouden * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * For changes and modifications please read * Documentation/isdn/HiSax.cert * * Thanks to Jan den Ouden * Fritz Elfert * */ #include "hisax.h" #include "isdnl3.h" #include "l3dss1.h" #include <linux/ctype.h> #include <linux/slab.h> extern char *HiSax_getrev(const char *revision); static const char *dss1_revision = "$Revision: 2.32.2.3 $"; #define EXT_BEARER_CAPS 1 #define MsgHead(ptr, cref, mty) \ *ptr++ = 0x8; \ if (cref == -1) { \ *ptr++ = 0x0; \ } else { \ *ptr++ = 0x1; \ *ptr++ = cref^0x80; \ } \ *ptr++ = mty /**********************************************/ /* get a new invoke id for remote operations. 
*/ /* Only a return value != 0 is valid */ /**********************************************/ static unsigned char new_invoke_id(struct PStack *p) { unsigned char retval; int i; i = 32; /* maximum search depth */ retval = p->prot.dss1.last_invoke_id + 1; /* try new id */ while ((i) && (p->prot.dss1.invoke_used[retval >> 3] == 0xFF)) { p->prot.dss1.last_invoke_id = (retval & 0xF8) + 8; i--; } if (i) { while (p->prot.dss1.invoke_used[retval >> 3] & (1 << (retval & 7))) retval++; } else retval = 0; p->prot.dss1.last_invoke_id = retval; p->prot.dss1.invoke_used[retval >> 3] |= (1 << (retval & 7)); return (retval); } /* new_invoke_id */ /*************************/ /* free a used invoke id */ /*************************/ static void free_invoke_id(struct PStack *p, unsigned char id) { if (!id) return; /* 0 = invalid value */ p->prot.dss1.invoke_used[id >> 3] &= ~(1 << (id & 7)); } /* free_invoke_id */ /**********************************************************/ /* create a new l3 process and fill in dss1 specific data */ /**********************************************************/ static struct l3_process *dss1_new_l3_process(struct PStack *st, int cr) { struct l3_process *proc; if (!(proc = new_l3_process(st, cr))) return (NULL); proc->prot.dss1.invoke_id = 0; proc->prot.dss1.remote_operation = 0; proc->prot.dss1.uus1_data[0] = '\0'; return (proc); } /* dss1_new_l3_process */ /************************************************/ /* free a l3 process and all dss1 specific data */ /************************************************/ static void dss1_release_l3_process(struct l3_process *p) { free_invoke_id(p->st, p->prot.dss1.invoke_id); release_l3_process(p); } /* dss1_release_l3_process */ /********************************************************/ /* search a process with invoke id id and dummy callref */ /********************************************************/ static struct l3_process * l3dss1_search_dummy_proc(struct PStack *st, int id) { struct l3_process *pc = 
st->l3.proc; /* start of processes */ if (!id) return (NULL); while (pc) { if ((pc->callref == -1) && (pc->prot.dss1.invoke_id == id)) return (pc); pc = pc->next; } return (NULL); } /* l3dss1_search_dummy_proc */ /*******************************************************************/ /* called when a facility message with a dummy callref is received */ /* and a return result is delivered. id specifies the invoke id. */ /*******************************************************************/ static void l3dss1_dummy_return_result(struct PStack *st, int id, u_char *p, u_char nlen) { isdn_ctrl ic; struct IsdnCardState *cs; struct l3_process *pc = NULL; if ((pc = l3dss1_search_dummy_proc(st, id))) { L3DelTimer(&pc->timer); /* remove timer */ cs = pc->st->l1.hardware; ic.driver = cs->myid; ic.command = ISDN_STAT_PROT; ic.arg = DSS1_STAT_INVOKE_RES; ic.parm.dss1_io.hl_id = pc->prot.dss1.invoke_id; ic.parm.dss1_io.ll_id = pc->prot.dss1.ll_id; ic.parm.dss1_io.proc = pc->prot.dss1.proc; ic.parm.dss1_io.timeout = 0; ic.parm.dss1_io.datalen = nlen; ic.parm.dss1_io.data = p; free_invoke_id(pc->st, pc->prot.dss1.invoke_id); pc->prot.dss1.invoke_id = 0; /* reset id */ cs->iif.statcallb(&ic); dss1_release_l3_process(pc); } else l3_debug(st, "dummy return result id=0x%x result len=%d", id, nlen); } /* l3dss1_dummy_return_result */ /*******************************************************************/ /* called when a facility message with a dummy callref is received */ /* and a return error is delivered. id specifies the invoke id. 
*/ /*******************************************************************/ static void l3dss1_dummy_error_return(struct PStack *st, int id, ulong error) { isdn_ctrl ic; struct IsdnCardState *cs; struct l3_process *pc = NULL; if ((pc = l3dss1_search_dummy_proc(st, id))) { L3DelTimer(&pc->timer); /* remove timer */ cs = pc->st->l1.hardware; ic.driver = cs->myid; ic.command = ISDN_STAT_PROT; ic.arg = DSS1_STAT_INVOKE_ERR; ic.parm.dss1_io.hl_id = pc->prot.dss1.invoke_id; ic.parm.dss1_io.ll_id = pc->prot.dss1.ll_id; ic.parm.dss1_io.proc = pc->prot.dss1.proc; ic.parm.dss1_io.timeout = error; ic.parm.dss1_io.datalen = 0; ic.parm.dss1_io.data = NULL; free_invoke_id(pc->st, pc->prot.dss1.invoke_id); pc->prot.dss1.invoke_id = 0; /* reset id */ cs->iif.statcallb(&ic); dss1_release_l3_process(pc); } else l3_debug(st, "dummy return error id=0x%x error=0x%lx", id, error); } /* l3dss1_error_return */ /*******************************************************************/ /* called when a facility message with a dummy callref is received */ /* and a invoke is delivered. id specifies the invoke id. */ /*******************************************************************/ static void l3dss1_dummy_invoke(struct PStack *st, int cr, int id, int ident, u_char *p, u_char nlen) { isdn_ctrl ic; struct IsdnCardState *cs; l3_debug(st, "dummy invoke %s id=0x%x ident=0x%x datalen=%d", (cr == -1) ? 
"local" : "broadcast", id, ident, nlen); if (cr >= -1) return; /* ignore local data */ cs = st->l1.hardware; ic.driver = cs->myid; ic.command = ISDN_STAT_PROT; ic.arg = DSS1_STAT_INVOKE_BRD; ic.parm.dss1_io.hl_id = id; ic.parm.dss1_io.ll_id = 0; ic.parm.dss1_io.proc = ident; ic.parm.dss1_io.timeout = 0; ic.parm.dss1_io.datalen = nlen; ic.parm.dss1_io.data = p; cs->iif.statcallb(&ic); } /* l3dss1_dummy_invoke */ static void l3dss1_parse_facility(struct PStack *st, struct l3_process *pc, int cr, u_char *p) { int qd_len = 0; unsigned char nlen = 0, ilen, cp_tag; int ident, id; ulong err_ret; if (pc) st = pc->st; /* valid Stack */ else if ((!st) || (cr >= 0)) return; /* neither pc nor st specified */ p++; qd_len = *p++; if (qd_len == 0) { l3_debug(st, "qd_len == 0"); return; } if ((*p & 0x1F) != 0x11) { /* Service discriminator, supplementary service */ l3_debug(st, "supplementary service != 0x11"); return; } while (qd_len > 0 && !(*p & 0x80)) { /* extension ? */ p++; qd_len--; } if (qd_len < 2) { l3_debug(st, "qd_len < 2"); return; } p++; qd_len--; if ((*p & 0xE0) != 0xA0) { /* class and form */ l3_debug(st, "class and form != 0xA0"); return; } cp_tag = *p & 0x1F; /* remember tag value */ p++; qd_len--; if (qd_len < 1) { l3_debug(st, "qd_len < 1"); return; } if (*p & 0x80) { /* length format indefinite or limited */ nlen = *p++ & 0x7F; /* number of len bytes or indefinite */ if ((qd_len-- < ((!nlen) ? 
3 : (1 + nlen))) || (nlen > 1)) { l3_debug(st, "length format error or not implemented"); return; } if (nlen == 1) { nlen = *p++; /* complete length */ qd_len--; } else { qd_len -= 2; /* trailing null bytes */ if ((*(p + qd_len)) || (*(p + qd_len + 1))) { l3_debug(st, "length format indefinite error"); return; } nlen = qd_len; } } else { nlen = *p++; qd_len--; } if (qd_len < nlen) { l3_debug(st, "qd_len < nlen"); return; } qd_len -= nlen; if (nlen < 2) { l3_debug(st, "nlen < 2"); return; } if (*p != 0x02) { /* invoke identifier tag */ l3_debug(st, "invoke identifier tag !=0x02"); return; } p++; nlen--; if (*p & 0x80) { /* length format */ l3_debug(st, "invoke id length format 2"); return; } ilen = *p++; nlen--; if (ilen > nlen || ilen == 0) { l3_debug(st, "ilen > nlen || ilen == 0"); return; } nlen -= ilen; id = 0; while (ilen > 0) { id = (id << 8) | (*p++ & 0xFF); /* invoke identifier */ ilen--; } switch (cp_tag) { /* component tag */ case 1: /* invoke */ if (nlen < 2) { l3_debug(st, "nlen < 2 22"); return; } if (*p != 0x02) { /* operation value */ l3_debug(st, "operation value !=0x02"); return; } p++; nlen--; ilen = *p++; nlen--; if (ilen > nlen || ilen == 0) { l3_debug(st, "ilen > nlen || ilen == 0 22"); return; } nlen -= ilen; ident = 0; while (ilen > 0) { ident = (ident << 8) | (*p++ & 0xFF); ilen--; } if (!pc) { l3dss1_dummy_invoke(st, cr, id, ident, p, nlen); return; } #ifdef CONFIG_DE_AOC { #define FOO1(s, a, b) \ while (nlen > 1) { \ int ilen = p[1]; \ if (nlen < ilen + 2) { \ l3_debug(st, "FOO1 nlen < ilen+2"); \ return; \ } \ nlen -= ilen + 2; \ if ((*p & 0xFF) == (a)) { \ int nlen = ilen; \ p += 2; \ b; \ } else { \ p += ilen + 2; \ } \ } switch (ident) { case 0x22: /* during */ FOO1("1A", 0x30, FOO1("1C", 0xA1, FOO1("1D", 0x30, FOO1("1E", 0x02, ( { ident = 0; nlen = (nlen) ? 
nlen : 0; /* Make gcc happy */ while (ilen > 0) { ident = (ident << 8) | *p++; ilen--; } if (ident > pc->para.chargeinfo) { pc->para.chargeinfo = ident; st->l3.l3l4(st, CC_CHARGE | INDICATION, pc); } if (st->l3.debug & L3_DEB_CHARGE) { if (*(p + 2) == 0) { l3_debug(st, "charging info during %d", pc->para.chargeinfo); } else { l3_debug(st, "charging info final %d", pc->para.chargeinfo); } } } ))))) break; case 0x24: /* final */ FOO1("2A", 0x30, FOO1("2B", 0x30, FOO1("2C", 0xA1, FOO1("2D", 0x30, FOO1("2E", 0x02, ( { ident = 0; nlen = (nlen) ? nlen : 0; /* Make gcc happy */ while (ilen > 0) { ident = (ident << 8) | *p++; ilen--; } if (ident > pc->para.chargeinfo) { pc->para.chargeinfo = ident; st->l3.l3l4(st, CC_CHARGE | INDICATION, pc); } if (st->l3.debug & L3_DEB_CHARGE) { l3_debug(st, "charging info final %d", pc->para.chargeinfo); } } )))))) break; default: l3_debug(st, "invoke break invalid ident %02x", ident); break; } #undef FOO1 } #else /* not CONFIG_DE_AOC */ l3_debug(st, "invoke break"); #endif /* not CONFIG_DE_AOC */ break; case 2: /* return result */ /* if no process available handle separately */ if (!pc) { if (cr == -1) l3dss1_dummy_return_result(st, id, p, nlen); return; } if ((pc->prot.dss1.invoke_id) && (pc->prot.dss1.invoke_id == id)) { /* Diversion successful */ free_invoke_id(st, pc->prot.dss1.invoke_id); pc->prot.dss1.remote_result = 0; /* success */ pc->prot.dss1.invoke_id = 0; pc->redir_result = pc->prot.dss1.remote_result; st->l3.l3l4(st, CC_REDIR | INDICATION, pc); } /* Diversion successful */ else l3_debug(st, "return error unknown identifier"); break; case 3: /* return error */ err_ret = 0; if (nlen < 2) { l3_debug(st, "return error nlen < 2"); return; } if (*p != 0x02) { /* result tag */ l3_debug(st, "invoke error tag !=0x02"); return; } p++; nlen--; if (*p > 4) { /* length format */ l3_debug(st, "invoke return errlen > 4 "); return; } ilen = *p++; nlen--; if (ilen > nlen || ilen == 0) { l3_debug(st, "error return ilen > nlen || ilen == 
0"); return; } nlen -= ilen; while (ilen > 0) { err_ret = (err_ret << 8) | (*p++ & 0xFF); /* error value */ ilen--; } /* if no process available handle separately */ if (!pc) { if (cr == -1) l3dss1_dummy_error_return(st, id, err_ret); return; } if ((pc->prot.dss1.invoke_id) && (pc->prot.dss1.invoke_id == id)) { /* Deflection error */ free_invoke_id(st, pc->prot.dss1.invoke_id); pc->prot.dss1.remote_result = err_ret; /* result */ pc->prot.dss1.invoke_id = 0; pc->redir_result = pc->prot.dss1.remote_result; st->l3.l3l4(st, CC_REDIR | INDICATION, pc); } /* Deflection error */ else l3_debug(st, "return result unknown identifier"); break; default: l3_debug(st, "facility default break tag=0x%02x", cp_tag); break; } } static void l3dss1_message(struct l3_process *pc, u_char mt) { struct sk_buff *skb; u_char *p; if (!(skb = l3_alloc_skb(4))) return; p = skb_put(skb, 4); MsgHead(p, pc->callref, mt); l3_msg(pc->st, DL_DATA | REQUEST, skb); } static void l3dss1_message_cause(struct l3_process *pc, u_char mt, u_char cause) { struct sk_buff *skb; u_char tmp[16]; u_char *p = tmp; int l; MsgHead(p, pc->callref, mt); *p++ = IE_CAUSE; *p++ = 0x2; *p++ = 0x80; *p++ = cause | 0x80; l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); } static void l3dss1_status_send(struct l3_process *pc, u_char pr, void *arg) { u_char tmp[16]; u_char *p = tmp; int l; struct sk_buff *skb; MsgHead(p, pc->callref, MT_STATUS); *p++ = IE_CAUSE; *p++ = 0x2; *p++ = 0x80; *p++ = pc->para.cause | 0x80; *p++ = IE_CALL_STATE; *p++ = 0x1; *p++ = pc->state & 0x3f; l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); } static void l3dss1_msg_without_setup(struct l3_process *pc, u_char pr, void *arg) { /* This routine is called if here was no SETUP made (checks in dss1up and in * l3dss1_setup) and a RELEASE_COMPLETE have to be sent with an error code * MT_STATUS_ENQUIRE in 
the NULL state is handled too */ u_char tmp[16]; u_char *p = tmp; int l; struct sk_buff *skb; switch (pc->para.cause) { case 81: /* invalid callreference */ case 88: /* incomp destination */ case 96: /* mandory IE missing */ case 100: /* invalid IE contents */ case 101: /* incompatible Callstate */ MsgHead(p, pc->callref, MT_RELEASE_COMPLETE); *p++ = IE_CAUSE; *p++ = 0x2; *p++ = 0x80; *p++ = pc->para.cause | 0x80; break; default: printk(KERN_ERR "HiSax l3dss1_msg_without_setup wrong cause %d\n", pc->para.cause); return; } l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); dss1_release_l3_process(pc); } static int ie_ALERTING[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, IE_HLC, IE_USER_USER, -1}; static int ie_CALL_PROCEEDING[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_HLC, -1}; static int ie_CONNECT[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_DATE, IE_SIGNAL, IE_CONNECT_PN, IE_CONNECT_SUB, IE_LLC, IE_HLC, IE_USER_USER, -1}; static int ie_CONNECT_ACKNOWLEDGE[] = {IE_CHANNEL_ID, IE_DISPLAY, IE_SIGNAL, -1}; static int ie_DISCONNECT[] = {IE_CAUSE | IE_MANDATORY, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, IE_USER_USER, -1}; static int ie_INFORMATION[] = {IE_COMPLETE, IE_DISPLAY, IE_KEYPAD, IE_SIGNAL, IE_CALLED_PN, -1}; static int ie_NOTIFY[] = {IE_BEARER, IE_NOTIFY | IE_MANDATORY, IE_DISPLAY, -1}; static int ie_PROGRESS[] = {IE_BEARER, IE_CAUSE, IE_FACILITY, IE_PROGRESS | IE_MANDATORY, IE_DISPLAY, IE_HLC, IE_USER_USER, -1}; static int ie_RELEASE[] = {IE_CAUSE | IE_MANDATORY_1, IE_FACILITY, IE_DISPLAY, IE_SIGNAL, IE_USER_USER, -1}; /* a RELEASE_COMPLETE with errors don't require special actions static int ie_RELEASE_COMPLETE[] = {IE_CAUSE | IE_MANDATORY_1, IE_DISPLAY, IE_SIGNAL, IE_USER_USER, -1}; */ static int ie_RESUME_ACKNOWLEDGE[] = {IE_CHANNEL_ID | 
IE_MANDATORY, IE_FACILITY, IE_DISPLAY, -1}; static int ie_RESUME_REJECT[] = {IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1}; static int ie_SETUP[] = {IE_COMPLETE, IE_BEARER | IE_MANDATORY, IE_CHANNEL_ID | IE_MANDATORY, IE_FACILITY, IE_PROGRESS, IE_NET_FAC, IE_DISPLAY, IE_KEYPAD, IE_SIGNAL, IE_CALLING_PN, IE_CALLING_SUB, IE_CALLED_PN, IE_CALLED_SUB, IE_REDIR_NR, IE_LLC, IE_HLC, IE_USER_USER, -1}; static int ie_SETUP_ACKNOWLEDGE[] = {IE_CHANNEL_ID | IE_MANDATORY, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, -1}; static int ie_STATUS[] = {IE_CAUSE | IE_MANDATORY, IE_CALL_STATE | IE_MANDATORY, IE_DISPLAY, -1}; static int ie_STATUS_ENQUIRY[] = {IE_DISPLAY, -1}; static int ie_SUSPEND_ACKNOWLEDGE[] = {IE_DISPLAY, IE_FACILITY, -1}; static int ie_SUSPEND_REJECT[] = {IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1}; /* not used * static int ie_CONGESTION_CONTROL[] = {IE_CONGESTION | IE_MANDATORY, * IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1}; * static int ie_USER_INFORMATION[] = {IE_MORE_DATA, IE_USER_USER | IE_MANDATORY, -1}; * static int ie_RESTART[] = {IE_CHANNEL_ID, IE_DISPLAY, IE_RESTART_IND | * IE_MANDATORY, -1}; */ static int ie_FACILITY[] = {IE_FACILITY | IE_MANDATORY, IE_DISPLAY, -1}; static int comp_required[] = {1, 2, 3, 5, 6, 7, 9, 10, 11, 14, 15, -1}; static int l3_valid_states[] = {0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 15, 17, 19, 25, -1}; struct ie_len { int ie; int len; }; static struct ie_len max_ie_len[] = { {IE_SEGMENT, 4}, {IE_BEARER, 12}, {IE_CAUSE, 32}, {IE_CALL_ID, 10}, {IE_CALL_STATE, 3}, {IE_CHANNEL_ID, 34}, {IE_FACILITY, 255}, {IE_PROGRESS, 4}, {IE_NET_FAC, 255}, {IE_NOTIFY, 3}, {IE_DISPLAY, 82}, {IE_DATE, 8}, {IE_KEYPAD, 34}, {IE_SIGNAL, 3}, {IE_INFORATE, 6}, {IE_E2E_TDELAY, 11}, {IE_TDELAY_SEL, 5}, {IE_PACK_BINPARA, 3}, {IE_PACK_WINSIZE, 4}, {IE_PACK_SIZE, 4}, {IE_CUG, 7}, {IE_REV_CHARGE, 3}, {IE_CALLING_PN, 24}, {IE_CALLING_SUB, 23}, {IE_CALLED_PN, 24}, {IE_CALLED_SUB, 23}, {IE_REDIR_NR, 255}, {IE_TRANS_SEL, 255}, {IE_RESTART_IND, 3}, {IE_LLC, 18}, {IE_HLC, 
5}, {IE_USER_USER, 131}, {-1, 0}, }; static int getmax_ie_len(u_char ie) { int i = 0; while (max_ie_len[i].ie != -1) { if (max_ie_len[i].ie == ie) return (max_ie_len[i].len); i++; } return (255); } static int ie_in_set(struct l3_process *pc, u_char ie, int *checklist) { int ret = 1; while (*checklist != -1) { if ((*checklist & 0xff) == ie) { if (ie & 0x80) return (-ret); else return (ret); } ret++; checklist++; } return (0); } static int check_infoelements(struct l3_process *pc, struct sk_buff *skb, int *checklist) { int *cl = checklist; u_char mt; u_char *p, ie; int l, newpos, oldpos; int err_seq = 0, err_len = 0, err_compr = 0, err_ureg = 0; u_char codeset = 0; u_char old_codeset = 0; u_char codelock = 1; p = skb->data; /* skip cr */ p++; l = (*p++) & 0xf; p += l; mt = *p++; oldpos = 0; while ((p - skb->data) < skb->len) { if ((*p & 0xf0) == 0x90) { /* shift codeset */ old_codeset = codeset; codeset = *p & 7; if (*p & 0x08) codelock = 0; else codelock = 1; if (pc->debug & L3_DEB_CHECK) l3_debug(pc->st, "check IE shift%scodeset %d->%d", codelock ? 
" locking " : " ", old_codeset, codeset); p++; continue; } if (!codeset) { /* only codeset 0 */ if ((newpos = ie_in_set(pc, *p, cl))) { if (newpos > 0) { if (newpos < oldpos) err_seq++; else oldpos = newpos; } } else { if (ie_in_set(pc, *p, comp_required)) err_compr++; else err_ureg++; } } ie = *p++; if (ie & 0x80) { l = 1; } else { l = *p++; p += l; l += 2; } if (!codeset && (l > getmax_ie_len(ie))) err_len++; if (!codelock) { if (pc->debug & L3_DEB_CHECK) l3_debug(pc->st, "check IE shift back codeset %d->%d", codeset, old_codeset); codeset = old_codeset; codelock = 1; } } if (err_compr | err_ureg | err_len | err_seq) { if (pc->debug & L3_DEB_CHECK) l3_debug(pc->st, "check IE MT(%x) %d/%d/%d/%d", mt, err_compr, err_ureg, err_len, err_seq); if (err_compr) return (ERR_IE_COMPREHENSION); if (err_ureg) return (ERR_IE_UNRECOGNIZED); if (err_len) return (ERR_IE_LENGTH); if (err_seq) return (ERR_IE_SEQUENCE); } return (0); } /* verify if a message type exists and contain no IE error */ static int l3dss1_check_messagetype_validity(struct l3_process *pc, int mt, void *arg) { switch (mt) { case MT_ALERTING: case MT_CALL_PROCEEDING: case MT_CONNECT: case MT_CONNECT_ACKNOWLEDGE: case MT_DISCONNECT: case MT_INFORMATION: case MT_FACILITY: case MT_NOTIFY: case MT_PROGRESS: case MT_RELEASE: case MT_RELEASE_COMPLETE: case MT_SETUP: case MT_SETUP_ACKNOWLEDGE: case MT_RESUME_ACKNOWLEDGE: case MT_RESUME_REJECT: case MT_SUSPEND_ACKNOWLEDGE: case MT_SUSPEND_REJECT: case MT_USER_INFORMATION: case MT_RESTART: case MT_RESTART_ACKNOWLEDGE: case MT_CONGESTION_CONTROL: case MT_STATUS: case MT_STATUS_ENQUIRY: if (pc->debug & L3_DEB_CHECK) l3_debug(pc->st, "l3dss1_check_messagetype_validity mt(%x) OK", mt); break; case MT_RESUME: /* RESUME only in user->net */ case MT_SUSPEND: /* SUSPEND only in user->net */ default: if (pc->debug & (L3_DEB_CHECK | L3_DEB_WARN)) l3_debug(pc->st, "l3dss1_check_messagetype_validity mt(%x) fail", mt); pc->para.cause = 97; l3dss1_status_send(pc, 0, NULL); return 
(1); } return (0); } static void l3dss1_std_ie_err(struct l3_process *pc, int ret) { if (pc->debug & L3_DEB_CHECK) l3_debug(pc->st, "check_infoelements ret %d", ret); switch (ret) { case 0: break; case ERR_IE_COMPREHENSION: pc->para.cause = 96; l3dss1_status_send(pc, 0, NULL); break; case ERR_IE_UNRECOGNIZED: pc->para.cause = 99; l3dss1_status_send(pc, 0, NULL); break; case ERR_IE_LENGTH: pc->para.cause = 100; l3dss1_status_send(pc, 0, NULL); break; case ERR_IE_SEQUENCE: default: break; } } static int l3dss1_get_channel_id(struct l3_process *pc, struct sk_buff *skb) { u_char *p; p = skb->data; if ((p = findie(p, skb->len, IE_CHANNEL_ID, 0))) { p++; if (*p != 1) { /* len for BRI = 1 */ if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "wrong chid len %d", *p); return (-2); } p++; if (*p & 0x60) { /* only base rate interface */ if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "wrong chid %x", *p); return (-3); } return (*p & 0x3); } else return (-1); } static int l3dss1_get_cause(struct l3_process *pc, struct sk_buff *skb) { u_char l, i = 0; u_char *p; p = skb->data; pc->para.cause = 31; pc->para.loc = 0; if ((p = findie(p, skb->len, IE_CAUSE, 0))) { p++; l = *p++; if (l > 30) return (1); if (l) { pc->para.loc = *p++; l--; } else { return (2); } if (l && !(pc->para.loc & 0x80)) { l--; p++; /* skip recommendation */ } if (l) { pc->para.cause = *p++; l--; if (!(pc->para.cause & 0x80)) return (3); } else return (4); while (l && (i < 6)) { pc->para.diag[i++] = *p++; l--; } } else return (-1); return (0); } static void l3dss1_msg_with_uus(struct l3_process *pc, u_char cmd) { struct sk_buff *skb; u_char tmp[16 + 40]; u_char *p = tmp; int l; MsgHead(p, pc->callref, cmd); if (pc->prot.dss1.uus1_data[0]) { *p++ = IE_USER_USER; /* UUS info element */ *p++ = strlen(pc->prot.dss1.uus1_data) + 1; *p++ = 0x04; /* IA5 chars */ strcpy(p, pc->prot.dss1.uus1_data); p += strlen(pc->prot.dss1.uus1_data); pc->prot.dss1.uus1_data[0] = '\0'; } l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; 
	memcpy(skb_put(skb, l), tmp, l);
	l3_msg(pc->st, DL_DATA | REQUEST, skb);
}				/* l3dss1_msg_with_uus */

/* Send RELEASE (with optional pending UUS data), enter state 19 and
 * arm T308 for the first release attempt. */
static void
l3dss1_release_req(struct l3_process *pc, u_char pr, void *arg)
{
	StopAllL3Timer(pc);
	newl3state(pc, 19);
	if (!pc->prot.dss1.uus1_data[0])
		l3dss1_message(pc, MT_RELEASE);
	else
		l3dss1_msg_with_uus(pc, MT_RELEASE);
	L3AddTimer(&pc->timer, T308, CC_T308_1);
}

/* Handle an incoming RELEASE COMPLETE: extract the cause (tolerating a
 * missing or malformed cause IE), confirm the release to layer 4 and
 * destroy the l3 process. */
static void
l3dss1_release_cmpl(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	int ret;

	if ((ret = l3dss1_get_cause(pc, skb)) > 0) {
		if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "RELCMPL get_cause ret(%d)", ret);
	} else if (ret < 0)
		pc->para.cause = NO_CAUSE;	/* cause IE absent */
	StopAllL3Timer(pc);
	newl3state(pc, 0);
	pc->st->l3.l3l4(pc->st, CC_RELEASE | CONFIRM, pc);
	dss1_release_l3_process(pc);
}

#ifdef EXT_BEARER_CAPS

/* Encode async V.110/X.30 rate-adaption parameters (3 octets) from the
 * si2 service-indicator bits into *p; returns pointer past the encoded
 * bytes. Bit layout of si2: 32=7 data bits, 16=2 stop bits, 8=even
 * parity, low 3 bits select the user rate. */
static u_char *
EncodeASyncParams(u_char *p, u_char si2)
{				// 7c 06 88  90 21 42 00 bb

	p[0] = 0;
	p[1] = 0x40;		// Intermediate rate: 16 kbit/s jj 2000.02.19
	p[2] = 0x80;
	if (si2 & 32)		// 7 data bits
		p[2] += 16;
	else			// 8 data bits
		p[2] += 24;

	if (si2 & 16)		// 2 stop bits
		p[2] += 96;
	else			// 1 stop bit
		p[2] += 32;

	if (si2 & 8)		// even parity
		p[2] += 2;
	else			// no parity
		p[2] += 3;

	switch (si2 & 0x07) {
	case 0:
		p[0] = 66;	// 1200 bit/s
		break;
	case 1:
		p[0] = 88;	// 1200/75 bit/s
		break;
	case 2:
		p[0] = 87;	// 75/1200 bit/s
		break;
	case 3:
		p[0] = 67;	// 2400 bit/s
		break;
	case 4:
		p[0] = 69;	// 4800 bit/s
		break;
	case 5:
		p[0] = 72;	// 9600 bit/s
		break;
	case 6:
		p[0] = 73;	// 14400 bit/s
		break;
	case 7:
		p[0] = 75;	// 19200 bit/s
		break;
	}
	return p + 3;
}

/* Map the sync rate code si2 (0..9, 15) to its user-rate octet offset
 * added to the base value ai; unknown codes return ai unchanged. */
static u_char
EncodeSyncParams(u_char si2, u_char ai)
{

	switch (si2) {
	case 0:
		return ai + 2;	// 1200 bit/s
	case 1:
		return ai + 24;	// 1200/75 bit/s
	case 2:
		return ai + 23;	// 75/1200 bit/s
	case 3:
		return ai + 3;	// 2400 bit/s
	case 4:
		return ai + 5;	// 4800 bit/s
	case 5:
		return ai + 8;	// 9600 bit/s
	case 6:
		return ai + 9;	// 14400 bit/s
	case 7:
		return ai + 11;	// 19200 bit/s
	case 8:
		return ai + 14;	// 48000 bit/s
	case 9:
		return ai + 15;	// 56000 bit/s
	case 15:
		return ai + 40;	// negotiate bit/s
	default:
		break;
	}
	return ai;
}

/* Inverse of EncodeASyncParams: decode the user rate from p[5] and the
 * data/stop/parity bits from p[7] back into the si2 base value. */
static u_char
DecodeASyncParams(u_char si2, u_char *p)
{
	u_char info;

	switch (p[5]) {
	case 66:	// 1200 bit/s
		break;	// si2 don't change
	case 88:	// 1200/75 bit/s
		si2 += 1;
		break;
	case 87:	// 75/1200 bit/s
		si2 += 2;
		break;
	case 67:	// 2400 bit/s
		si2 += 3;
		break;
	case 69:	// 4800 bit/s
		si2 += 4;
		break;
	case 72:	// 9600 bit/s
		si2 += 5;
		break;
	case 73:	// 14400 bit/s
		si2 += 6;
		break;
	case 75:	// 19200 bit/s
		si2 += 7;
		break;
	}

	info = p[7] & 0x7f;
	if ((info & 16) && (!(info & 8)))	// 7 data bits
		si2 += 32;	// else 8 data bits
	if ((info & 96) == 96)	// 2 stop bits
		si2 += 16;	// else 1 stop bit
	if ((info & 2) && (!(info & 1)))	// even parity
		si2 += 8;	// else no parity

	return si2;
}

/* Inverse of EncodeSyncParams: decode the user-rate octet (low 7 bits
 * of info) back into the si2 base value. */
static u_char
DecodeSyncParams(u_char si2, u_char info)
{
	info &= 0x7f;
	switch (info) {
	case 40:	// bit/s negotiation failed  ai := 165 not 175!
		return si2 + 15;
	case 15:	// 56000 bit/s failed, ai := 0 not 169 !
		return si2 + 9;
	case 14:	// 48000 bit/s
		return si2 + 8;
	case 11:	// 19200 bit/s
		return si2 + 7;
	case 9:		// 14400 bit/s
		return si2 + 6;
	case 8:		// 9600 bit/s
		return si2 + 5;
	case 5:		// 4800 bit/s
		return si2 + 4;
	case 3:		// 2400 bit/s
		return si2 + 3;
	case 23:	// 75/1200 bit/s
		return si2 + 2;
	case 24:	// 1200/75 bit/s
		return si2 + 1;
	default:	// 1200 bit/s
		return si2;
	}
}

/* Derive the extended si2 service indicator from the LLC IE (0x7c) of
 * an incoming SETUP, distinguishing V.110/X.30 sync (base 160), async
 * (base 192) and V.120 (base 176) rate adaption; 0 if not present. */
static u_char
DecodeSI2(struct sk_buff *skb)
{
	u_char *p;	//, *pend=skb->data + skb->len;

	if ((p = findie(skb->data, skb->len, 0x7c, 0))) {
		switch (p[4] & 0x0f) {
		case 0x01:
			if (p[1] == 0x04)	// sync. Bitratenadaption
				return DecodeSyncParams(160, p[5]);	// V.110/X.30
			else if (p[1] == 0x06)	// async. Bitratenadaption
				return DecodeASyncParams(192, p);	// V.110/X.30
			break;
		case 0x08:	// if (p[5] == 0x02) // sync.
Bitratenadaption if (p[1] > 3) return DecodeSyncParams(176, p[5]); // V.120 break; } } return 0; } #endif static void l3dss1_setup_req(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb; u_char tmp[128]; u_char *p = tmp; u_char channel = 0; u_char send_keypad; u_char screen = 0x80; u_char *teln; u_char *msn; u_char *sub; u_char *sp; int l; MsgHead(p, pc->callref, MT_SETUP); teln = pc->para.setup.phone; #ifndef CONFIG_HISAX_NO_KEYPAD send_keypad = (strchr(teln, '*') || strchr(teln, '#')) ? 1 : 0; #else send_keypad = 0; #endif #ifndef CONFIG_HISAX_NO_SENDCOMPLETE if (!send_keypad) *p++ = 0xa1; /* complete indicator */ #endif /* * Set Bearer Capability, Map info from 1TR6-convention to EDSS1 */ switch (pc->para.setup.si1) { case 1: /* Telephony */ *p++ = IE_BEARER; *p++ = 0x3; /* Length */ *p++ = 0x90; /* Coding Std. CCITT, 3.1 kHz audio */ *p++ = 0x90; /* Circuit-Mode 64kbps */ *p++ = 0xa3; /* A-Law Audio */ break; case 5: /* Datatransmission 64k, BTX */ case 7: /* Datatransmission 64k */ default: *p++ = IE_BEARER; *p++ = 0x2; /* Length */ *p++ = 0x88; /* Coding Std. CCITT, unrestr. dig. Inform. */ *p++ = 0x90; /* Circuit-Mode 64kbps */ break; } if (send_keypad) { *p++ = IE_KEYPAD; *p++ = strlen(teln); while (*teln) *p++ = (*teln++) & 0x7F; } /* * What about info2? Mapping to High-Layer-Compatibility? */ if ((*teln) && (!send_keypad)) { /* parse number for special things */ if (!isdigit(*teln)) { switch (0x5f & *teln) { case 'C': channel = 0x08; case 'P': channel |= 0x80; teln++; if (*teln == '1') channel |= 0x01; else channel |= 0x02; break; case 'R': screen = 0xA0; break; case 'D': screen = 0x80; break; default: if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "Wrong MSN Code"); break; } teln++; } } if (channel) { *p++ = IE_CHANNEL_ID; *p++ = 1; *p++ = channel; } msn = pc->para.setup.eazmsn; sub = NULL; sp = msn; while (*sp) { if ('.' == *sp) { sub = sp; *sp = 0; } else sp++; } if (*msn) { *p++ = IE_CALLING_PN; *p++ = strlen(msn) + (screen ? 
2 : 1); /* Classify as AnyPref. */ if (screen) { *p++ = 0x01; /* Ext = '0'B, Type = '000'B, Plan = '0001'B. */ *p++ = screen; } else *p++ = 0x81; /* Ext = '1'B, Type = '000'B, Plan = '0001'B. */ while (*msn) *p++ = *msn++ & 0x7f; } if (sub) { *sub++ = '.'; *p++ = IE_CALLING_SUB; *p++ = strlen(sub) + 2; *p++ = 0x80; /* NSAP coded */ *p++ = 0x50; /* local IDI format */ while (*sub) *p++ = *sub++ & 0x7f; } sub = NULL; sp = teln; while (*sp) { if ('.' == *sp) { sub = sp; *sp = 0; } else sp++; } if (!send_keypad) { *p++ = IE_CALLED_PN; *p++ = strlen(teln) + 1; /* Classify as AnyPref. */ *p++ = 0x81; /* Ext = '1'B, Type = '000'B, Plan = '0001'B. */ while (*teln) *p++ = *teln++ & 0x7f; if (sub) { *sub++ = '.'; *p++ = IE_CALLED_SUB; *p++ = strlen(sub) + 2; *p++ = 0x80; /* NSAP coded */ *p++ = 0x50; /* local IDI format */ while (*sub) *p++ = *sub++ & 0x7f; } } #ifdef EXT_BEARER_CAPS if ((pc->para.setup.si2 >= 160) && (pc->para.setup.si2 <= 175)) { // sync. Bitratenadaption, V.110/X.30 *p++ = IE_LLC; *p++ = 0x04; *p++ = 0x88; *p++ = 0x90; *p++ = 0x21; *p++ = EncodeSyncParams(pc->para.setup.si2 - 160, 0x80); } else if ((pc->para.setup.si2 >= 176) && (pc->para.setup.si2 <= 191)) { // sync. Bitratenadaption, V.120 *p++ = IE_LLC; *p++ = 0x05; *p++ = 0x88; *p++ = 0x90; *p++ = 0x28; *p++ = EncodeSyncParams(pc->para.setup.si2 - 176, 0); *p++ = 0x82; } else if (pc->para.setup.si2 >= 192) { // async. Bitratenadaption, V.110/X.30 *p++ = IE_LLC; *p++ = 0x06; *p++ = 0x88; *p++ = 0x90; *p++ = 0x21; p = EncodeASyncParams(p, pc->para.setup.si2 - 192); #ifndef CONFIG_HISAX_NO_LLC } else { switch (pc->para.setup.si1) { case 1: /* Telephony */ *p++ = IE_LLC; *p++ = 0x3; /* Length */ *p++ = 0x90; /* Coding Std. CCITT, 3.1 kHz audio */ *p++ = 0x90; /* Circuit-Mode 64kbps */ *p++ = 0xa3; /* A-Law Audio */ break; case 5: /* Datatransmission 64k, BTX */ case 7: /* Datatransmission 64k */ default: *p++ = IE_LLC; *p++ = 0x2; /* Length */ *p++ = 0x88; /* Coding Std. CCITT, unrestr. dig. Inform. 
*/ *p++ = 0x90; /* Circuit-Mode 64kbps */ break; } #endif } #endif l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); L3DelTimer(&pc->timer); L3AddTimer(&pc->timer, T303, CC_T303); newl3state(pc, 1); l3_msg(pc->st, DL_DATA | REQUEST, skb); } static void l3dss1_call_proc(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int id, ret; if ((id = l3dss1_get_channel_id(pc, skb)) >= 0) { if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup answer with wrong chid %x", id); pc->para.cause = 100; l3dss1_status_send(pc, pr, NULL); return; } pc->para.bchannel = id; } else if (1 == pc->state) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup answer wrong chid (ret %d)", id); if (id == -1) pc->para.cause = 96; else pc->para.cause = 100; l3dss1_status_send(pc, pr, NULL); return; } /* Now we are on none mandatory IEs */ ret = check_infoelements(pc, skb, ie_CALL_PROCEEDING); if (ERR_IE_COMPREHENSION == ret) { l3dss1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); newl3state(pc, 3); L3AddTimer(&pc->timer, T310, CC_T310); if (ret) /* STATUS for none mandatory IE errors after actions are taken */ l3dss1_std_ie_err(pc, ret); pc->st->l3.l3l4(pc->st, CC_PROCEEDING | INDICATION, pc); } static void l3dss1_setup_ack(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int id, ret; if ((id = l3dss1_get_channel_id(pc, skb)) >= 0) { if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup answer with wrong chid %x", id); pc->para.cause = 100; l3dss1_status_send(pc, pr, NULL); return; } pc->para.bchannel = id; } else { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup answer wrong chid (ret %d)", id); if (id == -1) pc->para.cause = 96; else pc->para.cause = 100; l3dss1_status_send(pc, pr, NULL); return; } /* Now we are on none mandatory IEs */ ret = check_infoelements(pc, skb, 
ie_SETUP_ACKNOWLEDGE); if (ERR_IE_COMPREHENSION == ret) { l3dss1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); newl3state(pc, 2); L3AddTimer(&pc->timer, T304, CC_T304); if (ret) /* STATUS for none mandatory IE errors after actions are taken */ l3dss1_std_ie_err(pc, ret); pc->st->l3.l3l4(pc->st, CC_MORE_INFO | INDICATION, pc); } static void l3dss1_disconnect(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; u_char *p; int ret; u_char cause = 0; StopAllL3Timer(pc); if ((ret = l3dss1_get_cause(pc, skb))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "DISC get_cause ret(%d)", ret); if (ret < 0) cause = 96; else if (ret > 0) cause = 100; } if ((p = findie(skb->data, skb->len, IE_FACILITY, 0))) l3dss1_parse_facility(pc->st, pc, pc->callref, p); ret = check_infoelements(pc, skb, ie_DISCONNECT); if (ERR_IE_COMPREHENSION == ret) cause = 96; else if ((!cause) && (ERR_IE_UNRECOGNIZED == ret)) cause = 99; ret = pc->state; newl3state(pc, 12); if (cause) newl3state(pc, 19); if (11 != ret) pc->st->l3.l3l4(pc->st, CC_DISCONNECT | INDICATION, pc); else if (!cause) l3dss1_release_req(pc, pr, NULL); if (cause) { l3dss1_message_cause(pc, MT_RELEASE, cause); L3AddTimer(&pc->timer, T308, CC_T308_1); } } static void l3dss1_connect(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int ret; ret = check_infoelements(pc, skb, ie_CONNECT); if (ERR_IE_COMPREHENSION == ret) { l3dss1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); /* T310 */ newl3state(pc, 10); pc->para.chargeinfo = 0; /* here should inserted COLP handling KKe */ if (ret) l3dss1_std_ie_err(pc, ret); pc->st->l3.l3l4(pc->st, CC_SETUP | CONFIRM, pc); } static void l3dss1_alerting(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int ret; ret = check_infoelements(pc, skb, ie_ALERTING); if (ERR_IE_COMPREHENSION == ret) { l3dss1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); /* T304 */ newl3state(pc, 4); if (ret) l3dss1_std_ie_err(pc, 
ret); pc->st->l3.l3l4(pc->st, CC_ALERTING | INDICATION, pc); } static void l3dss1_setup(struct l3_process *pc, u_char pr, void *arg) { u_char *p; int bcfound = 0; char tmp[80]; struct sk_buff *skb = arg; int id; int err = 0; /* * Bearer Capabilities */ p = skb->data; /* only the first occurrence 'll be detected ! */ if ((p = findie(p, skb->len, 0x04, 0))) { if ((p[1] < 2) || (p[1] > 11)) err = 1; else { pc->para.setup.si2 = 0; switch (p[2] & 0x7f) { case 0x00: /* Speech */ case 0x10: /* 3.1 Khz audio */ pc->para.setup.si1 = 1; break; case 0x08: /* Unrestricted digital information */ pc->para.setup.si1 = 7; /* JIM, 05.11.97 I wanna set service indicator 2 */ #ifdef EXT_BEARER_CAPS pc->para.setup.si2 = DecodeSI2(skb); #endif break; case 0x09: /* Restricted digital information */ pc->para.setup.si1 = 2; break; case 0x11: /* Unrestr. digital information with * tones/announcements ( or 7 kHz audio */ pc->para.setup.si1 = 3; break; case 0x18: /* Video */ pc->para.setup.si1 = 4; break; default: err = 2; break; } switch (p[3] & 0x7f) { case 0x40: /* packed mode */ pc->para.setup.si1 = 8; break; case 0x10: /* 64 kbit */ case 0x11: /* 2*64 kbit */ case 0x13: /* 384 kbit */ case 0x15: /* 1536 kbit */ case 0x17: /* 1920 kbit */ pc->para.moderate = p[3] & 0x7f; break; default: err = 3; break; } } if (pc->debug & L3_DEB_SI) l3_debug(pc->st, "SI=%d, AI=%d", pc->para.setup.si1, pc->para.setup.si2); if (err) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup with wrong bearer(l=%d:%x,%x)", p[1], p[2], p[3]); pc->para.cause = 100; l3dss1_msg_without_setup(pc, pr, NULL); return; } } else { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup without bearer capabilities"); /* ETS 300-104 1.3.3 */ pc->para.cause = 96; l3dss1_msg_without_setup(pc, pr, NULL); return; } /* * Channel Identification */ if ((id = l3dss1_get_channel_id(pc, skb)) >= 0) { if ((pc->para.bchannel = id)) { if ((3 == id) && (0x10 == pc->para.moderate)) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup with 
wrong chid %x", id); pc->para.cause = 100; l3dss1_msg_without_setup(pc, pr, NULL); return; } bcfound++; } else { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup without bchannel, call waiting"); bcfound++; } } else { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup with wrong chid ret %d", id); if (id == -1) pc->para.cause = 96; else pc->para.cause = 100; l3dss1_msg_without_setup(pc, pr, NULL); return; } /* Now we are on none mandatory IEs */ err = check_infoelements(pc, skb, ie_SETUP); if (ERR_IE_COMPREHENSION == err) { pc->para.cause = 96; l3dss1_msg_without_setup(pc, pr, NULL); return; } p = skb->data; if ((p = findie(p, skb->len, 0x70, 0))) iecpy(pc->para.setup.eazmsn, p, 1); else pc->para.setup.eazmsn[0] = 0; p = skb->data; if ((p = findie(p, skb->len, 0x71, 0))) { /* Called party subaddress */ if ((p[1] >= 2) && (p[2] == 0x80) && (p[3] == 0x50)) { tmp[0] = '.'; iecpy(&tmp[1], p, 2); strcat(pc->para.setup.eazmsn, tmp); } else if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "wrong called subaddress"); } p = skb->data; if ((p = findie(p, skb->len, 0x6c, 0))) { pc->para.setup.plan = p[2]; if (p[2] & 0x80) { iecpy(pc->para.setup.phone, p, 1); pc->para.setup.screen = 0; } else { iecpy(pc->para.setup.phone, p, 2); pc->para.setup.screen = p[3]; } } else { pc->para.setup.phone[0] = 0; pc->para.setup.plan = 0; pc->para.setup.screen = 0; } p = skb->data; if ((p = findie(p, skb->len, 0x6d, 0))) { /* Calling party subaddress */ if ((p[1] >= 2) && (p[2] == 0x80) && (p[3] == 0x50)) { tmp[0] = '.'; iecpy(&tmp[1], p, 2); strcat(pc->para.setup.phone, tmp); } else if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "wrong calling subaddress"); } newl3state(pc, 6); if (err) /* STATUS for none mandatory IE errors after actions are taken */ l3dss1_std_ie_err(pc, err); pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc); } static void l3dss1_reset(struct l3_process *pc, u_char pr, void *arg) { dss1_release_l3_process(pc); } static void l3dss1_disconnect_req(struct l3_process *pc, 
u_char pr, void *arg)
{
	/* Send DISCONNECT for this call: build a cause IE (default cause 16
	 * unless pc->para.cause overrides it), append any pending UUS1
	 * user-user data, enter state 11 and start T305. */
	struct sk_buff *skb;
	u_char tmp[16 + 40];
	u_char *p = tmp;
	int l;
	u_char cause = 16;	/* default cause, overridden below if set */

	if (pc->para.cause != NO_CAUSE)
		cause = pc->para.cause;

	StopAllL3Timer(pc);

	MsgHead(p, pc->callref, MT_DISCONNECT);

	*p++ = IE_CAUSE;
	*p++ = 0x2;
	*p++ = 0x80;
	*p++ = cause | 0x80;

	if (pc->prot.dss1.uus1_data[0]) {
		*p++ = IE_USER_USER;	/* UUS info element */
		*p++ = strlen(pc->prot.dss1.uus1_data) + 1;
		*p++ = 0x04;	/* IA5 chars */
		strcpy(p, pc->prot.dss1.uus1_data);
		p += strlen(pc->prot.dss1.uus1_data);
		pc->prot.dss1.uus1_data[0] = '\0';	/* consume pending data */
	}

	l = p - tmp;
	if (!(skb = l3_alloc_skb(l)))
		return;
	memcpy(skb_put(skb, l), tmp, l);
	newl3state(pc, 11);
	l3_msg(pc->st, DL_DATA | REQUEST, skb);
	L3AddTimer(&pc->timer, T305, CC_T305);
}

/* Answer an incoming call: send CONNECT, enter state 8 and start T313.
 * A call without a B-channel (call waiting) cannot be connected here and
 * is turned into a disconnect instead. */
static void l3dss1_setup_rsp(struct l3_process *pc, u_char pr, void *arg)
{
	if (!pc->para.bchannel) {
		if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "D-chan connect for waiting call");
		l3dss1_disconnect_req(pc, pr, arg);
		return;
	}
	newl3state(pc, 8);
	l3dss1_message(pc, MT_CONNECT);
	L3DelTimer(&pc->timer);
	L3AddTimer(&pc->timer, T313, CC_T313);
}

/* Peer acknowledged our CONNECT: validate optional IEs, enter state 10
 * and report CC_SETUP_COMPL to layer 4. A comprehension error aborts the
 * transition; lesser IE errors are reported but do not block it. */
static void l3dss1_connect_ack(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	int ret;

	ret = check_infoelements(pc, skb, ie_CONNECT_ACKNOWLEDGE);
	if (ERR_IE_COMPREHENSION == ret) {
		l3dss1_std_ie_err(pc, ret);
		return;
	}
	newl3state(pc, 10);
	L3DelTimer(&pc->timer);
	if (ret)
		l3dss1_std_ie_err(pc, ret);
	pc->st->l3.l3l4(pc->st, CC_SETUP_COMPL | INDICATION, pc);
}

/* Reject an incoming call with RELEASE_COMPLETE (default cause 21 unless
 * pc->para.cause overrides it), report CC_RELEASE to layer 4 and fall
 * back to the null state. */
static void l3dss1_reject_req(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb;
	u_char tmp[16];
	u_char *p = tmp;
	int l;
	u_char cause = 21;	/* default cause, overridden below if set */

	if (pc->para.cause != NO_CAUSE)
		cause = pc->para.cause;

	MsgHead(p, pc->callref, MT_RELEASE_COMPLETE);

	*p++ = IE_CAUSE;
	*p++ = 0x2;
	*p++ = 0x80;
	*p++ = cause | 0x80;

	l = p - tmp;
	if (!(skb = l3_alloc_skb(l)))
		return;
	memcpy(skb_put(skb, l), tmp, l);
	l3_msg(pc->st, DL_DATA | REQUEST, skb);
	pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
	newl3state(pc, 0);
dss1_release_l3_process(pc); } static void l3dss1_release(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; u_char *p; int ret, cause = 0; StopAllL3Timer(pc); if ((ret = l3dss1_get_cause(pc, skb)) > 0) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "REL get_cause ret(%d)", ret); } else if (ret < 0) pc->para.cause = NO_CAUSE; if ((p = findie(skb->data, skb->len, IE_FACILITY, 0))) { l3dss1_parse_facility(pc->st, pc, pc->callref, p); } if ((ret < 0) && (pc->state != 11)) cause = 96; else if (ret > 0) cause = 100; ret = check_infoelements(pc, skb, ie_RELEASE); if (ERR_IE_COMPREHENSION == ret) cause = 96; else if ((ERR_IE_UNRECOGNIZED == ret) && (!cause)) cause = 99; if (cause) l3dss1_message_cause(pc, MT_RELEASE_COMPLETE, cause); else l3dss1_message(pc, MT_RELEASE_COMPLETE); pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc); newl3state(pc, 0); dss1_release_l3_process(pc); } static void l3dss1_alert_req(struct l3_process *pc, u_char pr, void *arg) { newl3state(pc, 7); if (!pc->prot.dss1.uus1_data[0]) l3dss1_message(pc, MT_ALERTING); else l3dss1_msg_with_uus(pc, MT_ALERTING); } static void l3dss1_proceed_req(struct l3_process *pc, u_char pr, void *arg) { newl3state(pc, 9); l3dss1_message(pc, MT_CALL_PROCEEDING); pc->st->l3.l3l4(pc->st, CC_PROCEED_SEND | INDICATION, pc); } static void l3dss1_setup_ack_req(struct l3_process *pc, u_char pr, void *arg) { newl3state(pc, 25); L3DelTimer(&pc->timer); L3AddTimer(&pc->timer, T302, CC_T302); l3dss1_message(pc, MT_SETUP_ACKNOWLEDGE); } /********************************************/ /* deliver a incoming display message to HL */ /********************************************/ static void l3dss1_deliver_display(struct l3_process *pc, int pr, u_char *infp) { u_char len; isdn_ctrl ic; struct IsdnCardState *cs; char *p; if (*infp++ != IE_DISPLAY) return; if ((len = *infp++) > 80) return; /* total length <= 82 */ if (!pc->chan) return; p = ic.parm.display; while (len--) *p++ = *infp++; *p = '\0'; ic.command = 
ISDN_STAT_DISPLAY; cs = pc->st->l1.hardware; ic.driver = cs->myid; ic.arg = pc->chan->chan; cs->iif.statcallb(&ic); } /* l3dss1_deliver_display */ static void l3dss1_progress(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int err = 0; u_char *p; if ((p = findie(skb->data, skb->len, IE_PROGRESS, 0))) { if (p[1] != 2) { err = 1; pc->para.cause = 100; } else if (!(p[2] & 0x70)) { switch (p[2]) { case 0x80: case 0x81: case 0x82: case 0x84: case 0x85: case 0x87: case 0x8a: switch (p[3]) { case 0x81: case 0x82: case 0x83: case 0x84: case 0x88: break; default: err = 2; pc->para.cause = 100; break; } break; default: err = 3; pc->para.cause = 100; break; } } } else { pc->para.cause = 96; err = 4; } if (err) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "progress error %d", err); l3dss1_status_send(pc, pr, NULL); return; } /* Now we are on none mandatory IEs */ err = check_infoelements(pc, skb, ie_PROGRESS); if (err) l3dss1_std_ie_err(pc, err); if (ERR_IE_COMPREHENSION != err) pc->st->l3.l3l4(pc->st, CC_PROGRESS | INDICATION, pc); } static void l3dss1_notify(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int err = 0; u_char *p; if ((p = findie(skb->data, skb->len, IE_NOTIFY, 0))) { if (p[1] != 1) { err = 1; pc->para.cause = 100; } else { switch (p[2]) { case 0x80: case 0x81: case 0x82: break; default: pc->para.cause = 100; err = 2; break; } } } else { pc->para.cause = 96; err = 3; } if (err) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "notify error %d", err); l3dss1_status_send(pc, pr, NULL); return; } /* Now we are on none mandatory IEs */ err = check_infoelements(pc, skb, ie_NOTIFY); if (err) l3dss1_std_ie_err(pc, err); if (ERR_IE_COMPREHENSION != err) pc->st->l3.l3l4(pc->st, CC_NOTIFY | INDICATION, pc); } static void l3dss1_status_enq(struct l3_process *pc, u_char pr, void *arg) { int ret; struct sk_buff *skb = arg; ret = check_infoelements(pc, skb, ie_STATUS_ENQUIRY); l3dss1_std_ie_err(pc, ret); pc->para.cause = 
30; /* response to STATUS_ENQUIRY */ l3dss1_status_send(pc, pr, NULL); } static void l3dss1_information(struct l3_process *pc, u_char pr, void *arg) { int ret; struct sk_buff *skb = arg; u_char *p; char tmp[32]; ret = check_infoelements(pc, skb, ie_INFORMATION); if (ret) l3dss1_std_ie_err(pc, ret); if (pc->state == 25) { /* overlap receiving */ L3DelTimer(&pc->timer); p = skb->data; if ((p = findie(p, skb->len, 0x70, 0))) { iecpy(tmp, p, 1); strcat(pc->para.setup.eazmsn, tmp); pc->st->l3.l3l4(pc->st, CC_MORE_INFO | INDICATION, pc); } L3AddTimer(&pc->timer, T302, CC_T302); } } /******************************/ /* handle deflection requests */ /******************************/ static void l3dss1_redir_req(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb; u_char tmp[128]; u_char *p = tmp; u_char *subp; u_char len_phone = 0; u_char len_sub = 0; int l; strcpy(pc->prot.dss1.uus1_data, pc->chan->setup.eazmsn); /* copy uus element if available */ if (!pc->chan->setup.phone[0]) { pc->para.cause = -1; l3dss1_disconnect_req(pc, pr, arg); /* disconnect immediately */ return; } /* only uus */ if (pc->prot.dss1.invoke_id) free_invoke_id(pc->st, pc->prot.dss1.invoke_id); if (!(pc->prot.dss1.invoke_id = new_invoke_id(pc->st))) return; MsgHead(p, pc->callref, MT_FACILITY); for (subp = pc->chan->setup.phone; (*subp) && (*subp != '.'); subp++) len_phone++; /* len of phone number */ if (*subp++ == '.') len_sub = strlen(subp) + 2; /* length including info subaddress element */ *p++ = 0x1c; /* Facility info element */ *p++ = len_phone + len_sub + 2 + 2 + 8 + 3 + 3; /* length of element */ *p++ = 0x91; /* remote operations protocol */ *p++ = 0xa1; /* invoke component */ *p++ = len_phone + len_sub + 2 + 2 + 8 + 3; /* length of data */ *p++ = 0x02; /* invoke id tag, integer */ *p++ = 0x01; /* length */ *p++ = pc->prot.dss1.invoke_id; /* invoke id */ *p++ = 0x02; /* operation value tag, integer */ *p++ = 0x01; /* length */ *p++ = 0x0D; /* Call Deflect */ *p++ = 0x30; /* 
sequence phone number */ *p++ = len_phone + 2 + 2 + 3 + len_sub; /* length */ *p++ = 0x30; /* Deflected to UserNumber */ *p++ = len_phone + 2 + len_sub; /* length */ *p++ = 0x80; /* NumberDigits */ *p++ = len_phone; /* length */ for (l = 0; l < len_phone; l++) *p++ = pc->chan->setup.phone[l]; if (len_sub) { *p++ = 0x04; /* called party subaddress */ *p++ = len_sub - 2; while (*subp) *p++ = *subp++; } *p++ = 0x01; /* screening identifier */ *p++ = 0x01; *p++ = pc->chan->setup.screen; l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); } /* l3dss1_redir_req */ /********************************************/ /* handle deflection request in early state */ /********************************************/ static void l3dss1_redir_req_early(struct l3_process *pc, u_char pr, void *arg) { l3dss1_proceed_req(pc, pr, arg); l3dss1_redir_req(pc, pr, arg); } /* l3dss1_redir_req_early */ /***********************************************/ /* handle special commands for this protocol. */ /* Examples are call independent services like */ /* remote operations with dummy callref. 
*/
/***********************************************/
/*
 * Handle call-independent (dummy callref) commands from the upper layer.
 * DSS1_CMD_INVOKE builds and sends a FACILITY message carrying a remote
 * operations invoke component, optionally tracking it with a timer so a
 * missing answer can be reported; DSS1_CMD_INVOKE_ABORT cancels such a
 * pending invoke. Returns 0 on success, negative on error.
 */
static int l3dss1_cmd_global(struct PStack *st, isdn_ctrl *ic)
{
	u_char id;
	u_char temp[265];
	u_char *p = temp;
	int i, l, proc_len;
	struct sk_buff *skb;
	struct l3_process *pc = NULL;

	switch (ic->arg) {
	case DSS1_CMD_INVOKE:
		if (ic->parm.dss1_io.datalen < 0)
			return (-2);	/* invalid parameter */

		/* count the bytes needed to encode the operation value
		 * (fix: the original loop incremented i instead of proc_len,
		 * never advancing proc_len and looping forever for
		 * proc >= 0x100) */
		for (proc_len = 1, i = ic->parm.dss1_io.proc >> 8; i; proc_len++)
			i = i >> 8;	/* add one byte */
		l = ic->parm.dss1_io.datalen + proc_len + 8;	/* length excluding ie header */
		if (l > 255)
			return (-2);	/* too long */

		if (!(id = new_invoke_id(st)))
			return (0);	/* first get a invoke id -> return if no available */

		i = -1;
		MsgHead(p, i, MT_FACILITY);	/* build message head */
		*p++ = 0x1C;	/* Facility IE */
		*p++ = l;	/* length of ie */
		*p++ = 0x91;	/* remote operations */
		*p++ = 0xA1;	/* invoke */
		*p++ = l - 3;	/* length of invoke */
		*p++ = 0x02;	/* invoke id tag */
		*p++ = 0x01;	/* length is 1 */
		*p++ = id;	/* invoke id */
		*p++ = 0x02;	/* operation */
		*p++ = proc_len;	/* length of operation */

		/* operation value, most significant byte first
		 * (fix: shift by whole bytes — the original shifted by
		 * (i - 1) bits, emitting wrong bytes for proc_len > 1) */
		for (i = proc_len; i; i--)
			*p++ = (ic->parm.dss1_io.proc >> ((i - 1) * 8)) & 0xFF;
		memcpy(p, ic->parm.dss1_io.data, ic->parm.dss1_io.datalen);	/* copy data */
		l = (p - temp) + ic->parm.dss1_io.datalen;	/* total length */

		if (ic->parm.dss1_io.timeout > 0)
			if (!(pc = dss1_new_l3_process(st, -1))) {
				free_invoke_id(st, id);
				return (-2);
			}
		if (pc) {	/* fix: pc stays NULL when no timeout was
				 * requested; dereferencing it unconditionally
				 * was a NULL-pointer dereference */
			pc->prot.dss1.ll_id = ic->parm.dss1_io.ll_id;	/* remember id */
			pc->prot.dss1.proc = ic->parm.dss1_io.proc;	/* and procedure */
		}

		if (!(skb = l3_alloc_skb(l))) {
			free_invoke_id(st, id);
			if (pc)
				dss1_release_l3_process(pc);
			return (-2);
		}
		memcpy(skb_put(skb, l), temp, l);

		if (pc) {
			pc->prot.dss1.invoke_id = id;	/* remember id */
			L3AddTimer(&pc->timer, ic->parm.dss1_io.timeout, CC_TDSS1_IO | REQUEST);
		}

		l3_msg(st, DL_DATA | REQUEST, skb);
		ic->parm.dss1_io.hl_id = id;	/* return id */
		return (0);

	case DSS1_CMD_INVOKE_ABORT:
		if ((pc = l3dss1_search_dummy_proc(st, ic->parm.dss1_io.hl_id))) {
			L3DelTimer(&pc->timer);	/* remove timer
*/ dss1_release_l3_process(pc); return (0); } else { l3_debug(st, "l3dss1_cmd_global abort unknown id"); return (-2); } break; default: l3_debug(st, "l3dss1_cmd_global unknown cmd 0x%lx", ic->arg); return (-1); } /* switch ic-> arg */ return (-1); } /* l3dss1_cmd_global */ static void l3dss1_io_timer(struct l3_process *pc) { isdn_ctrl ic; struct IsdnCardState *cs = pc->st->l1.hardware; L3DelTimer(&pc->timer); /* remove timer */ ic.driver = cs->myid; ic.command = ISDN_STAT_PROT; ic.arg = DSS1_STAT_INVOKE_ERR; ic.parm.dss1_io.hl_id = pc->prot.dss1.invoke_id; ic.parm.dss1_io.ll_id = pc->prot.dss1.ll_id; ic.parm.dss1_io.proc = pc->prot.dss1.proc; ic.parm.dss1_io.timeout = -1; ic.parm.dss1_io.datalen = 0; ic.parm.dss1_io.data = NULL; free_invoke_id(pc->st, pc->prot.dss1.invoke_id); pc->prot.dss1.invoke_id = 0; /* reset id */ cs->iif.statcallb(&ic); dss1_release_l3_process(pc); } /* l3dss1_io_timer */ static void l3dss1_release_ind(struct l3_process *pc, u_char pr, void *arg) { u_char *p; struct sk_buff *skb = arg; int callState = 0; p = skb->data; if ((p = findie(p, skb->len, IE_CALL_STATE, 0))) { p++; if (1 == *p++) callState = *p; } if (callState == 0) { /* ETS 300-104 7.6.1, 8.6.1, 10.6.1... 
and 16.1 * set down layer 3 without sending any message */
		pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
		newl3state(pc, 0);
		dss1_release_l3_process(pc);
	} else {
		/* non-zero call state at the peer: just ignore the message */
		pc->st->l3.l3l4(pc->st, CC_IGNORE | INDICATION, pc);
	}
}

/* No-op handler for state/primitive pairs that require no action. */
static void l3dss1_dummy(struct l3_process *pc, u_char pr, void *arg)
{
}

/* T302 expired (overlap receiving, state 25): clear the call with
 * cause 28 and report a setup error to layer 4. */
static void l3dss1_t302(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	pc->para.loc = 0;
	pc->para.cause = 28;	/* invalid number */
	l3dss1_disconnect_req(pc, pr, NULL);
	pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
}

/* T303 expired (SETUP sent, state 1): retransmit SETUP while N303
 * retries remain, otherwise send RELEASE_COMPLETE (cause 102) and
 * release the layer 3 process. */
static void l3dss1_t303(struct l3_process *pc, u_char pr, void *arg)
{
	if (pc->N303 > 0) {
		pc->N303--;
		L3DelTimer(&pc->timer);
		l3dss1_setup_req(pc, pr, arg);
	} else {
		L3DelTimer(&pc->timer);
		l3dss1_message_cause(pc, MT_RELEASE_COMPLETE, 102);
		pc->st->l3.l3l4(pc->st, CC_NOSETUP_RSP, pc);
		dss1_release_l3_process(pc);
	}
}

/* T304 expired (state 2): clear the call with cause 102 (timer expiry)
 * and report a setup error to layer 4. */
static void l3dss1_t304(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	pc->para.loc = 0;
	pc->para.cause = 102;
	l3dss1_disconnect_req(pc, pr, NULL);
	pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
}

/* T305 expired (DISCONNECT sent, state 11): escalate to RELEASE with
 * the stored (or default 16) cause, enter state 19 and start T308. */
static void l3dss1_t305(struct l3_process *pc, u_char pr, void *arg)
{
	u_char tmp[16];
	u_char *p = tmp;
	int l;
	struct sk_buff *skb;
	u_char cause = 16;

	L3DelTimer(&pc->timer);
	if (pc->para.cause != NO_CAUSE)
		cause = pc->para.cause;

	MsgHead(p, pc->callref, MT_RELEASE);

	*p++ = IE_CAUSE;
	*p++ = 0x2;
	*p++ = 0x80;
	*p++ = cause | 0x80;

	l = p - tmp;
	if (!(skb = l3_alloc_skb(l)))
		return;
	memcpy(skb_put(skb, l), tmp, l);
	newl3state(pc, 19);
	l3_msg(pc->st, DL_DATA | REQUEST, skb);
	L3AddTimer(&pc->timer, T308, CC_T308_1);
}

/* T310 expired (state 3): clear the call with cause 102 and report a
 * setup error to layer 4. */
static void l3dss1_t310(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	pc->para.loc = 0;
	pc->para.cause = 102;
	l3dss1_disconnect_req(pc, pr, NULL);
	pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
}

/* T313 expired (CONNECT sent, state 8): clear the call with cause 102
 * and report a connect error to layer 4. */
static void l3dss1_t313(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	pc->para.loc = 0;
	pc->para.cause = 102;
	l3dss1_disconnect_req(pc, pr, NULL);
pc->st->l3.l3l4(pc->st, CC_CONNECT_ERR, pc); } static void l3dss1_t308_1(struct l3_process *pc, u_char pr, void *arg) { newl3state(pc, 19); L3DelTimer(&pc->timer); l3dss1_message(pc, MT_RELEASE); L3AddTimer(&pc->timer, T308, CC_T308_2); } static void l3dss1_t308_2(struct l3_process *pc, u_char pr, void *arg) { L3DelTimer(&pc->timer); pc->st->l3.l3l4(pc->st, CC_RELEASE_ERR, pc); dss1_release_l3_process(pc); } static void l3dss1_t318(struct l3_process *pc, u_char pr, void *arg) { L3DelTimer(&pc->timer); pc->para.cause = 102; /* Timer expiry */ pc->para.loc = 0; /* local */ pc->st->l3.l3l4(pc->st, CC_RESUME_ERR, pc); newl3state(pc, 19); l3dss1_message(pc, MT_RELEASE); L3AddTimer(&pc->timer, T308, CC_T308_1); } static void l3dss1_t319(struct l3_process *pc, u_char pr, void *arg) { L3DelTimer(&pc->timer); pc->para.cause = 102; /* Timer expiry */ pc->para.loc = 0; /* local */ pc->st->l3.l3l4(pc->st, CC_SUSPEND_ERR, pc); newl3state(pc, 10); } static void l3dss1_restart(struct l3_process *pc, u_char pr, void *arg) { L3DelTimer(&pc->timer); pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc); dss1_release_l3_process(pc); } static void l3dss1_status(struct l3_process *pc, u_char pr, void *arg) { u_char *p; struct sk_buff *skb = arg; int ret; u_char cause = 0, callState = 0; if ((ret = l3dss1_get_cause(pc, skb))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "STATUS get_cause ret(%d)", ret); if (ret < 0) cause = 96; else if (ret > 0) cause = 100; } if ((p = findie(skb->data, skb->len, IE_CALL_STATE, 0))) { p++; if (1 == *p++) { callState = *p; if (!ie_in_set(pc, *p, l3_valid_states)) cause = 100; } else cause = 100; } else cause = 96; if (!cause) { /* no error before */ ret = check_infoelements(pc, skb, ie_STATUS); if (ERR_IE_COMPREHENSION == ret) cause = 96; else if (ERR_IE_UNRECOGNIZED == ret) cause = 99; } if (cause) { u_char tmp; if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "STATUS error(%d/%d)", ret, cause); tmp = pc->para.cause; pc->para.cause = cause; 
l3dss1_status_send(pc, 0, NULL); if (cause == 99) pc->para.cause = tmp; else return; } cause = pc->para.cause; if (((cause & 0x7f) == 111) && (callState == 0)) { /* ETS 300-104 7.6.1, 8.6.1, 10.6.1... * if received MT_STATUS with cause == 111 and call * state == 0, then we must set down layer 3 */ pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc); newl3state(pc, 0); dss1_release_l3_process(pc); } } static void l3dss1_facility(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int ret; ret = check_infoelements(pc, skb, ie_FACILITY); l3dss1_std_ie_err(pc, ret); { u_char *p; if ((p = findie(skb->data, skb->len, IE_FACILITY, 0))) l3dss1_parse_facility(pc->st, pc, pc->callref, p); } } static void l3dss1_suspend_req(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb; u_char tmp[32]; u_char *p = tmp; u_char i, l; u_char *msg = pc->chan->setup.phone; MsgHead(p, pc->callref, MT_SUSPEND); l = *msg++; if (l && (l <= 10)) { /* Max length 10 octets */ *p++ = IE_CALL_ID; *p++ = l; for (i = 0; i < l; i++) *p++ = *msg++; } else if (l) { l3_debug(pc->st, "SUS wrong CALL_ID len %d", l); return; } l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); newl3state(pc, 15); L3AddTimer(&pc->timer, T319, CC_T319); } static void l3dss1_suspend_ack(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int ret; L3DelTimer(&pc->timer); newl3state(pc, 0); pc->para.cause = NO_CAUSE; pc->st->l3.l3l4(pc->st, CC_SUSPEND | CONFIRM, pc); /* We don't handle suspend_ack for IE errors now */ if ((ret = check_infoelements(pc, skb, ie_SUSPEND_ACKNOWLEDGE))) if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "SUSPACK check ie(%d)", ret); dss1_release_l3_process(pc); } static void l3dss1_suspend_rej(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int ret; if ((ret = l3dss1_get_cause(pc, skb))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "SUSP_REJ 
get_cause ret(%d)", ret); if (ret < 0) pc->para.cause = 96; else pc->para.cause = 100; l3dss1_status_send(pc, pr, NULL); return; } ret = check_infoelements(pc, skb, ie_SUSPEND_REJECT); if (ERR_IE_COMPREHENSION == ret) { l3dss1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); pc->st->l3.l3l4(pc->st, CC_SUSPEND_ERR, pc); newl3state(pc, 10); if (ret) /* STATUS for none mandatory IE errors after actions are taken */ l3dss1_std_ie_err(pc, ret); } static void l3dss1_resume_req(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb; u_char tmp[32]; u_char *p = tmp; u_char i, l; u_char *msg = pc->para.setup.phone; MsgHead(p, pc->callref, MT_RESUME); l = *msg++; if (l && (l <= 10)) { /* Max length 10 octets */ *p++ = IE_CALL_ID; *p++ = l; for (i = 0; i < l; i++) *p++ = *msg++; } else if (l) { l3_debug(pc->st, "RES wrong CALL_ID len %d", l); return; } l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); newl3state(pc, 17); L3AddTimer(&pc->timer, T318, CC_T318); } static void l3dss1_resume_ack(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int id, ret; if ((id = l3dss1_get_channel_id(pc, skb)) > 0) { if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "resume ack with wrong chid %x", id); pc->para.cause = 100; l3dss1_status_send(pc, pr, NULL); return; } pc->para.bchannel = id; } else if (1 == pc->state) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "resume ack without chid (ret %d)", id); pc->para.cause = 96; l3dss1_status_send(pc, pr, NULL); return; } ret = check_infoelements(pc, skb, ie_RESUME_ACKNOWLEDGE); if (ERR_IE_COMPREHENSION == ret) { l3dss1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); pc->st->l3.l3l4(pc->st, CC_RESUME | CONFIRM, pc); newl3state(pc, 10); if (ret) /* STATUS for none mandatory IE errors after actions are taken */ l3dss1_std_ie_err(pc, ret); } static void 
l3dss1_resume_rej(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int ret; if ((ret = l3dss1_get_cause(pc, skb))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "RES_REJ get_cause ret(%d)", ret); if (ret < 0) pc->para.cause = 96; else pc->para.cause = 100; l3dss1_status_send(pc, pr, NULL); return; } ret = check_infoelements(pc, skb, ie_RESUME_REJECT); if (ERR_IE_COMPREHENSION == ret) { l3dss1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); pc->st->l3.l3l4(pc->st, CC_RESUME_ERR, pc); newl3state(pc, 0); if (ret) /* STATUS for none mandatory IE errors after actions are taken */ l3dss1_std_ie_err(pc, ret); dss1_release_l3_process(pc); } static void l3dss1_global_restart(struct l3_process *pc, u_char pr, void *arg) { u_char tmp[32]; u_char *p; u_char ri, ch = 0, chan = 0; int l; struct sk_buff *skb = arg; struct l3_process *up; newl3state(pc, 2); L3DelTimer(&pc->timer); p = skb->data; if ((p = findie(p, skb->len, IE_RESTART_IND, 0))) { ri = p[2]; l3_debug(pc->st, "Restart %x", ri); } else { l3_debug(pc->st, "Restart without restart IE"); ri = 0x86; } p = skb->data; if ((p = findie(p, skb->len, IE_CHANNEL_ID, 0))) { chan = p[2] & 3; ch = p[2]; if (pc->st->l3.debug) l3_debug(pc->st, "Restart for channel %d", chan); } newl3state(pc, 2); up = pc->st->l3.proc; while (up) { if ((ri & 7) == 7) up->st->lli.l4l3(up->st, CC_RESTART | REQUEST, up); else if (up->para.bchannel == chan) up->st->lli.l4l3(up->st, CC_RESTART | REQUEST, up); up = up->next; } p = tmp; MsgHead(p, pc->callref, MT_RESTART_ACKNOWLEDGE); if (chan) { *p++ = IE_CHANNEL_ID; *p++ = 1; *p++ = ch | 0x80; } *p++ = 0x79; /* RESTART Ind */ *p++ = 1; *p++ = ri; l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); newl3state(pc, 0); l3_msg(pc->st, DL_DATA | REQUEST, skb); } static void l3dss1_dl_reset(struct l3_process *pc, u_char pr, void *arg) { pc->para.cause = 0x29; /* Temporary failure */ pc->para.loc = 0; l3dss1_disconnect_req(pc, pr, NULL); 
	pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
}

/* Data link released: clear the call locally with cause 0x1b, report
 * CC_RELEASE to layer 4 and drop the layer 3 process without sending
 * anything on the (now dead) link. */
static void l3dss1_dl_release(struct l3_process *pc, u_char pr, void *arg)
{
	newl3state(pc, 0);
	pc->para.cause = 0x1b;	/* Destination out of order */
	pc->para.loc = 0;
	pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
	release_l3_process(pc);
}

/* Data link lost in state 10: start T309 and request re-establishment
 * of the data link from layer 2. */
static void l3dss1_dl_reestablish(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	L3AddTimer(&pc->timer, T309, CC_T309);
	l3_msg(pc->st, DL_ESTABLISH | REQUEST, NULL);
}

/* Data link re-established (DL_ESTABLISH CONFIRM in state 10): stop the
 * timer and send STATUS with cause 0x1F so the peer can audit the call. */
static void l3dss1_dl_reest_status(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	pc->para.cause = 0x1F;	/* normal, unspecified */
	l3dss1_status_send(pc, 0, NULL);
}

/* *INDENT-OFF* */
/* Dispatch table for primitives coming down from layer 4 and for local
 * timer expiries: { bitmap of states the entry applies in, primitive,
 * handler }. First matching entry wins (see dss1down). */
static struct stateentry downstatelist[] =
{
	{SBIT(0), CC_SETUP | REQUEST, l3dss1_setup_req},
	{SBIT(0), CC_RESUME | REQUEST, l3dss1_resume_req},
	{SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(6) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(25),
	 CC_DISCONNECT | REQUEST, l3dss1_disconnect_req},
	{SBIT(12), CC_RELEASE | REQUEST, l3dss1_release_req},
	{ALL_STATES, CC_RESTART | REQUEST, l3dss1_restart},
	{SBIT(6) | SBIT(25), CC_IGNORE | REQUEST, l3dss1_reset},
	{SBIT(6) | SBIT(25), CC_REJECT | REQUEST, l3dss1_reject_req},
	{SBIT(6) | SBIT(25), CC_PROCEED_SEND | REQUEST, l3dss1_proceed_req},
	{SBIT(6), CC_MORE_INFO | REQUEST, l3dss1_setup_ack_req},
	{SBIT(25), CC_MORE_INFO | REQUEST, l3dss1_dummy},
	{SBIT(6) | SBIT(9) | SBIT(25), CC_ALERTING | REQUEST, l3dss1_alert_req},
	{SBIT(6) | SBIT(7) | SBIT(9) | SBIT(25), CC_SETUP | RESPONSE, l3dss1_setup_rsp},
	{SBIT(10), CC_SUSPEND | REQUEST, l3dss1_suspend_req},
	{SBIT(7) | SBIT(9) | SBIT(25), CC_REDIR | REQUEST, l3dss1_redir_req},
	{SBIT(6), CC_REDIR | REQUEST, l3dss1_redir_req_early},
	{SBIT(9) | SBIT(25), CC_DISCONNECT | REQUEST, l3dss1_disconnect_req},
	{SBIT(25), CC_T302, l3dss1_t302},
	{SBIT(1), CC_T303, l3dss1_t303},
	{SBIT(2), CC_T304, l3dss1_t304},
	{SBIT(3), CC_T310, l3dss1_t310},
	{SBIT(8), CC_T313, l3dss1_t313},
	{SBIT(11), CC_T305, l3dss1_t305},
{SBIT(15), CC_T319, l3dss1_t319}, {SBIT(17), CC_T318, l3dss1_t318}, {SBIT(19), CC_T308_1, l3dss1_t308_1}, {SBIT(19), CC_T308_2, l3dss1_t308_2}, {SBIT(10), CC_T309, l3dss1_dl_release}, }; static struct stateentry datastatelist[] = { {ALL_STATES, MT_STATUS_ENQUIRY, l3dss1_status_enq}, {ALL_STATES, MT_FACILITY, l3dss1_facility}, {SBIT(19), MT_STATUS, l3dss1_release_ind}, {ALL_STATES, MT_STATUS, l3dss1_status}, {SBIT(0), MT_SETUP, l3dss1_setup}, {SBIT(6) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25), MT_SETUP, l3dss1_dummy}, {SBIT(1) | SBIT(2), MT_CALL_PROCEEDING, l3dss1_call_proc}, {SBIT(1), MT_SETUP_ACKNOWLEDGE, l3dss1_setup_ack}, {SBIT(2) | SBIT(3), MT_ALERTING, l3dss1_alerting}, {SBIT(2) | SBIT(3), MT_PROGRESS, l3dss1_progress}, {SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25), MT_INFORMATION, l3dss1_information}, {SBIT(10) | SBIT(11) | SBIT(15), MT_NOTIFY, l3dss1_notify}, {SBIT(0) | SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25), MT_RELEASE_COMPLETE, l3dss1_release_cmpl}, {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(25), MT_RELEASE, l3dss1_release}, {SBIT(19), MT_RELEASE, l3dss1_release_ind}, {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(15) | SBIT(17) | SBIT(25), MT_DISCONNECT, l3dss1_disconnect}, {SBIT(19), MT_DISCONNECT, l3dss1_dummy}, {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4), MT_CONNECT, l3dss1_connect}, {SBIT(8), MT_CONNECT_ACKNOWLEDGE, l3dss1_connect_ack}, {SBIT(15), MT_SUSPEND_ACKNOWLEDGE, l3dss1_suspend_ack}, {SBIT(15), MT_SUSPEND_REJECT, l3dss1_suspend_rej}, {SBIT(17), MT_RESUME_ACKNOWLEDGE, l3dss1_resume_ack}, {SBIT(17), MT_RESUME_REJECT, l3dss1_resume_rej}, }; static struct stateentry 
globalmes_list[] = { {ALL_STATES, MT_STATUS, l3dss1_status}, {SBIT(0), MT_RESTART, l3dss1_global_restart}, /* {SBIT(1), MT_RESTART_ACKNOWLEDGE, l3dss1_restart_ack}, */ }; static struct stateentry manstatelist[] = { {SBIT(2), DL_ESTABLISH | INDICATION, l3dss1_dl_reset}, {SBIT(10), DL_ESTABLISH | CONFIRM, l3dss1_dl_reest_status}, {SBIT(10), DL_RELEASE | INDICATION, l3dss1_dl_reestablish}, {ALL_STATES, DL_RELEASE | INDICATION, l3dss1_dl_release}, }; /* *INDENT-ON* */ static void global_handler(struct PStack *st, int mt, struct sk_buff *skb) { u_char tmp[16]; u_char *p = tmp; int l; int i; struct l3_process *proc = st->l3.global; proc->callref = skb->data[2]; /* cr flag */ for (i = 0; i < ARRAY_SIZE(globalmes_list); i++) if ((mt == globalmes_list[i].primitive) && ((1 << proc->state) & globalmes_list[i].state)) break; if (i == ARRAY_SIZE(globalmes_list)) { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "dss1 global state %d mt %x unhandled", proc->state, mt); } MsgHead(p, proc->callref, MT_STATUS); *p++ = IE_CAUSE; *p++ = 0x2; *p++ = 0x80; *p++ = 81 | 0x80; /* invalid cr */ *p++ = 0x14; /* CallState */ *p++ = 0x1; *p++ = proc->state & 0x3f; l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(proc->st, DL_DATA | REQUEST, skb); } else { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "dss1 global %d mt %x", proc->state, mt); } globalmes_list[i].rout(proc, mt, skb); } } static void dss1up(struct PStack *st, int pr, void *arg) { int i, mt, cr, callState; char *ptr; u_char *p; struct sk_buff *skb = arg; struct l3_process *proc; switch (pr) { case (DL_DATA | INDICATION): case (DL_UNIT_DATA | INDICATION): break; case (DL_ESTABLISH | CONFIRM): case (DL_ESTABLISH | INDICATION): case (DL_RELEASE | INDICATION): case (DL_RELEASE | CONFIRM): l3_msg(st, pr, arg); return; break; default: printk(KERN_ERR "HiSax dss1up unknown pr=%04x\n", pr); return; } if (skb->len < 3) { l3_debug(st, "dss1up frame too short(%d)", skb->len); dev_kfree_skb(skb); 
return; } if (skb->data[0] != PROTO_DIS_EURO) { if (st->l3.debug & L3_DEB_PROTERR) { l3_debug(st, "dss1up%sunexpected discriminator %x message len %d", (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", skb->data[0], skb->len); } dev_kfree_skb(skb); return; } cr = getcallref(skb->data); if (skb->len < ((skb->data[1] & 0x0f) + 3)) { l3_debug(st, "dss1up frame too short(%d)", skb->len); dev_kfree_skb(skb); return; } mt = skb->data[skb->data[1] + 2]; if (st->l3.debug & L3_DEB_STATE) l3_debug(st, "dss1up cr %d", cr); if (cr == -2) { /* wrong Callref */ if (st->l3.debug & L3_DEB_WARN) l3_debug(st, "dss1up wrong Callref"); dev_kfree_skb(skb); return; } else if (cr == -1) { /* Dummy Callref */ if (mt == MT_FACILITY) if ((p = findie(skb->data, skb->len, IE_FACILITY, 0))) { l3dss1_parse_facility(st, NULL, (pr == (DL_DATA | INDICATION)) ? -1 : -2, p); dev_kfree_skb(skb); return; } if (st->l3.debug & L3_DEB_WARN) l3_debug(st, "dss1up dummy Callref (no facility msg or ie)"); dev_kfree_skb(skb); return; } else if ((((skb->data[1] & 0x0f) == 1) && (0 == (cr & 0x7f))) || (((skb->data[1] & 0x0f) == 2) && (0 == (cr & 0x7fff)))) { /* Global CallRef */ if (st->l3.debug & L3_DEB_STATE) l3_debug(st, "dss1up Global CallRef"); global_handler(st, mt, skb); dev_kfree_skb(skb); return; } else if (!(proc = getl3proc(st, cr))) { /* No transaction process exist, that means no call with * this callreference is active */ if (mt == MT_SETUP) { /* Setup creates a new transaction process */ if (skb->data[2] & 0x80) { /* Setup with wrong CREF flag */ if (st->l3.debug & L3_DEB_STATE) l3_debug(st, "dss1up wrong CRef flag"); dev_kfree_skb(skb); return; } if (!(proc = dss1_new_l3_process(st, cr))) { /* May be to answer with RELEASE_COMPLETE and * CAUSE 0x2f "Resource unavailable", but this * need a new_l3_process too ... 
arghh */ dev_kfree_skb(skb); return; } } else if (mt == MT_STATUS) { if ((ptr = findie(skb->data, skb->len, IE_CAUSE, 0)) != NULL) { ptr++; if (*ptr++ == 2) ptr++; } callState = 0; if ((ptr = findie(skb->data, skb->len, IE_CALL_STATE, 0)) != NULL) { ptr++; if (*ptr++ == 2) ptr++; callState = *ptr; } /* ETS 300-104 part 2.4.1 * if setup has not been made and a message type * MT_STATUS is received with call state == 0, * we must send nothing */ if (callState != 0) { /* ETS 300-104 part 2.4.2 * if setup has not been made and a message type * MT_STATUS is received with call state != 0, * we must send MT_RELEASE_COMPLETE cause 101 */ if ((proc = dss1_new_l3_process(st, cr))) { proc->para.cause = 101; l3dss1_msg_without_setup(proc, 0, NULL); } } dev_kfree_skb(skb); return; } else if (mt == MT_RELEASE_COMPLETE) { dev_kfree_skb(skb); return; } else { /* ETS 300-104 part 2 * if setup has not been made and a message type * (except MT_SETUP and RELEASE_COMPLETE) is received, * we must send MT_RELEASE_COMPLETE cause 81 */ dev_kfree_skb(skb); if ((proc = dss1_new_l3_process(st, cr))) { proc->para.cause = 81; l3dss1_msg_without_setup(proc, 0, NULL); } return; } } if (l3dss1_check_messagetype_validity(proc, mt, skb)) { dev_kfree_skb(skb); return; } if ((p = findie(skb->data, skb->len, IE_DISPLAY, 0)) != NULL) l3dss1_deliver_display(proc, pr, p); /* Display IE included */ for (i = 0; i < ARRAY_SIZE(datastatelist); i++) if ((mt == datastatelist[i].primitive) && ((1 << proc->state) & datastatelist[i].state)) break; if (i == ARRAY_SIZE(datastatelist)) { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "dss1up%sstate %d mt %#x unhandled", (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", proc->state, mt); } if ((MT_RELEASE_COMPLETE != mt) && (MT_RELEASE != mt)) { proc->para.cause = 101; l3dss1_status_send(proc, pr, skb); } } else { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "dss1up%sstate %d mt %x", (pr == (DL_DATA | INDICATION)) ? 
" " : "(broadcast) ", proc->state, mt); } datastatelist[i].rout(proc, pr, skb); } dev_kfree_skb(skb); return; } static void dss1down(struct PStack *st, int pr, void *arg) { int i, cr; struct l3_process *proc; struct Channel *chan; if ((DL_ESTABLISH | REQUEST) == pr) { l3_msg(st, pr, NULL); return; } else if (((CC_SETUP | REQUEST) == pr) || ((CC_RESUME | REQUEST) == pr)) { chan = arg; cr = newcallref(); cr |= 0x80; if ((proc = dss1_new_l3_process(st, cr))) { proc->chan = chan; chan->proc = proc; memcpy(&proc->para.setup, &chan->setup, sizeof(setup_parm)); proc->callref = cr; } } else { proc = arg; } if (!proc) { printk(KERN_ERR "HiSax dss1down without proc pr=%04x\n", pr); return; } if (pr == (CC_TDSS1_IO | REQUEST)) { l3dss1_io_timer(proc); /* timer expires */ return; } for (i = 0; i < ARRAY_SIZE(downstatelist); i++) if ((pr == downstatelist[i].primitive) && ((1 << proc->state) & downstatelist[i].state)) break; if (i == ARRAY_SIZE(downstatelist)) { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "dss1down state %d prim %#x unhandled", proc->state, pr); } } else { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "dss1down state %d prim %#x", proc->state, pr); } downstatelist[i].rout(proc, pr, arg); } } static void dss1man(struct PStack *st, int pr, void *arg) { int i; struct l3_process *proc = arg; if (!proc) { printk(KERN_ERR "HiSax dss1man without proc pr=%04x\n", pr); return; } for (i = 0; i < ARRAY_SIZE(manstatelist); i++) if ((pr == manstatelist[i].primitive) && ((1 << proc->state) & manstatelist[i].state)) break; if (i == ARRAY_SIZE(manstatelist)) { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "cr %d dss1man state %d prim %#x unhandled", proc->callref & 0x7f, proc->state, pr); } } else { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "cr %d dss1man state %d prim %#x", proc->callref & 0x7f, proc->state, pr); } manstatelist[i].rout(proc, pr, arg); } } void setstack_dss1(struct PStack *st) { char tmp[64]; int i; st->lli.l4l3 = dss1down; st->lli.l4l3_proto = 
l3dss1_cmd_global; st->l2.l2l3 = dss1up; st->l3.l3ml3 = dss1man; st->l3.N303 = 1; st->prot.dss1.last_invoke_id = 0; st->prot.dss1.invoke_used[0] = 1; /* Bit 0 must always be set to 1 */ i = 1; while (i < 32) st->prot.dss1.invoke_used[i++] = 0; if (!(st->l3.global = kmalloc(sizeof(struct l3_process), GFP_ATOMIC))) { printk(KERN_ERR "HiSax can't get memory for dss1 global CR\n"); } else { st->l3.global->state = 0; st->l3.global->callref = 0; st->l3.global->next = NULL; st->l3.global->debug = L3_DEB_WARN; st->l3.global->st = st; st->l3.global->N303 = 1; st->l3.global->prot.dss1.invoke_id = 0; L3InitTimer(st->l3.global, &st->l3.global->timer); } strcpy(tmp, dss1_revision); printk(KERN_INFO "HiSax: DSS1 Rev. %s\n", HiSax_getrev(tmp)); }
gpl-2.0
jfdsmabalot/kernel_sony_msm8974
arch/arm/mach-bcmring/csp/chipc/chipcHw_init.c
9553
9482
/***************************************************************************** * Copyright 2003 - 2008 Broadcom Corporation. All rights reserved. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available at * http://www.broadcom.com/licenses/GPLv2.php (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a * license other than the GPL, without Broadcom's express prior written * consent. *****************************************************************************/ /****************************************************************************/ /** * @file chipcHw_init.c * * @brief Low level CHIPC PLL configuration functions * * @note * * These routines provide basic PLL controlling functionality only. */ /****************************************************************************/ /* ---- Include Files ---------------------------------------------------- */ #include <csp/errno.h> #include <csp/stdint.h> #include <csp/module.h> #include <mach/csp/chipcHw_def.h> #include <mach/csp/chipcHw_inline.h> #include <csp/reg.h> #include <csp/delay.h> /* ---- Private Constants and Types --------------------------------------- */ /* Calculation for NDIV_i to obtain VCO frequency ----------------------------------------------- Freq_vco = Freq_ref * (P2 / P1) * (PLL_NDIV_i + PLL_NDIV_f) for Freq_vco = VCO_FREQ_MHz Freq_ref = chipcHw_XTAL_FREQ_Hz PLL_P1 = PLL_P2 = 1 and PLL_NDIV_f = 0 We get: PLL_NDIV_i = Freq_vco / Freq_ref = VCO_FREQ_MHz / chipcHw_XTAL_FREQ_Hz Calculation for PLL MDIV to obtain frequency Freq_x for channel x ----------------------------------------------------------------- Freq_x = chipcHw_XTAL_FREQ_Hz * PLL_NDIV_i / PLL_MDIV_x = VCO_FREQ_MHz / PLL_MDIV_x PLL_MDIV_x = VCO_FREQ_MHz / Freq_x */ /* ---- 
Private Variables ------------------------------------------------- */ /****************************************************************************/ /** * @brief Initializes the PLL2 * * This function initializes the PLL2 * */ /****************************************************************************/ void chipcHw_pll2Enable(uint32_t vcoFreqHz) { uint32_t pllPreDivider2 = 0; { REG_LOCAL_IRQ_SAVE; pChipcHw->PLLConfig2 = chipcHw_REG_PLL_CONFIG_D_RESET | chipcHw_REG_PLL_CONFIG_A_RESET; pllPreDivider2 = chipcHw_REG_PLL_PREDIVIDER_POWER_DOWN | chipcHw_REG_PLL_PREDIVIDER_NDIV_MODE_INTEGER | (chipcHw_REG_PLL_PREDIVIDER_NDIV_i(vcoFreqHz) << chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT) | (chipcHw_REG_PLL_PREDIVIDER_P1 << chipcHw_REG_PLL_PREDIVIDER_P1_SHIFT) | (chipcHw_REG_PLL_PREDIVIDER_P2 << chipcHw_REG_PLL_PREDIVIDER_P2_SHIFT); /* Enable CHIPC registers to control the PLL */ pChipcHw->PLLStatus |= chipcHw_REG_PLL_STATUS_CONTROL_ENABLE; /* Set pre divider to get desired VCO frequency */ pChipcHw->PLLPreDivider2 = pllPreDivider2; /* Set NDIV Frac */ pChipcHw->PLLDivider2 = chipcHw_REG_PLL_DIVIDER_NDIV_f; /* This has to be removed once the default values are fixed for PLL2. */ pChipcHw->PLLControl12 = 0x38000700; pChipcHw->PLLControl22 = 0x00000015; /* Reset PLL2 */ if (vcoFreqHz > chipcHw_REG_PLL_CONFIG_VCO_SPLIT_FREQ) { pChipcHw->PLLConfig2 = chipcHw_REG_PLL_CONFIG_D_RESET | chipcHw_REG_PLL_CONFIG_A_RESET | chipcHw_REG_PLL_CONFIG_VCO_1601_3200 | chipcHw_REG_PLL_CONFIG_POWER_DOWN; } else { pChipcHw->PLLConfig2 = chipcHw_REG_PLL_CONFIG_D_RESET | chipcHw_REG_PLL_CONFIG_A_RESET | chipcHw_REG_PLL_CONFIG_VCO_800_1600 | chipcHw_REG_PLL_CONFIG_POWER_DOWN; } REG_LOCAL_IRQ_RESTORE; } /* Insert certain amount of delay before deasserting ARESET. 
*/ udelay(1); { REG_LOCAL_IRQ_SAVE; /* Remove analog reset and Power on the PLL */ pChipcHw->PLLConfig2 &= ~(chipcHw_REG_PLL_CONFIG_A_RESET | chipcHw_REG_PLL_CONFIG_POWER_DOWN); REG_LOCAL_IRQ_RESTORE; } /* Wait until PLL is locked */ while (!(pChipcHw->PLLStatus2 & chipcHw_REG_PLL_STATUS_LOCKED)) ; { REG_LOCAL_IRQ_SAVE; /* Remove digital reset */ pChipcHw->PLLConfig2 &= ~chipcHw_REG_PLL_CONFIG_D_RESET; REG_LOCAL_IRQ_RESTORE; } } EXPORT_SYMBOL(chipcHw_pll2Enable); /****************************************************************************/ /** * @brief Initializes the PLL1 * * This function initializes the PLL1 * */ /****************************************************************************/ void chipcHw_pll1Enable(uint32_t vcoFreqHz, chipcHw_SPREAD_SPECTRUM_e ssSupport) { uint32_t pllPreDivider = 0; { REG_LOCAL_IRQ_SAVE; pChipcHw->PLLConfig = chipcHw_REG_PLL_CONFIG_D_RESET | chipcHw_REG_PLL_CONFIG_A_RESET; /* Setting VCO frequency */ if (ssSupport == chipcHw_SPREAD_SPECTRUM_ALLOW) { pllPreDivider = chipcHw_REG_PLL_PREDIVIDER_NDIV_MODE_MASH_1_8 | ((chipcHw_REG_PLL_PREDIVIDER_NDIV_i(vcoFreqHz) - 1) << chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT) | (chipcHw_REG_PLL_PREDIVIDER_P1 << chipcHw_REG_PLL_PREDIVIDER_P1_SHIFT) | (chipcHw_REG_PLL_PREDIVIDER_P2 << chipcHw_REG_PLL_PREDIVIDER_P2_SHIFT); } else { pllPreDivider = chipcHw_REG_PLL_PREDIVIDER_POWER_DOWN | chipcHw_REG_PLL_PREDIVIDER_NDIV_MODE_INTEGER | (chipcHw_REG_PLL_PREDIVIDER_NDIV_i(vcoFreqHz) << chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT) | (chipcHw_REG_PLL_PREDIVIDER_P1 << chipcHw_REG_PLL_PREDIVIDER_P1_SHIFT) | (chipcHw_REG_PLL_PREDIVIDER_P2 << chipcHw_REG_PLL_PREDIVIDER_P2_SHIFT); } /* Enable CHIPC registers to control the PLL */ pChipcHw->PLLStatus |= chipcHw_REG_PLL_STATUS_CONTROL_ENABLE; /* Set pre divider to get desired VCO frequency */ pChipcHw->PLLPreDivider = pllPreDivider; /* Set NDIV Frac */ if (ssSupport == chipcHw_SPREAD_SPECTRUM_ALLOW) { pChipcHw->PLLDivider = chipcHw_REG_PLL_DIVIDER_M1DIV | 
chipcHw_REG_PLL_DIVIDER_NDIV_f_SS; } else { pChipcHw->PLLDivider = chipcHw_REG_PLL_DIVIDER_M1DIV | chipcHw_REG_PLL_DIVIDER_NDIV_f; } /* Reset PLL1 */ if (vcoFreqHz > chipcHw_REG_PLL_CONFIG_VCO_SPLIT_FREQ) { pChipcHw->PLLConfig = chipcHw_REG_PLL_CONFIG_D_RESET | chipcHw_REG_PLL_CONFIG_A_RESET | chipcHw_REG_PLL_CONFIG_VCO_1601_3200 | chipcHw_REG_PLL_CONFIG_POWER_DOWN; } else { pChipcHw->PLLConfig = chipcHw_REG_PLL_CONFIG_D_RESET | chipcHw_REG_PLL_CONFIG_A_RESET | chipcHw_REG_PLL_CONFIG_VCO_800_1600 | chipcHw_REG_PLL_CONFIG_POWER_DOWN; } REG_LOCAL_IRQ_RESTORE; /* Insert certain amount of delay before deasserting ARESET. */ udelay(1); { REG_LOCAL_IRQ_SAVE; /* Remove analog reset and Power on the PLL */ pChipcHw->PLLConfig &= ~(chipcHw_REG_PLL_CONFIG_A_RESET | chipcHw_REG_PLL_CONFIG_POWER_DOWN); REG_LOCAL_IRQ_RESTORE; } /* Wait until PLL is locked */ while (!(pChipcHw->PLLStatus & chipcHw_REG_PLL_STATUS_LOCKED) || !(pChipcHw-> PLLStatus2 & chipcHw_REG_PLL_STATUS_LOCKED)) ; /* Remove digital reset */ { REG_LOCAL_IRQ_SAVE; pChipcHw->PLLConfig &= ~chipcHw_REG_PLL_CONFIG_D_RESET; REG_LOCAL_IRQ_RESTORE; } } } EXPORT_SYMBOL(chipcHw_pll1Enable); /****************************************************************************/ /** * @brief Initializes the chipc module * * This function initializes the PLLs and core system clocks * */ /****************************************************************************/ void chipcHw_Init(chipcHw_INIT_PARAM_t *initParam /* [ IN ] Misc chip initialization parameter */ ) { #if !(defined(__KERNEL__) && !defined(STANDALONE)) delay_init(); #endif /* Do not program PLL, when warm reset */ if (!(chipcHw_getStickyBits() & chipcHw_REG_STICKY_CHIP_WARM_RESET)) { chipcHw_pll1Enable(initParam->pllVcoFreqHz, initParam->ssSupport); chipcHw_pll2Enable(initParam->pll2VcoFreqHz); } else { /* Clear sticky bits */ chipcHw_clearStickyBits(chipcHw_REG_STICKY_CHIP_WARM_RESET); } /* Clear sticky bits */ chipcHw_clearStickyBits(chipcHw_REG_STICKY_CHIP_SOFT_RESET); 
/* Before configuring the ARM clock, atleast we need to make sure BUS clock maintains the proper ratio with ARM clock */ pChipcHw->ACLKClock = (pChipcHw-> ACLKClock & ~chipcHw_REG_ACLKClock_CLK_DIV_MASK) | (initParam-> armBusRatio & chipcHw_REG_ACLKClock_CLK_DIV_MASK); /* Set various core component frequencies. The order in which this is done is important for some. */ /* The RTBUS (DDR PHY) is derived from the BUS, and the BUS from the ARM, and VPM needs to know BUS */ /* frequency to find its ratio with the BUS. Hence we must set the ARM first, followed by the BUS, */ /* then VPM and RTBUS. */ chipcHw_setClockFrequency(chipcHw_CLOCK_ARM, initParam->busClockFreqHz * initParam->armBusRatio); chipcHw_setClockFrequency(chipcHw_CLOCK_BUS, initParam->busClockFreqHz); chipcHw_setClockFrequency(chipcHw_CLOCK_VPM, initParam->busClockFreqHz * initParam->vpmBusRatio); chipcHw_setClockFrequency(chipcHw_CLOCK_DDR, initParam->busClockFreqHz * initParam->ddrBusRatio); chipcHw_setClockFrequency(chipcHw_CLOCK_RTBUS, initParam->busClockFreqHz / 2); }
gpl-2.0
Apollo5520/s9-s5pv210-kernel
arch/x86/crypto/salsa20_glue.c
9809
3789
/* * Glue code for optimized assembly version of Salsa20. * * Copyright (c) 2007 Tan Swee Heng <thesweeheng@gmail.com> * * The assembly codes are public domain assembly codes written by Daniel. J. * Bernstein <djb@cr.yp.to>. The codes are modified to include indentation * and to remove extraneous comments and functions that are not needed. * - i586 version, renamed as salsa20-i586-asm_32.S * available from <http://cr.yp.to/snuffle/salsa20/x86-pm/salsa20.s> * - x86-64 version, renamed as salsa20-x86_64-asm_64.S * available from <http://cr.yp.to/snuffle/salsa20/amd64-3/salsa20.s> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/algapi.h> #include <linux/module.h> #include <linux/crypto.h> #define SALSA20_IV_SIZE 8U #define SALSA20_MIN_KEY_SIZE 16U #define SALSA20_MAX_KEY_SIZE 32U // use the ECRYPT_* function names #define salsa20_keysetup ECRYPT_keysetup #define salsa20_ivsetup ECRYPT_ivsetup #define salsa20_encrypt_bytes ECRYPT_encrypt_bytes struct salsa20_ctx { u32 input[16]; }; asmlinkage void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 keysize, u32 ivsize); asmlinkage void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv); asmlinkage void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, const u8 *src, u8 *dst, u32 bytes); static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keysize) { struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm); salsa20_keysetup(ctx, key, keysize*8, SALSA20_IV_SIZE*8); return 0; } static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; struct crypto_blkcipher *tfm = desc->tfm; struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm); int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = 
blkcipher_walk_virt_block(desc, &walk, 64); salsa20_ivsetup(ctx, walk.iv); if (likely(walk.nbytes == nbytes)) { salsa20_encrypt_bytes(ctx, walk.src.virt.addr, walk.dst.virt.addr, nbytes); return blkcipher_walk_done(desc, &walk, 0); } while (walk.nbytes >= 64) { salsa20_encrypt_bytes(ctx, walk.src.virt.addr, walk.dst.virt.addr, walk.nbytes - (walk.nbytes % 64)); err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64); } if (walk.nbytes) { salsa20_encrypt_bytes(ctx, walk.src.virt.addr, walk.dst.virt.addr, walk.nbytes); err = blkcipher_walk_done(desc, &walk, 0); } return err; } static struct crypto_alg alg = { .cra_name = "salsa20", .cra_driver_name = "salsa20-asm", .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_type = &crypto_blkcipher_type, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct salsa20_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_u = { .blkcipher = { .setkey = setkey, .encrypt = encrypt, .decrypt = encrypt, .min_keysize = SALSA20_MIN_KEY_SIZE, .max_keysize = SALSA20_MAX_KEY_SIZE, .ivsize = SALSA20_IV_SIZE, } } }; static int __init init(void) { return crypto_register_alg(&alg); } static void __exit fini(void) { crypto_unregister_alg(&alg); } module_init(init); module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)"); MODULE_ALIAS("salsa20"); MODULE_ALIAS("salsa20-asm");
gpl-2.0
gmarkall/lenovo_a1_07_kernel
drivers/firmware/dell_rbu.c
13905
19596
/* * dell_rbu.c * Bios Update driver for Dell systems * Author: Dell Inc * Abhay Salunke <abhay_salunke@dell.com> * * Copyright (C) 2005 Dell Inc. * * Remote BIOS Update (rbu) driver is used for updating DELL BIOS by * creating entries in the /sys file systems on Linux 2.6 and higher * kernels. The driver supports two mechanism to update the BIOS namely * contiguous and packetized. Both these methods still require having some * application to set the CMOS bit indicating the BIOS to update itself * after a reboot. * * Contiguous method: * This driver writes the incoming data in a monolithic image by allocating * contiguous physical pages large enough to accommodate the incoming BIOS * image size. * * Packetized method: * The driver writes the incoming packet image by allocating a new packet * on every time the packet data is written. This driver requires an * application to break the BIOS image in to fixed sized packet chunks. * * See Documentation/dell_rbu.txt for more info. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License v2.0 as published by * the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/blkdev.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/moduleparam.h> #include <linux/firmware.h> #include <linux/dma-mapping.h> MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>"); MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems"); MODULE_LICENSE("GPL"); MODULE_VERSION("3.2"); #define BIOS_SCAN_LIMIT 0xffffffff #define MAX_IMAGE_LENGTH 16 static struct _rbu_data { void *image_update_buffer; unsigned long image_update_buffer_size; unsigned long bios_image_size; int image_update_ordernum; int dma_alloc; spinlock_t lock; unsigned long packet_read_count; unsigned long num_packets; unsigned long packetsize; unsigned long imagesize; int entry_created; } rbu_data; static char image_type[MAX_IMAGE_LENGTH + 1] = "mono"; module_param_string(image_type, image_type, sizeof (image_type), 0); MODULE_PARM_DESC(image_type, "BIOS image type. 
choose- mono or packet or init"); static unsigned long allocation_floor = 0x100000; module_param(allocation_floor, ulong, 0644); MODULE_PARM_DESC(allocation_floor, "Minimum address for allocations when using Packet mode"); struct packet_data { struct list_head list; size_t length; void *data; int ordernum; }; static struct packet_data packet_data_head; static struct platform_device *rbu_device; static int context; static dma_addr_t dell_rbu_dmaaddr; static void init_packet_head(void) { INIT_LIST_HEAD(&packet_data_head.list); rbu_data.packet_read_count = 0; rbu_data.num_packets = 0; rbu_data.packetsize = 0; rbu_data.imagesize = 0; } static int create_packet(void *data, size_t length) { struct packet_data *newpacket; int ordernum = 0; int retval = 0; unsigned int packet_array_size = 0; void **invalid_addr_packet_array = NULL; void *packet_data_temp_buf = NULL; unsigned int idx = 0; pr_debug("create_packet: entry \n"); if (!rbu_data.packetsize) { pr_debug("create_packet: packetsize not specified\n"); retval = -EINVAL; goto out_noalloc; } spin_unlock(&rbu_data.lock); newpacket = kzalloc(sizeof (struct packet_data), GFP_KERNEL); if (!newpacket) { printk(KERN_WARNING "dell_rbu:%s: failed to allocate new " "packet\n", __func__); retval = -ENOMEM; spin_lock(&rbu_data.lock); goto out_noalloc; } ordernum = get_order(length); /* * BIOS errata mean we cannot allocate packets below 1MB or they will * be overwritten by BIOS. * * array to temporarily hold packets * that are below the allocation floor * * NOTE: very simplistic because we only need the floor to be at 1MB * due to BIOS errata. This shouldn't be used for higher floors * or you will run out of mem trying to allocate the array. 
*/ packet_array_size = max( (unsigned int)(allocation_floor / rbu_data.packetsize), (unsigned int)1); invalid_addr_packet_array = kzalloc(packet_array_size * sizeof(void*), GFP_KERNEL); if (!invalid_addr_packet_array) { printk(KERN_WARNING "dell_rbu:%s: failed to allocate " "invalid_addr_packet_array \n", __func__); retval = -ENOMEM; spin_lock(&rbu_data.lock); goto out_alloc_packet; } while (!packet_data_temp_buf) { packet_data_temp_buf = (unsigned char *) __get_free_pages(GFP_KERNEL, ordernum); if (!packet_data_temp_buf) { printk(KERN_WARNING "dell_rbu:%s: failed to allocate new " "packet\n", __func__); retval = -ENOMEM; spin_lock(&rbu_data.lock); goto out_alloc_packet_array; } if ((unsigned long)virt_to_phys(packet_data_temp_buf) < allocation_floor) { pr_debug("packet 0x%lx below floor at 0x%lx.\n", (unsigned long)virt_to_phys( packet_data_temp_buf), allocation_floor); invalid_addr_packet_array[idx++] = packet_data_temp_buf; packet_data_temp_buf = NULL; } } spin_lock(&rbu_data.lock); newpacket->data = packet_data_temp_buf; pr_debug("create_packet: newpacket at physical addr %lx\n", (unsigned long)virt_to_phys(newpacket->data)); /* packets may not have fixed size */ newpacket->length = length; newpacket->ordernum = ordernum; ++rbu_data.num_packets; /* initialize the newly created packet headers */ INIT_LIST_HEAD(&newpacket->list); list_add_tail(&newpacket->list, &packet_data_head.list); memcpy(newpacket->data, data, length); pr_debug("create_packet: exit \n"); out_alloc_packet_array: /* always free packet array */ for (;idx>0;idx--) { pr_debug("freeing unused packet below floor 0x%lx.\n", (unsigned long)virt_to_phys( invalid_addr_packet_array[idx-1])); free_pages((unsigned long)invalid_addr_packet_array[idx-1], ordernum); } kfree(invalid_addr_packet_array); out_alloc_packet: /* if error, free data */ if (retval) kfree(newpacket); out_noalloc: return retval; } static int packetize_data(const u8 *data, size_t length) { int rc = 0; int done = 0; int packet_length; u8 
*temp; u8 *end = (u8 *) data + length; pr_debug("packetize_data: data length %zd\n", length); if (!rbu_data.packetsize) { printk(KERN_WARNING "dell_rbu: packetsize not specified\n"); return -EIO; } temp = (u8 *) data; /* packetize the hunk */ while (!done) { if ((temp + rbu_data.packetsize) < end) packet_length = rbu_data.packetsize; else { /* this is the last packet */ packet_length = end - temp; done = 1; } if ((rc = create_packet(temp, packet_length))) return rc; pr_debug("%p:%td\n", temp, (end - temp)); temp += packet_length; } rbu_data.imagesize = length; return rc; } static int do_packet_read(char *data, struct list_head *ptemp_list, int length, int bytes_read, int *list_read_count) { void *ptemp_buf; struct packet_data *newpacket = NULL; int bytes_copied = 0; int j = 0; newpacket = list_entry(ptemp_list, struct packet_data, list); *list_read_count += newpacket->length; if (*list_read_count > bytes_read) { /* point to the start of unread data */ j = newpacket->length - (*list_read_count - bytes_read); /* point to the offset in the packet buffer */ ptemp_buf = (u8 *) newpacket->data + j; /* * check if there is enough room in * * the incoming buffer */ if (length > (*list_read_count - bytes_read)) /* * copy what ever is there in this * packet and move on */ bytes_copied = (*list_read_count - bytes_read); else /* copy the remaining */ bytes_copied = length; memcpy(data, ptemp_buf, bytes_copied); } return bytes_copied; } static int packet_read_list(char *data, size_t * pread_length) { struct list_head *ptemp_list; int temp_count = 0; int bytes_copied = 0; int bytes_read = 0; int remaining_bytes = 0; char *pdest = data; /* check if we have any packets */ if (0 == rbu_data.num_packets) return -ENOMEM; remaining_bytes = *pread_length; bytes_read = rbu_data.packet_read_count; ptemp_list = (&packet_data_head.list)->next; while (!list_empty(ptemp_list)) { bytes_copied = do_packet_read(pdest, ptemp_list, remaining_bytes, bytes_read, &temp_count); remaining_bytes -= 
bytes_copied; bytes_read += bytes_copied; pdest += bytes_copied; /* * check if we reached end of buffer before reaching the * last packet */ if (remaining_bytes == 0) break; ptemp_list = ptemp_list->next; } /*finally set the bytes read */ *pread_length = bytes_read - rbu_data.packet_read_count; rbu_data.packet_read_count = bytes_read; return 0; } static void packet_empty_list(void) { struct list_head *ptemp_list; struct list_head *pnext_list; struct packet_data *newpacket; ptemp_list = (&packet_data_head.list)->next; while (!list_empty(ptemp_list)) { newpacket = list_entry(ptemp_list, struct packet_data, list); pnext_list = ptemp_list->next; list_del(ptemp_list); ptemp_list = pnext_list; /* * zero out the RBU packet memory before freeing * to make sure there are no stale RBU packets left in memory */ memset(newpacket->data, 0, rbu_data.packetsize); free_pages((unsigned long) newpacket->data, newpacket->ordernum); kfree(newpacket); } rbu_data.packet_read_count = 0; rbu_data.num_packets = 0; rbu_data.imagesize = 0; } /* * img_update_free: Frees the buffer allocated for storing BIOS image * Always called with lock held and returned with lock held */ static void img_update_free(void) { if (!rbu_data.image_update_buffer) return; /* * zero out this buffer before freeing it to get rid of any stale * BIOS image copied in memory. */ memset(rbu_data.image_update_buffer, 0, rbu_data.image_update_buffer_size); if (rbu_data.dma_alloc == 1) dma_free_coherent(NULL, rbu_data.bios_image_size, rbu_data.image_update_buffer, dell_rbu_dmaaddr); else free_pages((unsigned long) rbu_data.image_update_buffer, rbu_data.image_update_ordernum); /* * Re-initialize the rbu_data variables after a free */ rbu_data.image_update_ordernum = -1; rbu_data.image_update_buffer = NULL; rbu_data.image_update_buffer_size = 0; rbu_data.bios_image_size = 0; rbu_data.dma_alloc = 0; } /* * img_update_realloc: This function allocates the contiguous pages to * accommodate the requested size of data. 
The memory address and size * values are stored globally and on every call to this function the new * size is checked to see if more data is required than the existing size. * If true the previous memory is freed and new allocation is done to * accommodate the new size. If the incoming size is less then than the * already allocated size, then that memory is reused. This function is * called with lock held and returns with lock held. */ static int img_update_realloc(unsigned long size) { unsigned char *image_update_buffer = NULL; unsigned long rc; unsigned long img_buf_phys_addr; int ordernum; int dma_alloc = 0; /* * check if the buffer of sufficient size has been * already allocated */ if (rbu_data.image_update_buffer_size >= size) { /* * check for corruption */ if ((size != 0) && (rbu_data.image_update_buffer == NULL)) { printk(KERN_ERR "dell_rbu:%s: corruption " "check failed\n", __func__); return -EINVAL; } /* * we have a valid pre-allocated buffer with * sufficient size */ return 0; } /* * free any previously allocated buffer */ img_update_free(); spin_unlock(&rbu_data.lock); ordernum = get_order(size); image_update_buffer = (unsigned char *) __get_free_pages(GFP_KERNEL, ordernum); img_buf_phys_addr = (unsigned long) virt_to_phys(image_update_buffer); if (img_buf_phys_addr > BIOS_SCAN_LIMIT) { free_pages((unsigned long) image_update_buffer, ordernum); ordernum = -1; image_update_buffer = dma_alloc_coherent(NULL, size, &dell_rbu_dmaaddr, GFP_KERNEL); dma_alloc = 1; } spin_lock(&rbu_data.lock); if (image_update_buffer != NULL) { rbu_data.image_update_buffer = image_update_buffer; rbu_data.image_update_buffer_size = size; rbu_data.bios_image_size = rbu_data.image_update_buffer_size; rbu_data.image_update_ordernum = ordernum; rbu_data.dma_alloc = dma_alloc; rc = 0; } else { pr_debug("Not enough memory for image update:" "size = %ld\n", size); rc = -ENOMEM; } return rc; } static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count) { int retval; size_t 
bytes_left; size_t data_length; char *ptempBuf = buffer; /* check to see if we have something to return */ if (rbu_data.num_packets == 0) { pr_debug("read_packet_data: no packets written\n"); retval = -ENOMEM; goto read_rbu_data_exit; } if (pos > rbu_data.imagesize) { retval = 0; printk(KERN_WARNING "dell_rbu:read_packet_data: " "data underrun\n"); goto read_rbu_data_exit; } bytes_left = rbu_data.imagesize - pos; data_length = min(bytes_left, count); if ((retval = packet_read_list(ptempBuf, &data_length)) < 0) goto read_rbu_data_exit; if ((pos + count) > rbu_data.imagesize) { rbu_data.packet_read_count = 0; /* this was the last copy */ retval = bytes_left; } else retval = count; read_rbu_data_exit: return retval; } static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count) { /* check to see if we have something to return */ if ((rbu_data.image_update_buffer == NULL) || (rbu_data.bios_image_size == 0)) { pr_debug("read_rbu_data_mono: image_update_buffer %p ," "bios_image_size %lu\n", rbu_data.image_update_buffer, rbu_data.bios_image_size); return -ENOMEM; } return memory_read_from_buffer(buffer, count, &pos, rbu_data.image_update_buffer, rbu_data.bios_image_size); } static ssize_t read_rbu_data(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { ssize_t ret_count = 0; spin_lock(&rbu_data.lock); if (!strcmp(image_type, "mono")) ret_count = read_rbu_mono_data(buffer, pos, count); else if (!strcmp(image_type, "packet")) ret_count = read_packet_data(buffer, pos, count); else pr_debug("read_rbu_data: invalid image type specified\n"); spin_unlock(&rbu_data.lock); return ret_count; } static void callbackfn_rbu(const struct firmware *fw, void *context) { rbu_data.entry_created = 0; if (!fw) return; if (!fw->size) goto out; spin_lock(&rbu_data.lock); if (!strcmp(image_type, "mono")) { if (!img_update_realloc(fw->size)) memcpy(rbu_data.image_update_buffer, fw->data, fw->size); } else if 
(!strcmp(image_type, "packet")) { /* * we need to free previous packets if a * new hunk of packets needs to be downloaded */ packet_empty_list(); if (packetize_data(fw->data, fw->size)) /* Incase something goes wrong when we are * in middle of packetizing the data, we * need to free up whatever packets might * have been created before we quit. */ packet_empty_list(); } else pr_debug("invalid image type specified.\n"); spin_unlock(&rbu_data.lock); out: release_firmware(fw); } static ssize_t read_rbu_image_type(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { int size = 0; if (!pos) size = scnprintf(buffer, count, "%s\n", image_type); return size; } static ssize_t write_rbu_image_type(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { int rc = count; int req_firm_rc = 0; int i; spin_lock(&rbu_data.lock); /* * Find the first newline or space */ for (i = 0; i < count; ++i) if (buffer[i] == '\n' || buffer[i] == ' ') { buffer[i] = '\0'; break; } if (i == count) buffer[count] = '\0'; if (strstr(buffer, "mono")) strcpy(image_type, "mono"); else if (strstr(buffer, "packet")) strcpy(image_type, "packet"); else if (strstr(buffer, "init")) { /* * If due to the user error the driver gets in a bad * state where even though it is loaded , the * /sys/class/firmware/dell_rbu entries are missing. * to cover this situation the user can recreate entries * by writing init to image_type. 
*/ if (!rbu_data.entry_created) { spin_unlock(&rbu_data.lock); req_firm_rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, "dell_rbu", &rbu_device->dev, GFP_KERNEL, &context, callbackfn_rbu); if (req_firm_rc) { printk(KERN_ERR "dell_rbu:%s request_firmware_nowait" " failed %d\n", __func__, rc); rc = -EIO; } else rbu_data.entry_created = 1; spin_lock(&rbu_data.lock); } } else { printk(KERN_WARNING "dell_rbu: image_type is invalid\n"); spin_unlock(&rbu_data.lock); return -EINVAL; } /* we must free all previous allocations */ packet_empty_list(); img_update_free(); spin_unlock(&rbu_data.lock); return rc; } static ssize_t read_rbu_packet_size(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { int size = 0; if (!pos) { spin_lock(&rbu_data.lock); size = scnprintf(buffer, count, "%lu\n", rbu_data.packetsize); spin_unlock(&rbu_data.lock); } return size; } static ssize_t write_rbu_packet_size(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { unsigned long temp; spin_lock(&rbu_data.lock); packet_empty_list(); sscanf(buffer, "%lu", &temp); if (temp < 0xffffffff) rbu_data.packetsize = temp; spin_unlock(&rbu_data.lock); return count; } static struct bin_attribute rbu_data_attr = { .attr = {.name = "data", .mode = 0444}, .read = read_rbu_data, }; static struct bin_attribute rbu_image_type_attr = { .attr = {.name = "image_type", .mode = 0644}, .read = read_rbu_image_type, .write = write_rbu_image_type, }; static struct bin_attribute rbu_packet_size_attr = { .attr = {.name = "packet_size", .mode = 0644}, .read = read_rbu_packet_size, .write = write_rbu_packet_size, }; static int __init dcdrbu_init(void) { int rc; spin_lock_init(&rbu_data.lock); init_packet_head(); rbu_device = platform_device_register_simple("dell_rbu", -1, NULL, 0); if (IS_ERR(rbu_device)) { printk(KERN_ERR "dell_rbu:%s:platform_device_register_simple " "failed\n", 
__func__); return PTR_ERR(rbu_device); } rc = sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_data_attr); if (rc) goto out_devreg; rc = sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr); if (rc) goto out_data; rc = sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_packet_size_attr); if (rc) goto out_imtype; rbu_data.entry_created = 0; return 0; out_imtype: sysfs_remove_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr); out_data: sysfs_remove_bin_file(&rbu_device->dev.kobj, &rbu_data_attr); out_devreg: platform_device_unregister(rbu_device); return rc; } static __exit void dcdrbu_exit(void) { spin_lock(&rbu_data.lock); packet_empty_list(); img_update_free(); spin_unlock(&rbu_data.lock); platform_device_unregister(rbu_device); } module_exit(dcdrbu_exit); module_init(dcdrbu_init); /* vim:noet:ts=8:sw=8 */
gpl-2.0
sonulohani/Xperia-L-Kernel
arch/x86/math-emu/fpu_trig.c
14417
39143
/*---------------------------------------------------------------------------+ | fpu_trig.c | | | | Implementation of the FPU "transcendental" functions. | | | | Copyright (C) 1992,1993,1994,1997,1999 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, | | Australia. E-mail billm@melbpc.org.au | | | | | +---------------------------------------------------------------------------*/ #include "fpu_system.h" #include "exception.h" #include "fpu_emu.h" #include "status_w.h" #include "control_w.h" #include "reg_constant.h" static void rem_kernel(unsigned long long st0, unsigned long long *y, unsigned long long st1, unsigned long long q, int n); #define BETTER_THAN_486 #define FCOS 4 /* Used only by fptan, fsin, fcos, and fsincos. */ /* This routine produces very accurate results, similar to using a value of pi with more than 128 bits precision. */ /* Limited measurements show no results worse than 64 bit precision except for the results for arguments close to 2^63, where the precision of the result sometimes degrades to about 63.9 bits */ static int trig_arg(FPU_REG *st0_ptr, int even) { FPU_REG tmp; u_char tmptag; unsigned long long q; int old_cw = control_word, saved_status = partial_status; int tag, st0_tag = TAG_Valid; if (exponent(st0_ptr) >= 63) { partial_status |= SW_C2; /* Reduction incomplete. 
*/ return -1; } control_word &= ~CW_RC; control_word |= RC_CHOP; setpositive(st0_ptr); tag = FPU_u_div(st0_ptr, &CONST_PI2, &tmp, PR_64_BITS | RC_CHOP | 0x3f, SIGN_POS); FPU_round_to_int(&tmp, tag); /* Fortunately, this can't overflow to 2^64 */ q = significand(&tmp); if (q) { rem_kernel(significand(st0_ptr), &significand(&tmp), significand(&CONST_PI2), q, exponent(st0_ptr) - exponent(&CONST_PI2)); setexponent16(&tmp, exponent(&CONST_PI2)); st0_tag = FPU_normalize(&tmp); FPU_copy_to_reg0(&tmp, st0_tag); } if ((even && !(q & 1)) || (!even && (q & 1))) { st0_tag = FPU_sub(REV | LOADED | TAG_Valid, (int)&CONST_PI2, FULL_PRECISION); #ifdef BETTER_THAN_486 /* So far, the results are exact but based upon a 64 bit precision approximation to pi/2. The technique used now is equivalent to using an approximation to pi/2 which is accurate to about 128 bits. */ if ((exponent(st0_ptr) <= exponent(&CONST_PI2extra) + 64) || (q > 1)) { /* This code gives the effect of having pi/2 to better than 128 bits precision. */ significand(&tmp) = q + 1; setexponent16(&tmp, 63); FPU_normalize(&tmp); tmptag = FPU_u_mul(&CONST_PI2extra, &tmp, &tmp, FULL_PRECISION, SIGN_POS, exponent(&CONST_PI2extra) + exponent(&tmp)); setsign(&tmp, getsign(&CONST_PI2extra)); st0_tag = FPU_add(&tmp, tmptag, 0, FULL_PRECISION); if (signnegative(st0_ptr)) { /* CONST_PI2extra is negative, so the result of the addition can be negative. This means that the argument is actually in a different quadrant. The correction is always < pi/2, so it can't overflow into yet another quadrant. */ setpositive(st0_ptr); q++; } } #endif /* BETTER_THAN_486 */ } #ifdef BETTER_THAN_486 else { /* So far, the results are exact but based upon a 64 bit precision approximation to pi/2. The technique used now is equivalent to using an approximation to pi/2 which is accurate to about 128 bits. 
*/ if (((q > 0) && (exponent(st0_ptr) <= exponent(&CONST_PI2extra) + 64)) || (q > 1)) { /* This code gives the effect of having p/2 to better than 128 bits precision. */ significand(&tmp) = q; setexponent16(&tmp, 63); FPU_normalize(&tmp); /* This must return TAG_Valid */ tmptag = FPU_u_mul(&CONST_PI2extra, &tmp, &tmp, FULL_PRECISION, SIGN_POS, exponent(&CONST_PI2extra) + exponent(&tmp)); setsign(&tmp, getsign(&CONST_PI2extra)); st0_tag = FPU_sub(LOADED | (tmptag & 0x0f), (int)&tmp, FULL_PRECISION); if ((exponent(st0_ptr) == exponent(&CONST_PI2)) && ((st0_ptr->sigh > CONST_PI2.sigh) || ((st0_ptr->sigh == CONST_PI2.sigh) && (st0_ptr->sigl > CONST_PI2.sigl)))) { /* CONST_PI2extra is negative, so the result of the subtraction can be larger than pi/2. This means that the argument is actually in a different quadrant. The correction is always < pi/2, so it can't overflow into yet another quadrant. */ st0_tag = FPU_sub(REV | LOADED | TAG_Valid, (int)&CONST_PI2, FULL_PRECISION); q++; } } } #endif /* BETTER_THAN_486 */ FPU_settag0(st0_tag); control_word = old_cw; partial_status = saved_status & ~SW_C2; /* Reduction complete. 
*/ return (q & 3) | even; } /* Convert a long to register */ static void convert_l2reg(long const *arg, int deststnr) { int tag; long num = *arg; u_char sign; FPU_REG *dest = &st(deststnr); if (num == 0) { FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr); return; } if (num > 0) { sign = SIGN_POS; } else { num = -num; sign = SIGN_NEG; } dest->sigh = num; dest->sigl = 0; setexponent16(dest, 31); tag = FPU_normalize(dest); FPU_settagi(deststnr, tag); setsign(dest, sign); return; } static void single_arg_error(FPU_REG *st0_ptr, u_char st0_tag) { if (st0_tag == TAG_Empty) FPU_stack_underflow(); /* Puts a QNaN in st(0) */ else if (st0_tag == TW_NaN) real_1op_NaN(st0_ptr); /* return with a NaN in st(0) */ #ifdef PARANOID else EXCEPTION(EX_INTERNAL | 0x0112); #endif /* PARANOID */ } static void single_arg_2_error(FPU_REG *st0_ptr, u_char st0_tag) { int isNaN; switch (st0_tag) { case TW_NaN: isNaN = (exponent(st0_ptr) == EXP_OVER) && (st0_ptr->sigh & 0x80000000); if (isNaN && !(st0_ptr->sigh & 0x40000000)) { /* Signaling ? 
*/ EXCEPTION(EX_Invalid); if (control_word & CW_Invalid) { /* The masked response */ /* Convert to a QNaN */ st0_ptr->sigh |= 0x40000000; push(); FPU_copy_to_reg0(st0_ptr, TAG_Special); } } else if (isNaN) { /* A QNaN */ push(); FPU_copy_to_reg0(st0_ptr, TAG_Special); } else { /* pseudoNaN or other unsupported */ EXCEPTION(EX_Invalid); if (control_word & CW_Invalid) { /* The masked response */ FPU_copy_to_reg0(&CONST_QNaN, TAG_Special); push(); FPU_copy_to_reg0(&CONST_QNaN, TAG_Special); } } break; /* return with a NaN in st(0) */ #ifdef PARANOID default: EXCEPTION(EX_INTERNAL | 0x0112); #endif /* PARANOID */ } } /*---------------------------------------------------------------------------*/ static void f2xm1(FPU_REG *st0_ptr, u_char tag) { FPU_REG a; clear_C1(); if (tag == TAG_Valid) { /* For an 80486 FPU, the result is undefined if the arg is >= 1.0 */ if (exponent(st0_ptr) < 0) { denormal_arg: FPU_to_exp16(st0_ptr, &a); /* poly_2xm1(x) requires 0 < st(0) < 1. */ poly_2xm1(getsign(st0_ptr), &a, st0_ptr); } set_precision_flag_up(); /* 80486 appears to always do this */ return; } if (tag == TAG_Zero) return; if (tag == TAG_Special) tag = FPU_Special(st0_ptr); switch (tag) { case TW_Denormal: if (denormal_operand() < 0) return; goto denormal_arg; case TW_Infinity: if (signnegative(st0_ptr)) { /* -infinity gives -1 (p16-10) */ FPU_copy_to_reg0(&CONST_1, TAG_Valid); setnegative(st0_ptr); } return; default: single_arg_error(st0_ptr, tag); } } static void fptan(FPU_REG *st0_ptr, u_char st0_tag) { FPU_REG *st_new_ptr; int q; u_char arg_sign = getsign(st0_ptr); /* Stack underflow has higher priority */ if (st0_tag == TAG_Empty) { FPU_stack_underflow(); /* Puts a QNaN in st(0) */ if (control_word & CW_Invalid) { st_new_ptr = &st(-1); push(); FPU_stack_underflow(); /* Puts a QNaN in the new st(0) */ } return; } if (STACK_OVERFLOW) { FPU_stack_overflow(); return; } if (st0_tag == TAG_Valid) { if (exponent(st0_ptr) > -40) { if ((q = trig_arg(st0_ptr, 0)) == -1) { /* Operand 
is out of range */ return; } poly_tan(st0_ptr); setsign(st0_ptr, (q & 1) ^ (arg_sign != 0)); set_precision_flag_up(); /* We do not really know if up or down */ } else { /* For a small arg, the result == the argument */ /* Underflow may happen */ denormal_arg: FPU_to_exp16(st0_ptr, st0_ptr); st0_tag = FPU_round(st0_ptr, 1, 0, FULL_PRECISION, arg_sign); FPU_settag0(st0_tag); } push(); FPU_copy_to_reg0(&CONST_1, TAG_Valid); return; } if (st0_tag == TAG_Zero) { push(); FPU_copy_to_reg0(&CONST_1, TAG_Valid); setcc(0); return; } if (st0_tag == TAG_Special) st0_tag = FPU_Special(st0_ptr); if (st0_tag == TW_Denormal) { if (denormal_operand() < 0) return; goto denormal_arg; } if (st0_tag == TW_Infinity) { /* The 80486 treats infinity as an invalid operand */ if (arith_invalid(0) >= 0) { st_new_ptr = &st(-1); push(); arith_invalid(0); } return; } single_arg_2_error(st0_ptr, st0_tag); } static void fxtract(FPU_REG *st0_ptr, u_char st0_tag) { FPU_REG *st_new_ptr; u_char sign; register FPU_REG *st1_ptr = st0_ptr; /* anticipate */ if (STACK_OVERFLOW) { FPU_stack_overflow(); return; } clear_C1(); if (st0_tag == TAG_Valid) { long e; push(); sign = getsign(st1_ptr); reg_copy(st1_ptr, st_new_ptr); setexponent16(st_new_ptr, exponent(st_new_ptr)); denormal_arg: e = exponent16(st_new_ptr); convert_l2reg(&e, 1); setexponentpos(st_new_ptr, 0); setsign(st_new_ptr, sign); FPU_settag0(TAG_Valid); /* Needed if arg was a denormal */ return; } else if (st0_tag == TAG_Zero) { sign = getsign(st0_ptr); if (FPU_divide_by_zero(0, SIGN_NEG) < 0) return; push(); FPU_copy_to_reg0(&CONST_Z, TAG_Zero); setsign(st_new_ptr, sign); return; } if (st0_tag == TAG_Special) st0_tag = FPU_Special(st0_ptr); if (st0_tag == TW_Denormal) { if (denormal_operand() < 0) return; push(); sign = getsign(st1_ptr); FPU_to_exp16(st1_ptr, st_new_ptr); goto denormal_arg; } else if (st0_tag == TW_Infinity) { sign = getsign(st0_ptr); setpositive(st0_ptr); push(); FPU_copy_to_reg0(&CONST_INF, TAG_Special); setsign(st_new_ptr, 
sign); return; } else if (st0_tag == TW_NaN) { if (real_1op_NaN(st0_ptr) < 0) return; push(); FPU_copy_to_reg0(st0_ptr, TAG_Special); return; } else if (st0_tag == TAG_Empty) { /* Is this the correct behaviour? */ if (control_word & EX_Invalid) { FPU_stack_underflow(); push(); FPU_stack_underflow(); } else EXCEPTION(EX_StackUnder); } #ifdef PARANOID else EXCEPTION(EX_INTERNAL | 0x119); #endif /* PARANOID */ } static void fdecstp(void) { clear_C1(); top--; } static void fincstp(void) { clear_C1(); top++; } static void fsqrt_(FPU_REG *st0_ptr, u_char st0_tag) { int expon; clear_C1(); if (st0_tag == TAG_Valid) { u_char tag; if (signnegative(st0_ptr)) { arith_invalid(0); /* sqrt(negative) is invalid */ return; } /* make st(0) in [1.0 .. 4.0) */ expon = exponent(st0_ptr); denormal_arg: setexponent16(st0_ptr, (expon & 1)); /* Do the computation, the sign of the result will be positive. */ tag = wm_sqrt(st0_ptr, 0, 0, control_word, SIGN_POS); addexponent(st0_ptr, expon >> 1); FPU_settag0(tag); return; } if (st0_tag == TAG_Zero) return; if (st0_tag == TAG_Special) st0_tag = FPU_Special(st0_ptr); if (st0_tag == TW_Infinity) { if (signnegative(st0_ptr)) arith_invalid(0); /* sqrt(-Infinity) is invalid */ return; } else if (st0_tag == TW_Denormal) { if (signnegative(st0_ptr)) { arith_invalid(0); /* sqrt(negative) is invalid */ return; } if (denormal_operand() < 0) return; FPU_to_exp16(st0_ptr, st0_ptr); expon = exponent16(st0_ptr); goto denormal_arg; } single_arg_error(st0_ptr, st0_tag); } static void frndint_(FPU_REG *st0_ptr, u_char st0_tag) { int flags, tag; if (st0_tag == TAG_Valid) { u_char sign; denormal_arg: sign = getsign(st0_ptr); if (exponent(st0_ptr) > 63) return; if (st0_tag == TW_Denormal) { if (denormal_operand() < 0) return; } /* Fortunately, this can't overflow to 2^64 */ if ((flags = FPU_round_to_int(st0_ptr, st0_tag))) set_precision_flag(flags); setexponent16(st0_ptr, 63); tag = FPU_normalize(st0_ptr); setsign(st0_ptr, sign); FPU_settag0(tag); return; } if 
(st0_tag == TAG_Zero) return; if (st0_tag == TAG_Special) st0_tag = FPU_Special(st0_ptr); if (st0_tag == TW_Denormal) goto denormal_arg; else if (st0_tag == TW_Infinity) return; else single_arg_error(st0_ptr, st0_tag); } static int fsin(FPU_REG *st0_ptr, u_char tag) { u_char arg_sign = getsign(st0_ptr); if (tag == TAG_Valid) { int q; if (exponent(st0_ptr) > -40) { if ((q = trig_arg(st0_ptr, 0)) == -1) { /* Operand is out of range */ return 1; } poly_sine(st0_ptr); if (q & 2) changesign(st0_ptr); setsign(st0_ptr, getsign(st0_ptr) ^ arg_sign); /* We do not really know if up or down */ set_precision_flag_up(); return 0; } else { /* For a small arg, the result == the argument */ set_precision_flag_up(); /* Must be up. */ return 0; } } if (tag == TAG_Zero) { setcc(0); return 0; } if (tag == TAG_Special) tag = FPU_Special(st0_ptr); if (tag == TW_Denormal) { if (denormal_operand() < 0) return 1; /* For a small arg, the result == the argument */ /* Underflow may happen */ FPU_to_exp16(st0_ptr, st0_ptr); tag = FPU_round(st0_ptr, 1, 0, FULL_PRECISION, arg_sign); FPU_settag0(tag); return 0; } else if (tag == TW_Infinity) { /* The 80486 treats infinity as an invalid operand */ arith_invalid(0); return 1; } else { single_arg_error(st0_ptr, tag); return 1; } } static int f_cos(FPU_REG *st0_ptr, u_char tag) { u_char st0_sign; st0_sign = getsign(st0_ptr); if (tag == TAG_Valid) { int q; if (exponent(st0_ptr) > -40) { if ((exponent(st0_ptr) < 0) || ((exponent(st0_ptr) == 0) && (significand(st0_ptr) <= 0xc90fdaa22168c234LL))) { poly_cos(st0_ptr); /* We do not really know if up or down */ set_precision_flag_down(); return 0; } else if ((q = trig_arg(st0_ptr, FCOS)) != -1) { poly_sine(st0_ptr); if ((q + 1) & 2) changesign(st0_ptr); /* We do not really know if up or down */ set_precision_flag_down(); return 0; } else { /* Operand is out of range */ return 1; } } else { denormal_arg: setcc(0); FPU_copy_to_reg0(&CONST_1, TAG_Valid); #ifdef PECULIAR_486 set_precision_flag_down(); /* 80486 
appears to do this. */ #else set_precision_flag_up(); /* Must be up. */ #endif /* PECULIAR_486 */ return 0; } } else if (tag == TAG_Zero) { FPU_copy_to_reg0(&CONST_1, TAG_Valid); setcc(0); return 0; } if (tag == TAG_Special) tag = FPU_Special(st0_ptr); if (tag == TW_Denormal) { if (denormal_operand() < 0) return 1; goto denormal_arg; } else if (tag == TW_Infinity) { /* The 80486 treats infinity as an invalid operand */ arith_invalid(0); return 1; } else { single_arg_error(st0_ptr, tag); /* requires st0_ptr == &st(0) */ return 1; } } static void fcos(FPU_REG *st0_ptr, u_char st0_tag) { f_cos(st0_ptr, st0_tag); } static void fsincos(FPU_REG *st0_ptr, u_char st0_tag) { FPU_REG *st_new_ptr; FPU_REG arg; u_char tag; /* Stack underflow has higher priority */ if (st0_tag == TAG_Empty) { FPU_stack_underflow(); /* Puts a QNaN in st(0) */ if (control_word & CW_Invalid) { st_new_ptr = &st(-1); push(); FPU_stack_underflow(); /* Puts a QNaN in the new st(0) */ } return; } if (STACK_OVERFLOW) { FPU_stack_overflow(); return; } if (st0_tag == TAG_Special) tag = FPU_Special(st0_ptr); else tag = st0_tag; if (tag == TW_NaN) { single_arg_2_error(st0_ptr, TW_NaN); return; } else if (tag == TW_Infinity) { /* The 80486 treats infinity as an invalid operand */ if (arith_invalid(0) >= 0) { /* Masked response */ push(); arith_invalid(0); } return; } reg_copy(st0_ptr, &arg); if (!fsin(st0_ptr, st0_tag)) { push(); FPU_copy_to_reg0(&arg, st0_tag); f_cos(&st(0), st0_tag); } else { /* An error, so restore st(0) */ FPU_copy_to_reg0(&arg, st0_tag); } } /*---------------------------------------------------------------------------*/ /* The following all require two arguments: st(0) and st(1) */ /* A lean, mean kernel for the fprem instructions. This relies upon the division and rounding to an integer in do_fprem giving an exact result. Because of this, rem_kernel() needs to deal only with the least significant 64 bits, the more significant bits of the result must be zero. 
*/ static void rem_kernel(unsigned long long st0, unsigned long long *y, unsigned long long st1, unsigned long long q, int n) { int dummy; unsigned long long x; x = st0 << n; /* Do the required multiplication and subtraction in the one operation */ /* lsw x -= lsw st1 * lsw q */ asm volatile ("mull %4; subl %%eax,%0; sbbl %%edx,%1":"=m" (((unsigned *)&x)[0]), "=m"(((unsigned *)&x)[1]), "=a"(dummy) :"2"(((unsigned *)&st1)[0]), "m"(((unsigned *)&q)[0]) :"%dx"); /* msw x -= msw st1 * lsw q */ asm volatile ("mull %3; subl %%eax,%0":"=m" (((unsigned *)&x)[1]), "=a"(dummy) :"1"(((unsigned *)&st1)[1]), "m"(((unsigned *)&q)[0]) :"%dx"); /* msw x -= lsw st1 * msw q */ asm volatile ("mull %3; subl %%eax,%0":"=m" (((unsigned *)&x)[1]), "=a"(dummy) :"1"(((unsigned *)&st1)[0]), "m"(((unsigned *)&q)[1]) :"%dx"); *y = x; } /* Remainder of st(0) / st(1) */ /* This routine produces exact results, i.e. there is never any rounding or truncation, etc of the result. */ static void do_fprem(FPU_REG *st0_ptr, u_char st0_tag, int round) { FPU_REG *st1_ptr = &st(1); u_char st1_tag = FPU_gettagi(1); if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) { FPU_REG tmp, st0, st1; u_char st0_sign, st1_sign; u_char tmptag; int tag; int old_cw; int expdif; long long q; unsigned short saved_status; int cc; fprem_valid: /* Convert registers for internal use. */ st0_sign = FPU_to_exp16(st0_ptr, &st0); st1_sign = FPU_to_exp16(st1_ptr, &st1); expdif = exponent16(&st0) - exponent16(&st1); old_cw = control_word; cc = 0; /* We want the status following the denorm tests, but don't want the status changed by the arithmetic operations. 
*/ saved_status = partial_status; control_word &= ~CW_RC; control_word |= RC_CHOP; if (expdif < 64) { /* This should be the most common case */ if (expdif > -2) { u_char sign = st0_sign ^ st1_sign; tag = FPU_u_div(&st0, &st1, &tmp, PR_64_BITS | RC_CHOP | 0x3f, sign); setsign(&tmp, sign); if (exponent(&tmp) >= 0) { FPU_round_to_int(&tmp, tag); /* Fortunately, this can't overflow to 2^64 */ q = significand(&tmp); rem_kernel(significand(&st0), &significand(&tmp), significand(&st1), q, expdif); setexponent16(&tmp, exponent16(&st1)); } else { reg_copy(&st0, &tmp); q = 0; } if ((round == RC_RND) && (tmp.sigh & 0xc0000000)) { /* We may need to subtract st(1) once more, to get a result <= 1/2 of st(1). */ unsigned long long x; expdif = exponent16(&st1) - exponent16(&tmp); if (expdif <= 1) { if (expdif == 0) x = significand(&st1) - significand(&tmp); else /* expdif is 1 */ x = (significand(&st1) << 1) - significand(&tmp); if ((x < significand(&tmp)) || /* or equi-distant (from 0 & st(1)) and q is odd */ ((x == significand(&tmp)) && (q & 1))) { st0_sign = !st0_sign; significand(&tmp) = x; q++; } } } if (q & 4) cc |= SW_C0; if (q & 2) cc |= SW_C3; if (q & 1) cc |= SW_C1; } else { control_word = old_cw; setcc(0); return; } } else { /* There is a large exponent difference ( >= 64 ) */ /* To make much sense, the code in this section should be done at high precision. 
*/ int exp_1, N; u_char sign; /* prevent overflow here */ /* N is 'a number between 32 and 63' (p26-113) */ reg_copy(&st0, &tmp); tmptag = st0_tag; N = (expdif & 0x0000001f) + 32; /* This choice gives results identical to an AMD 486 */ setexponent16(&tmp, N); exp_1 = exponent16(&st1); setexponent16(&st1, 0); expdif -= N; sign = getsign(&tmp) ^ st1_sign; tag = FPU_u_div(&tmp, &st1, &tmp, PR_64_BITS | RC_CHOP | 0x3f, sign); setsign(&tmp, sign); FPU_round_to_int(&tmp, tag); /* Fortunately, this can't overflow to 2^64 */ rem_kernel(significand(&st0), &significand(&tmp), significand(&st1), significand(&tmp), exponent(&tmp) ); setexponent16(&tmp, exp_1 + expdif); /* It is possible for the operation to be complete here. What does the IEEE standard say? The Intel 80486 manual implies that the operation will never be completed at this point, and the behaviour of a real 80486 confirms this. */ if (!(tmp.sigh | tmp.sigl)) { /* The result is zero */ control_word = old_cw; partial_status = saved_status; FPU_copy_to_reg0(&CONST_Z, TAG_Zero); setsign(&st0, st0_sign); #ifdef PECULIAR_486 setcc(SW_C2); #else setcc(0); #endif /* PECULIAR_486 */ return; } cc = SW_C2; } control_word = old_cw; partial_status = saved_status; tag = FPU_normalize_nuo(&tmp); reg_copy(&tmp, st0_ptr); /* The only condition to be looked for is underflow, and it can occur here only if underflow is unmasked. 
*/ if ((exponent16(&tmp) <= EXP_UNDER) && (tag != TAG_Zero) && !(control_word & CW_Underflow)) { setcc(cc); tag = arith_underflow(st0_ptr); setsign(st0_ptr, st0_sign); FPU_settag0(tag); return; } else if ((exponent16(&tmp) > EXP_UNDER) || (tag == TAG_Zero)) { stdexp(st0_ptr); setsign(st0_ptr, st0_sign); } else { tag = FPU_round(st0_ptr, 0, 0, FULL_PRECISION, st0_sign); } FPU_settag0(tag); setcc(cc); return; } if (st0_tag == TAG_Special) st0_tag = FPU_Special(st0_ptr); if (st1_tag == TAG_Special) st1_tag = FPU_Special(st1_ptr); if (((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal)) || ((st0_tag == TW_Denormal) && (st1_tag == TAG_Valid)) || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal))) { if (denormal_operand() < 0) return; goto fprem_valid; } else if ((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty)) { FPU_stack_underflow(); return; } else if (st0_tag == TAG_Zero) { if (st1_tag == TAG_Valid) { setcc(0); return; } else if (st1_tag == TW_Denormal) { if (denormal_operand() < 0) return; setcc(0); return; } else if (st1_tag == TAG_Zero) { arith_invalid(0); return; } /* fprem(?,0) always invalid */ else if (st1_tag == TW_Infinity) { setcc(0); return; } } else if ((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal)) { if (st1_tag == TAG_Zero) { arith_invalid(0); /* fprem(Valid,Zero) is invalid */ return; } else if (st1_tag != TW_NaN) { if (((st0_tag == TW_Denormal) || (st1_tag == TW_Denormal)) && (denormal_operand() < 0)) return; if (st1_tag == TW_Infinity) { /* fprem(Valid,Infinity) is o.k. */ setcc(0); return; } } } else if (st0_tag == TW_Infinity) { if (st1_tag != TW_NaN) { arith_invalid(0); /* fprem(Infinity,?) is invalid */ return; } } /* One of the registers must contain a NaN if we got here. 
*/ #ifdef PARANOID if ((st0_tag != TW_NaN) && (st1_tag != TW_NaN)) EXCEPTION(EX_INTERNAL | 0x118); #endif /* PARANOID */ real_2op_NaN(st1_ptr, st1_tag, 0, st1_ptr); } /* ST(1) <- ST(1) * log ST; pop ST */ static void fyl2x(FPU_REG *st0_ptr, u_char st0_tag) { FPU_REG *st1_ptr = &st(1), exponent; u_char st1_tag = FPU_gettagi(1); u_char sign; int e, tag; clear_C1(); if ((st0_tag == TAG_Valid) && (st1_tag == TAG_Valid)) { both_valid: /* Both regs are Valid or Denormal */ if (signpositive(st0_ptr)) { if (st0_tag == TW_Denormal) FPU_to_exp16(st0_ptr, st0_ptr); else /* Convert st(0) for internal use. */ setexponent16(st0_ptr, exponent(st0_ptr)); if ((st0_ptr->sigh == 0x80000000) && (st0_ptr->sigl == 0)) { /* Special case. The result can be precise. */ u_char esign; e = exponent16(st0_ptr); if (e >= 0) { exponent.sigh = e; esign = SIGN_POS; } else { exponent.sigh = -e; esign = SIGN_NEG; } exponent.sigl = 0; setexponent16(&exponent, 31); tag = FPU_normalize_nuo(&exponent); stdexp(&exponent); setsign(&exponent, esign); tag = FPU_mul(&exponent, tag, 1, FULL_PRECISION); if (tag >= 0) FPU_settagi(1, tag); } else { /* The usual case */ sign = getsign(st1_ptr); if (st1_tag == TW_Denormal) FPU_to_exp16(st1_ptr, st1_ptr); else /* Convert st(1) for internal use. 
*/ setexponent16(st1_ptr, exponent(st1_ptr)); poly_l2(st0_ptr, st1_ptr, sign); } } else { /* negative */ if (arith_invalid(1) < 0) return; } FPU_pop(); return; } if (st0_tag == TAG_Special) st0_tag = FPU_Special(st0_ptr); if (st1_tag == TAG_Special) st1_tag = FPU_Special(st1_ptr); if ((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty)) { FPU_stack_underflow_pop(1); return; } else if ((st0_tag <= TW_Denormal) && (st1_tag <= TW_Denormal)) { if (st0_tag == TAG_Zero) { if (st1_tag == TAG_Zero) { /* Both args zero is invalid */ if (arith_invalid(1) < 0) return; } else { u_char sign; sign = getsign(st1_ptr) ^ SIGN_NEG; if (FPU_divide_by_zero(1, sign) < 0) return; setsign(st1_ptr, sign); } } else if (st1_tag == TAG_Zero) { /* st(1) contains zero, st(0) valid <> 0 */ /* Zero is the valid answer */ sign = getsign(st1_ptr); if (signnegative(st0_ptr)) { /* log(negative) */ if (arith_invalid(1) < 0) return; } else if ((st0_tag == TW_Denormal) && (denormal_operand() < 0)) return; else { if (exponent(st0_ptr) < 0) sign ^= SIGN_NEG; FPU_copy_to_reg1(&CONST_Z, TAG_Zero); setsign(st1_ptr, sign); } } else { /* One or both operands are denormals. 
*/ if (denormal_operand() < 0) return; goto both_valid; } } else if ((st0_tag == TW_NaN) || (st1_tag == TW_NaN)) { if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0) return; } /* One or both arg must be an infinity */ else if (st0_tag == TW_Infinity) { if ((signnegative(st0_ptr)) || (st1_tag == TAG_Zero)) { /* log(-infinity) or 0*log(infinity) */ if (arith_invalid(1) < 0) return; } else { u_char sign = getsign(st1_ptr); if ((st1_tag == TW_Denormal) && (denormal_operand() < 0)) return; FPU_copy_to_reg1(&CONST_INF, TAG_Special); setsign(st1_ptr, sign); } } /* st(1) must be infinity here */ else if (((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal)) && (signpositive(st0_ptr))) { if (exponent(st0_ptr) >= 0) { if ((exponent(st0_ptr) == 0) && (st0_ptr->sigh == 0x80000000) && (st0_ptr->sigl == 0)) { /* st(0) holds 1.0 */ /* infinity*log(1) */ if (arith_invalid(1) < 0) return; } /* else st(0) is positive and > 1.0 */ } else { /* st(0) is positive and < 1.0 */ if ((st0_tag == TW_Denormal) && (denormal_operand() < 0)) return; changesign(st1_ptr); } } else { /* st(0) must be zero or negative */ if (st0_tag == TAG_Zero) { /* This should be invalid, but a real 80486 is happy with it. 
*/ #ifndef PECULIAR_486 sign = getsign(st1_ptr); if (FPU_divide_by_zero(1, sign) < 0) return; #endif /* PECULIAR_486 */ changesign(st1_ptr); } else if (arith_invalid(1) < 0) /* log(negative) */ return; } FPU_pop(); } static void fpatan(FPU_REG *st0_ptr, u_char st0_tag) { FPU_REG *st1_ptr = &st(1); u_char st1_tag = FPU_gettagi(1); int tag; clear_C1(); if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) { valid_atan: poly_atan(st0_ptr, st0_tag, st1_ptr, st1_tag); FPU_pop(); return; } if (st0_tag == TAG_Special) st0_tag = FPU_Special(st0_ptr); if (st1_tag == TAG_Special) st1_tag = FPU_Special(st1_ptr); if (((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal)) || ((st0_tag == TW_Denormal) && (st1_tag == TAG_Valid)) || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal))) { if (denormal_operand() < 0) return; goto valid_atan; } else if ((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty)) { FPU_stack_underflow_pop(1); return; } else if ((st0_tag == TW_NaN) || (st1_tag == TW_NaN)) { if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) >= 0) FPU_pop(); return; } else if ((st0_tag == TW_Infinity) || (st1_tag == TW_Infinity)) { u_char sign = getsign(st1_ptr); if (st0_tag == TW_Infinity) { if (st1_tag == TW_Infinity) { if (signpositive(st0_ptr)) { FPU_copy_to_reg1(&CONST_PI4, TAG_Valid); } else { setpositive(st1_ptr); tag = FPU_u_add(&CONST_PI4, &CONST_PI2, st1_ptr, FULL_PRECISION, SIGN_POS, exponent(&CONST_PI4), exponent(&CONST_PI2)); if (tag >= 0) FPU_settagi(1, tag); } } else { if ((st1_tag == TW_Denormal) && (denormal_operand() < 0)) return; if (signpositive(st0_ptr)) { FPU_copy_to_reg1(&CONST_Z, TAG_Zero); setsign(st1_ptr, sign); /* An 80486 preserves the sign */ FPU_pop(); return; } else { FPU_copy_to_reg1(&CONST_PI, TAG_Valid); } } } else { /* st(1) is infinity, st(0) not infinity */ if ((st0_tag == TW_Denormal) && (denormal_operand() < 0)) return; FPU_copy_to_reg1(&CONST_PI2, TAG_Valid); } setsign(st1_ptr, sign); } else if (st1_tag == TAG_Zero) { /* st(0) must be valid or 
zero */ u_char sign = getsign(st1_ptr); if ((st0_tag == TW_Denormal) && (denormal_operand() < 0)) return; if (signpositive(st0_ptr)) { /* An 80486 preserves the sign */ FPU_pop(); return; } FPU_copy_to_reg1(&CONST_PI, TAG_Valid); setsign(st1_ptr, sign); } else if (st0_tag == TAG_Zero) { /* st(1) must be TAG_Valid here */ u_char sign = getsign(st1_ptr); if ((st1_tag == TW_Denormal) && (denormal_operand() < 0)) return; FPU_copy_to_reg1(&CONST_PI2, TAG_Valid); setsign(st1_ptr, sign); } #ifdef PARANOID else EXCEPTION(EX_INTERNAL | 0x125); #endif /* PARANOID */ FPU_pop(); set_precision_flag_up(); /* We do not really know if up or down */ } static void fprem(FPU_REG *st0_ptr, u_char st0_tag) { do_fprem(st0_ptr, st0_tag, RC_CHOP); } static void fprem1(FPU_REG *st0_ptr, u_char st0_tag) { do_fprem(st0_ptr, st0_tag, RC_RND); } static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag) { u_char sign, sign1; FPU_REG *st1_ptr = &st(1), a, b; u_char st1_tag = FPU_gettagi(1); clear_C1(); if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) { valid_yl2xp1: sign = getsign(st0_ptr); sign1 = getsign(st1_ptr); FPU_to_exp16(st0_ptr, &a); FPU_to_exp16(st1_ptr, &b); if (poly_l2p1(sign, sign1, &a, &b, st1_ptr)) return; FPU_pop(); return; } if (st0_tag == TAG_Special) st0_tag = FPU_Special(st0_ptr); if (st1_tag == TAG_Special) st1_tag = FPU_Special(st1_ptr); if (((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal)) || ((st0_tag == TW_Denormal) && (st1_tag == TAG_Valid)) || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal))) { if (denormal_operand() < 0) return; goto valid_yl2xp1; } else if ((st0_tag == TAG_Empty) | (st1_tag == TAG_Empty)) { FPU_stack_underflow_pop(1); return; } else if (st0_tag == TAG_Zero) { switch (st1_tag) { case TW_Denormal: if (denormal_operand() < 0) return; case TAG_Zero: case TAG_Valid: setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr)); FPU_copy_to_reg1(st0_ptr, st0_tag); break; case TW_Infinity: /* Infinity*log(1) */ if (arith_invalid(1) < 0) return; break; 
case TW_NaN: if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0) return; break; default: #ifdef PARANOID EXCEPTION(EX_INTERNAL | 0x116); return; #endif /* PARANOID */ break; } } else if ((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal)) { switch (st1_tag) { case TAG_Zero: if (signnegative(st0_ptr)) { if (exponent(st0_ptr) >= 0) { /* st(0) holds <= -1.0 */ #ifdef PECULIAR_486 /* Stupid 80486 doesn't worry about log(negative). */ changesign(st1_ptr); #else if (arith_invalid(1) < 0) return; #endif /* PECULIAR_486 */ } else if ((st0_tag == TW_Denormal) && (denormal_operand() < 0)) return; else changesign(st1_ptr); } else if ((st0_tag == TW_Denormal) && (denormal_operand() < 0)) return; break; case TW_Infinity: if (signnegative(st0_ptr)) { if ((exponent(st0_ptr) >= 0) && !((st0_ptr->sigh == 0x80000000) && (st0_ptr->sigl == 0))) { /* st(0) holds < -1.0 */ #ifdef PECULIAR_486 /* Stupid 80486 doesn't worry about log(negative). */ changesign(st1_ptr); #else if (arith_invalid(1) < 0) return; #endif /* PECULIAR_486 */ } else if ((st0_tag == TW_Denormal) && (denormal_operand() < 0)) return; else changesign(st1_ptr); } else if ((st0_tag == TW_Denormal) && (denormal_operand() < 0)) return; break; case TW_NaN: if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0) return; } } else if (st0_tag == TW_NaN) { if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0) return; } else if (st0_tag == TW_Infinity) { if (st1_tag == TW_NaN) { if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0) return; } else if (signnegative(st0_ptr)) { #ifndef PECULIAR_486 /* This should have higher priority than denormals, but... 
*/ if (arith_invalid(1) < 0) /* log(-infinity) */ return; #endif /* PECULIAR_486 */ if ((st1_tag == TW_Denormal) && (denormal_operand() < 0)) return; #ifdef PECULIAR_486 /* Denormal operands actually get higher priority */ if (arith_invalid(1) < 0) /* log(-infinity) */ return; #endif /* PECULIAR_486 */ } else if (st1_tag == TAG_Zero) { /* log(infinity) */ if (arith_invalid(1) < 0) return; } /* st(1) must be valid here. */ else if ((st1_tag == TW_Denormal) && (denormal_operand() < 0)) return; /* The Manual says that log(Infinity) is invalid, but a real 80486 sensibly says that it is o.k. */ else { u_char sign = getsign(st1_ptr); FPU_copy_to_reg1(&CONST_INF, TAG_Special); setsign(st1_ptr, sign); } } #ifdef PARANOID else { EXCEPTION(EX_INTERNAL | 0x117); return; } #endif /* PARANOID */ FPU_pop(); return; } static void fscale(FPU_REG *st0_ptr, u_char st0_tag) { FPU_REG *st1_ptr = &st(1); u_char st1_tag = FPU_gettagi(1); int old_cw = control_word; u_char sign = getsign(st0_ptr); clear_C1(); if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) { long scale; FPU_REG tmp; /* Convert register for internal use. */ setexponent16(st0_ptr, exponent(st0_ptr)); valid_scale: if (exponent(st1_ptr) > 30) { /* 2^31 is far too large, would require 2^(2^30) or 2^(-2^30) */ if (signpositive(st1_ptr)) { EXCEPTION(EX_Overflow); FPU_copy_to_reg0(&CONST_INF, TAG_Special); } else { EXCEPTION(EX_Underflow); FPU_copy_to_reg0(&CONST_Z, TAG_Zero); } setsign(st0_ptr, sign); return; } control_word &= ~CW_RC; control_word |= RC_CHOP; reg_copy(st1_ptr, &tmp); FPU_round_to_int(&tmp, st1_tag); /* This can never overflow here */ control_word = old_cw; scale = signnegative(st1_ptr) ? 
-tmp.sigl : tmp.sigl; scale += exponent16(st0_ptr); setexponent16(st0_ptr, scale); /* Use FPU_round() to properly detect under/overflow etc */ FPU_round(st0_ptr, 0, 0, control_word, sign); return; } if (st0_tag == TAG_Special) st0_tag = FPU_Special(st0_ptr); if (st1_tag == TAG_Special) st1_tag = FPU_Special(st1_ptr); if ((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal)) { switch (st1_tag) { case TAG_Valid: /* st(0) must be a denormal */ if ((st0_tag == TW_Denormal) && (denormal_operand() < 0)) return; FPU_to_exp16(st0_ptr, st0_ptr); /* Will not be left on stack */ goto valid_scale; case TAG_Zero: if (st0_tag == TW_Denormal) denormal_operand(); return; case TW_Denormal: denormal_operand(); return; case TW_Infinity: if ((st0_tag == TW_Denormal) && (denormal_operand() < 0)) return; if (signpositive(st1_ptr)) FPU_copy_to_reg0(&CONST_INF, TAG_Special); else FPU_copy_to_reg0(&CONST_Z, TAG_Zero); setsign(st0_ptr, sign); return; case TW_NaN: real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr); return; } } else if (st0_tag == TAG_Zero) { switch (st1_tag) { case TAG_Valid: case TAG_Zero: return; case TW_Denormal: denormal_operand(); return; case TW_Infinity: if (signpositive(st1_ptr)) arith_invalid(0); /* Zero scaled by +Infinity */ return; case TW_NaN: real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr); return; } } else if (st0_tag == TW_Infinity) { switch (st1_tag) { case TAG_Valid: case TAG_Zero: return; case TW_Denormal: denormal_operand(); return; case TW_Infinity: if (signnegative(st1_ptr)) arith_invalid(0); /* Infinity scaled by -Infinity */ return; case TW_NaN: real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr); return; } } else if (st0_tag == TW_NaN) { if (st1_tag != TAG_Empty) { real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr); return; } } #ifdef PARANOID if (!((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty))) { EXCEPTION(EX_INTERNAL | 0x115); return; } #endif /* At least one of st(0), st(1) must be empty */ FPU_stack_underflow(); } 
/*---------------------------------------------------------------------------*/ static FUNC_ST0 const trig_table_a[] = { f2xm1, fyl2x, fptan, fpatan, fxtract, fprem1, (FUNC_ST0) fdecstp, (FUNC_ST0) fincstp }; void FPU_triga(void) { (trig_table_a[FPU_rm]) (&st(0), FPU_gettag0()); } static FUNC_ST0 const trig_table_b[] = { fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, (FUNC_ST0) fsin, fcos }; void FPU_trigb(void) { (trig_table_b[FPU_rm]) (&st(0), FPU_gettag0()); }
gpl-2.0
vokama/obsforvk
deps/w32-pthreads/pthread_win32_attach_detach_np.c
82
7039
/* * pthread_win32_attach_detach_np.c * * Description: * This translation unit implements non-portable thread functions. * * -------------------------------------------------------------------------- * * Pthreads-win32 - POSIX Threads Library for Win32 * Copyright(C) 1998 John E. Bossom * Copyright(C) 1999,2005 Pthreads-win32 contributors * * Contact Email: rpj@callisto.canberra.edu.au * * The current list of contributors is contained * in the file CONTRIBUTORS included with the source * code distribution. The list can also be seen at the * following World Wide Web location: * http://sources.redhat.com/pthreads-win32/contributors.html * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library in the file COPYING.LIB; * if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ #include "pthread.h" #include "implement.h" #include <tchar.h> /* * Handle to quserex.dll */ static HINSTANCE ptw32_h_quserex; BOOL pthread_win32_process_attach_np () { TCHAR QuserExDLLPathBuf[1024]; BOOL result = TRUE; result = ptw32_processInitialize (); #if defined(_UWIN) pthread_count++; #endif #if defined(__GNUC__) ptw32_features = 0; #else /* * This is obsolete now. */ ptw32_features = PTW32_SYSTEM_INTERLOCKED_COMPARE_EXCHANGE; #endif /* * Load QUSEREX.DLL and try to get address of QueueUserAPCEx. 
* Because QUSEREX.DLL requires a driver to be installed we will * assume the DLL is in the system directory. * * This should take care of any security issues. */ #if defined(__GNUC__) || _MSC_VER < 1400 if(GetSystemDirectory(QuserExDLLPathBuf, sizeof(QuserExDLLPathBuf))) { (void) strncat(QuserExDLLPathBuf, "\\QUSEREX.DLL", sizeof(QuserExDLLPathBuf) - strlen(QuserExDLLPathBuf) - 1); ptw32_h_quserex = LoadLibrary(QuserExDLLPathBuf); } #else /* strncat is secure - this is just to avoid a warning */ if(GetSystemDirectory(QuserExDLLPathBuf, sizeof(QuserExDLLPathBuf) / sizeof(TCHAR)) && 0 == _tcsncat_s(QuserExDLLPathBuf, sizeof(QuserExDLLPathBuf) / sizeof(TCHAR), TEXT("\\QUSEREX.DLL"), 12)) { ptw32_h_quserex = LoadLibrary(QuserExDLLPathBuf); } #endif if (ptw32_h_quserex != NULL) { ptw32_register_cancelation = (DWORD (*)(PAPCFUNC, HANDLE, DWORD)) #if defined(NEED_UNICODE_CONSTS) GetProcAddress (ptw32_h_quserex, (const TCHAR *) TEXT ("QueueUserAPCEx")); #else GetProcAddress (ptw32_h_quserex, (LPCSTR) "QueueUserAPCEx"); #endif } if (NULL == ptw32_register_cancelation) { ptw32_register_cancelation = ptw32_RegisterCancelation; if (ptw32_h_quserex != NULL) { (void) FreeLibrary (ptw32_h_quserex); } ptw32_h_quserex = 0; } else { /* Initialise QueueUserAPCEx */ BOOL (*queue_user_apc_ex_init) (VOID); queue_user_apc_ex_init = (BOOL (*)(VOID)) #if defined(NEED_UNICODE_CONSTS) GetProcAddress (ptw32_h_quserex, (const TCHAR *) TEXT ("QueueUserAPCEx_Init")); #else GetProcAddress (ptw32_h_quserex, (LPCSTR) "QueueUserAPCEx_Init"); #endif if (queue_user_apc_ex_init == NULL || !queue_user_apc_ex_init ()) { ptw32_register_cancelation = ptw32_RegisterCancelation; (void) FreeLibrary (ptw32_h_quserex); ptw32_h_quserex = 0; } } if (ptw32_h_quserex) { ptw32_features |= PTW32_ALERTABLE_ASYNC_CANCEL; } return result; } BOOL pthread_win32_process_detach_np () { if (ptw32_processInitialized) { ptw32_thread_t * sp = (ptw32_thread_t *) pthread_getspecific (ptw32_selfThreadKey); if (sp != NULL) { /* * 
Detached threads have their resources automatically * cleaned up upon exit (others must be 'joined'). */ if (sp->detachState == PTHREAD_CREATE_DETACHED) { ptw32_threadDestroy (sp->ptHandle); TlsSetValue (ptw32_selfThreadKey->key, NULL); } } /* * The DLL is being unmapped from the process's address space */ ptw32_processTerminate (); if (ptw32_h_quserex) { /* Close QueueUserAPCEx */ BOOL (*queue_user_apc_ex_fini) (VOID); queue_user_apc_ex_fini = (BOOL (*)(VOID)) #if defined(NEED_UNICODE_CONSTS) GetProcAddress (ptw32_h_quserex, (const TCHAR *) TEXT ("QueueUserAPCEx_Fini")); #else GetProcAddress (ptw32_h_quserex, (LPCSTR) "QueueUserAPCEx_Fini"); #endif if (queue_user_apc_ex_fini != NULL) { (void) queue_user_apc_ex_fini (); } (void) FreeLibrary (ptw32_h_quserex); } } return TRUE; } BOOL pthread_win32_thread_attach_np () { return TRUE; } BOOL pthread_win32_thread_detach_np () { if (ptw32_processInitialized) { /* * Don't use pthread_self() - to avoid creating an implicit POSIX thread handle * unnecessarily. */ ptw32_thread_t * sp = (ptw32_thread_t *) pthread_getspecific (ptw32_selfThreadKey); if (sp != NULL) // otherwise Win32 thread with no implicit POSIX handle. { ptw32_mcs_local_node_t stateLock; ptw32_callUserDestroyRoutines (sp->ptHandle); ptw32_mcs_lock_acquire (&sp->stateLock, &stateLock); sp->state = PThreadStateLast; /* * If the thread is joinable at this point then it MUST be joined * or detached explicitly by the application. */ ptw32_mcs_lock_release (&stateLock); /* * Robust Mutexes */ while (sp->robustMxList != NULL) { pthread_mutex_t mx = sp->robustMxList->mx; ptw32_robust_mutex_remove(&mx, sp); (void) PTW32_INTERLOCKED_EXCHANGE_LONG( (PTW32_INTERLOCKED_LONGPTR)&mx->robustNode->stateInconsistent, (PTW32_INTERLOCKED_LONG)-1); /* * If there are no waiters then the next thread to block will * sleep, wakeup immediately and then go back to sleep. * See pthread_mutex_lock.c. 
*/ SetEvent(mx->event); } if (sp->detachState == PTHREAD_CREATE_DETACHED) { ptw32_threadDestroy (sp->ptHandle); TlsSetValue (ptw32_selfThreadKey->key, NULL); } } } return TRUE; } BOOL pthread_win32_test_features_np (int feature_mask) { return ((ptw32_features & feature_mask) == feature_mask); }
gpl-2.0
aospan/linux-stable-netup-universal-dvb-1.4
arch/arm/mach-imx/avic.c
82
5910
/* * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright 2008 Juergen Beisert, kernel@pengutronix.de * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. */ #include <linux/module.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/io.h> #include <linux/of.h> #include <asm/mach/irq.h> #include <asm/exception.h> #include "common.h" #include "hardware.h" #include "irq-common.h" #define AVIC_INTCNTL 0x00 /* int control reg */ #define AVIC_NIMASK 0x04 /* int mask reg */ #define AVIC_INTENNUM 0x08 /* int enable number reg */ #define AVIC_INTDISNUM 0x0C /* int disable number reg */ #define AVIC_INTENABLEH 0x10 /* int enable reg high */ #define AVIC_INTENABLEL 0x14 /* int enable reg low */ #define AVIC_INTTYPEH 0x18 /* int type reg high */ #define AVIC_INTTYPEL 0x1C /* int type reg low */ #define AVIC_NIPRIORITY(x) (0x20 + 4 * (7 - (x))) /* int priority */ #define AVIC_NIVECSR 0x40 /* norm int vector/status */ #define AVIC_FIVECSR 0x44 /* fast int vector/status */ #define AVIC_INTSRCH 0x48 /* int source reg high */ #define AVIC_INTSRCL 0x4C /* int source reg low */ #define AVIC_INTFRCH 0x50 /* int force reg high */ #define AVIC_INTFRCL 0x54 /* int force reg low */ #define AVIC_NIPNDH 0x58 /* norm int pending high */ #define AVIC_NIPNDL 0x5C /* norm int pending low */ #define 
AVIC_FIPNDH 0x60 /* fast int pending high */ #define AVIC_FIPNDL 0x64 /* fast int pending low */ #define AVIC_NUM_IRQS 64 static void __iomem *avic_base; static struct irq_domain *domain; #ifdef CONFIG_FIQ static int avic_set_irq_fiq(unsigned int irq, unsigned int type) { struct irq_data *d = irq_get_irq_data(irq); unsigned int irqt; irq = d->hwirq; if (irq >= AVIC_NUM_IRQS) return -EINVAL; if (irq < AVIC_NUM_IRQS / 2) { irqt = imx_readl(avic_base + AVIC_INTTYPEL) & ~(1 << irq); imx_writel(irqt | (!!type << irq), avic_base + AVIC_INTTYPEL); } else { irq -= AVIC_NUM_IRQS / 2; irqt = imx_readl(avic_base + AVIC_INTTYPEH) & ~(1 << irq); imx_writel(irqt | (!!type << irq), avic_base + AVIC_INTTYPEH); } return 0; } #endif /* CONFIG_FIQ */ static struct mxc_extra_irq avic_extra_irq = { #ifdef CONFIG_FIQ .set_irq_fiq = avic_set_irq_fiq, #endif }; #ifdef CONFIG_PM static u32 avic_saved_mask_reg[2]; static void avic_irq_suspend(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = gc->chip_types; int idx = d->hwirq >> 5; avic_saved_mask_reg[idx] = imx_readl(avic_base + ct->regs.mask); imx_writel(gc->wake_active, avic_base + ct->regs.mask); } static void avic_irq_resume(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = gc->chip_types; int idx = d->hwirq >> 5; imx_writel(avic_saved_mask_reg[idx], avic_base + ct->regs.mask); } #else #define avic_irq_suspend NULL #define avic_irq_resume NULL #endif static __init void avic_init_gc(int idx, unsigned int irq_start) { struct irq_chip_generic *gc; struct irq_chip_type *ct; gc = irq_alloc_generic_chip("mxc-avic", 1, irq_start, avic_base, handle_level_irq); gc->private = &avic_extra_irq; gc->wake_enabled = IRQ_MSK(32); ct = gc->chip_types; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->chip.irq_ack = irq_gc_mask_clr_bit; ct->chip.irq_set_wake = irq_gc_set_wake; ct->chip.irq_suspend 
= avic_irq_suspend; ct->chip.irq_resume = avic_irq_resume; ct->regs.mask = !idx ? AVIC_INTENABLEL : AVIC_INTENABLEH; ct->regs.ack = ct->regs.mask; irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0); } static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs) { u32 nivector; do { nivector = imx_readl(avic_base + AVIC_NIVECSR) >> 16; if (nivector == 0xffff) break; handle_domain_irq(domain, nivector, regs); } while (1); } /* * This function initializes the AVIC hardware and disables all the * interrupts. It registers the interrupt enable and disable functions * to the kernel for each interrupt source. */ void __init mxc_init_irq(void __iomem *irqbase) { struct device_node *np; int irq_base; int i; avic_base = irqbase; /* put the AVIC into the reset value with * all interrupts disabled */ imx_writel(0, avic_base + AVIC_INTCNTL); imx_writel(0x1f, avic_base + AVIC_NIMASK); /* disable all interrupts */ imx_writel(0, avic_base + AVIC_INTENABLEH); imx_writel(0, avic_base + AVIC_INTENABLEL); /* all IRQ no FIQ */ imx_writel(0, avic_base + AVIC_INTTYPEH); imx_writel(0, avic_base + AVIC_INTTYPEL); irq_base = irq_alloc_descs(-1, 0, AVIC_NUM_IRQS, numa_node_id()); WARN_ON(irq_base < 0); np = of_find_compatible_node(NULL, NULL, "fsl,avic"); domain = irq_domain_add_legacy(np, AVIC_NUM_IRQS, irq_base, 0, &irq_domain_simple_ops, NULL); WARN_ON(!domain); for (i = 0; i < AVIC_NUM_IRQS / 32; i++, irq_base += 32) avic_init_gc(i, irq_base); /* Set default priority value (0) for all IRQ's */ for (i = 0; i < 8; i++) imx_writel(0, avic_base + AVIC_NIPRIORITY(i)); set_handle_irq(avic_handle_irq); #ifdef CONFIG_FIQ /* Initialize FIQ */ init_FIQ(FIQ_START); #endif printk(KERN_INFO "MXC IRQ initialized\n"); }
gpl-2.0
bananacakes/bravo_2.6.35_gb-mr
drivers/mmc/card/block.c
82
22662
/* * Block driver for media (i.e., flash cards) * * Copyright 2002 Hewlett-Packard Company * Copyright 2005-2008 Pierre Ossman * * Use consistent with the GNU GPL is permitted, * provided that this copyright notice is * preserved in its entirety in all copies and derived works. * * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS * FITNESS FOR ANY PARTICULAR PURPOSE. * * Many thanks to Alessandro Rubini and Jonathan Corbet! * * Author: Andrew Christian * 28 May 2002 */ #include <linux/moduleparam.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/hdreg.h> #include <linux/slab.h> #include <linux/kdev_t.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/scatterlist.h> #include <linux/string_helpers.h> #include <linux/genhd.h> #include <linux/delay.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sd.h> #include <asm/system.h> #include <asm/uaccess.h> #include "queue.h" MODULE_ALIAS("mmc:block"); /* * max 8 partitions per card */ #if defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) #define MMC_SHIFT 6 #else #define MMC_SHIFT 3 #endif #define MMC_NUM_MINORS (256 >> MMC_SHIFT) extern int board_emmc_boot(void); static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS); /* * There is one mmc_blk_data per slot. 
*/ struct mmc_blk_data { spinlock_t lock; struct gendisk *disk; struct mmc_queue queue; unsigned int usage; unsigned int read_only; }; static DEFINE_MUTEX(open_lock); static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) { struct mmc_blk_data *md; mutex_lock(&open_lock); md = disk->private_data; if (md && md->usage == 0) md = NULL; if (md) md->usage++; mutex_unlock(&open_lock); return md; } static void mmc_blk_put(struct mmc_blk_data *md) { mutex_lock(&open_lock); md->usage--; if (md->usage == 0) { int devidx = md->disk->first_minor >> MMC_SHIFT; blk_cleanup_queue(md->queue.queue); __clear_bit(devidx, dev_use); put_disk(md->disk); kfree(md); } mutex_unlock(&open_lock); } static int mmc_blk_open(struct block_device *bdev, fmode_t mode) { struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); int ret = -ENXIO; if (md) { if (md->usage == 2) check_disk_change(bdev); ret = 0; if ((mode & FMODE_WRITE) && md->read_only) { mmc_blk_put(md); ret = -EROFS; } } return ret; } static int mmc_blk_release(struct gendisk *disk, fmode_t mode) { struct mmc_blk_data *md = disk->private_data; mmc_blk_put(md); return 0; } static int mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) { geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); geo->heads = 4; geo->sectors = 16; return 0; } static const struct block_device_operations mmc_bdops = { .open = mmc_blk_open, .release = mmc_blk_release, .getgeo = mmc_blk_getgeo, .owner = THIS_MODULE, }; struct mmc_blk_request { struct mmc_request mrq; struct mmc_command cmd; struct mmc_command stop; struct mmc_data data; }; static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) { int err; u32 result; __be32 *blocks; struct mmc_request mrq; struct mmc_command cmd; struct mmc_data data; unsigned int timeout_us; struct scatterlist sg; memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_APP_CMD; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 0); 
if (err) return (u32)-1; if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD)) return (u32)-1; memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = SD_APP_SEND_NUM_WR_BLKS; cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; memset(&data, 0, sizeof(struct mmc_data)); data.timeout_ns = card->csd.tacc_ns * 100; data.timeout_clks = card->csd.tacc_clks * 100; timeout_us = data.timeout_ns / 1000; timeout_us += data.timeout_clks * 1000 / (card->host->ios.clock / 1000); if (timeout_us > 100000) { data.timeout_ns = 100000000; data.timeout_clks = 0; } data.blksz = 4; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; memset(&mrq, 0, sizeof(struct mmc_request)); mrq.cmd = &cmd; mrq.data = &data; blocks = kmalloc(4, GFP_KERNEL); if (!blocks) return (u32)-1; sg_init_one(&sg, blocks, 4); mmc_wait_for_req(card->host, &mrq); result = ntohl(*blocks); kfree(blocks); if (cmd.error || data.error) result = (u32)-1; return result; } static u32 get_card_status(struct mmc_card *card, struct request *req) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_SEND_STATUS; if (!mmc_host_is_spi(card->host)) cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) printk(KERN_ERR "%s: error %d sending status comand", req->rq_disk->disk_name, err); return cmd.resp[0]; } static int mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) { struct mmc_command cmd; int err; /* Block-addressed cards ignore MMC_SET_BLOCKLEN. 
*/ if (mmc_card_blockaddr(card)) return 0; mmc_claim_host(card->host); cmd.opcode = MMC_SET_BLOCKLEN; cmd.arg = 512; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 5); mmc_release_host(card->host); if (err) { printk(KERN_ERR "%s: unable to set block size to %d: %d\n", md->disk->disk_name, cmd.arg, err); return -EINVAL; } return 0; } static void remove_card(struct mmc_host *host) { printk(KERN_INFO "%s: remove card\n", mmc_hostname(host)); if (!host->card || host->card->removed) { printk(KERN_INFO "%s: card already removed\n", mmc_hostname(host)); return; } if (!mmc_card_present(host->card)) { printk(KERN_INFO "%s: card is not present\n", mmc_hostname(host)); return; } host->card->removed = 1; mmc_schedule_card_removal_work(&host->remove, 0); } static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; struct mmc_blk_request brq; int ret = 1, disable_multi = 0, card_no_ready = 0; int err = 0; int try_recovery = 1, do_reinit = 0, do_remove = 0; #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME int retries = 3; if (mmc_bus_needs_resume(card->host)) { err = mmc_resume_bus(card->host); if (err) { if (mmc_card_sd(card)) remove_card(card->host); spin_lock_irq(&md->lock); __blk_end_request_all(req, -EIO); spin_unlock_irq(&md->lock); return 0; } mmc_blk_set_blksize(md, card); if (mmc_card_mmc(card)) { struct mmc_command cmd; unsigned long delay = jiffies + HZ; int j = 0; do { int err; cmd.opcode = MMC_SEND_STATUS; cmd.arg = mq->card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; mmc_claim_host(mq->card->host); err = mmc_wait_for_cmd(mq->card->host, &cmd, 5); mmc_release_host(mq->card->host); if (err) { printk(KERN_ERR "failed to get status(%d)!!\n" , err); msleep(5); retries--; continue; } if (time_after(jiffies, delay) && (fls(j) > 10)) { if ((cmd.resp[0] & R1_READY_FOR_DATA) && (R1_CURRENT_STATE(cmd.resp[0]) == 4)) { printk(KERN_ERR "Timeout 
but get card ready j = %d\n", j); break; } card_no_ready++; printk(KERN_ERR "Failed to get card ready %d\n", card_no_ready); break; } j++; } while (retries && (!(cmd.resp[0] & R1_READY_FOR_DATA) || (R1_CURRENT_STATE(cmd.resp[0]) == 7))); } } if (mmc_bus_fails_resume(card->host) || card_no_ready || !retries) { spin_lock_irq(&md->lock); __blk_end_request_all(req, -EIO); spin_unlock_irq(&md->lock); return 0; } #endif mmc_claim_host(card->host); do { struct mmc_command cmd; u32 readcmd, writecmd, status = 0; memset(&brq, 0, sizeof(struct mmc_blk_request)); brq.mrq.cmd = &brq.cmd; brq.mrq.data = &brq.data; brq.cmd.arg = blk_rq_pos(req); if (!mmc_card_blockaddr(card)) brq.cmd.arg <<= 9; brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; brq.data.blksz = 512; brq.stop.opcode = MMC_STOP_TRANSMISSION; brq.stop.arg = 0; brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; brq.data.blocks = blk_rq_sectors(req); /* * The block layer doesn't support all sector count * restrictions, so we need to be prepared for too big * requests. */ if (brq.data.blocks > card->host->max_blk_count) brq.data.blocks = card->host->max_blk_count; /* * After a read error, we redo the request one sector at a time * in order to accurately determine which sectors can be read * successfully. */ if (disable_multi && brq.data.blocks > 1) brq.data.blocks = 1; if (brq.data.blocks > 1) { /* SPI multiblock writes terminate using a special * token, not a STOP_TRANSMISSION request. 
*/ if (!mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) brq.mrq.stop = &brq.stop; readcmd = MMC_READ_MULTIPLE_BLOCK; writecmd = MMC_WRITE_MULTIPLE_BLOCK; } else { brq.mrq.stop = NULL; readcmd = MMC_READ_SINGLE_BLOCK; writecmd = MMC_WRITE_BLOCK; } if (rq_data_dir(req) == READ) { brq.cmd.opcode = readcmd; brq.data.flags |= MMC_DATA_READ; } else { brq.cmd.opcode = writecmd; brq.data.flags |= MMC_DATA_WRITE; #if defined(CONFIG_ARCH_MSM7X30) if (board_emmc_boot()) if (mmc_card_mmc(card)) { if (brq.cmd.arg < 131073) {/* should not write any value before 131073 */ pr_err("%s: pid %d(tgid %d)(%s)\n", __func__, (unsigned)(current->pid), (unsigned)(current->tgid), current->comm); pr_err("ERROR! Attemp to write radio partition start %d size %d\n" , brq.cmd.arg, blk_rq_sectors(req)); BUG(); return 0; } #if defined(CONFIG_ARCH_MSM7230) if ((brq.cmd.arg > 143361) && (brq.cmd.arg < 163328)) { pr_err("%s: pid %d(tgid %d)(%s)\n", __func__, (unsigned)(current->pid), (unsigned)(current->tgid), current->comm); pr_err("ERROR! Attemp to write radio partition start %d size %d\n" , brq.cmd.arg, blk_rq_sectors(req)); BUG(); return 0; } #endif } #endif } mmc_set_data_timeout(&brq.data, card); brq.data.sg = mq->sg; brq.data.sg_len = mmc_queue_map_sg(mq); /* * Adjust the sg list so it is the same size as the * request. */ if (brq.data.blocks != blk_rq_sectors(req)) { int i, data_size = brq.data.blocks << 9; struct scatterlist *sg; for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) { data_size -= sg->length; if (data_size <= 0) { sg->length += data_size; i++; break; } } brq.data.sg_len = i; } mmc_queue_bounce_pre(mq); mmc_wait_for_req(card->host, &brq.mrq); mmc_queue_bounce_post(mq); /* * Check for errors here, but don't jump to cmd_err * until later as we need to wait for the card to leave * programming mode even when things go wrong. 
*/ if (brq.cmd.error || brq.data.error || brq.stop.error) { if (brq.data.blocks > 1 && rq_data_dir(req) == READ) { if (brq.cmd.error) { printk(KERN_ERR "%s: error %d sending read " "command, response %#x\n", req->rq_disk->disk_name, brq.cmd.error, brq.cmd.resp[0]); } /* Redo read one sector at a time */ printk(KERN_WARNING "%s: retrying using single " "block read\n", req->rq_disk->disk_name); disable_multi = 1; continue; } status = get_card_status(card, req); } else if (disable_multi == 1) { disable_multi = 0; } if (brq.cmd.error) { printk(KERN_ERR "%s: error %d sending read/write " "command, response %#x, card status %#x\n", req->rq_disk->disk_name, brq.cmd.error, brq.cmd.resp[0], status); } if (brq.data.error) { if (brq.data.error == -ETIMEDOUT && brq.mrq.stop) /* 'Stop' response contains card status */ status = brq.mrq.stop->resp[0]; printk(KERN_ERR "%s: error %d transferring data," " sector %u, nr %u, card status %#x\n", req->rq_disk->disk_name, brq.data.error, (unsigned)blk_rq_pos(req), (unsigned)blk_rq_sectors(req), status); } if (brq.stop.error) { printk(KERN_ERR "%s: error %d sending stop command, " "response %#x, card status %#x\n", req->rq_disk->disk_name, brq.stop.error, brq.stop.resp[0], status); } if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { int i = 0; int sleepy = mmc_card_sd(card) ? 
1 : 0; unsigned int msec = 0; unsigned long delay = jiffies + HZ; err = 0; do { if (sleepy && (fls(i) > 11)) { msec = (unsigned int)fls(i >> 11); msleep(msec); if (msec > 3 && ((i - 1) & i) == 0) { printk(KERN_ERR "%s: start " "sleep %u msecs\n", req->rq_disk->disk_name, msec); } } cmd.opcode = MMC_SEND_STATUS; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 5); if (err) { printk(KERN_ERR "%s: error %d requesting status\n", req->rq_disk->disk_name, err); goto cmd_err; } if (time_after(jiffies, delay) && (fls(i) > 10)) { if ((cmd.resp[0] & R1_READY_FOR_DATA) && (R1_CURRENT_STATE(cmd.resp[0]) == 4)) { printk(KERN_ERR "%s: timeout but get card ready i = %d\n", mmc_hostname(card->host), i); break; } if (try_recovery == 1) do_reinit = 1; else if (mmc_card_sd(card) && (try_recovery == 2)) do_remove = 1; try_recovery++; err = 1; card_no_ready++; printk(KERN_ERR "%s: Failed to get card ready i = %d\n", mmc_hostname(card->host), i); break; } /* * Some cards mishandle the status bits, * so make sure to check both the busy * indication and the card state. 
*/ i++; } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || (R1_CURRENT_STATE(cmd.resp[0]) == 7)); #if 0 if (cmd.resp[0] & ~0x00000900) printk(KERN_ERR "%s: status = %08x\n", req->rq_disk->disk_name, cmd.resp[0]); if (mmc_decode_status(cmd.resp)) goto cmd_err; #endif if (!err) card_no_ready = 0; } recovery: if (do_reinit) { do_reinit = 0; if (card->removed) goto cmd_err; printk(KERN_INFO "%s: reinit card\n", mmc_hostname(card->host)); card->host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME; err = mmc_resume_bus(card->host); if (!err) { mmc_blk_set_blksize(md, card); continue; } else { if (mmc_card_sd(card)) { printk(KERN_INFO "mmc: reinit failed, remove card\n"); remove_card(card->host); } goto cmd_err; } } else if (do_remove) { do_remove = 0; remove_card(card->host); goto cmd_err; } if (brq.cmd.error || brq.stop.error || brq.data.error || card_no_ready) { if (try_recovery == 1) do_reinit = 1; else if (mmc_card_sd(card) && (try_recovery == 2)) do_remove = 1; try_recovery++; if (do_reinit || do_remove) goto recovery; if (rq_data_dir(req) == READ) { /* * After an error, we redo I/O one sector at a * time, so we only reach here after trying to * read a single sector. */ spin_lock_irq(&md->lock); ret = __blk_end_request(req, -EIO, brq.data.blksz); spin_unlock_irq(&md->lock); continue; } goto cmd_err; } /* * A block was successfully transferred. */ spin_lock_irq(&md->lock); ret = __blk_end_request(req, 0, brq.data.bytes_xfered); spin_unlock_irq(&md->lock); } while (ret); mmc_release_host(card->host); return 1; cmd_err: /* * If this is an SD card and we're writing, we can first * mark the known good sectors as ok. * * If the card is not SD, we can still ok written sectors * as reported by the controller (which might be less than * the real number of written sectors, but never more). 
*/ if (mmc_card_sd(card)) { u32 blocks; blocks = mmc_sd_num_wr_blocks(card); if (blocks != (u32)-1) { spin_lock_irq(&md->lock); ret = __blk_end_request(req, 0, blocks << 9); spin_unlock_irq(&md->lock); } } else { spin_lock_irq(&md->lock); ret = __blk_end_request(req, 0, brq.data.bytes_xfered); spin_unlock_irq(&md->lock); } mmc_release_host(card->host); spin_lock_irq(&md->lock); while (ret) ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); spin_unlock_irq(&md->lock); return 0; } static inline int mmc_blk_readonly(struct mmc_card *card) { return mmc_card_readonly(card) || !(card->csd.cmdclass & CCC_BLOCK_WRITE); } static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) { struct mmc_blk_data *md; int devidx, ret; devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS); if (devidx >= MMC_NUM_MINORS) return ERR_PTR(-ENOSPC); __set_bit(devidx, dev_use); md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); if (!md) { ret = -ENOMEM; goto out; } /* * Set the read-only status based on the supported commands * and the write protect switch. */ md->read_only = mmc_blk_readonly(card); md->disk = alloc_disk(1 << MMC_SHIFT); if (md->disk == NULL) { ret = -ENOMEM; goto err_kfree; } spin_lock_init(&md->lock); md->usage = 1; ret = mmc_init_queue(&md->queue, card, &md->lock); if (ret) goto err_putdisk; md->queue.issue_fn = mmc_blk_issue_rq; md->queue.data = md; md->disk->major = MMC_BLOCK_MAJOR; md->disk->first_minor = devidx << MMC_SHIFT; md->disk->fops = &mmc_bdops; md->disk->private_data = md; md->disk->queue = md->queue.queue; md->disk->driverfs_dev = &card->dev; md->disk->flags = GENHD_FL_EXT_DEVT; /* * As discussed on lkml, GENHD_FL_REMOVABLE should: * * - be set for removable media with permanent block devices * - be unset for removable block devices with permanent media * * Since MMC block devices clearly fall under the second * case, we do not set GENHD_FL_REMOVABLE. 
Userspace * should use the block device creation/destruction hotplug * messages to tell when the card is present. */ sprintf(md->disk->disk_name, "mmcblk%d", devidx); blk_queue_logical_block_size(md->queue.queue, 512); if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { /* * The EXT_CSD sector count is in number or 512 byte * sectors. */ set_capacity(md->disk, card->ext_csd.sectors); } else { /* * The CSD capacity field is in units of read_blkbits. * set_capacity takes units of 512 bytes. */ set_capacity(md->disk, card->csd.capacity << (card->csd.read_blkbits - 9)); } return md; err_putdisk: put_disk(md->disk); err_kfree: kfree(md); out: return ERR_PTR(ret); } static int mmc_blk_probe(struct mmc_card *card) { struct mmc_blk_data *md; int err; char cap_str[10]; /* * Check that the card supports the command class(es) we need. */ if (!(card->csd.cmdclass & CCC_BLOCK_READ)) return -ENODEV; md = mmc_blk_alloc(card); if (IS_ERR(md)) return PTR_ERR(md); err = mmc_blk_set_blksize(md, card); if (err) goto out; string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, cap_str, sizeof(cap_str)); printk(KERN_INFO "%s: %s %s %s %s\n", md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), cap_str, md->read_only ? "(ro)" : ""); mmc_set_drvdata(card, md); mmc_init_bus_resume_flags(card->host); #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME if (mmc_card_sd(card) || mmc_card_mmc(card)) mmc_set_bus_resume_policy(card->host, 1); #endif add_disk(md->disk); return 0; out: mmc_cleanup_queue(&md->queue); mmc_blk_put(md); return err; } /* * Duplicate from fs/partitions/check.c del_gendisk(), but disable * fsync_bdev(). 
*/ void del_gendisk_async(struct gendisk *disk) { struct disk_part_iter piter; struct hd_struct *part; /* invalidate stuff */ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); while ((part = disk_part_iter_next(&piter))) { struct block_device *bdev = bdget_disk(disk, part->partno); if (bdev) { __invalidate_device(bdev); bdput(bdev); } delete_partition(disk, part->partno); } disk_part_iter_exit(&piter); invalidate_partition(disk, 0); blk_free_devt(disk_to_dev(disk)->devt); set_capacity(disk, 0); disk->flags &= ~GENHD_FL_UP; unlink_gendisk(disk); part_stat_set_all(&disk->part0, 0); disk->part0.stamp = 0; kobject_put(disk->part0.holder_dir); kobject_put(disk->slave_dir); disk->driverfs_dev = NULL; #ifndef CONFIG_SYSFS_DEPRECATED sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); #endif device_del(disk_to_dev(disk)); } static void mmc_blk_remove(struct mmc_card *card) { struct mmc_blk_data *md = mmc_get_drvdata(card); if (md) { /* Stop new requests from getting into the queue */ if (mmc_card_sd(card)) del_gendisk_async(md->disk); else del_gendisk(md->disk); /* Then flush out any already in there */ mmc_cleanup_queue(&md->queue); mmc_blk_put(md); } mmc_set_drvdata(card, NULL); #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME mmc_set_bus_resume_policy(card->host, 0); #endif } #ifdef CONFIG_PM static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) { struct mmc_blk_data *md = mmc_get_drvdata(card); if (md) { mmc_queue_suspend(&md->queue); } return 0; } static int mmc_blk_resume(struct mmc_card *card) { struct mmc_blk_data *md = mmc_get_drvdata(card); if (md) { if (!mmc_bus_manual_resume(card->host)) { mmc_blk_set_blksize(md, card); #ifdef CONFIG_MMC_BLOCK_PARANOID_RESUME md->queue.check_status = 1; #endif } mmc_queue_resume(&md->queue); } return 0; } #else #define mmc_blk_suspend NULL #define mmc_blk_resume NULL #endif static struct mmc_driver mmc_driver = { .drv = { .name = "mmcblk", }, .probe = mmc_blk_probe, .remove = 
mmc_blk_remove, .suspend = mmc_blk_suspend, .resume = mmc_blk_resume, }; static int __init mmc_blk_init(void) { int res; res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); if (res) goto out; res = mmc_register_driver(&mmc_driver); if (res) goto out2; return 0; out2: unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); out: return res; } static void __exit mmc_blk_exit(void) { mmc_unregister_driver(&mmc_driver); unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); } module_init(mmc_blk_init); module_exit(mmc_blk_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
gpl-2.0
mayli/wrapfs-latest
drivers/atm/nicstar.c
594
76412
/* * nicstar.c * * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards. * * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME. * It was taken from the frle-0.22 device driver. * As the file doesn't have a copyright notice, in the file * nicstarmac.copyright I put the copyright notice from the * frle-0.22 device driver. * Some code is based on the nicstar driver by M. Welsh. * * Author: Rui Prior (rprior@inescn.pt) * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999 * * * (C) INESC 1999 */ /* * IMPORTANT INFORMATION * * There are currently three types of spinlocks: * * 1 - Per card interrupt spinlock (to protect structures and such) * 2 - Per SCQ scq spinlock * 3 - Per card resource spinlock (to access registers, etc.) * * These must NEVER be grabbed in reverse order. * */ /* Header files */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/atmdev.h> #include <linux/atm.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/types.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/idr.h> #include <asm/io.h> #include <asm/uaccess.h> #include <linux/atomic.h> #include <linux/etherdevice.h> #include "nicstar.h" #ifdef CONFIG_ATM_NICSTAR_USE_SUNI #include "suni.h" #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 #include "idt77105.h" #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ /* Additional code */ #include "nicstarmac.c" /* Configurable parameters */ #undef PHY_LOOPBACK #undef TX_DEBUG #undef RX_DEBUG #undef GENERAL_DEBUG #undef EXTRA_DEBUG #undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know you're going to use only raw ATM */ /* Do not touch these */ #ifdef TX_DEBUG #define TXPRINTK(args...) printk(args) #else #define TXPRINTK(args...) 
#endif /* TX_DEBUG */ #ifdef RX_DEBUG #define RXPRINTK(args...) printk(args) #else #define RXPRINTK(args...) #endif /* RX_DEBUG */ #ifdef GENERAL_DEBUG #define PRINTK(args...) printk(args) #else #define PRINTK(args...) #endif /* GENERAL_DEBUG */ #ifdef EXTRA_DEBUG #define XPRINTK(args...) printk(args) #else #define XPRINTK(args...) #endif /* EXTRA_DEBUG */ /* Macros */ #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ) #define NS_DELAY mdelay(1) #define PTR_DIFF(a, b) ((u32)((unsigned long)(a) - (unsigned long)(b))) #ifndef ATM_SKB #define ATM_SKB(s) (&(s)->atm) #endif #define scq_virt_to_bus(scq, p) \ (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org)) /* Function declarations */ static u32 ns_read_sram(ns_dev * card, u32 sram_address); static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, int count); static int ns_init_card(int i, struct pci_dev *pcidev); static void ns_init_card_error(ns_dev * card, int error); static scq_info *get_scq(ns_dev *card, int size, u32 scd); static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc); static void push_rxbufs(ns_dev *, struct sk_buff *); static irqreturn_t ns_irq_handler(int irq, void *dev_id); static int ns_open(struct atm_vcc *vcc); static void ns_close(struct atm_vcc *vcc); static void fill_tst(ns_dev * card, int n, vc_map * vc); static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb); static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, struct sk_buff *skb); static void process_tsq(ns_dev * card); static void drain_scq(ns_dev * card, scq_info * scq, int pos); static void process_rsq(ns_dev * card); static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe); #ifdef NS_USE_DESTRUCTORS static void ns_sb_destructor(struct sk_buff *sb); static void ns_lb_destructor(struct sk_buff *lb); static void ns_hb_destructor(struct sk_buff *hb); #endif /* NS_USE_DESTRUCTORS */ static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb); 
static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count); static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb); static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb); static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb); static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page); static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg); #ifdef EXTRA_DEBUG static void which_list(ns_dev * card, struct sk_buff *skb); #endif static void ns_poll(unsigned long arg); static void ns_phy_put(struct atm_dev *dev, unsigned char value, unsigned long addr); static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr); /* Global variables */ static struct ns_dev *cards[NS_MAX_CARDS]; static unsigned num_cards; static struct atmdev_ops atm_ops = { .open = ns_open, .close = ns_close, .ioctl = ns_ioctl, .send = ns_send, .phy_put = ns_phy_put, .phy_get = ns_phy_get, .proc_read = ns_proc_read, .owner = THIS_MODULE, }; static struct timer_list ns_timer; static char *mac[NS_MAX_CARDS]; module_param_array(mac, charp, NULL, 0); MODULE_LICENSE("GPL"); /* Functions */ static int nicstar_init_one(struct pci_dev *pcidev, const struct pci_device_id *ent) { static int index = -1; unsigned int error; index++; cards[index] = NULL; error = ns_init_card(index, pcidev); if (error) { cards[index--] = NULL; /* don't increment index */ goto err_out; } return 0; err_out: return -ENODEV; } static void nicstar_remove_one(struct pci_dev *pcidev) { int i, j; ns_dev *card = pci_get_drvdata(pcidev); struct sk_buff *hb; struct sk_buff *iovb; struct sk_buff *lb; struct sk_buff *sb; i = card->index; if (cards[i] == NULL) return; if (card->atmdev->phy && card->atmdev->phy->stop) card->atmdev->phy->stop(card->atmdev); /* Stop everything */ writel(0x00000000, card->membase + CFG); /* De-register device */ atm_dev_deregister(card->atmdev); /* Disable PCI device */ pci_disable_device(pcidev); /* Free up resources */ j = 
0; PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count); while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) { dev_kfree_skb_any(hb); j++; } PRINTK("nicstar%d: %d huge buffers freed.\n", i, j); j = 0; PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, card->iovpool.count); while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) { dev_kfree_skb_any(iovb); j++; } PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j); while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) dev_kfree_skb_any(lb); while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) dev_kfree_skb_any(sb); free_scq(card, card->scq0, NULL); for (j = 0; j < NS_FRSCD_NUM; j++) { if (card->scd2vc[j] != NULL) free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc); } idr_destroy(&card->idr); pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, card->rsq.org, card->rsq.dma); pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, card->tsq.org, card->tsq.dma); free_irq(card->pcidev->irq, card); iounmap(card->membase); kfree(card); } static struct pci_device_id nicstar_pci_tbl[] = { { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 }, {0,} /* terminate list */ }; MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl); static struct pci_driver nicstar_driver = { .name = "nicstar", .id_table = nicstar_pci_tbl, .probe = nicstar_init_one, .remove = nicstar_remove_one, }; static int __init nicstar_init(void) { unsigned error = 0; /* Initialized to remove compile warning */ XPRINTK("nicstar: nicstar_init() called.\n"); error = pci_register_driver(&nicstar_driver); TXPRINTK("nicstar: TX debug enabled.\n"); RXPRINTK("nicstar: RX debug enabled.\n"); PRINTK("nicstar: General debug enabled.\n"); #ifdef PHY_LOOPBACK printk("nicstar: using PHY loopback.\n"); #endif /* PHY_LOOPBACK */ XPRINTK("nicstar: nicstar_init() returned.\n"); if (!error) { init_timer(&ns_timer); ns_timer.expires = jiffies + NS_POLL_PERIOD; ns_timer.data = 0UL; ns_timer.function = ns_poll; add_timer(&ns_timer); 
} return error; } static void __exit nicstar_cleanup(void) { XPRINTK("nicstar: nicstar_cleanup() called.\n"); del_timer(&ns_timer); pci_unregister_driver(&nicstar_driver); XPRINTK("nicstar: nicstar_cleanup() returned.\n"); } static u32 ns_read_sram(ns_dev * card, u32 sram_address) { unsigned long flags; u32 data; sram_address <<= 2; sram_address &= 0x0007FFFC; /* address must be dword aligned */ sram_address |= 0x50000000; /* SRAM read command */ spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; writel(sram_address, card->membase + CMD); while (CMD_BUSY(card)) ; data = readl(card->membase + DR0); spin_unlock_irqrestore(&card->res_lock, flags); return data; } static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, int count) { unsigned long flags; int i, c; count--; /* count range now is 0..3 instead of 1..4 */ c = count; c <<= 2; /* to use increments of 4 */ spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; for (i = 0; i <= c; i += 4) writel(*(value++), card->membase + i); /* Note: DR# registers are the first 4 dwords in nicstar's memspace, so card->membase + DR0 == card->membase */ sram_address <<= 2; sram_address &= 0x0007FFFC; sram_address |= (0x40000000 | count); writel(sram_address, card->membase + CMD); spin_unlock_irqrestore(&card->res_lock, flags); } static int ns_init_card(int i, struct pci_dev *pcidev) { int j; struct ns_dev *card = NULL; unsigned char pci_latency; unsigned error; u32 data; u32 u32d[4]; u32 ns_cfg_rctsize; int bcount; unsigned long membase; error = 0; if (pci_enable_device(pcidev)) { printk("nicstar%d: can't enable PCI device\n", i); error = 2; ns_init_card_error(card, error); return error; } if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) || (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) { printk(KERN_WARNING "nicstar%d: No suitable DMA available.\n", i); error = 2; ns_init_card_error(card, error); return error; } if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == 
NULL) { printk ("nicstar%d: can't allocate memory for device structure.\n", i); error = 2; ns_init_card_error(card, error); return error; } cards[i] = card; spin_lock_init(&card->int_lock); spin_lock_init(&card->res_lock); pci_set_drvdata(pcidev, card); card->index = i; card->atmdev = NULL; card->pcidev = pcidev; membase = pci_resource_start(pcidev, 1); card->membase = ioremap(membase, NS_IOREMAP_SIZE); if (!card->membase) { printk("nicstar%d: can't ioremap() membase.\n", i); error = 3; ns_init_card_error(card, error); return error; } PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase); pci_set_master(pcidev); if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) { printk("nicstar%d: can't read PCI latency timer.\n", i); error = 6; ns_init_card_error(card, error); return error; } #ifdef NS_PCI_LATENCY if (pci_latency < NS_PCI_LATENCY) { PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, NS_PCI_LATENCY); for (j = 1; j < 4; j++) { if (pci_write_config_byte (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0) break; } if (j == 4) { printk ("nicstar%d: can't set PCI latency timer to %d.\n", i, NS_PCI_LATENCY); error = 7; ns_init_card_error(card, error); return error; } } #endif /* NS_PCI_LATENCY */ /* Clear timer overflow */ data = readl(card->membase + STAT); if (data & NS_STAT_TMROF) writel(NS_STAT_TMROF, card->membase + STAT); /* Software reset */ writel(NS_CFG_SWRST, card->membase + CFG); NS_DELAY; writel(0x00000000, card->membase + CFG); /* PHY reset */ writel(0x00000008, card->membase + GP); NS_DELAY; writel(0x00000001, card->membase + GP); NS_DELAY; while (CMD_BUSY(card)) ; writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */ NS_DELAY; /* Detect PHY type */ while (CMD_BUSY(card)) ; writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD); while (CMD_BUSY(card)) ; data = readl(card->membase + DR0); switch (data) { case 0x00000009: printk("nicstar%d: PHY seems to be 25 Mbps.\n", i); 
card->max_pcr = ATM_25_PCR; while (CMD_BUSY(card)) ; writel(0x00000008, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD); /* Clear an eventual pending interrupt */ writel(NS_STAT_SFBQF, card->membase + STAT); #ifdef PHY_LOOPBACK while (CMD_BUSY(card)) ; writel(0x00000022, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD); #endif /* PHY_LOOPBACK */ break; case 0x00000030: case 0x00000031: printk("nicstar%d: PHY seems to be 155 Mbps.\n", i); card->max_pcr = ATM_OC3_PCR; #ifdef PHY_LOOPBACK while (CMD_BUSY(card)) ; writel(0x00000002, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD); #endif /* PHY_LOOPBACK */ break; default: printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data); error = 8; ns_init_card_error(card, error); return error; } writel(0x00000000, card->membase + GP); /* Determine SRAM size */ data = 0x76543210; ns_write_sram(card, 0x1C003, &data, 1); data = 0x89ABCDEF; ns_write_sram(card, 0x14003, &data, 1); if (ns_read_sram(card, 0x14003) == 0x89ABCDEF && ns_read_sram(card, 0x1C003) == 0x76543210) card->sram_size = 128; else card->sram_size = 32; PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size); card->rct_size = NS_MAX_RCTSIZE; #if (NS_MAX_RCTSIZE == 4096) if (card->sram_size == 128) printk ("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", i); #elif (NS_MAX_RCTSIZE == 16384) if (card->sram_size == 32) { printk ("nicstar%d: wasting memory. 
See NS_MAX_RCTSIZE in nicstar.h\n", i); card->rct_size = 4096; } #else #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c #endif card->vpibits = NS_VPIBITS; if (card->rct_size == 4096) card->vcibits = 12 - NS_VPIBITS; else /* card->rct_size == 16384 */ card->vcibits = 14 - NS_VPIBITS; /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */ if (mac[i] == NULL) nicstar_init_eprom(card->membase); /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ writel(0x00000000, card->membase + VPM); /* Initialize TSQ */ card->tsq.org = pci_alloc_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, &card->tsq.dma); if (card->tsq.org == NULL) { printk("nicstar%d: can't allocate TSQ.\n", i); error = 10; ns_init_card_error(card, error); return error; } card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT); card->tsq.next = card->tsq.base; card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1); for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++) ns_tsi_init(card->tsq.base + j); writel(0x00000000, card->membase + TSQH); writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB); PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base); /* Initialize RSQ */ card->rsq.org = pci_alloc_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, &card->rsq.dma); if (card->rsq.org == NULL) { printk("nicstar%d: can't allocate RSQ.\n", i); error = 11; ns_init_card_error(card, error); return error; } card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT); card->rsq.next = card->rsq.base; card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1); for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++) ns_rsqe_init(card->rsq.base + j); writel(0x00000000, card->membase + RSQH); writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB); PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base); /* Initialize SCQ0, the only VBR SCQ used */ card->scq1 = NULL; card->scq2 = NULL; card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0); if (card->scq0 == 
NULL) { printk("nicstar%d: can't get SCQ0.\n", i); error = 12; ns_init_card_error(card, error); return error; } u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base); u32d[1] = (u32) 0x00000000; u32d[2] = (u32) 0xffffffff; u32d[3] = (u32) 0x00000000; ns_write_sram(card, NS_VRSCD0, u32d, 4); ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */ ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... */ card->scq0->scd = NS_VRSCD0; PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base); /* Initialize TSTs */ card->tst_addr = NS_TST0; card->tst_free_entries = NS_TST_NUM_ENTRIES; data = NS_TST_OPCODE_VARIABLE; for (j = 0; j < NS_TST_NUM_ENTRIES; j++) ns_write_sram(card, NS_TST0 + j, &data, 1); data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0); ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1); for (j = 0; j < NS_TST_NUM_ENTRIES; j++) ns_write_sram(card, NS_TST1 + j, &data, 1); data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1); ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1); for (j = 0; j < NS_TST_NUM_ENTRIES; j++) card->tste2vc[j] = NULL; writel(NS_TST0 << 2, card->membase + TSTB); /* Initialize RCT. AAL type is set on opening the VC. 
*/ #ifdef RCQ_SUPPORT u32d[0] = NS_RCTE_RAWCELLINTEN; #else u32d[0] = 0x00000000; #endif /* RCQ_SUPPORT */ u32d[1] = 0x00000000; u32d[2] = 0x00000000; u32d[3] = 0xFFFFFFFF; for (j = 0; j < card->rct_size; j++) ns_write_sram(card, j * 4, u32d, 4); memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map)); for (j = 0; j < NS_FRSCD_NUM; j++) card->scd2vc[j] = NULL; /* Initialize buffer levels */ card->sbnr.min = MIN_SB; card->sbnr.init = NUM_SB; card->sbnr.max = MAX_SB; card->lbnr.min = MIN_LB; card->lbnr.init = NUM_LB; card->lbnr.max = MAX_LB; card->iovnr.min = MIN_IOVB; card->iovnr.init = NUM_IOVB; card->iovnr.max = MAX_IOVB; card->hbnr.min = MIN_HB; card->hbnr.init = NUM_HB; card->hbnr.max = MAX_HB; card->sm_handle = NULL; card->sm_addr = 0x00000000; card->lg_handle = NULL; card->lg_addr = 0x00000000; card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */ idr_init(&card->idr); /* Pre-allocate some huge buffers */ skb_queue_head_init(&card->hbpool.queue); card->hbpool.count = 0; for (j = 0; j < NUM_HB; j++) { struct sk_buff *hb; hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); if (hb == NULL) { printk ("nicstar%d: can't allocate %dth of %d huge buffers.\n", i, j, NUM_HB); error = 13; ns_init_card_error(card, error); return error; } NS_PRV_BUFTYPE(hb) = BUF_NONE; skb_queue_tail(&card->hbpool.queue, hb); card->hbpool.count++; } /* Allocate large buffers */ skb_queue_head_init(&card->lbpool.queue); card->lbpool.count = 0; /* Not used */ for (j = 0; j < NUM_LB; j++) { struct sk_buff *lb; lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); if (lb == NULL) { printk ("nicstar%d: can't allocate %dth of %d large buffers.\n", i, j, NUM_LB); error = 14; ns_init_card_error(card, error); return error; } NS_PRV_BUFTYPE(lb) = BUF_LG; skb_queue_tail(&card->lbpool.queue, lb); skb_reserve(lb, NS_SMBUFSIZE); push_rxbufs(card, lb); /* Due to the implementation of push_rxbufs() this is 1, not 0 */ if (j == 1) { card->rcbuf = lb; card->rawcell = (struct ns_rcqe *) lb->data; 
card->rawch = NS_PRV_DMA(lb); } } /* Test for strange behaviour which leads to crashes */ if ((bcount = ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) { printk ("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n", i, j, bcount); error = 14; ns_init_card_error(card, error); return error; } /* Allocate small buffers */ skb_queue_head_init(&card->sbpool.queue); card->sbpool.count = 0; /* Not used */ for (j = 0; j < NUM_SB; j++) { struct sk_buff *sb; sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); if (sb == NULL) { printk ("nicstar%d: can't allocate %dth of %d small buffers.\n", i, j, NUM_SB); error = 15; ns_init_card_error(card, error); return error; } NS_PRV_BUFTYPE(sb) = BUF_SM; skb_queue_tail(&card->sbpool.queue, sb); skb_reserve(sb, NS_AAL0_HEADER); push_rxbufs(card, sb); } /* Test for strange behaviour which leads to crashes */ if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) { printk ("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n", i, j, bcount); error = 15; ns_init_card_error(card, error); return error; } /* Allocate iovec buffers */ skb_queue_head_init(&card->iovpool.queue); card->iovpool.count = 0; for (j = 0; j < NUM_IOVB; j++) { struct sk_buff *iovb; iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); if (iovb == NULL) { printk ("nicstar%d: can't allocate %dth of %d iovec buffers.\n", i, j, NUM_IOVB); error = 16; ns_init_card_error(card, error); return error; } NS_PRV_BUFTYPE(iovb) = BUF_NONE; skb_queue_tail(&card->iovpool.queue, iovb); card->iovpool.count++; } /* Configure NICStAR */ if (card->rct_size == 4096) ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES; else /* (card->rct_size == 16384) */ ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES; card->efbie = 1; card->intcnt = 0; if (request_irq (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) { printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); error = 9; ns_init_card_error(card, error); 
return error; } /* Register device */ card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops, -1, NULL); if (card->atmdev == NULL) { printk("nicstar%d: can't register device.\n", i); error = 17; ns_init_card_error(card, error); return error; } if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) { nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, card->atmdev->esi, 6); if (ether_addr_equal(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00")) { nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT, card->atmdev->esi, 6); } } printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi); card->atmdev->dev_data = card; card->atmdev->ci_range.vpi_bits = card->vpibits; card->atmdev->ci_range.vci_bits = card->vcibits; card->atmdev->link_rate = card->max_pcr; card->atmdev->phy = NULL; #ifdef CONFIG_ATM_NICSTAR_USE_SUNI if (card->max_pcr == ATM_OC3_PCR) suni_init(card->atmdev); #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 if (card->max_pcr == ATM_25_PCR) idt77105_init(card->atmdev); #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ if (card->atmdev->phy && card->atmdev->phy->start) card->atmdev->phy->start(card->atmdev); writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE | NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS | ns_cfg_rctsize | NS_CFG_RXINT_NODELAY | NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */ NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */ NS_CFG_PHYIE, card->membase + CFG); num_cards++; return error; } static void ns_init_card_error(ns_dev *card, int error) { if (error >= 17) { writel(0x00000000, card->membase + CFG); } if (error >= 16) { struct sk_buff *iovb; while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) dev_kfree_skb_any(iovb); } if (error >= 15) { struct sk_buff *sb; while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) dev_kfree_skb_any(sb); free_scq(card, card->scq0, NULL); } if (error >= 14) { struct 
sk_buff *lb; while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) dev_kfree_skb_any(lb); } if (error >= 13) { struct sk_buff *hb; while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) dev_kfree_skb_any(hb); } if (error >= 12) { kfree(card->rsq.org); } if (error >= 11) { kfree(card->tsq.org); } if (error >= 10) { free_irq(card->pcidev->irq, card); } if (error >= 4) { iounmap(card->membase); } if (error >= 3) { pci_disable_device(card->pcidev); kfree(card); } } static scq_info *get_scq(ns_dev *card, int size, u32 scd) { scq_info *scq; int i; if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) return NULL; scq = kmalloc(sizeof(scq_info), GFP_KERNEL); if (!scq) return NULL; scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma); if (!scq->org) { kfree(scq); return NULL; } scq->skb = kmalloc(sizeof(struct sk_buff *) * (size / NS_SCQE_SIZE), GFP_KERNEL); if (!scq->skb) { kfree(scq->org); kfree(scq); return NULL; } scq->num_entries = size / NS_SCQE_SIZE; scq->base = PTR_ALIGN(scq->org, size); scq->next = scq->base; scq->last = scq->base + (scq->num_entries - 1); scq->tail = scq->last; scq->scd = scd; scq->num_entries = size / NS_SCQE_SIZE; scq->tbd_count = 0; init_waitqueue_head(&scq->scqfull_waitq); scq->full = 0; spin_lock_init(&scq->lock); for (i = 0; i < scq->num_entries; i++) scq->skb[i] = NULL; return scq; } /* For variable rate SCQ vcc must be NULL */ static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc) { int i; if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) for (i = 0; i < scq->num_entries; i++) { if (scq->skb[i] != NULL) { vcc = ATM_SKB(scq->skb[i])->vcc; if (vcc->pop != NULL) vcc->pop(vcc, scq->skb[i]); else dev_kfree_skb_any(scq->skb[i]); } } else { /* vcc must be != NULL */ if (vcc == NULL) { printk ("nicstar: free_scq() called with vcc == NULL for fixed rate scq."); for (i = 0; i < scq->num_entries; i++) dev_kfree_skb_any(scq->skb[i]); } else for (i = 0; i < scq->num_entries; i++) { if (scq->skb[i] != NULL) { if (vcc->pop != NULL) 
vcc->pop(vcc, scq->skb[i]); else dev_kfree_skb_any(scq->skb[i]); } } } kfree(scq->skb); pci_free_consistent(card->pcidev, 2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ? VBR_SCQSIZE : CBR_SCQSIZE), scq->org, scq->dma); kfree(scq); } /* The handles passed must be pointers to the sk_buff containing the small or large buffer(s) cast to u32. */ static void push_rxbufs(ns_dev * card, struct sk_buff *skb) { struct sk_buff *handle1, *handle2; int id1, id2; u32 addr1, addr2; u32 stat; unsigned long flags; /* *BARF* */ handle2 = NULL; addr2 = 0; handle1 = skb; addr1 = pci_map_single(card->pcidev, skb->data, (NS_PRV_BUFTYPE(skb) == BUF_SM ? NS_SMSKBSIZE : NS_LGSKBSIZE), PCI_DMA_TODEVICE); NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */ #ifdef GENERAL_DEBUG if (!addr1) printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", card->index); #endif /* GENERAL_DEBUG */ stat = readl(card->membase + STAT); card->sbfqc = ns_stat_sfbqc_get(stat); card->lbfqc = ns_stat_lfbqc_get(stat); if (NS_PRV_BUFTYPE(skb) == BUF_SM) { if (!addr2) { if (card->sm_addr) { addr2 = card->sm_addr; handle2 = card->sm_handle; card->sm_addr = 0x00000000; card->sm_handle = NULL; } else { /* (!sm_addr) */ card->sm_addr = addr1; card->sm_handle = handle1; } } } else { /* buf_type == BUF_LG */ if (!addr2) { if (card->lg_addr) { addr2 = card->lg_addr; handle2 = card->lg_handle; card->lg_addr = 0x00000000; card->lg_handle = NULL; } else { /* (!lg_addr) */ card->lg_addr = addr1; card->lg_handle = handle1; } } } if (addr2) { if (NS_PRV_BUFTYPE(skb) == BUF_SM) { if (card->sbfqc >= card->sbnr.max) { skb_unlink(handle1, &card->sbpool.queue); dev_kfree_skb_any(handle1); skb_unlink(handle2, &card->sbpool.queue); dev_kfree_skb_any(handle2); return; } else card->sbfqc += 2; } else { /* (buf_type == BUF_LG) */ if (card->lbfqc >= card->lbnr.max) { skb_unlink(handle1, &card->lbpool.queue); dev_kfree_skb_any(handle1); skb_unlink(handle2, &card->lbpool.queue); dev_kfree_skb_any(handle2); return; } else card->lbfqc 
+= 2; } id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC); if (id1 < 0) goto out; id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC); if (id2 < 0) goto out; spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; writel(addr2, card->membase + DR3); writel(id2, card->membase + DR2); writel(addr1, card->membase + DR1); writel(id1, card->membase + DR0); writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb), card->membase + CMD); spin_unlock_irqrestore(&card->res_lock, flags); XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index, (NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"), addr1, addr2); } if (!card->efbie && card->sbfqc >= card->sbnr.min && card->lbfqc >= card->lbnr.min) { card->efbie = 1; writel((readl(card->membase + CFG) | NS_CFG_EFBIE), card->membase + CFG); } out: return; } static irqreturn_t ns_irq_handler(int irq, void *dev_id) { u32 stat_r; ns_dev *card; struct atm_dev *dev; unsigned long flags; card = (ns_dev *) dev_id; dev = card->atmdev; card->intcnt++; PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index); spin_lock_irqsave(&card->int_lock, flags); stat_r = readl(card->membase + STAT); /* Transmit Status Indicator has been written to T. S. 
Queue */ if (stat_r & NS_STAT_TSIF) { TXPRINTK("nicstar%d: TSI interrupt\n", card->index); process_tsq(card); writel(NS_STAT_TSIF, card->membase + STAT); } /* Incomplete CS-PDU has been transmitted */ if (stat_r & NS_STAT_TXICP) { writel(NS_STAT_TXICP, card->membase + STAT); TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n", card->index); } /* Transmit Status Queue 7/8 full */ if (stat_r & NS_STAT_TSQF) { writel(NS_STAT_TSQF, card->membase + STAT); PRINTK("nicstar%d: TSQ full.\n", card->index); process_tsq(card); } /* Timer overflow */ if (stat_r & NS_STAT_TMROF) { writel(NS_STAT_TMROF, card->membase + STAT); PRINTK("nicstar%d: Timer overflow.\n", card->index); } /* PHY device interrupt signal active */ if (stat_r & NS_STAT_PHYI) { writel(NS_STAT_PHYI, card->membase + STAT); PRINTK("nicstar%d: PHY interrupt.\n", card->index); if (dev->phy && dev->phy->interrupt) { dev->phy->interrupt(dev); } } /* Small Buffer Queue is full */ if (stat_r & NS_STAT_SFBQF) { writel(NS_STAT_SFBQF, card->membase + STAT); printk("nicstar%d: Small free buffer queue is full.\n", card->index); } /* Large Buffer Queue is full */ if (stat_r & NS_STAT_LFBQF) { writel(NS_STAT_LFBQF, card->membase + STAT); printk("nicstar%d: Large free buffer queue is full.\n", card->index); } /* Receive Status Queue is full */ if (stat_r & NS_STAT_RSQF) { writel(NS_STAT_RSQF, card->membase + STAT); printk("nicstar%d: RSQ full.\n", card->index); process_rsq(card); } /* Complete CS-PDU received */ if (stat_r & NS_STAT_EOPDU) { RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index); process_rsq(card); writel(NS_STAT_EOPDU, card->membase + STAT); } /* Raw cell received */ if (stat_r & NS_STAT_RAWCF) { writel(NS_STAT_RAWCF, card->membase + STAT); #ifndef RCQ_SUPPORT printk("nicstar%d: Raw cell received and no support yet...\n", card->index); #endif /* RCQ_SUPPORT */ /* NOTE: the following procedure may keep a raw cell pending until the next interrupt. 
As this preliminary support is only meant to avoid buffer leakage, this is not an issue. */ while (readl(card->membase + RAWCT) != card->rawch) { if (ns_rcqe_islast(card->rawcell)) { struct sk_buff *oldbuf; oldbuf = card->rcbuf; card->rcbuf = idr_find(&card->idr, ns_rcqe_nextbufhandle(card->rawcell)); card->rawch = NS_PRV_DMA(card->rcbuf); card->rawcell = (struct ns_rcqe *) card->rcbuf->data; recycle_rx_buf(card, oldbuf); } else { card->rawch += NS_RCQE_SIZE; card->rawcell++; } } } /* Small buffer queue is empty */ if (stat_r & NS_STAT_SFBQE) { int i; struct sk_buff *sb; writel(NS_STAT_SFBQE, card->membase + STAT); printk("nicstar%d: Small free buffer queue empty.\n", card->index); for (i = 0; i < card->sbnr.min; i++) { sb = dev_alloc_skb(NS_SMSKBSIZE); if (sb == NULL) { writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG); card->efbie = 0; break; } NS_PRV_BUFTYPE(sb) = BUF_SM; skb_queue_tail(&card->sbpool.queue, sb); skb_reserve(sb, NS_AAL0_HEADER); push_rxbufs(card, sb); } card->sbfqc = i; process_rsq(card); } /* Large buffer queue empty */ if (stat_r & NS_STAT_LFBQE) { int i; struct sk_buff *lb; writel(NS_STAT_LFBQE, card->membase + STAT); printk("nicstar%d: Large free buffer queue empty.\n", card->index); for (i = 0; i < card->lbnr.min; i++) { lb = dev_alloc_skb(NS_LGSKBSIZE); if (lb == NULL) { writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG); card->efbie = 0; break; } NS_PRV_BUFTYPE(lb) = BUF_LG; skb_queue_tail(&card->lbpool.queue, lb); skb_reserve(lb, NS_SMBUFSIZE); push_rxbufs(card, lb); } card->lbfqc = i; process_rsq(card); } /* Receive Status Queue is 7/8 full */ if (stat_r & NS_STAT_RSQAF) { writel(NS_STAT_RSQAF, card->membase + STAT); RXPRINTK("nicstar%d: RSQ almost full.\n", card->index); process_rsq(card); } spin_unlock_irqrestore(&card->int_lock, flags); PRINTK("nicstar%d: end of interrupt service\n", card->index); return IRQ_HANDLED; } static int ns_open(struct atm_vcc *vcc) { ns_dev *card; vc_map *vc; 
	unsigned long tmpl, modl;
	int tcr, tcra;	/* target cell rate, and absolute value */
	int n = 0;	/* Number of entries in the TST. Initialized to remove
			   the compiler warning. */
	u32 u32d[4];	/* scratch words written to SRAM for the SCD */
	int frscdi = 0;	/* Index of the SCD. Initialized to remove the compiler
			   warning. How I wish compilers were clever enough to
			   tell which variables can truly be used
			   uninitialized... */
	int inuse;	/* tx or rx vc already in use by another vcc */
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	card = (ns_dev *) vcc->dev->dev_data;
	PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi,
	       vci);
	/* Only AAL5 and AAL0 are handled by this driver. */
	if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
		PRINTK("nicstar%d: unsupported AAL.\n", card->index);
		return -EINVAL;
	}

	/* vcmap is indexed by the concatenation of vpi and vci bits. */
	vc = &(card->vcmap[vpi << card->vcibits | vci]);
	vcc->dev_data = vc;

	/* Refuse to open a direction that is already claimed by another vcc:
	 * inuse encodes tx (1), rx (2), or both (3). */
	inuse = 0;
	if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
		inuse = 1;
	if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
		inuse += 2;
	if (inuse) {
		printk("nicstar%d: %s vci already in use.\n", card->index,
		       inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
		return -EINVAL;
	}

	set_bit(ATM_VF_ADDR, &vcc->flags);

	/* NOTE: You are not allowed to modify an open connection's QOS. To
	   change that, remove the ATM_VF_PARTIAL flag checking. There may be
	   other changes needed to do that. */
	if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
		scq_info *scq;

		set_bit(ATM_VF_PARTIAL, &vcc->flags);
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			/* Check requested cell rate and availability of SCD */
			if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0
			    && vcc->qos.txtp.min_pcr == 0) {
				PRINTK
				    ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
				     card->index);
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EINVAL;
			}

			tcr = atm_pcr_goal(&(vcc->qos.txtp));
			tcra = tcr >= 0 ? tcr : -tcr;

			PRINTK("nicstar%d: target cell rate = %d.\n",
			       card->index, vcc->qos.txtp.max_pcr);

			/* Convert the cell rate into a number n of TST slots:
			 * n = ceil(tcra * NS_TST_NUM_ENTRIES / max_pcr) when a
			 * positive rate is requested; tcr == 0 means "take all
			 * remaining CBR bandwidth". */
			tmpl =
			    (unsigned long)tcra *(unsigned long)
			    NS_TST_NUM_ENTRIES;
			modl = tmpl % card->max_pcr;

			n = (int)(tmpl / card->max_pcr);
			if (tcr > 0) {
				if (modl > 0)
					n++;
			} else if (tcr == 0) {
				if ((n =
				     (card->tst_free_entries -
				      NS_TST_RESERVED)) <= 0) {
					PRINTK
					    ("nicstar%d: no CBR bandwidth free.\n",
					     card->index);
					clear_bit(ATM_VF_PARTIAL, &vcc->flags);
					clear_bit(ATM_VF_ADDR, &vcc->flags);
					return -EINVAL;
				}
			}

			if (n == 0) {
				printk
				    ("nicstar%d: selected bandwidth < granularity.\n",
				     card->index);
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EINVAL;
			}

			if (n > (card->tst_free_entries - NS_TST_RESERVED)) {
				PRINTK
				    ("nicstar%d: not enough free CBR bandwidth.\n",
				     card->index);
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EINVAL;
			} else
				card->tst_free_entries -= n;

			XPRINTK("nicstar%d: writing %d tst entries.\n",
				card->index, n);
			/* Claim the first free fixed-rate SCD slot. */
			for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) {
				if (card->scd2vc[frscdi] == NULL) {
					card->scd2vc[frscdi] = vc;
					break;
				}
			}
			if (frscdi == NS_FRSCD_NUM) {
				PRINTK
				    ("nicstar%d: no SCD available for CBR channel.\n",
				     card->index);
				card->tst_free_entries += n;
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EBUSY;
			}

			vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;

			scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd);
			if (scq == NULL) {
				PRINTK("nicstar%d: can't get fixed rate SCQ.\n",
				       card->index);
				card->scd2vc[frscdi] = NULL;
				card->tst_free_entries += n;
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -ENOMEM;
			}
			vc->scq = scq;
			/* Initialize the SCD in SRAM: word 0 points at the
			 * SCQ base; remaining words per hardware layout. */
			u32d[0] = scq_virt_to_bus(scq, scq->base);
			u32d[1] = (u32) 0x00000000;
			u32d[2] = (u32) 0xffffffff;
			u32d[3] = (u32) 0x00000000;
			ns_write_sram(card, vc->cbr_scd, u32d, 4);

			fill_tst(card, n, vc);
		} else if (vcc->qos.txtp.traffic_class == ATM_UBR) {
			/* UBR shares the device-wide SCQ; no fixed-rate SCD. */
			vc->cbr_scd = 0x00000000;
			vc->scq = card->scq0;
		}

		if
		    (vcc->qos.txtp.traffic_class != ATM_NONE) {
			vc->tx = 1;
			vc->tx_vcc = vcc;
			vc->tbd_count = 0;
		}
		if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
			u32 status;

			vc->rx = 1;
			vc->rx_vcc = vcc;
			vc->rx_iov = NULL;

			/* Open the connection in hardware */
			if (vcc->qos.aal == ATM_AAL5)
				status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
			else	/* vcc->qos.aal == ATM_AAL0 */
				status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
#ifdef RCQ_SUPPORT
			status |= NS_RCTE_RAWCELLINTEN;
#endif /* RCQ_SUPPORT */
			/* Write the Receive Connection Table entry for this
			 * (vpi,vci). */
			ns_write_sram(card,
				      NS_RCT +
				      (vpi << card->vcibits | vci) *
				      NS_RCT_ENTRY_SIZE, &status, 1);
		}
	}

	set_bit(ATM_VF_READY, &vcc->flags);
	return 0;
}

/*
 * ns_close - atmdev_ops close hook: tear down a (vpi,vci) binding.
 * Closes the RX connection in hardware, recycles any partially reassembled
 * RX buffers, drains the CBR SCQ (queueing a final TSR if needed), frees
 * TST entries and the SCD slot, and clears pending TX skb references in
 * the shared SCQ so nothing still points at this vcc.
 */
static void ns_close(struct atm_vcc *vcc)
{
	vc_map *vc;
	ns_dev *card;
	u32 data;
	int i;

	vc = vcc->dev_data;
	card = vcc->dev->dev_data;
	PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
	       (int)vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		u32 addr;
		unsigned long flags;

		addr =
		    NS_RCT +
		    (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
		spin_lock_irqsave(&card->res_lock, flags);
		/* Busy-wait for the command register, then issue the
		 * close-connection command for this RCT entry. */
		while (CMD_BUSY(card)) ;
		writel(NS_CMD_CLOSE_CONNECTION | addr << 2,
		       card->membase + CMD);
		spin_unlock_irqrestore(&card->res_lock, flags);

		vc->rx = 0;
		if (vc->rx_iov != NULL) {
			struct sk_buff *iovb;
			u32 stat;

			stat = readl(card->membase + STAT);
			card->sbfqc = ns_stat_sfbqc_get(stat);
			card->lbfqc = ns_stat_lfbqc_get(stat);

			PRINTK
			    ("nicstar%d: closing a VC with pending rx buffers.\n",
			     card->index);
			/* Return the partially assembled iovec's data buffers
			 * and the iovec buffer itself to their pools. */
			iovb = vc->rx_iov;
			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
					      NS_PRV_IOVCNT(iovb));
			NS_PRV_IOVCNT(iovb) = 0;
			spin_lock_irqsave(&card->int_lock, flags);
			recycle_iov_buf(card, iovb);
			spin_unlock_irqrestore(&card->int_lock, flags);
			vc->rx_iov = NULL;
		}
	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		vc->tx = 0;
	}

	if (vcc->qos.txtp.traffic_class == ATM_CBR) {
		unsigned long flags;
		ns_scqe *scqep;
		scq_info *scq;

		scq = vc->scq;

		/* Wait until the hardware has consumed everything queued on
		 * the fixed-rate SCQ, rescheduling between checks. */
		for (;;) {
			spin_lock_irqsave(&scq->lock, flags);
			scqep = scq->next;
			if (scqep == scq->base)
				scqep = scq->last;
			else
				scqep--;
			if (scqep == scq->tail) {
				spin_unlock_irqrestore(&scq->lock, flags);
				break;
			}
			/* If the last entry is not a TSR, place one in the SCQ in order to
			   be able to completely drain it and then close. */
			if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) {
				ns_scqe tsr;
				u32 scdi, scqi;
				u32 data;
				int index;

				tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
				scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
				scqi = scq->next - scq->base;
				tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
				tsr.word_3 = 0x00000000;
				tsr.word_4 = 0x00000000;

				*scq->next = tsr;
				index = (int)scqi;
				scq->skb[index] = NULL;

				if (scq->next == scq->last)
					scq->next = scq->base;
				else
					scq->next++;

				/* Tell the card where the SCQ write pointer
				 * now is. */
				data = scq_virt_to_bus(scq, scq->next);
				ns_write_sram(card, scq->scd, &data, 1);
			}
			spin_unlock_irqrestore(&scq->lock, flags);
			schedule();
		}

		/* Free all TST entries */
		data = NS_TST_OPCODE_VARIABLE;
		for (i = 0; i < NS_TST_NUM_ENTRIES; i++) {
			if (card->tste2vc[i] == vc) {
				ns_write_sram(card, card->tst_addr + i, &data,
					      1);
				card->tste2vc[i] = NULL;
				card->tst_free_entries++;
			}
		}

		card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
		free_scq(card, vc->scq, vcc);
	}

	/* remove all references to vcc before deleting it */
	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		unsigned long flags;
		scq_info *scq = card->scq0;

		spin_lock_irqsave(&scq->lock, flags);
		for (i = 0; i < scq->num_entries; i++) {
			if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) {
				ATM_SKB(scq->skb[i])->vcc = NULL;
				atm_return(vcc, scq->skb[i]->truesize);
				PRINTK
				    ("nicstar: deleted pending vcc mapping\n");
			}
		}
		spin_unlock_irqrestore(&scq->lock, flags);
	}

	vcc->dev_data = NULL;
	clear_bit(ATM_VF_PARTIAL, &vcc->flags);
	clear_bit(ATM_VF_ADDR, &vcc->flags);

#ifdef RX_DEBUG
	{
		u32 stat, cfg;
		stat = readl(card->membase + STAT);
		cfg = readl(card->membase + CFG);
		printk("STAT = 0x%08X  CFG = 0x%08X \n", stat, cfg);
		printk
		    ("TSQ: base = 0x%p  next = 0x%p  last = 0x%p  TSQT = 0x%08X \n",
		     card->tsq.base, card->tsq.next, card->tsq.last,
		     readl(card->membase + TSQT));
		printk
		    ("RSQ: base = 0x%p  next = 0x%p  last = 0x%p  RSQT = 0x%08X \n",
		     card->rsq.base, card->rsq.next, card->rsq.last,
		     readl(card->membase + RSQT));
		printk("Empty free buffer queue interrupt %s \n",
		       card->efbie ? "enabled" : "disabled");
		printk("SBCNT = %d  count = %d   LBCNT = %d count = %d \n",
		       ns_stat_sfbqc_get(stat), card->sbpool.count,
		       ns_stat_lfbqc_get(stat), card->lbpool.count);
		printk("hbpool.count = %d  iovpool.count = %d \n",
		       card->hbpool.count, card->iovpool.count);
	}
#endif /* RX_DEBUG */
}

/*
 * fill_tst - spread n fixed-rate (CBR) entries for @vc evenly across the
 * Transmit Schedule Table, then terminate the table with an END opcode.
 * The even spacing comes from the cl accumulator: an entry is written each
 * time cl wraps past NS_TST_NUM_ENTRIES while cl grows by n per slot.
 */
static void fill_tst(ns_dev * card, int n, vc_map * vc)
{
	u32 new_tst;
	unsigned long cl;
	int e, r;
	u32 data;

	/* It would be very complicated to keep the two TSTs synchronized while
	   assuring that writes are only made to the inactive TST. So, for now I
	   will use only one TST. If problems occur, I will change this again */

	new_tst = card->tst_addr;

	/* Fill procedure: find the first free TST slot to start from. */
	for (e = 0; e < NS_TST_NUM_ENTRIES; e++) {
		if (card->tste2vc[e] == NULL)
			break;
	}
	if (e == NS_TST_NUM_ENTRIES) {
		printk("nicstar%d: No free TST entries found. \n", card->index);
		return;
	}

	r = n;
	cl = NS_TST_NUM_ENTRIES;
	data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd);

	while (r > 0) {
		if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) {
			card->tste2vc[e] = vc;
			ns_write_sram(card, new_tst + e, &data, 1);
			cl -= NS_TST_NUM_ENTRIES;
			r--;
		}

		if (++e == NS_TST_NUM_ENTRIES) {
			e = 0;
		}
		cl += n;
	}
	/* End of fill procedure */

	/* Terminate the schedule: the END entry loops back to the start. */
	data = ns_tste_make(NS_TST_OPCODE_END, new_tst);
	ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1);
	ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1);
	card->tst_addr = new_tst;
}

/*
 * ns_send - atmdev_ops send hook: transmit one skb on @vcc.
 * Validates the VC state and AAL, DMA-maps the skb, builds a Transmit
 * Buffer Descriptor (TBD) for AAL5 or AAL0, and pushes it onto the VC's
 * SCQ (fixed-rate SCQ for CBR, the shared scq0 otherwise).
 * Returns 0 on success or a negative errno; the skb is consumed either way.
 */
static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	ns_dev *card;
	vc_map *vc;
	scq_info *scq;
	unsigned long buflen;
	ns_scqe scqe;
	u32 flags;		/* TBD flags, not CPU flags */

	card = vcc->dev->dev_data;
	TXPRINTK("nicstar%d: ns_send() called.\n", card->index);
	if ((vc = (vc_map *) vcc->dev_data) == NULL) {
		printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
		       card->index);
		atomic_inc(&vcc->stats->tx_err);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (!vc->tx) {
		printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
		       card->index);
		atomic_inc(&vcc->stats->tx_err);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
		printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
		       card->index);
		atomic_inc(&vcc->stats->tx_err);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	/* Only linear skbs can be handed to the card. */
	if (skb_shinfo(skb)->nr_frags != 0) {
		printk("nicstar%d: No scatter-gather yet.\n", card->index);
		atomic_inc(&vcc->stats->tx_err);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	ATM_SKB(skb)->vcc = vcc;

	NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data,
					 skb->len, PCI_DMA_TODEVICE);

	if (vcc->qos.aal == ATM_AAL5) {
		/* AAL5 PDU: payload + 8-byte trailer, padded to whole cells. */
		buflen = (skb->len + 47 + 8) / 48 * 48;	/* Multiple of 48 */
		flags = NS_TBD_AAL5;
		scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb));
		scqe.word_3 = cpu_to_le32(skb->len);
		scqe.word_4 =
		    ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0,
				    ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP
				    ? 1 : 0);
		flags |= NS_TBD_EOPDU;
	} else {		/* (vcc->qos.aal == ATM_AAL0) */
		buflen = ATM_CELL_PAYLOAD;	/* i.e., 48 bytes */
		flags = NS_TBD_AAL0;
		/* Payload starts after the 4-byte AAL0 header in the skb. */
		scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER);
		scqe.word_3 = cpu_to_le32(0x00000000);
		if (*skb->data & 0x02)	/* Payload type 1 - end of pdu */
			flags |= NS_TBD_EOPDU;
		scqe.word_4 =
		    cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK);
		/* Force the VPI/VCI to be the same as in VCC struct */
		scqe.word_4 |=
		    cpu_to_le32((((u32) vcc->vpi) << NS_TBD_VPI_SHIFT |
				 ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) &
				NS_TBD_VC_MASK);
	}

	if (vcc->qos.txtp.traffic_class == ATM_CBR) {
		scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen);
		scq = ((vc_map *) vcc->dev_data)->scq;
	} else {
		scqe.word_1 =
		    ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen);
		scq = card->scq0;
	}

	if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
		atomic_inc(&vcc->stats->tx_err);
		dev_kfree_skb_any(skb);
		return -EIO;
	}
	atomic_inc(&vcc->stats->tx);

	return 0;
}

/*
 * push_scqe - append a TBD to @scq and notify the hardware.
 * Blocks (outside interrupt context) while the SCQ is full, waiting on
 * scqfull_waitq with the SCQ lock released by the wait helper. After every
 * MAX_TBD_PER_VC / MAX_TBD_PER_SCQ descriptors a Transmit Status Request
 * (TSR) with interrupts enabled is queued so completions get reported.
 * Returns 0 on success, 1 if the TBD could not be queued.
 */
static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
		     struct sk_buff *skb)
{
	unsigned long flags;
	ns_scqe tsr;
	u32 scdi, scqi;
	int scq_is_vbr;
	u32 data;
	int index;

	spin_lock_irqsave(&scq->lock, flags);
	while (scq->tail == scq->next) {
		/* SCQ full.  In interrupt context we cannot sleep: fail. */
		if (in_interrupt()) {
			spin_unlock_irqrestore(&scq->lock, flags);
			printk("nicstar%d: Error pushing TBD.\n", card->index);
			return 1;
		}

		scq->full = 1;
		wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq,
							  scq->tail != scq->next,
							  scq->lock,
							  SCQFULL_TIMEOUT);

		if (scq->full) {
			spin_unlock_irqrestore(&scq->lock, flags);
			printk("nicstar%d: Timeout pushing TBD.\n",
			       card->index);
			return 1;
		}
	}
	*scq->next = *tbd;
	index = (int)(scq->next - scq->base);
	scq->skb[index] = skb;	/* remembered so drain_scq() can free it */
	XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n",
		card->index, skb, index);
	XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
		card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2),
		le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4), scq->next);
	if (scq->next == scq->last)
		scq->next = scq->base;
	else
		scq->next++;

	vc->tbd_count++;
	if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) {
		scq->tbd_count++;
		scq_is_vbr = 1;
	} else
		scq_is_vbr = 0;

	if (vc->tbd_count >= MAX_TBD_PER_VC
	    || scq->tbd_count >= MAX_TBD_PER_SCQ) {
		int has_run = 0;

		while (scq->tail == scq->next) {
			if (in_interrupt()) {
				/* Can't wait for room for the TSR: update the
				 * write pointer and skip it this time. */
				data = scq_virt_to_bus(scq, scq->next);
				ns_write_sram(card, scq->scd, &data, 1);
				spin_unlock_irqrestore(&scq->lock, flags);
				printk("nicstar%d: Error pushing TSR.\n",
				       card->index);
				return 0;
			}

			scq->full = 1;
			if (has_run++)
				break;
			wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq,
								  scq->tail != scq->next,
								  scq->lock,
								  SCQFULL_TIMEOUT);
		}

		if (!scq->full) {
			tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
			if (scq_is_vbr)
				scdi = NS_TSR_SCDISVBR;
			else
				scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
			scqi = scq->next - scq->base;
			tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
			tsr.word_3 = 0x00000000;
			tsr.word_4 = 0x00000000;

			*scq->next = tsr;
			index = (int)scqi;
			scq->skb[index] = NULL;	/* TSR entries carry no skb */
			XPRINTK
			    ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
			     card->index, le32_to_cpu(tsr.word_1),
			     le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3),
			     le32_to_cpu(tsr.word_4), scq->next);
			if (scq->next == scq->last)
				scq->next = scq->base;
			else
				scq->next++;
			vc->tbd_count = 0;
			scq->tbd_count = 0;
		} else
			PRINTK("nicstar%d: Timeout pushing TSR.\n",
			       card->index);
	}
	/* Publish the new SCQ write pointer to the card's SCD. */
	data = scq_virt_to_bus(scq, scq->next);
	ns_write_sram(card, scq->scd, &data, 1);

	spin_unlock_irqrestore(&scq->lock, flags);

	return 0;
}

/*
 * process_tsq - walk the Transmit Status Queue and complete transmissions.
 * For each valid TSI it locates the SCQ from the SCD index, drains the
 * transmitted descriptors (freeing their skbs) and wakes writers blocked
 * on a full SCQ.  Finally the TSQ head register is updated to the last
 * entry serviced.  Looks up to two entries ahead because empty entries may
 * appear interleaved (77201 errata, noted below).
 */
static void process_tsq(ns_dev * card)
{
	u32 scdi;
	scq_info *scq;
	ns_tsi *previous = NULL, *one_ahead, *two_ahead;
	int serviced_entries;	/* flag indicating at least one entry was serviced */

	serviced_entries = 0;

	if (card->tsq.next == card->tsq.last)
		one_ahead = card->tsq.base;
	else
		one_ahead = card->tsq.next + 1;

	if (one_ahead == card->tsq.last)
		two_ahead = card->tsq.base;
	else
		two_ahead = one_ahead + 1;

	while (!ns_tsi_isempty(card->tsq.next) ||
	       !ns_tsi_isempty(one_ahead) || !ns_tsi_isempty(two_ahead))
		/* At most two empty, as stated in the 77201 errata */
	{
		serviced_entries = 1;

		/* Skip the one or two possible empty entries */
		while (ns_tsi_isempty(card->tsq.next)) {
			if (card->tsq.next == card->tsq.last)
				card->tsq.next = card->tsq.base;
			else
				card->tsq.next++;
		}

		if (!ns_tsi_tmrof(card->tsq.next)) {
			/* Real transmit-status entry (not a timer rollover):
			 * map the SCD index back to its SCQ. */
			scdi = ns_tsi_getscdindex(card->tsq.next);
			if (scdi == NS_TSI_SCDISVBR)
				scq = card->scq0;
			else {
				if (card->scd2vc[scdi] == NULL) {
					printk
					    ("nicstar%d: could not find VC from SCD index.\n",
					     card->index);
					ns_tsi_init(card->tsq.next);
					return;
				}
				scq = card->scd2vc[scdi]->scq;
			}

			drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next));
			scq->full = 0;
			wake_up_interruptible(&(scq->scqfull_waitq));
		}

		/* Mark the entry free and advance, keeping the two
		 * look-ahead pointers in step. */
		ns_tsi_init(card->tsq.next);
		previous = card->tsq.next;
		if (card->tsq.next == card->tsq.last)
			card->tsq.next = card->tsq.base;
		else
			card->tsq.next++;

		if (card->tsq.next == card->tsq.last)
			one_ahead = card->tsq.base;
		else
			one_ahead = card->tsq.next + 1;

		if (one_ahead == card->tsq.last)
			two_ahead = card->tsq.base;
		else
			two_ahead = one_ahead + 1;
	}

	if (serviced_entries)
		writel(PTR_DIFF(previous, card->tsq.base),
		       card->membase + TSQH);
}

/*
 * drain_scq - free transmitted skbs from @scq up to (but excluding) @pos.
 * Unmaps each skb's DMA buffer and hands it back via the vcc's pop()
 * callback when one is set, otherwise frees it directly.  Advances
 * scq->tail to @pos.
 */
static void drain_scq(ns_dev * card, scq_info * scq, int pos)
{
	struct atm_vcc *vcc;
	struct sk_buff *skb;
	int i;
	unsigned long flags;

	XPRINTK("nicstar%d: drain_scq() called, scq at 0x%p, pos %d.\n",
		card->index, scq, pos);
	if (pos >= scq->num_entries) {
		printk("nicstar%d: Bad index on drain_scq().\n", card->index);
		return;
	}

	spin_lock_irqsave(&scq->lock, flags);
	i = (int)(scq->tail - scq->base);
	if (++i == scq->num_entries)
		i = 0;
	while (i != pos) {
		skb = scq->skb[i];
		XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n",
			card->index, skb, i);
		if (skb != NULL) {
			pci_unmap_single(card->pcidev,
					 NS_PRV_DMA(skb),
					 skb->len, PCI_DMA_TODEVICE);
			vcc = ATM_SKB(skb)->vcc;
			if (vcc && vcc->pop != NULL) {
				vcc->pop(vcc, skb);
			} else {
				dev_kfree_skb_irq(skb);
			}
			scq->skb[i] = NULL;
		}
		if (++i == scq->num_entries)
			i = 0;
	}
	scq->tail = scq->base + pos;
	spin_unlock_irqrestore(&scq->lock, flags);
}

/*
 * process_rsq - drain every valid entry of the Receive Status Queue,
 * dispatching each to dequeue_rx(), then update the RSQ head register to
 * the last entry consumed.
 */
static void process_rsq(ns_dev * card)
{
	ns_rsqe *previous;

	if (!ns_rsqe_valid(card->rsq.next))
		return;
	do {
		dequeue_rx(card, card->rsq.next);
		ns_rsqe_init(card->rsq.next);
		previous = card->rsq.next;
		if (card->rsq.next == card->rsq.last)
			card->rsq.next = card->rsq.base;
		else
			card->rsq.next++;
	} while (ns_rsqe_valid(card->rsq.next));
	writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH);
}

/*
 * dequeue_rx - handle one Receive Status Queue entry.
 * Resolves the DMA buffer via the idr handle, validates the (vpi,vci),
 * and either pushes AAL0 cells one by one, or accumulates AAL5 fragments
 * in a per-VC iovec buffer until end-of-PDU, then reassembles into a
 * small, large or huge buffer and pushes it to the vcc.
 */
static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
{
	u32 vpi, vci;
	vc_map *vc;
	struct sk_buff *iovb;
	struct iovec *iov;
	struct atm_vcc *vcc;
	struct sk_buff *skb;
	unsigned short aal5_len;
	int len;
	u32 stat;
	u32 id;

	stat = readl(card->membase + STAT);
	card->sbfqc = ns_stat_sfbqc_get(stat);
	card->lbfqc = ns_stat_lfbqc_get(stat);

	/* The RSQE carries the idr handle of the buffer the card filled. */
	id = le32_to_cpu(rsqe->buffer_handle);
	skb = idr_find(&card->idr, id);
	if (!skb) {
		RXPRINTK(KERN_ERR
			 "nicstar%d: idr_find() failed!\n", card->index);
		return;
	}
	idr_remove(&card->idr, id);
	pci_dma_sync_single_for_cpu(card->pcidev,
				    NS_PRV_DMA(skb),
				    (NS_PRV_BUFTYPE(skb) == BUF_SM
				     ? NS_SMSKBSIZE : NS_LGSKBSIZE),
				    PCI_DMA_FROMDEVICE);
	pci_unmap_single(card->pcidev,
			 NS_PRV_DMA(skb),
			 (NS_PRV_BUFTYPE(skb) == BUF_SM ?
			  NS_SMSKBSIZE : NS_LGSKBSIZE),
			 PCI_DMA_FROMDEVICE);
	vpi = ns_rsqe_vpi(rsqe);
	vci = ns_rsqe_vci(rsqe);
	if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) {
		printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n",
		       card->index, vpi, vci);
		recycle_rx_buf(card, skb);
		return;
	}

	vc = &(card->vcmap[vpi << card->vcibits | vci]);
	if (!vc->rx) {
		RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n",
			 card->index, vpi, vci);
		recycle_rx_buf(card, skb);
		return;
	}

	vcc = vc->rx_vcc;

	if (vcc->qos.aal == ATM_AAL0) {
		struct sk_buff *sb;
		unsigned char *cell;
		int i;

		/* Push each 48-byte cell to the vcc as its own small skb,
		 * rebuilding a 4-byte header from the RSQE. */
		cell = skb->data;
		for (i = ns_rsqe_cellcount(rsqe); i; i--) {
			if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) {
				printk
				    ("nicstar%d: Can't allocate buffers for aal0.\n",
				     card->index);
				atomic_add(i, &vcc->stats->rx_drop);
				break;
			}
			if (!atm_charge(vcc, sb->truesize)) {
				RXPRINTK
				    ("nicstar%d: atm_charge() dropped aal0 packets.\n",
				     card->index);
				atomic_add(i - 1, &vcc->stats->rx_drop);	/* already increased by 1 */
				dev_kfree_skb_any(sb);
				break;
			}
			/* Rebuild the header */
			*((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 |
			    (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000);
			if (i == 1 && ns_rsqe_eopdu(rsqe))
				*((u32 *) sb->data) |= 0x00000002;
			skb_put(sb, NS_AAL0_HEADER);
			memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
			skb_put(sb, ATM_CELL_PAYLOAD);
			ATM_SKB(sb)->vcc = vcc;
			__net_timestamp(sb);
			vcc->push(vcc, sb);
			atomic_inc(&vcc->stats->rx);
			cell += ATM_CELL_PAYLOAD;
		}

		recycle_rx_buf(card, skb);
		return;
	}

	/* To reach this point, the AAL layer can only be AAL5 */

	if ((iovb = vc->rx_iov) == NULL) {
		/* First fragment of a PDU on this VC: get an iovec buffer,
		 * preferring the pool and topping the pool up if it runs low. */
		iovb = skb_dequeue(&(card->iovpool.queue));
		if (iovb == NULL) {	/* No buffers in the queue */
			iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
			if (iovb == NULL) {
				printk("nicstar%d: Out of iovec buffers.\n",
				       card->index);
				atomic_inc(&vcc->stats->rx_drop);
				recycle_rx_buf(card, skb);
				return;
			}
			NS_PRV_BUFTYPE(iovb) = BUF_NONE;
		} else if (--card->iovpool.count < card->iovnr.min) {
			struct sk_buff *new_iovb;
			if ((new_iovb =
			     alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) {
				/* NOTE(review): this tags iovb, not the newly
				 * allocated new_iovb; the analogous hbpool
				 * path below tags new_hb.  Looks like it
				 * should be NS_PRV_BUFTYPE(new_iovb) —
				 * confirm before changing. */
				NS_PRV_BUFTYPE(iovb) = BUF_NONE;
				skb_queue_tail(&card->iovpool.queue, new_iovb);
				card->iovpool.count++;
			}
		}
		vc->rx_iov = iovb;
		NS_PRV_IOVCNT(iovb) = 0;
		iovb->len = 0;
		iovb->data = iovb->head;
		skb_reset_tail_pointer(iovb);
		/* IMPORTANT: a pointer to the sk_buff containing the small or large
		   buffer is stored as iovec base, NOT a pointer to the small or large
		   buffer itself. */
	} else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
		/* Too many fragments: drop the PDU and reset the iovec. */
		printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
		atomic_inc(&vcc->stats->rx_err);
		recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
				      NS_MAX_IOVECS);
		NS_PRV_IOVCNT(iovb) = 0;
		iovb->len = 0;
		iovb->data = iovb->head;
		skb_reset_tail_pointer(iovb);
	}
	iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++];
	iov->iov_base = (void *)skb;
	iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
	iovb->len += iov->iov_len;

#ifdef EXTRA_DEBUG
	/* First fragment must live in a small buffer, later ones in large
	 * buffers; anything else indicates pool corruption. */
	if (NS_PRV_IOVCNT(iovb) == 1) {
		if (NS_PRV_BUFTYPE(skb) != BUF_SM) {
			printk
			    ("nicstar%d: Expected a small buffer, and this is not one.\n",
			     card->index);
			which_list(card, skb);
			atomic_inc(&vcc->stats->rx_err);
			recycle_rx_buf(card, skb);
			vc->rx_iov = NULL;
			recycle_iov_buf(card, iovb);
			return;
		}
	} else {		/* NS_PRV_IOVCNT(iovb) >= 2 */
		if (NS_PRV_BUFTYPE(skb) != BUF_LG) {
			printk
			    ("nicstar%d: Expected a large buffer, and this is not one.\n",
			     card->index);
			which_list(card, skb);
			atomic_inc(&vcc->stats->rx_err);
			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
					      NS_PRV_IOVCNT(iovb));
			vc->rx_iov = NULL;
			recycle_iov_buf(card, iovb);
			return;
		}
	}
#endif /* EXTRA_DEBUG */

	if (ns_rsqe_eopdu(rsqe)) {
		/* End of PDU: extract the AAL5 trailer length (last 6 bytes
		 * hold length + CRC) and sanity-check it against the bytes
		 * accumulated. */
		/* This works correctly regardless of the endianness of the host */
		unsigned char *L1L2 = (unsigned char *)
		    (skb->data + iov->iov_len - 6);
		aal5_len = L1L2[0] << 8 | L1L2[1];
		len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
		if (ns_rsqe_crcerr(rsqe) ||
		    len + 8 > iovb->len || len + (47 + 8) < iovb->len) {
			printk("nicstar%d: AAL5 CRC error", card->index);
			if (len + 8 > iovb->len || len + (47 + 8) < iovb->len)
				printk(" - PDU size mismatch.\n");
			else
				printk(".\n");
			atomic_inc(&vcc->stats->rx_err);
			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
					      NS_PRV_IOVCNT(iovb));
			vc->rx_iov = NULL;
			recycle_iov_buf(card, iovb);
			return;
		}

		/* By this point we (hopefully) have a complete SDU without errors. */
		if (NS_PRV_IOVCNT(iovb) == 1) {	/* Just a small buffer */
			/* skb points to a small buffer */
			if (!atm_charge(vcc, skb->truesize)) {
				push_rxbufs(card, skb);
				atomic_inc(&vcc->stats->rx_drop);
			} else {
				skb_put(skb, len);
				dequeue_sm_buf(card, skb);
#ifdef NS_USE_DESTRUCTORS
				skb->destructor = ns_sb_destructor;
#endif /* NS_USE_DESTRUCTORS */
				ATM_SKB(skb)->vcc = vcc;
				__net_timestamp(skb);
				vcc->push(vcc, skb);
				atomic_inc(&vcc->stats->rx);
			}
		} else if (NS_PRV_IOVCNT(iovb) == 2) {	/* One small plus one large buffer */
			struct sk_buff *sb;

			sb = (struct sk_buff *)(iov - 1)->iov_base;
			/* skb points to a large buffer */

			if (len <= NS_SMBUFSIZE) {
				/* Whole PDU fits in the small buffer; the
				 * large one goes straight back to the pool. */
				if (!atm_charge(vcc, sb->truesize)) {
					push_rxbufs(card, sb);
					atomic_inc(&vcc->stats->rx_drop);
				} else {
					skb_put(sb, len);
					dequeue_sm_buf(card, sb);
#ifdef NS_USE_DESTRUCTORS
					sb->destructor = ns_sb_destructor;
#endif /* NS_USE_DESTRUCTORS */
					ATM_SKB(sb)->vcc = vcc;
					__net_timestamp(sb);
					vcc->push(vcc, sb);
					atomic_inc(&vcc->stats->rx);
				}

				push_rxbufs(card, skb);

			} else {	/* len > NS_SMBUFSIZE, the usual case */
				/* Prepend the small buffer's bytes into the
				 * headroom of the large buffer and push it. */
				if (!atm_charge(vcc, skb->truesize)) {
					push_rxbufs(card, skb);
					atomic_inc(&vcc->stats->rx_drop);
				} else {
					dequeue_lg_buf(card, skb);
#ifdef NS_USE_DESTRUCTORS
					skb->destructor = ns_lb_destructor;
#endif /* NS_USE_DESTRUCTORS */
					skb_push(skb, NS_SMBUFSIZE);
					skb_copy_from_linear_data(sb, skb->data,
								  NS_SMBUFSIZE);
					skb_put(skb, len - NS_SMBUFSIZE);
					ATM_SKB(skb)->vcc = vcc;
					__net_timestamp(skb);
					vcc->push(vcc, skb);
					atomic_inc(&vcc->stats->rx);
				}

				push_rxbufs(card, sb);

			}

		} else {	/* Must push a huge buffer */
			struct sk_buff *hb, *sb, *lb;
			int remaining, tocopy;
			int j;

			hb = skb_dequeue(&(card->hbpool.queue));
			if (hb == NULL) {	/* No buffers in the queue */

				hb = dev_alloc_skb(NS_HBUFSIZE);
				if (hb == NULL) {
					printk
					    ("nicstar%d: Out of huge buffers.\n",
					     card->index);
					atomic_inc(&vcc->stats->rx_drop);
					recycle_iovec_rx_bufs(card,
							      (struct iovec *)
							      iovb->data,
							      NS_PRV_IOVCNT(iovb));
					vc->rx_iov = NULL;
					recycle_iov_buf(card, iovb);
					return;
				} else if (card->hbpool.count < card->hbnr.min) {
					struct sk_buff *new_hb;
					if ((new_hb =
					     dev_alloc_skb(NS_HBUFSIZE)) !=
					    NULL) {
						skb_queue_tail(&card->hbpool.
							       queue, new_hb);
						card->hbpool.count++;
					}
				}
				NS_PRV_BUFTYPE(hb) = BUF_NONE;
			} else if (--card->hbpool.count < card->hbnr.min) {
				/* Pool ran low: try to allocate up to two
				 * replacements. */
				struct sk_buff *new_hb;
				if ((new_hb =
				     dev_alloc_skb(NS_HBUFSIZE)) != NULL) {
					NS_PRV_BUFTYPE(new_hb) = BUF_NONE;
					skb_queue_tail(&card->hbpool.queue,
						       new_hb);
					card->hbpool.count++;
				}
				if (card->hbpool.count < card->hbnr.min) {
					if ((new_hb =
					     dev_alloc_skb(NS_HBUFSIZE)) !=
					    NULL) {
						NS_PRV_BUFTYPE(new_hb) =
						    BUF_NONE;
						skb_queue_tail(&card->hbpool.
							       queue, new_hb);
						card->hbpool.count++;
					}
				}
			}

			iov = (struct iovec *)iovb->data;

			if (!atm_charge(vcc, hb->truesize)) {
				recycle_iovec_rx_bufs(card, iov,
						      NS_PRV_IOVCNT(iovb));
				if (card->hbpool.count < card->hbnr.max) {
					skb_queue_tail(&card->hbpool.queue, hb);
					card->hbpool.count++;
				} else
					dev_kfree_skb_any(hb);
				atomic_inc(&vcc->stats->rx_drop);
			} else {
				/* Copy the small buffer to the huge buffer */
				sb = (struct sk_buff *)iov->iov_base;
				skb_copy_from_linear_data(sb, hb->data,
							  iov->iov_len);
				skb_put(hb, iov->iov_len);
				remaining = len - iov->iov_len;
				iov++;
				/* Free the small buffer */
				push_rxbufs(card, sb);

				/* Copy all large buffers to the huge buffer and free them */
				for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) {
					lb = (struct sk_buff *)iov->iov_base;
					tocopy =
					    min_t(int, remaining, iov->iov_len);
					skb_copy_from_linear_data(lb,
								  skb_tail_pointer
								  (hb), tocopy);
					skb_put(hb, tocopy);
					iov++;
					remaining -= tocopy;
					push_rxbufs(card, lb);
				}
#ifdef EXTRA_DEBUG
				if (remaining != 0 || hb->len != len)
					printk
					    ("nicstar%d: Huge buffer len mismatch.\n",
					     card->index);
#endif /* EXTRA_DEBUG */
				ATM_SKB(hb)->vcc = vcc;
#ifdef NS_USE_DESTRUCTORS
				hb->destructor = ns_hb_destructor;
#endif /* NS_USE_DESTRUCTORS */
				__net_timestamp(hb);
				vcc->push(vcc, hb);
				atomic_inc(&vcc->stats->rx);
			}
		}

		vc->rx_iov = NULL;
		recycle_iov_buf(card, iovb);

	}

}

#ifdef NS_USE_DESTRUCTORS

/* skb destructor for small RX buffers: replenish the small-buffer pool
 * (and push the buffers to the card) until the free count reaches
 * sbnr.min. */
static void
ns_sb_destructor(struct sk_buff *sb)
{
	ns_dev *card;
	u32 stat;

	card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
	stat = readl(card->membase + STAT);
	card->sbfqc = ns_stat_sfbqc_get(stat);
	card->lbfqc = ns_stat_lfbqc_get(stat);

	do {
		sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
		if (sb == NULL)
			break;
		NS_PRV_BUFTYPE(sb) = BUF_SM;
		skb_queue_tail(&card->sbpool.queue, sb);
		skb_reserve(sb, NS_AAL0_HEADER);
		push_rxbufs(card, sb);
	} while (card->sbfqc < card->sbnr.min);
}

/* skb destructor for large RX buffers: same replenish loop for the
 * large-buffer pool, up to lbnr.min. */
static void ns_lb_destructor(struct sk_buff *lb)
{
	ns_dev *card;
	u32 stat;

	card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
	stat = readl(card->membase + STAT);
	card->sbfqc = ns_stat_sfbqc_get(stat);
	card->lbfqc = ns_stat_lfbqc_get(stat);

	do {
		lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
		if (lb == NULL)
			break;
		NS_PRV_BUFTYPE(lb) = BUF_LG;
		skb_queue_tail(&card->lbpool.queue, lb);
		skb_reserve(lb, NS_SMBUFSIZE);
		push_rxbufs(card, lb);
	} while (card->lbfqc < card->lbnr.min);
}

/* skb destructor for huge RX buffers: refill the huge-buffer pool (kept
 * in software only, not pushed to the card) up to hbnr.init. */
static void ns_hb_destructor(struct sk_buff *hb)
{
	ns_dev *card;

	card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;

	while (card->hbpool.count < card->hbnr.init) {
		hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
		if (hb == NULL)
			break;
		NS_PRV_BUFTYPE(hb) = BUF_NONE;
		skb_queue_tail(&card->hbpool.queue, hb);
		card->hbpool.count++;
	}
}

#endif /* NS_USE_DESTRUCTORS */

/* Return an RX buffer to the card's free-buffer queue; buffers of unknown
 * type (BUF_NONE) are freed instead, with a diagnostic. */
static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb)
{
	if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) {
		printk("nicstar%d: What kind of rx buffer is this?\n",
		       card->index);
		dev_kfree_skb_any(skb);
	} else
		push_rxbufs(card, skb);
}

/* Recycle the first @count data buffers referenced by an iovec array
 * (each iov_base is an sk_buff pointer, per the note in dequeue_rx()). */
static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count)
{
	while (count-- > 0)
		recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base);
}

/* Return an iovec buffer to the pool, or free it if the pool is full. */
static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb)
{
	if (card->iovpool.count < card->iovnr.max) {
		skb_queue_tail(&card->iovpool.queue, iovb);
		card->iovpool.count++;
	} else
		dev_kfree_skb_any(iovb);
}

/* Detach @sb from the small-buffer pool and, if the card's free count is
 * low, allocate replacement small buffers and push them to the card.
 * Note the #ifdef: without NS_USE_DESTRUCTORS the refill block appears
 * twice, so up to two replacements may be pushed per call. */
static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
{
	skb_unlink(sb, &card->sbpool.queue);
#ifdef NS_USE_DESTRUCTORS
	if (card->sbfqc < card->sbnr.min)
#else
	if (card->sbfqc < card->sbnr.init) {
		struct sk_buff *new_sb;
		if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
			NS_PRV_BUFTYPE(new_sb) = BUF_SM;
			skb_queue_tail(&card->sbpool.queue, new_sb);
			skb_reserve(new_sb, NS_AAL0_HEADER);
			push_rxbufs(card, new_sb);
		}
	}
	if (card->sbfqc < card->sbnr.init)
#endif /* NS_USE_DESTRUCTORS */
	{
		struct sk_buff *new_sb;
		if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
			NS_PRV_BUFTYPE(new_sb) = BUF_SM;
			skb_queue_tail(&card->sbpool.queue, new_sb);
			skb_reserve(new_sb, NS_AAL0_HEADER);
			push_rxbufs(card, new_sb);
		}
	}
}

/* Large-buffer counterpart of dequeue_sm_buf(), with the same duplicated
 * refill block in the non-destructor configuration. */
static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
{
	skb_unlink(lb, &card->lbpool.queue);
#ifdef NS_USE_DESTRUCTORS
	if (card->lbfqc < card->lbnr.min)
#else
	if (card->lbfqc < card->lbnr.init) {
		struct sk_buff *new_lb;
		if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
			NS_PRV_BUFTYPE(new_lb) = BUF_LG;
			skb_queue_tail(&card->lbpool.queue, new_lb);
			skb_reserve(new_lb, NS_SMBUFSIZE);
			push_rxbufs(card, new_lb);
		}
	}
	if (card->lbfqc < card->lbnr.init)
#endif /* NS_USE_DESTRUCTORS */
	{
		struct sk_buff *new_lb;
		if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
			NS_PRV_BUFTYPE(new_lb) = BUF_LG;
			skb_queue_tail(&card->lbpool.queue, new_lb);
			skb_reserve(new_lb, NS_SMBUFSIZE);
			push_rxbufs(card, new_lb);
		}
	}
}

/*
 * ns_proc_read - atmdev_ops proc_read hook: emit one line of /proc output
 * per call, selected by *@pos (pool levels, then the interrupt counter).
 * Returns the number of bytes written to @page, or 0 when done.
 */
static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page)
{
	u32 stat;
	ns_dev *card;
	int left;

	left = (int)*pos;
	card = (ns_dev *) dev->dev_data;
	stat = readl(card->membase + STAT);
	if (!left--)
		return sprintf(page, "Pool count min init max \n");
	if (!left--)
		return sprintf(page, "Small %5d %5d %5d %5d \n",
			       ns_stat_sfbqc_get(stat), card->sbnr.min,
			       card->sbnr.init, card->sbnr.max);
	if (!left--)
		return sprintf(page, "Large %5d %5d %5d %5d \n",
			       ns_stat_lfbqc_get(stat), card->lbnr.min,
			       card->lbnr.init, card->lbnr.max);
	if (!left--)
		return sprintf(page, "Huge %5d %5d %5d %5d \n",
			       card->hbpool.count,
			       card->hbnr.min, card->hbnr.init,
			       card->hbnr.max);
	if (!left--)
		return sprintf(page, "Iovec %5d %5d %5d %5d \n",
			       card->iovpool.count, card->iovnr.min,
			       card->iovnr.init, card->iovnr.max);
	if (!left--) {
		int retval;
		retval =
		    sprintf(page, "Interrupt counter: %u \n", card->intcnt);
		card->intcnt = 0;
		return retval;
	}
#if 0
	/* Dump 25.6 Mbps PHY registers */
	/* Now there's a 25.6 Mbps PHY driver this code isn't needed. I left it
	   here just in case it's needed for debugging. */
	if (card->max_pcr == ATM_25_PCR && !left--) {
		u32 phy_regs[4];
		u32 i;

		for (i = 0; i < 4; i++) {
			while (CMD_BUSY(card)) ;
			writel(NS_CMD_READ_UTILITY | 0x00000200 | i,
			       card->membase + CMD);
			while (CMD_BUSY(card)) ;
			phy_regs[i] = readl(card->membase + DR0) & 0x000000FF;
		}
		return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
			       phy_regs[0], phy_regs[1], phy_regs[2],
			       phy_regs[3]);
	}
#endif /* 0 - Dump 25.6 Mbps PHY registers */
#if 0
	/* Dump TST */
	if (left-- < NS_TST_NUM_ENTRIES) {
		if (card->tste2vc[left + 1] == NULL)
			return sprintf(page, "%5d - VBR/UBR \n", left + 1);
		else
			return sprintf(page, "%5d - %d %d \n", left + 1,
				       card->tste2vc[left + 1]->tx_vcc->vpi,
				       card->tste2vc[left + 1]->tx_vcc->vci);
	}
#endif /* 0 */
	return 0;
}

/*
 * ns_ioctl - atmdev_ops ioctl hook (function continues past this chunk).
 * NS_GETPSTAT reports a buffer pool's count/levels; NS_SETBUFLEV sets the
 * min/init/max levels (CAP_NET_ADMIN required); NS_ADJBUFLEV grows or
 * shrinks a pool toward its init level (CAP_NET_ADMIN required); anything
 * else is forwarded to the PHY driver's ioctl when one is registered.
 */
static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg)
{
	ns_dev *card;
	pool_levels pl;
	long btype;
	unsigned long flags;

	card = dev->dev_data;
	switch (cmd) {
	case NS_GETPSTAT:
		if (get_user
		    (pl.buftype, &((pool_levels __user *) arg)->buftype))
			return -EFAULT;
		switch (pl.buftype) {
		case NS_BUFTYPE_SMALL:
			pl.count =
			    ns_stat_sfbqc_get(readl(card->membase + STAT));
			pl.level.min = card->sbnr.min;
			pl.level.init = card->sbnr.init;
			pl.level.max = card->sbnr.max;
			break;

		case NS_BUFTYPE_LARGE:
			pl.count =
			    ns_stat_lfbqc_get(readl(card->membase + STAT));
			pl.level.min = card->lbnr.min;
			pl.level.init = card->lbnr.init;
			pl.level.max = card->lbnr.max;
			break;

		case NS_BUFTYPE_HUGE:
			pl.count = card->hbpool.count;
			pl.level.min = card->hbnr.min;
			pl.level.init = card->hbnr.init;
			pl.level.max = card->hbnr.max;
			break;

		case NS_BUFTYPE_IOVEC:
			pl.count = card->iovpool.count;
			pl.level.min = card->iovnr.min;
			pl.level.init = card->iovnr.init;
			pl.level.max = card->iovnr.max;
			break;

		default:
			return -ENOIOCTLCMD;

		}
		if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl)))
			return (sizeof(pl));	/* ioctl result: bytes copied */
		else
			return -EFAULT;

	case NS_SETBUFLEV:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl)))
			return -EFAULT;
		/* Levels must be strictly ordered: 0 < min < init < max. */
		if (pl.level.min >= pl.level.init
		    || pl.level.init >= pl.level.max)
			return -EINVAL;
		if (pl.level.min == 0)
			return -EINVAL;
		switch (pl.buftype) {
		case NS_BUFTYPE_SMALL:
			if (pl.level.max > TOP_SB)
				return -EINVAL;
			card->sbnr.min = pl.level.min;
			card->sbnr.init = pl.level.init;
			card->sbnr.max = pl.level.max;
			break;

		case NS_BUFTYPE_LARGE:
			if (pl.level.max > TOP_LB)
				return -EINVAL;
			card->lbnr.min = pl.level.min;
			card->lbnr.init = pl.level.init;
			card->lbnr.max = pl.level.max;
			break;

		case NS_BUFTYPE_HUGE:
			if (pl.level.max > TOP_HB)
				return -EINVAL;
			card->hbnr.min = pl.level.min;
			card->hbnr.init = pl.level.init;
			card->hbnr.max = pl.level.max;
			break;

		case NS_BUFTYPE_IOVEC:
			if (pl.level.max > TOP_IOVB)
				return -EINVAL;
			card->iovnr.min = pl.level.min;
			card->iovnr.init = pl.level.init;
			card->iovnr.max = pl.level.max;
			break;

		default:
			return -EINVAL;

		}
		return 0;

	case NS_ADJBUFLEV:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		btype = (long)arg;	/* a long is the same size as a pointer or bigger */
		switch (btype) {
		case NS_BUFTYPE_SMALL:
			/* Grow the small pool (and push to the card) up to
			 * its init level. */
			while (card->sbfqc < card->sbnr.init) {
				struct sk_buff *sb;

				sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
				if (sb == NULL)
					return -ENOMEM;
				NS_PRV_BUFTYPE(sb) = BUF_SM;
				skb_queue_tail(&card->sbpool.queue, sb);
				skb_reserve(sb, NS_AAL0_HEADER);
				push_rxbufs(card, sb);
			}
			break;

		case NS_BUFTYPE_LARGE:
			while (card->lbfqc < card->lbnr.init) {
				struct sk_buff *lb;

				lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
				if (lb == NULL)
					return -ENOMEM;
				NS_PRV_BUFTYPE(lb) = BUF_LG;
				skb_queue_tail(&card->lbpool.queue, lb);
				skb_reserve(lb, NS_SMBUFSIZE);
				push_rxbufs(card, lb);
			}
			break;

		case NS_BUFTYPE_HUGE:
			/* Shrink, then grow, the huge pool toward init;
			 * pool accesses are guarded by int_lock because the
			 * RX path also touches it. */
			while (card->hbpool.count > card->hbnr.init) {
				struct sk_buff *hb;

				spin_lock_irqsave(&card->int_lock, flags);
				hb = skb_dequeue(&card->hbpool.queue);
				card->hbpool.count--;
				spin_unlock_irqrestore(&card->int_lock, flags);
				if (hb == NULL)
					printk
					    ("nicstar%d: huge buffer count inconsistent.\n",
					     card->index);
				else
					dev_kfree_skb_any(hb);

			}
			while (card->hbpool.count < card->hbnr.init) {
				struct sk_buff *hb;

				hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
				if (hb == NULL)
					return -ENOMEM;
				NS_PRV_BUFTYPE(hb) = BUF_NONE;
				spin_lock_irqsave(&card->int_lock, flags);
				skb_queue_tail(&card->hbpool.queue, hb);
				card->hbpool.count++;
				spin_unlock_irqrestore(&card->int_lock, flags);
			}
			break;

		case NS_BUFTYPE_IOVEC:
			while (card->iovpool.count > card->iovnr.init) {
				struct sk_buff *iovb;

				spin_lock_irqsave(&card->int_lock, flags);
				iovb = skb_dequeue(&card->iovpool.queue);
				card->iovpool.count--;
				spin_unlock_irqrestore(&card->int_lock, flags);
				if (iovb == NULL)
					printk
					    ("nicstar%d: iovec buffer count inconsistent.\n",
					     card->index);
				else
					dev_kfree_skb_any(iovb);

			}
			while (card->iovpool.count < card->iovnr.init) {
				struct sk_buff *iovb;

				iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
				if (iovb == NULL)
					return -ENOMEM;
				NS_PRV_BUFTYPE(iovb) = BUF_NONE;
				spin_lock_irqsave(&card->int_lock, flags);
				skb_queue_tail(&card->iovpool.queue, iovb);
				card->iovpool.count++;
				spin_unlock_irqrestore(&card->int_lock, flags);
			}
			break;

		default:
			return -EINVAL;

		}
		return 0;

	default:
		if (dev->phy && dev->phy->ioctl) {
			return dev->phy->ioctl(dev, cmd, arg);
		} else {
			printk("nicstar%d: %s == NULL \n", card->index,
			       dev->phy ?
"dev->phy->ioctl" : "dev->phy"); return -ENOIOCTLCMD; } } } #ifdef EXTRA_DEBUG static void which_list(ns_dev * card, struct sk_buff *skb) { printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb)); } #endif /* EXTRA_DEBUG */ static void ns_poll(unsigned long arg) { int i; ns_dev *card; unsigned long flags; u32 stat_r, stat_w; PRINTK("nicstar: Entering ns_poll().\n"); for (i = 0; i < num_cards; i++) { card = cards[i]; if (spin_is_locked(&card->int_lock)) { /* Probably it isn't worth spinning */ continue; } spin_lock_irqsave(&card->int_lock, flags); stat_w = 0; stat_r = readl(card->membase + STAT); if (stat_r & NS_STAT_TSIF) stat_w |= NS_STAT_TSIF; if (stat_r & NS_STAT_EOPDU) stat_w |= NS_STAT_EOPDU; process_tsq(card); process_rsq(card); writel(stat_w, card->membase + STAT); spin_unlock_irqrestore(&card->int_lock, flags); } mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD); PRINTK("nicstar: Leaving ns_poll().\n"); } static void ns_phy_put(struct atm_dev *dev, unsigned char value, unsigned long addr) { ns_dev *card; unsigned long flags; card = dev->dev_data; spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; writel((u32) value, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF), card->membase + CMD); spin_unlock_irqrestore(&card->res_lock, flags); } static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr) { ns_dev *card; unsigned long flags; u32 data; card = dev->dev_data; spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF), card->membase + CMD); while (CMD_BUSY(card)) ; data = readl(card->membase + DR0) & 0x000000FF; spin_unlock_irqrestore(&card->res_lock, flags); return (unsigned char)data; } module_init(nicstar_init); module_exit(nicstar_cleanup);
gpl-2.0
Kaisrlik/linux
drivers/atm/nicstar.c
594
76412
/* * nicstar.c * * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards. * * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME. * It was taken from the frle-0.22 device driver. * As the file doesn't have a copyright notice, in the file * nicstarmac.copyright I put the copyright notice from the * frle-0.22 device driver. * Some code is based on the nicstar driver by M. Welsh. * * Author: Rui Prior (rprior@inescn.pt) * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999 * * * (C) INESC 1999 */ /* * IMPORTANT INFORMATION * * There are currently three types of spinlocks: * * 1 - Per card interrupt spinlock (to protect structures and such) * 2 - Per SCQ scq spinlock * 3 - Per card resource spinlock (to access registers, etc.) * * These must NEVER be grabbed in reverse order. * */ /* Header files */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/atmdev.h> #include <linux/atm.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/types.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/idr.h> #include <asm/io.h> #include <asm/uaccess.h> #include <linux/atomic.h> #include <linux/etherdevice.h> #include "nicstar.h" #ifdef CONFIG_ATM_NICSTAR_USE_SUNI #include "suni.h" #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 #include "idt77105.h" #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ /* Additional code */ #include "nicstarmac.c" /* Configurable parameters */ #undef PHY_LOOPBACK #undef TX_DEBUG #undef RX_DEBUG #undef GENERAL_DEBUG #undef EXTRA_DEBUG #undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know you're going to use only raw ATM */ /* Do not touch these */ #ifdef TX_DEBUG #define TXPRINTK(args...) printk(args) #else #define TXPRINTK(args...) 
#endif /* TX_DEBUG */ #ifdef RX_DEBUG #define RXPRINTK(args...) printk(args) #else #define RXPRINTK(args...) #endif /* RX_DEBUG */ #ifdef GENERAL_DEBUG #define PRINTK(args...) printk(args) #else #define PRINTK(args...) #endif /* GENERAL_DEBUG */ #ifdef EXTRA_DEBUG #define XPRINTK(args...) printk(args) #else #define XPRINTK(args...) #endif /* EXTRA_DEBUG */ /* Macros */ #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ) #define NS_DELAY mdelay(1) #define PTR_DIFF(a, b) ((u32)((unsigned long)(a) - (unsigned long)(b))) #ifndef ATM_SKB #define ATM_SKB(s) (&(s)->atm) #endif #define scq_virt_to_bus(scq, p) \ (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org)) /* Function declarations */ static u32 ns_read_sram(ns_dev * card, u32 sram_address); static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, int count); static int ns_init_card(int i, struct pci_dev *pcidev); static void ns_init_card_error(ns_dev * card, int error); static scq_info *get_scq(ns_dev *card, int size, u32 scd); static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc); static void push_rxbufs(ns_dev *, struct sk_buff *); static irqreturn_t ns_irq_handler(int irq, void *dev_id); static int ns_open(struct atm_vcc *vcc); static void ns_close(struct atm_vcc *vcc); static void fill_tst(ns_dev * card, int n, vc_map * vc); static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb); static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, struct sk_buff *skb); static void process_tsq(ns_dev * card); static void drain_scq(ns_dev * card, scq_info * scq, int pos); static void process_rsq(ns_dev * card); static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe); #ifdef NS_USE_DESTRUCTORS static void ns_sb_destructor(struct sk_buff *sb); static void ns_lb_destructor(struct sk_buff *lb); static void ns_hb_destructor(struct sk_buff *hb); #endif /* NS_USE_DESTRUCTORS */ static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb); 
static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count); static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb); static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb); static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb); static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page); static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg); #ifdef EXTRA_DEBUG static void which_list(ns_dev * card, struct sk_buff *skb); #endif static void ns_poll(unsigned long arg); static void ns_phy_put(struct atm_dev *dev, unsigned char value, unsigned long addr); static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr); /* Global variables */ static struct ns_dev *cards[NS_MAX_CARDS]; static unsigned num_cards; static struct atmdev_ops atm_ops = { .open = ns_open, .close = ns_close, .ioctl = ns_ioctl, .send = ns_send, .phy_put = ns_phy_put, .phy_get = ns_phy_get, .proc_read = ns_proc_read, .owner = THIS_MODULE, }; static struct timer_list ns_timer; static char *mac[NS_MAX_CARDS]; module_param_array(mac, charp, NULL, 0); MODULE_LICENSE("GPL"); /* Functions */ static int nicstar_init_one(struct pci_dev *pcidev, const struct pci_device_id *ent) { static int index = -1; unsigned int error; index++; cards[index] = NULL; error = ns_init_card(index, pcidev); if (error) { cards[index--] = NULL; /* don't increment index */ goto err_out; } return 0; err_out: return -ENODEV; } static void nicstar_remove_one(struct pci_dev *pcidev) { int i, j; ns_dev *card = pci_get_drvdata(pcidev); struct sk_buff *hb; struct sk_buff *iovb; struct sk_buff *lb; struct sk_buff *sb; i = card->index; if (cards[i] == NULL) return; if (card->atmdev->phy && card->atmdev->phy->stop) card->atmdev->phy->stop(card->atmdev); /* Stop everything */ writel(0x00000000, card->membase + CFG); /* De-register device */ atm_dev_deregister(card->atmdev); /* Disable PCI device */ pci_disable_device(pcidev); /* Free up resources */ j = 
0; PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count); while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) { dev_kfree_skb_any(hb); j++; } PRINTK("nicstar%d: %d huge buffers freed.\n", i, j); j = 0; PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, card->iovpool.count); while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) { dev_kfree_skb_any(iovb); j++; } PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j); while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) dev_kfree_skb_any(lb); while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) dev_kfree_skb_any(sb); free_scq(card, card->scq0, NULL); for (j = 0; j < NS_FRSCD_NUM; j++) { if (card->scd2vc[j] != NULL) free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc); } idr_destroy(&card->idr); pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, card->rsq.org, card->rsq.dma); pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, card->tsq.org, card->tsq.dma); free_irq(card->pcidev->irq, card); iounmap(card->membase); kfree(card); } static struct pci_device_id nicstar_pci_tbl[] = { { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 }, {0,} /* terminate list */ }; MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl); static struct pci_driver nicstar_driver = { .name = "nicstar", .id_table = nicstar_pci_tbl, .probe = nicstar_init_one, .remove = nicstar_remove_one, }; static int __init nicstar_init(void) { unsigned error = 0; /* Initialized to remove compile warning */ XPRINTK("nicstar: nicstar_init() called.\n"); error = pci_register_driver(&nicstar_driver); TXPRINTK("nicstar: TX debug enabled.\n"); RXPRINTK("nicstar: RX debug enabled.\n"); PRINTK("nicstar: General debug enabled.\n"); #ifdef PHY_LOOPBACK printk("nicstar: using PHY loopback.\n"); #endif /* PHY_LOOPBACK */ XPRINTK("nicstar: nicstar_init() returned.\n"); if (!error) { init_timer(&ns_timer); ns_timer.expires = jiffies + NS_POLL_PERIOD; ns_timer.data = 0UL; ns_timer.function = ns_poll; add_timer(&ns_timer); 
} return error; } static void __exit nicstar_cleanup(void) { XPRINTK("nicstar: nicstar_cleanup() called.\n"); del_timer(&ns_timer); pci_unregister_driver(&nicstar_driver); XPRINTK("nicstar: nicstar_cleanup() returned.\n"); } static u32 ns_read_sram(ns_dev * card, u32 sram_address) { unsigned long flags; u32 data; sram_address <<= 2; sram_address &= 0x0007FFFC; /* address must be dword aligned */ sram_address |= 0x50000000; /* SRAM read command */ spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; writel(sram_address, card->membase + CMD); while (CMD_BUSY(card)) ; data = readl(card->membase + DR0); spin_unlock_irqrestore(&card->res_lock, flags); return data; } static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, int count) { unsigned long flags; int i, c; count--; /* count range now is 0..3 instead of 1..4 */ c = count; c <<= 2; /* to use increments of 4 */ spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; for (i = 0; i <= c; i += 4) writel(*(value++), card->membase + i); /* Note: DR# registers are the first 4 dwords in nicstar's memspace, so card->membase + DR0 == card->membase */ sram_address <<= 2; sram_address &= 0x0007FFFC; sram_address |= (0x40000000 | count); writel(sram_address, card->membase + CMD); spin_unlock_irqrestore(&card->res_lock, flags); } static int ns_init_card(int i, struct pci_dev *pcidev) { int j; struct ns_dev *card = NULL; unsigned char pci_latency; unsigned error; u32 data; u32 u32d[4]; u32 ns_cfg_rctsize; int bcount; unsigned long membase; error = 0; if (pci_enable_device(pcidev)) { printk("nicstar%d: can't enable PCI device\n", i); error = 2; ns_init_card_error(card, error); return error; } if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) || (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) { printk(KERN_WARNING "nicstar%d: No suitable DMA available.\n", i); error = 2; ns_init_card_error(card, error); return error; } if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == 
NULL) { printk ("nicstar%d: can't allocate memory for device structure.\n", i); error = 2; ns_init_card_error(card, error); return error; } cards[i] = card; spin_lock_init(&card->int_lock); spin_lock_init(&card->res_lock); pci_set_drvdata(pcidev, card); card->index = i; card->atmdev = NULL; card->pcidev = pcidev; membase = pci_resource_start(pcidev, 1); card->membase = ioremap(membase, NS_IOREMAP_SIZE); if (!card->membase) { printk("nicstar%d: can't ioremap() membase.\n", i); error = 3; ns_init_card_error(card, error); return error; } PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase); pci_set_master(pcidev); if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) { printk("nicstar%d: can't read PCI latency timer.\n", i); error = 6; ns_init_card_error(card, error); return error; } #ifdef NS_PCI_LATENCY if (pci_latency < NS_PCI_LATENCY) { PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, NS_PCI_LATENCY); for (j = 1; j < 4; j++) { if (pci_write_config_byte (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0) break; } if (j == 4) { printk ("nicstar%d: can't set PCI latency timer to %d.\n", i, NS_PCI_LATENCY); error = 7; ns_init_card_error(card, error); return error; } } #endif /* NS_PCI_LATENCY */ /* Clear timer overflow */ data = readl(card->membase + STAT); if (data & NS_STAT_TMROF) writel(NS_STAT_TMROF, card->membase + STAT); /* Software reset */ writel(NS_CFG_SWRST, card->membase + CFG); NS_DELAY; writel(0x00000000, card->membase + CFG); /* PHY reset */ writel(0x00000008, card->membase + GP); NS_DELAY; writel(0x00000001, card->membase + GP); NS_DELAY; while (CMD_BUSY(card)) ; writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */ NS_DELAY; /* Detect PHY type */ while (CMD_BUSY(card)) ; writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD); while (CMD_BUSY(card)) ; data = readl(card->membase + DR0); switch (data) { case 0x00000009: printk("nicstar%d: PHY seems to be 25 Mbps.\n", i); 
card->max_pcr = ATM_25_PCR; while (CMD_BUSY(card)) ; writel(0x00000008, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD); /* Clear an eventual pending interrupt */ writel(NS_STAT_SFBQF, card->membase + STAT); #ifdef PHY_LOOPBACK while (CMD_BUSY(card)) ; writel(0x00000022, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD); #endif /* PHY_LOOPBACK */ break; case 0x00000030: case 0x00000031: printk("nicstar%d: PHY seems to be 155 Mbps.\n", i); card->max_pcr = ATM_OC3_PCR; #ifdef PHY_LOOPBACK while (CMD_BUSY(card)) ; writel(0x00000002, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD); #endif /* PHY_LOOPBACK */ break; default: printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data); error = 8; ns_init_card_error(card, error); return error; } writel(0x00000000, card->membase + GP); /* Determine SRAM size */ data = 0x76543210; ns_write_sram(card, 0x1C003, &data, 1); data = 0x89ABCDEF; ns_write_sram(card, 0x14003, &data, 1); if (ns_read_sram(card, 0x14003) == 0x89ABCDEF && ns_read_sram(card, 0x1C003) == 0x76543210) card->sram_size = 128; else card->sram_size = 32; PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size); card->rct_size = NS_MAX_RCTSIZE; #if (NS_MAX_RCTSIZE == 4096) if (card->sram_size == 128) printk ("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", i); #elif (NS_MAX_RCTSIZE == 16384) if (card->sram_size == 32) { printk ("nicstar%d: wasting memory. 
See NS_MAX_RCTSIZE in nicstar.h\n", i); card->rct_size = 4096; } #else #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c #endif card->vpibits = NS_VPIBITS; if (card->rct_size == 4096) card->vcibits = 12 - NS_VPIBITS; else /* card->rct_size == 16384 */ card->vcibits = 14 - NS_VPIBITS; /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */ if (mac[i] == NULL) nicstar_init_eprom(card->membase); /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ writel(0x00000000, card->membase + VPM); /* Initialize TSQ */ card->tsq.org = pci_alloc_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, &card->tsq.dma); if (card->tsq.org == NULL) { printk("nicstar%d: can't allocate TSQ.\n", i); error = 10; ns_init_card_error(card, error); return error; } card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT); card->tsq.next = card->tsq.base; card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1); for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++) ns_tsi_init(card->tsq.base + j); writel(0x00000000, card->membase + TSQH); writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB); PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base); /* Initialize RSQ */ card->rsq.org = pci_alloc_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, &card->rsq.dma); if (card->rsq.org == NULL) { printk("nicstar%d: can't allocate RSQ.\n", i); error = 11; ns_init_card_error(card, error); return error; } card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT); card->rsq.next = card->rsq.base; card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1); for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++) ns_rsqe_init(card->rsq.base + j); writel(0x00000000, card->membase + RSQH); writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB); PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base); /* Initialize SCQ0, the only VBR SCQ used */ card->scq1 = NULL; card->scq2 = NULL; card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0); if (card->scq0 == 
NULL) { printk("nicstar%d: can't get SCQ0.\n", i); error = 12; ns_init_card_error(card, error); return error; } u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base); u32d[1] = (u32) 0x00000000; u32d[2] = (u32) 0xffffffff; u32d[3] = (u32) 0x00000000; ns_write_sram(card, NS_VRSCD0, u32d, 4); ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */ ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... */ card->scq0->scd = NS_VRSCD0; PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base); /* Initialize TSTs */ card->tst_addr = NS_TST0; card->tst_free_entries = NS_TST_NUM_ENTRIES; data = NS_TST_OPCODE_VARIABLE; for (j = 0; j < NS_TST_NUM_ENTRIES; j++) ns_write_sram(card, NS_TST0 + j, &data, 1); data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0); ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1); for (j = 0; j < NS_TST_NUM_ENTRIES; j++) ns_write_sram(card, NS_TST1 + j, &data, 1); data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1); ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1); for (j = 0; j < NS_TST_NUM_ENTRIES; j++) card->tste2vc[j] = NULL; writel(NS_TST0 << 2, card->membase + TSTB); /* Initialize RCT. AAL type is set on opening the VC. 
*/ #ifdef RCQ_SUPPORT u32d[0] = NS_RCTE_RAWCELLINTEN; #else u32d[0] = 0x00000000; #endif /* RCQ_SUPPORT */ u32d[1] = 0x00000000; u32d[2] = 0x00000000; u32d[3] = 0xFFFFFFFF; for (j = 0; j < card->rct_size; j++) ns_write_sram(card, j * 4, u32d, 4); memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map)); for (j = 0; j < NS_FRSCD_NUM; j++) card->scd2vc[j] = NULL; /* Initialize buffer levels */ card->sbnr.min = MIN_SB; card->sbnr.init = NUM_SB; card->sbnr.max = MAX_SB; card->lbnr.min = MIN_LB; card->lbnr.init = NUM_LB; card->lbnr.max = MAX_LB; card->iovnr.min = MIN_IOVB; card->iovnr.init = NUM_IOVB; card->iovnr.max = MAX_IOVB; card->hbnr.min = MIN_HB; card->hbnr.init = NUM_HB; card->hbnr.max = MAX_HB; card->sm_handle = NULL; card->sm_addr = 0x00000000; card->lg_handle = NULL; card->lg_addr = 0x00000000; card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */ idr_init(&card->idr); /* Pre-allocate some huge buffers */ skb_queue_head_init(&card->hbpool.queue); card->hbpool.count = 0; for (j = 0; j < NUM_HB; j++) { struct sk_buff *hb; hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); if (hb == NULL) { printk ("nicstar%d: can't allocate %dth of %d huge buffers.\n", i, j, NUM_HB); error = 13; ns_init_card_error(card, error); return error; } NS_PRV_BUFTYPE(hb) = BUF_NONE; skb_queue_tail(&card->hbpool.queue, hb); card->hbpool.count++; } /* Allocate large buffers */ skb_queue_head_init(&card->lbpool.queue); card->lbpool.count = 0; /* Not used */ for (j = 0; j < NUM_LB; j++) { struct sk_buff *lb; lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); if (lb == NULL) { printk ("nicstar%d: can't allocate %dth of %d large buffers.\n", i, j, NUM_LB); error = 14; ns_init_card_error(card, error); return error; } NS_PRV_BUFTYPE(lb) = BUF_LG; skb_queue_tail(&card->lbpool.queue, lb); skb_reserve(lb, NS_SMBUFSIZE); push_rxbufs(card, lb); /* Due to the implementation of push_rxbufs() this is 1, not 0 */ if (j == 1) { card->rcbuf = lb; card->rawcell = (struct ns_rcqe *) lb->data; 
card->rawch = NS_PRV_DMA(lb); } } /* Test for strange behaviour which leads to crashes */ if ((bcount = ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) { printk ("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n", i, j, bcount); error = 14; ns_init_card_error(card, error); return error; } /* Allocate small buffers */ skb_queue_head_init(&card->sbpool.queue); card->sbpool.count = 0; /* Not used */ for (j = 0; j < NUM_SB; j++) { struct sk_buff *sb; sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); if (sb == NULL) { printk ("nicstar%d: can't allocate %dth of %d small buffers.\n", i, j, NUM_SB); error = 15; ns_init_card_error(card, error); return error; } NS_PRV_BUFTYPE(sb) = BUF_SM; skb_queue_tail(&card->sbpool.queue, sb); skb_reserve(sb, NS_AAL0_HEADER); push_rxbufs(card, sb); } /* Test for strange behaviour which leads to crashes */ if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) { printk ("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n", i, j, bcount); error = 15; ns_init_card_error(card, error); return error; } /* Allocate iovec buffers */ skb_queue_head_init(&card->iovpool.queue); card->iovpool.count = 0; for (j = 0; j < NUM_IOVB; j++) { struct sk_buff *iovb; iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); if (iovb == NULL) { printk ("nicstar%d: can't allocate %dth of %d iovec buffers.\n", i, j, NUM_IOVB); error = 16; ns_init_card_error(card, error); return error; } NS_PRV_BUFTYPE(iovb) = BUF_NONE; skb_queue_tail(&card->iovpool.queue, iovb); card->iovpool.count++; } /* Configure NICStAR */ if (card->rct_size == 4096) ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES; else /* (card->rct_size == 16384) */ ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES; card->efbie = 1; card->intcnt = 0; if (request_irq (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) { printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); error = 9; ns_init_card_error(card, error); 
return error; } /* Register device */ card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops, -1, NULL); if (card->atmdev == NULL) { printk("nicstar%d: can't register device.\n", i); error = 17; ns_init_card_error(card, error); return error; } if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) { nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, card->atmdev->esi, 6); if (ether_addr_equal(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00")) { nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT, card->atmdev->esi, 6); } } printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi); card->atmdev->dev_data = card; card->atmdev->ci_range.vpi_bits = card->vpibits; card->atmdev->ci_range.vci_bits = card->vcibits; card->atmdev->link_rate = card->max_pcr; card->atmdev->phy = NULL; #ifdef CONFIG_ATM_NICSTAR_USE_SUNI if (card->max_pcr == ATM_OC3_PCR) suni_init(card->atmdev); #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 if (card->max_pcr == ATM_25_PCR) idt77105_init(card->atmdev); #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ if (card->atmdev->phy && card->atmdev->phy->start) card->atmdev->phy->start(card->atmdev); writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE | NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS | ns_cfg_rctsize | NS_CFG_RXINT_NODELAY | NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */ NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */ NS_CFG_PHYIE, card->membase + CFG); num_cards++; return error; } static void ns_init_card_error(ns_dev *card, int error) { if (error >= 17) { writel(0x00000000, card->membase + CFG); } if (error >= 16) { struct sk_buff *iovb; while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) dev_kfree_skb_any(iovb); } if (error >= 15) { struct sk_buff *sb; while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) dev_kfree_skb_any(sb); free_scq(card, card->scq0, NULL); } if (error >= 14) { struct 
sk_buff *lb; while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) dev_kfree_skb_any(lb); } if (error >= 13) { struct sk_buff *hb; while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) dev_kfree_skb_any(hb); } if (error >= 12) { kfree(card->rsq.org); } if (error >= 11) { kfree(card->tsq.org); } if (error >= 10) { free_irq(card->pcidev->irq, card); } if (error >= 4) { iounmap(card->membase); } if (error >= 3) { pci_disable_device(card->pcidev); kfree(card); } } static scq_info *get_scq(ns_dev *card, int size, u32 scd) { scq_info *scq; int i; if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) return NULL; scq = kmalloc(sizeof(scq_info), GFP_KERNEL); if (!scq) return NULL; scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma); if (!scq->org) { kfree(scq); return NULL; } scq->skb = kmalloc(sizeof(struct sk_buff *) * (size / NS_SCQE_SIZE), GFP_KERNEL); if (!scq->skb) { kfree(scq->org); kfree(scq); return NULL; } scq->num_entries = size / NS_SCQE_SIZE; scq->base = PTR_ALIGN(scq->org, size); scq->next = scq->base; scq->last = scq->base + (scq->num_entries - 1); scq->tail = scq->last; scq->scd = scd; scq->num_entries = size / NS_SCQE_SIZE; scq->tbd_count = 0; init_waitqueue_head(&scq->scqfull_waitq); scq->full = 0; spin_lock_init(&scq->lock); for (i = 0; i < scq->num_entries; i++) scq->skb[i] = NULL; return scq; } /* For variable rate SCQ vcc must be NULL */ static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc) { int i; if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) for (i = 0; i < scq->num_entries; i++) { if (scq->skb[i] != NULL) { vcc = ATM_SKB(scq->skb[i])->vcc; if (vcc->pop != NULL) vcc->pop(vcc, scq->skb[i]); else dev_kfree_skb_any(scq->skb[i]); } } else { /* vcc must be != NULL */ if (vcc == NULL) { printk ("nicstar: free_scq() called with vcc == NULL for fixed rate scq."); for (i = 0; i < scq->num_entries; i++) dev_kfree_skb_any(scq->skb[i]); } else for (i = 0; i < scq->num_entries; i++) { if (scq->skb[i] != NULL) { if (vcc->pop != NULL) 
vcc->pop(vcc, scq->skb[i]); else dev_kfree_skb_any(scq->skb[i]); } } } kfree(scq->skb); pci_free_consistent(card->pcidev, 2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ? VBR_SCQSIZE : CBR_SCQSIZE), scq->org, scq->dma); kfree(scq); } /* The handles passed must be pointers to the sk_buff containing the small or large buffer(s) cast to u32. */ static void push_rxbufs(ns_dev * card, struct sk_buff *skb) { struct sk_buff *handle1, *handle2; int id1, id2; u32 addr1, addr2; u32 stat; unsigned long flags; /* *BARF* */ handle2 = NULL; addr2 = 0; handle1 = skb; addr1 = pci_map_single(card->pcidev, skb->data, (NS_PRV_BUFTYPE(skb) == BUF_SM ? NS_SMSKBSIZE : NS_LGSKBSIZE), PCI_DMA_TODEVICE); NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */ #ifdef GENERAL_DEBUG if (!addr1) printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", card->index); #endif /* GENERAL_DEBUG */ stat = readl(card->membase + STAT); card->sbfqc = ns_stat_sfbqc_get(stat); card->lbfqc = ns_stat_lfbqc_get(stat); if (NS_PRV_BUFTYPE(skb) == BUF_SM) { if (!addr2) { if (card->sm_addr) { addr2 = card->sm_addr; handle2 = card->sm_handle; card->sm_addr = 0x00000000; card->sm_handle = NULL; } else { /* (!sm_addr) */ card->sm_addr = addr1; card->sm_handle = handle1; } } } else { /* buf_type == BUF_LG */ if (!addr2) { if (card->lg_addr) { addr2 = card->lg_addr; handle2 = card->lg_handle; card->lg_addr = 0x00000000; card->lg_handle = NULL; } else { /* (!lg_addr) */ card->lg_addr = addr1; card->lg_handle = handle1; } } } if (addr2) { if (NS_PRV_BUFTYPE(skb) == BUF_SM) { if (card->sbfqc >= card->sbnr.max) { skb_unlink(handle1, &card->sbpool.queue); dev_kfree_skb_any(handle1); skb_unlink(handle2, &card->sbpool.queue); dev_kfree_skb_any(handle2); return; } else card->sbfqc += 2; } else { /* (buf_type == BUF_LG) */ if (card->lbfqc >= card->lbnr.max) { skb_unlink(handle1, &card->lbpool.queue); dev_kfree_skb_any(handle1); skb_unlink(handle2, &card->lbpool.queue); dev_kfree_skb_any(handle2); return; } else card->lbfqc 
+= 2; } id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC); if (id1 < 0) goto out; id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC); if (id2 < 0) goto out; spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; writel(addr2, card->membase + DR3); writel(id2, card->membase + DR2); writel(addr1, card->membase + DR1); writel(id1, card->membase + DR0); writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb), card->membase + CMD); spin_unlock_irqrestore(&card->res_lock, flags); XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index, (NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"), addr1, addr2); } if (!card->efbie && card->sbfqc >= card->sbnr.min && card->lbfqc >= card->lbnr.min) { card->efbie = 1; writel((readl(card->membase + CFG) | NS_CFG_EFBIE), card->membase + CFG); } out: return; } static irqreturn_t ns_irq_handler(int irq, void *dev_id) { u32 stat_r; ns_dev *card; struct atm_dev *dev; unsigned long flags; card = (ns_dev *) dev_id; dev = card->atmdev; card->intcnt++; PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index); spin_lock_irqsave(&card->int_lock, flags); stat_r = readl(card->membase + STAT); /* Transmit Status Indicator has been written to T. S. 
Queue */
	if (stat_r & NS_STAT_TSIF) {
		TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
		process_tsq(card);
		writel(NS_STAT_TSIF, card->membase + STAT);
	}

	/* Incomplete CS-PDU has been transmitted */
	if (stat_r & NS_STAT_TXICP) {
		writel(NS_STAT_TXICP, card->membase + STAT);
		TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
			 card->index);
	}

	/* Transmit Status Queue 7/8 full */
	if (stat_r & NS_STAT_TSQF) {
		writel(NS_STAT_TSQF, card->membase + STAT);
		PRINTK("nicstar%d: TSQ full.\n", card->index);
		process_tsq(card);
	}

	/* Timer overflow */
	if (stat_r & NS_STAT_TMROF) {
		writel(NS_STAT_TMROF, card->membase + STAT);
		PRINTK("nicstar%d: Timer overflow.\n", card->index);
	}

	/* PHY device interrupt signal active */
	if (stat_r & NS_STAT_PHYI) {
		writel(NS_STAT_PHYI, card->membase + STAT);
		PRINTK("nicstar%d: PHY interrupt.\n", card->index);
		if (dev->phy && dev->phy->interrupt) {
			dev->phy->interrupt(dev);
		}
	}

	/* Small Buffer Queue is full */
	if (stat_r & NS_STAT_SFBQF) {
		writel(NS_STAT_SFBQF, card->membase + STAT);
		printk("nicstar%d: Small free buffer queue is full.\n",
		       card->index);
	}

	/* Large Buffer Queue is full */
	if (stat_r & NS_STAT_LFBQF) {
		writel(NS_STAT_LFBQF, card->membase + STAT);
		printk("nicstar%d: Large free buffer queue is full.\n",
		       card->index);
	}

	/* Receive Status Queue is full */
	if (stat_r & NS_STAT_RSQF) {
		writel(NS_STAT_RSQF, card->membase + STAT);
		printk("nicstar%d: RSQ full.\n", card->index);
		process_rsq(card);
	}

	/* Complete CS-PDU received */
	if (stat_r & NS_STAT_EOPDU) {
		RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
		process_rsq(card);
		writel(NS_STAT_EOPDU, card->membase + STAT);
	}

	/* Raw cell received */
	if (stat_r & NS_STAT_RAWCF) {
		writel(NS_STAT_RAWCF, card->membase + STAT);
#ifndef RCQ_SUPPORT
		printk("nicstar%d: Raw cell received and no support yet...\n",
		       card->index);
#endif /* RCQ_SUPPORT */
		/* NOTE: the following procedure may keep a raw cell pending
		   until the next interrupt.
   As this preliminary support is only meant to avoid buffer
		   leakage, this is not an issue. */
		while (readl(card->membase + RAWCT) != card->rawch) {

			if (ns_rcqe_islast(card->rawcell)) {
				struct sk_buff *oldbuf;

				/* Follow the chain to the next raw-cell buffer
				 * and recycle the one we just finished. */
				oldbuf = card->rcbuf;
				card->rcbuf = idr_find(&card->idr,
						       ns_rcqe_nextbufhandle(card->rawcell));
				card->rawch = NS_PRV_DMA(card->rcbuf);
				card->rawcell = (struct ns_rcqe *)
						card->rcbuf->data;
				recycle_rx_buf(card, oldbuf);
			} else {
				card->rawch += NS_RCQE_SIZE;
				card->rawcell++;
			}
		}
	}

	/* Small buffer queue is empty */
	if (stat_r & NS_STAT_SFBQE) {
		int i;
		struct sk_buff *sb;

		writel(NS_STAT_SFBQE, card->membase + STAT);
		printk("nicstar%d: Small free buffer queue empty.\n",
		       card->index);
		/* Refill up to the configured minimum; if allocation fails,
		 * disable the empty-queue interrupt to avoid a storm. */
		for (i = 0; i < card->sbnr.min; i++) {
			sb = dev_alloc_skb(NS_SMSKBSIZE);
			if (sb == NULL) {
				writel(readl(card->membase + CFG) &
				       ~NS_CFG_EFBIE, card->membase + CFG);
				card->efbie = 0;
				break;
			}
			NS_PRV_BUFTYPE(sb) = BUF_SM;
			skb_queue_tail(&card->sbpool.queue, sb);
			skb_reserve(sb, NS_AAL0_HEADER);
			push_rxbufs(card, sb);
		}
		card->sbfqc = i;
		process_rsq(card);
	}

	/* Large buffer queue empty */
	if (stat_r & NS_STAT_LFBQE) {
		int i;
		struct sk_buff *lb;

		writel(NS_STAT_LFBQE, card->membase + STAT);
		printk("nicstar%d: Large free buffer queue empty.\n",
		       card->index);
		for (i = 0; i < card->lbnr.min; i++) {
			lb = dev_alloc_skb(NS_LGSKBSIZE);
			if (lb == NULL) {
				writel(readl(card->membase + CFG) &
				       ~NS_CFG_EFBIE, card->membase + CFG);
				card->efbie = 0;
				break;
			}
			NS_PRV_BUFTYPE(lb) = BUF_LG;
			skb_queue_tail(&card->lbpool.queue, lb);
			skb_reserve(lb, NS_SMBUFSIZE);
			push_rxbufs(card, lb);
		}
		card->lbfqc = i;
		process_rsq(card);
	}

	/* Receive Status Queue is 7/8 full */
	if (stat_r & NS_STAT_RSQAF) {
		writel(NS_STAT_RSQAF, card->membase + STAT);
		RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
		process_rsq(card);
	}

	spin_unlock_irqrestore(&card->int_lock, flags);
	PRINTK("nicstar%d: end of interrupt service\n", card->index);
	return IRQ_HANDLED;
}

/*
 * atmdev_ops.open: validate the requested vpi/vci and QoS, allocate the
 * transmit SCQ (CBR) and open the receive connection in hardware.
 */
static int ns_open(struct atm_vcc *vcc)
{
	ns_dev *card;
	vc_map *vc;
unsigned long tmpl, modl;
	int tcr, tcra;		/* target cell rate, and absolute value */
	int n = 0;		/* Number of entries in the TST. Initialized to remove
				   the compiler warning. */
	u32 u32d[4];
	int frscdi = 0;		/* Index of the SCD. Initialized to remove the compiler
				   warning. How I wish compilers were clever enough to
				   tell which variables can truly be used
				   uninitialized... */
	int inuse;		/* tx or rx vc already in use by another vcc */
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	card = (ns_dev *) vcc->dev->dev_data;
	PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi,
	       vci);
	if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
		PRINTK("nicstar%d: unsupported AAL.\n", card->index);
		return -EINVAL;
	}

	/* vcmap is indexed by the concatenated vpi/vci bits. */
	vc = &(card->vcmap[vpi << card->vcibits | vci]);
	vcc->dev_data = vc;

	/* Refuse to open a direction that another vcc already owns. */
	inuse = 0;
	if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
		inuse = 1;
	if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
		inuse += 2;
	if (inuse) {
		printk("nicstar%d: %s vci already in use.\n", card->index,
		       inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
		return -EINVAL;
	}

	set_bit(ATM_VF_ADDR, &vcc->flags);

	/* NOTE: You are not allowed to modify an open connection's QOS. To
	   change that, remove the ATM_VF_PARTIAL flag checking. There may be
	   other changes needed to do that. */
	if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
		scq_info *scq;

		set_bit(ATM_VF_PARTIAL, &vcc->flags);
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			/* Check requested cell rate and availability of SCD */
			if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0
			    && vcc->qos.txtp.min_pcr == 0) {
				PRINTK
				    ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
				     card->index);
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EINVAL;
			}

			tcr = atm_pcr_goal(&(vcc->qos.txtp));
			tcra = tcr >= 0 ?
tcr : -tcr;

			PRINTK("nicstar%d: target cell rate = %d.\n",
			       card->index, vcc->qos.txtp.max_pcr);

			/* Convert the cell rate into a number n of TST
			 * entries out of NS_TST_NUM_ENTRIES. */
			tmpl = (unsigned long)tcra *(unsigned long)
			    NS_TST_NUM_ENTRIES;
			modl = tmpl % card->max_pcr;

			n = (int)(tmpl / card->max_pcr);
			if (tcr > 0) {
				if (modl > 0)
					n++;
			} else if (tcr == 0) {
				/* tcr == 0 means "as much as possible": take
				 * all free entries minus the reserve. */
				if ((n = (card->tst_free_entries -
					  NS_TST_RESERVED)) <= 0) {
					PRINTK
					    ("nicstar%d: no CBR bandwidth free.\n",
					     card->index);
					clear_bit(ATM_VF_PARTIAL, &vcc->flags);
					clear_bit(ATM_VF_ADDR, &vcc->flags);
					return -EINVAL;
				}
			}

			if (n == 0) {
				printk
				    ("nicstar%d: selected bandwidth < granularity.\n",
				     card->index);
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EINVAL;
			}

			if (n > (card->tst_free_entries - NS_TST_RESERVED)) {
				PRINTK
				    ("nicstar%d: not enough free CBR bandwidth.\n",
				     card->index);
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EINVAL;
			} else
				card->tst_free_entries -= n;

			XPRINTK("nicstar%d: writing %d tst entries.\n",
				card->index, n);
			/* Claim a free fixed-rate SCD slot. */
			for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) {
				if (card->scd2vc[frscdi] == NULL) {
					card->scd2vc[frscdi] = vc;
					break;
				}
			}
			if (frscdi == NS_FRSCD_NUM) {
				PRINTK
				    ("nicstar%d: no SCD available for CBR channel.\n",
				     card->index);
				card->tst_free_entries += n;
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EBUSY;
			}

			vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;

			scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd);
			if (scq == NULL) {
				PRINTK("nicstar%d: can't get fixed rate SCQ.\n",
				       card->index);
				/* Roll back the SCD slot and TST entries. */
				card->scd2vc[frscdi] = NULL;
				card->tst_free_entries += n;
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -ENOMEM;
			}
			vc->scq = scq;
			/* Initialise the SCD entry in SRAM with the queue's
			 * bus address. */
			u32d[0] = scq_virt_to_bus(scq, scq->base);
			u32d[1] = (u32) 0x00000000;
			u32d[2] = (u32) 0xffffffff;
			u32d[3] = (u32) 0x00000000;
			ns_write_sram(card, vc->cbr_scd, u32d, 4);

			fill_tst(card, n, vc);
		} else if (vcc->qos.txtp.traffic_class == ATM_UBR) {
			/* UBR traffic shares the global scq0. */
			vc->cbr_scd = 0x00000000;
			vc->scq = card->scq0;
		}

		if
(vcc->qos.txtp.traffic_class != ATM_NONE) {
			vc->tx = 1;
			vc->tx_vcc = vcc;
			vc->tbd_count = 0;
		}
		if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
			u32 status;

			vc->rx = 1;
			vc->rx_vcc = vcc;
			vc->rx_iov = NULL;

			/* Open the connection in hardware */
			if (vcc->qos.aal == ATM_AAL5)
				status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
			else	/* vcc->qos.aal == ATM_AAL0 */
				status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
#ifdef RCQ_SUPPORT
			status |= NS_RCTE_RAWCELLINTEN;
#endif /* RCQ_SUPPORT */
			ns_write_sram(card,
				      NS_RCT +
				      (vpi << card->vcibits | vci) *
				      NS_RCT_ENTRY_SIZE, &status, 1);
		}
	}

	set_bit(ATM_VF_READY, &vcc->flags);
	return 0;
}

/*
 * atmdev_ops.close: close the receive connection in hardware, recycle any
 * partially assembled rx PDU, drain the CBR SCQ and release its TST
 * entries, and drop pending scq0 references to this vcc.
 */
static void ns_close(struct atm_vcc *vcc)
{
	vc_map *vc;
	ns_dev *card;
	u32 data;
	int i;

	vc = vcc->dev_data;
	card = vcc->dev->dev_data;
	PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
	       (int)vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		u32 addr;
		unsigned long flags;

		addr = NS_RCT + (vcc->vpi << card->vcibits | vcc->vci) *
		    NS_RCT_ENTRY_SIZE;
		spin_lock_irqsave(&card->res_lock, flags);
		while (CMD_BUSY(card)) ;
		writel(NS_CMD_CLOSE_CONNECTION | addr << 2,
		       card->membase + CMD);
		spin_unlock_irqrestore(&card->res_lock, flags);

		vc->rx = 0;
		if (vc->rx_iov != NULL) {
			struct sk_buff *iovb;
			u32 stat;

			stat = readl(card->membase + STAT);
			card->sbfqc = ns_stat_sfbqc_get(stat);
			card->lbfqc = ns_stat_lfbqc_get(stat);

			PRINTK
			    ("nicstar%d: closing a VC with pending rx buffers.\n",
			     card->index);
			/* Give the buffers of the half-assembled PDU back to
			 * the hardware pools and recycle the iovec buffer. */
			iovb = vc->rx_iov;
			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
					      NS_PRV_IOVCNT(iovb));
			NS_PRV_IOVCNT(iovb) = 0;
			spin_lock_irqsave(&card->int_lock, flags);
			recycle_iov_buf(card, iovb);
			spin_unlock_irqrestore(&card->int_lock, flags);
			vc->rx_iov = NULL;
		}
	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		vc->tx = 0;
	}

	if (vcc->qos.txtp.traffic_class == ATM_CBR) {
		unsigned long flags;
		ns_scqe *scqep;
		scq_info *scq;

		scq = vc->scq;

		/* Busy-wait (with schedule()) until the SCQ has drained. */
		for (;;) {
			spin_lock_irqsave(&scq->lock, flags);
			scqep = scq->next;
			if (scqep ==
scq->base)
				scqep = scq->last;
			else
				scqep--;
			if (scqep == scq->tail) {
				spin_unlock_irqrestore(&scq->lock, flags);
				break;
			}
			/* If the last entry is not a TSR, place one in the SCQ
			   in order to be able to completely drain it and then
			   close. */
			if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) {
				ns_scqe tsr;
				u32 scdi, scqi;
				u32 data;
				int index;

				tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
				scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
				scqi = scq->next - scq->base;
				tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
				tsr.word_3 = 0x00000000;
				tsr.word_4 = 0x00000000;

				*scq->next = tsr;
				index = (int)scqi;
				scq->skb[index] = NULL;
				if (scq->next == scq->last)
					scq->next = scq->base;
				else
					scq->next++;
				/* Tell the hardware about the new tail. */
				data = scq_virt_to_bus(scq, scq->next);
				ns_write_sram(card, scq->scd, &data, 1);
			}
			spin_unlock_irqrestore(&scq->lock, flags);
			schedule();
		}

		/* Free all TST entries */
		data = NS_TST_OPCODE_VARIABLE;
		for (i = 0; i < NS_TST_NUM_ENTRIES; i++) {
			if (card->tste2vc[i] == vc) {
				ns_write_sram(card, card->tst_addr + i, &data,
					      1);
				card->tste2vc[i] = NULL;
				card->tst_free_entries++;
			}
		}

		card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
		free_scq(card, vc->scq, vcc);
	}

	/* remove all references to vcc before deleting it */
	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		unsigned long flags;
		scq_info *scq = card->scq0;

		spin_lock_irqsave(&scq->lock, flags);

		for (i = 0; i < scq->num_entries; i++) {
			if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) {
				ATM_SKB(scq->skb[i])->vcc = NULL;
				atm_return(vcc, scq->skb[i]->truesize);
				PRINTK
				    ("nicstar: deleted pending vcc mapping\n");
			}
		}

		spin_unlock_irqrestore(&scq->lock, flags);
	}

	vcc->dev_data = NULL;
	clear_bit(ATM_VF_PARTIAL, &vcc->flags);
	clear_bit(ATM_VF_ADDR, &vcc->flags);

#ifdef RX_DEBUG
	{
		u32 stat, cfg;
		stat = readl(card->membase + STAT);
		cfg = readl(card->membase + CFG);
		printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg);
		printk
		    ("TSQ: base = 0x%p next = 0x%p last = 0x%p TSQT = 0x%08X \n",
		     card->tsq.base, card->tsq.next, card->tsq.last,
readl(card->membase + TSQT));
		printk
		    ("RSQ: base = 0x%p next = 0x%p last = 0x%p RSQT = 0x%08X \n",
		     card->rsq.base, card->rsq.next, card->rsq.last,
		     readl(card->membase + RSQT));
		printk("Empty free buffer queue interrupt %s \n",
		       card->efbie ? "enabled" : "disabled");
		printk("SBCNT = %d count = %d LBCNT = %d count = %d \n",
		       ns_stat_sfbqc_get(stat), card->sbpool.count,
		       ns_stat_lfbqc_get(stat), card->lbpool.count);
		printk("hbpool.count = %d iovpool.count = %d \n",
		       card->hbpool.count, card->iovpool.count);
	}
#endif /* RX_DEBUG */
}

/*
 * Spread n fixed-rate (CBR) entries for vc as evenly as possible over the
 * Transmit Schedule Table, using a Bresenham-like accumulator (cl).
 */
static void fill_tst(ns_dev * card, int n, vc_map * vc)
{
	u32 new_tst;
	unsigned long cl;
	int e, r;
	u32 data;

	/* It would be very complicated to keep the two TSTs synchronized while
	   assuring that writes are only made to the inactive TST. So, for now I
	   will use only one TST. If problems occur, I will change this again */

	new_tst = card->tst_addr;

	/* Fill procedure */
	for (e = 0; e < NS_TST_NUM_ENTRIES; e++) {
		if (card->tste2vc[e] == NULL)
			break;
	}
	if (e == NS_TST_NUM_ENTRIES) {
		printk("nicstar%d: No free TST entries found. \n", card->index);
		return;
	}

	r = n;
	cl = NS_TST_NUM_ENTRIES;
	data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd);

	while (r > 0) {
		/* Claim entry e whenever the accumulator has wrapped. */
		if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) {
			card->tste2vc[e] = vc;
			ns_write_sram(card, new_tst + e, &data, 1);
			cl -= NS_TST_NUM_ENTRIES;
			r--;
		}

		if (++e == NS_TST_NUM_ENTRIES) {
			e = 0;
		}
		cl += n;
	}

	/* End of fill procedure */
	data = ns_tste_make(NS_TST_OPCODE_END, new_tst);
	ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1);
	ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1);
	card->tst_addr = new_tst;
}

/*
 * atmdev_ops.send: validate the skb and vcc, DMA-map the payload, build a
 * transmit buffer descriptor (TBD) and push it onto the proper SCQ.
 * Consumes the skb on every error path.  Returns 0 or -errno.
 */
static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	ns_dev *card;
	vc_map *vc;
	scq_info *scq;
	unsigned long buflen;
	ns_scqe scqe;
	u32 flags;		/* TBD flags, not CPU flags */

	card = vcc->dev->dev_data;
	TXPRINTK("nicstar%d: ns_send() called.\n", card->index);
	if ((vc = (vc_map *) vcc->dev_data) == NULL) {
		printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
		       card->index);
		atomic_inc(&vcc->stats->tx_err);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (!vc->tx) {
		printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
		       card->index);
		atomic_inc(&vcc->stats->tx_err);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
		printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
		       card->index);
		atomic_inc(&vcc->stats->tx_err);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (skb_shinfo(skb)->nr_frags != 0) {
		printk("nicstar%d: No scatter-gather yet.\n", card->index);
		atomic_inc(&vcc->stats->tx_err);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	ATM_SKB(skb)->vcc = vcc;

	NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data,
					 skb->len, PCI_DMA_TODEVICE);

	if (vcc->qos.aal == ATM_AAL5) {
		buflen = (skb->len + 47 + 8) / 48 * 48;	/* Multiple of 48 */
		flags = NS_TBD_AAL5;
		scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb));
		scqe.word_3 = cpu_to_le32(skb->len);
		scqe.word_4 =
		    ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0,
				    ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP
? 1 : 0);
		flags |= NS_TBD_EOPDU;
	} else {		/* (vcc->qos.aal == ATM_AAL0) */

		buflen = ATM_CELL_PAYLOAD;	/* i.e., 48 bytes */
		flags = NS_TBD_AAL0;
		/* Skip the 4-byte AAL0 header already in the skb. */
		scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER);
		scqe.word_3 = cpu_to_le32(0x00000000);
		if (*skb->data & 0x02)	/* Payload type 1 - end of pdu */
			flags |= NS_TBD_EOPDU;
		scqe.word_4 =
		    cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK);
		/* Force the VPI/VCI to be the same as in VCC struct */
		scqe.word_4 |=
		    cpu_to_le32((((u32) vcc->vpi) << NS_TBD_VPI_SHIFT |
				 ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) &
				NS_TBD_VC_MASK);
	}

	if (vcc->qos.txtp.traffic_class == ATM_CBR) {
		scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen);
		scq = ((vc_map *) vcc->dev_data)->scq;
	} else {
		scqe.word_1 =
		    ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen);
		scq = card->scq0;
	}

	if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
		atomic_inc(&vcc->stats->tx_err);
		dev_kfree_skb_any(skb);
		return -EIO;
	}
	atomic_inc(&vcc->stats->tx);

	return 0;
}

/*
 * Queue a TBD on the SCQ, sleeping (outside interrupt context) while the
 * queue is full, and periodically insert a TSR so completed entries get
 * reported and drained.  Returns 0 on success, 1 if the TBD was not queued.
 */
static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
		     struct sk_buff *skb)
{
	unsigned long flags;
	ns_scqe tsr;
	u32 scdi, scqi;
	int scq_is_vbr;
	u32 data;
	int index;

	spin_lock_irqsave(&scq->lock, flags);
	while (scq->tail == scq->next) {
		/* Queue full: cannot sleep in interrupt context. */
		if (in_interrupt()) {
			spin_unlock_irqrestore(&scq->lock, flags);
			printk("nicstar%d: Error pushing TBD.\n", card->index);
			return 1;
		}

		scq->full = 1;
		wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq,
							  scq->tail != scq->next,
							  scq->lock,
							  SCQFULL_TIMEOUT);

		if (scq->full) {
			spin_unlock_irqrestore(&scq->lock, flags);
			printk("nicstar%d: Timeout pushing TBD.\n",
			       card->index);
			return 1;
		}
	}
	*scq->next = *tbd;
	index = (int)(scq->next - scq->base);
	scq->skb[index] = skb;	/* remembered for drain_scq() */
	XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n",
		card->index, skb, index);
	XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
		card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2),
		le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4), scq->next);
	if
(scq->next == scq->last)
		scq->next = scq->base;
	else
		scq->next++;

	vc->tbd_count++;
	if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) {
		scq->tbd_count++;
		scq_is_vbr = 1;
	} else
		scq_is_vbr = 0;

	/* Insert a TSR once enough TBDs have accumulated so the hardware
	 * reports progress and drain_scq() can reclaim the skbs. */
	if (vc->tbd_count >= MAX_TBD_PER_VC
	    || scq->tbd_count >= MAX_TBD_PER_SCQ) {
		int has_run = 0;

		while (scq->tail == scq->next) {
			if (in_interrupt()) {
				data = scq_virt_to_bus(scq, scq->next);
				ns_write_sram(card, scq->scd, &data, 1);
				spin_unlock_irqrestore(&scq->lock, flags);
				printk("nicstar%d: Error pushing TSR.\n",
				       card->index);
				return 0;
			}

			scq->full = 1;
			if (has_run++)
				break;	/* only wait once for room */
			wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq,
								  scq->tail != scq->next,
								  scq->lock,
								  SCQFULL_TIMEOUT);
		}

		if (!scq->full) {
			tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
			if (scq_is_vbr)
				scdi = NS_TSR_SCDISVBR;
			else
				scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
			scqi = scq->next - scq->base;
			tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
			tsr.word_3 = 0x00000000;
			tsr.word_4 = 0x00000000;

			*scq->next = tsr;
			index = (int)scqi;
			scq->skb[index] = NULL;
			XPRINTK
			    ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
			     card->index, le32_to_cpu(tsr.word_1),
			     le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3),
			     le32_to_cpu(tsr.word_4), scq->next);
			if (scq->next == scq->last)
				scq->next = scq->base;
			else
				scq->next++;
			vc->tbd_count = 0;
			scq->tbd_count = 0;
		} else
			PRINTK("nicstar%d: Timeout pushing TSR.\n",
			       card->index);
	}
	/* Publish the new queue tail to the hardware SCD. */
	data = scq_virt_to_bus(scq, scq->next);
	ns_write_sram(card, scq->scd, &data, 1);

	spin_unlock_irqrestore(&scq->lock, flags);

	return 0;
}

/*
 * Walk the Transmit Status Queue, draining each referenced SCQ, and
 * acknowledge the serviced entries by writing TSQH.
 */
static void process_tsq(ns_dev * card)
{
	u32 scdi;
	scq_info *scq;
	ns_tsi *previous = NULL, *one_ahead, *two_ahead;
	int serviced_entries;	/* flag indicating at least on entry was serviced */

	serviced_entries = 0;

	if (card->tsq.next == card->tsq.last)
		one_ahead = card->tsq.base;
	else
		one_ahead = card->tsq.next + 1;

	if (one_ahead == card->tsq.last)
		two_ahead = card->tsq.base;
	else
		two_ahead = one_ahead + 1;

	while (!ns_tsi_isempty(card->tsq.next) ||
!ns_tsi_isempty(one_ahead) ||
	       !ns_tsi_isempty(two_ahead))
		/* At most two empty, as stated in the 77201 errata */
	{
		serviced_entries = 1;

		/* Skip the one or two possible empty entries */
		while (ns_tsi_isempty(card->tsq.next)) {
			if (card->tsq.next == card->tsq.last)
				card->tsq.next = card->tsq.base;
			else
				card->tsq.next++;
		}

		if (!ns_tsi_tmrof(card->tsq.next)) {
			scdi = ns_tsi_getscdindex(card->tsq.next);
			if (scdi == NS_TSI_SCDISVBR)
				scq = card->scq0;
			else {
				if (card->scd2vc[scdi] == NULL) {
					printk
					    ("nicstar%d: could not find VC from SCD index.\n",
					     card->index);
					ns_tsi_init(card->tsq.next);
					return;
				}
				scq = card->scd2vc[scdi]->scq;
			}
			drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next));
			/* Room was just made; wake any sleeping sender. */
			scq->full = 0;
			wake_up_interruptible(&(scq->scqfull_waitq));
		}

		ns_tsi_init(card->tsq.next);
		previous = card->tsq.next;
		if (card->tsq.next == card->tsq.last)
			card->tsq.next = card->tsq.base;
		else
			card->tsq.next++;

		if (card->tsq.next == card->tsq.last)
			one_ahead = card->tsq.base;
		else
			one_ahead = card->tsq.next + 1;

		if (one_ahead == card->tsq.last)
			two_ahead = card->tsq.base;
		else
			two_ahead = one_ahead + 1;
	}

	if (serviced_entries)
		writel(PTR_DIFF(previous, card->tsq.base),
		       card->membase + TSQH);
}

/*
 * Free (or pop) every transmitted skb between the SCQ tail and position
 * pos, unmapping its DMA buffer, then advance the tail to pos.
 */
static void drain_scq(ns_dev * card, scq_info * scq, int pos)
{
	struct atm_vcc *vcc;
	struct sk_buff *skb;
	int i;
	unsigned long flags;

	XPRINTK("nicstar%d: drain_scq() called, scq at 0x%p, pos %d.\n",
		card->index, scq, pos);
	if (pos >= scq->num_entries) {
		printk("nicstar%d: Bad index on drain_scq().\n", card->index);
		return;
	}

	spin_lock_irqsave(&scq->lock, flags);
	i = (int)(scq->tail - scq->base);
	if (++i == scq->num_entries)
		i = 0;
	while (i != pos) {
		skb = scq->skb[i];
		XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n",
			card->index, skb, i);
		if (skb != NULL) {
			pci_unmap_single(card->pcidev,
					 NS_PRV_DMA(skb),
					 skb->len,
					 PCI_DMA_TODEVICE);
			vcc = ATM_SKB(skb)->vcc;
			if (vcc && vcc->pop != NULL) {
				vcc->pop(vcc, skb);
			} else {
				dev_kfree_skb_irq(skb);
			}
			scq->skb[i] = NULL;
		}
		if (++i ==
scq->num_entries)
			i = 0;
	}
	scq->tail = scq->base + pos;
	spin_unlock_irqrestore(&scq->lock, flags);
}

/*
 * Walk the Receive Status Queue, handing each valid entry to dequeue_rx(),
 * and acknowledge the serviced entries by writing RSQH.
 */
static void process_rsq(ns_dev * card)
{
	ns_rsqe *previous;

	if (!ns_rsqe_valid(card->rsq.next))
		return;
	do {
		dequeue_rx(card, card->rsq.next);
		ns_rsqe_init(card->rsq.next);
		previous = card->rsq.next;
		if (card->rsq.next == card->rsq.last)
			card->rsq.next = card->rsq.base;
		else
			card->rsq.next++;
	} while (ns_rsqe_valid(card->rsq.next));
	writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH);
}

/*
 * Handle one receive status entry: look up the rx buffer by its idr id,
 * unmap it, and either deliver AAL0 cells immediately or accumulate AAL5
 * fragments in the per-vc iovec buffer until end-of-PDU.
 */
static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
{
	u32 vpi, vci;
	vc_map *vc;
	struct sk_buff *iovb;
	struct iovec *iov;
	struct atm_vcc *vcc;
	struct sk_buff *skb;
	unsigned short aal5_len;
	int len;
	u32 stat;
	u32 id;

	stat = readl(card->membase + STAT);
	card->sbfqc = ns_stat_sfbqc_get(stat);
	card->lbfqc = ns_stat_lfbqc_get(stat);

	/* The hardware hands back the idr id push_rxbufs() stored. */
	id = le32_to_cpu(rsqe->buffer_handle);
	skb = idr_find(&card->idr, id);
	if (!skb) {
		RXPRINTK(KERN_ERR
			 "nicstar%d: idr_find() failed!\n", card->index);
		return;
	}
	idr_remove(&card->idr, id);
	pci_dma_sync_single_for_cpu(card->pcidev,
				    NS_PRV_DMA(skb),
				    (NS_PRV_BUFTYPE(skb) == BUF_SM
				     ? NS_SMSKBSIZE : NS_LGSKBSIZE),
				    PCI_DMA_FROMDEVICE);
	pci_unmap_single(card->pcidev,
			 NS_PRV_DMA(skb),
			 (NS_PRV_BUFTYPE(skb) == BUF_SM ?
NS_SMSKBSIZE : NS_LGSKBSIZE),
			 PCI_DMA_FROMDEVICE);
	vpi = ns_rsqe_vpi(rsqe);
	vci = ns_rsqe_vci(rsqe);
	if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) {
		printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n",
		       card->index, vpi, vci);
		recycle_rx_buf(card, skb);
		return;
	}

	vc = &(card->vcmap[vpi << card->vcibits | vci]);
	if (!vc->rx) {
		RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n",
			 card->index, vpi, vci);
		recycle_rx_buf(card, skb);
		return;
	}

	vcc = vc->rx_vcc;

	if (vcc->qos.aal == ATM_AAL0) {
		struct sk_buff *sb;
		unsigned char *cell;
		int i;

		/* Deliver each 48-byte cell in its own small skb. */
		cell = skb->data;
		for (i = ns_rsqe_cellcount(rsqe); i; i--) {
			if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) {
				printk
				    ("nicstar%d: Can't allocate buffers for aal0.\n",
				     card->index);
				atomic_add(i, &vcc->stats->rx_drop);
				break;
			}
			if (!atm_charge(vcc, sb->truesize)) {
				RXPRINTK
				    ("nicstar%d: atm_charge() dropped aal0 packets.\n",
				     card->index);
				atomic_add(i - 1, &vcc->stats->rx_drop);	/* already increased by 1 */
				dev_kfree_skb_any(sb);
				break;
			}
			/* Rebuild the header */
			*((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 |
			    (ns_rsqe_clp(rsqe) ?
0x00000001 : 0x00000000);
			if (i == 1 && ns_rsqe_eopdu(rsqe))
				*((u32 *) sb->data) |= 0x00000002;
			skb_put(sb, NS_AAL0_HEADER);
			memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
			skb_put(sb, ATM_CELL_PAYLOAD);
			ATM_SKB(sb)->vcc = vcc;
			__net_timestamp(sb);
			vcc->push(vcc, sb);
			atomic_inc(&vcc->stats->rx);
			cell += ATM_CELL_PAYLOAD;
		}

		recycle_rx_buf(card, skb);
		return;
	}

	/* To reach this point, the AAL layer can only be AAL5 */

	if ((iovb = vc->rx_iov) == NULL) {
		/* Start a new PDU: get an iovec buffer from the pool, or
		 * allocate one, topping the pool up when it runs low. */
		iovb = skb_dequeue(&(card->iovpool.queue));
		if (iovb == NULL) {	/* No buffers in the queue */
			iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
			if (iovb == NULL) {
				printk("nicstar%d: Out of iovec buffers.\n",
				       card->index);
				atomic_inc(&vcc->stats->rx_drop);
				recycle_rx_buf(card, skb);
				return;
			}
			NS_PRV_BUFTYPE(iovb) = BUF_NONE;
		} else if (--card->iovpool.count < card->iovnr.min) {
			struct sk_buff *new_iovb;
			if ((new_iovb =
			     alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) {
				/* NOTE(review): this tags iovb, not new_iovb;
				 * looks like it should be new_iovb - verify
				 * against upstream before changing. */
				NS_PRV_BUFTYPE(iovb) = BUF_NONE;
				skb_queue_tail(&card->iovpool.queue, new_iovb);
				card->iovpool.count++;
			}
		}
		vc->rx_iov = iovb;
		NS_PRV_IOVCNT(iovb) = 0;
		iovb->len = 0;
		iovb->data = iovb->head;
		skb_reset_tail_pointer(iovb);
		/* IMPORTANT: a pointer to the sk_buff containing the small or
		   large buffer is stored as iovec base, NOT a pointer to the
		   small or large buffer itself.
 */
	} else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
		/* Too many fragments: reset the iovec buffer and drop. */
		printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
		atomic_inc(&vcc->stats->rx_err);
		recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
				      NS_MAX_IOVECS);
		NS_PRV_IOVCNT(iovb) = 0;
		iovb->len = 0;
		iovb->data = iovb->head;
		skb_reset_tail_pointer(iovb);
	}
	/* Append this buffer (as an sk_buff pointer) to the iovec list. */
	iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++];
	iov->iov_base = (void *)skb;
	iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
	iovb->len += iov->iov_len;

#ifdef EXTRA_DEBUG
	if (NS_PRV_IOVCNT(iovb) == 1) {
		if (NS_PRV_BUFTYPE(skb) != BUF_SM) {
			printk
			    ("nicstar%d: Expected a small buffer, and this is not one.\n",
			     card->index);
			which_list(card, skb);
			atomic_inc(&vcc->stats->rx_err);
			recycle_rx_buf(card, skb);
			vc->rx_iov = NULL;
			recycle_iov_buf(card, iovb);
			return;
		}
	} else {		/* NS_PRV_IOVCNT(iovb) >= 2 */

		if (NS_PRV_BUFTYPE(skb) != BUF_LG) {
			printk
			    ("nicstar%d: Expected a large buffer, and this is not one.\n",
			     card->index);
			which_list(card, skb);
			atomic_inc(&vcc->stats->rx_err);
			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
					      NS_PRV_IOVCNT(iovb));
			vc->rx_iov = NULL;
			recycle_iov_buf(card, iovb);
			return;
		}
	}
#endif /* EXTRA_DEBUG */

	if (ns_rsqe_eopdu(rsqe)) {
		/* This works correctly regardless of the endianness of the host */
		unsigned char *L1L2 = (unsigned char *)
		    (skb->data + iov->iov_len - 6);
		aal5_len = L1L2[0] << 8 | L1L2[1];
		len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
		/* Reject CRC errors and PDU-length/buffer-length mismatches. */
		if (ns_rsqe_crcerr(rsqe) ||
		    len + 8 > iovb->len || len + (47 + 8) < iovb->len) {
			printk("nicstar%d: AAL5 CRC error", card->index);
			if (len + 8 > iovb->len || len + (47 + 8) < iovb->len)
				printk(" - PDU size mismatch.\n");
			else
				printk(".\n");
			atomic_inc(&vcc->stats->rx_err);
			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
					      NS_PRV_IOVCNT(iovb));
			vc->rx_iov = NULL;
			recycle_iov_buf(card, iovb);
			return;
		}

		/* By this point we (hopefully) have a complete SDU without errors.
 */
		if (NS_PRV_IOVCNT(iovb) == 1) {	/* Just a small buffer */
			/* skb points to a small buffer */
			if (!atm_charge(vcc, skb->truesize)) {
				push_rxbufs(card, skb);
				atomic_inc(&vcc->stats->rx_drop);
			} else {
				skb_put(skb, len);
				dequeue_sm_buf(card, skb);
#ifdef NS_USE_DESTRUCTORS
				skb->destructor = ns_sb_destructor;
#endif /* NS_USE_DESTRUCTORS */
				ATM_SKB(skb)->vcc = vcc;
				__net_timestamp(skb);
				vcc->push(vcc, skb);
				atomic_inc(&vcc->stats->rx);
			}
		} else if (NS_PRV_IOVCNT(iovb) == 2) {	/* One small plus one large buffer */
			struct sk_buff *sb;

			sb = (struct sk_buff *)(iov - 1)->iov_base;
			/* skb points to a large buffer */

			if (len <= NS_SMBUFSIZE) {
				/* Whole PDU fits in the small buffer; the
				 * large one goes straight back to the pool. */
				if (!atm_charge(vcc, sb->truesize)) {
					push_rxbufs(card, sb);
					atomic_inc(&vcc->stats->rx_drop);
				} else {
					skb_put(sb, len);
					dequeue_sm_buf(card, sb);
#ifdef NS_USE_DESTRUCTORS
					sb->destructor = ns_sb_destructor;
#endif /* NS_USE_DESTRUCTORS */
					ATM_SKB(sb)->vcc = vcc;
					__net_timestamp(sb);
					vcc->push(vcc, sb);
					atomic_inc(&vcc->stats->rx);
				}

				push_rxbufs(card, skb);

			} else {	/* len > NS_SMBUFSIZE, the usual case */

				/* Prepend the small buffer's data in front of
				 * the large buffer and deliver the latter. */
				if (!atm_charge(vcc, skb->truesize)) {
					push_rxbufs(card, skb);
					atomic_inc(&vcc->stats->rx_drop);
				} else {
					dequeue_lg_buf(card, skb);
#ifdef NS_USE_DESTRUCTORS
					skb->destructor = ns_lb_destructor;
#endif /* NS_USE_DESTRUCTORS */
					skb_push(skb, NS_SMBUFSIZE);
					skb_copy_from_linear_data(sb, skb->data,
								  NS_SMBUFSIZE);
					skb_put(skb, len - NS_SMBUFSIZE);
					ATM_SKB(skb)->vcc = vcc;
					__net_timestamp(skb);
					vcc->push(vcc, skb);
					atomic_inc(&vcc->stats->rx);
				}

				push_rxbufs(card, sb);

			}

		} else {	/* Must push a huge buffer */

			struct sk_buff *hb, *sb, *lb;
			int remaining, tocopy;
			int j;

			hb = skb_dequeue(&(card->hbpool.queue));
			if (hb == NULL) {	/* No buffers in the queue */

				hb = dev_alloc_skb(NS_HBUFSIZE);
				if (hb == NULL) {
					printk
					    ("nicstar%d: Out of huge buffers.\n",
					     card->index);
					atomic_inc(&vcc->stats->rx_drop);
					recycle_iovec_rx_bufs(card,
							      (struct iovec *)
							      iovb->data,
							      NS_PRV_IOVCNT(iovb));
					vc->rx_iov = NULL;
					recycle_iov_buf(card, iovb);
					return;
				} else
if (card->hbpool.count < card->hbnr.min) {
					/* Replenish the huge-buffer pool. */
					struct sk_buff *new_hb;
					if ((new_hb =
					     dev_alloc_skb(NS_HBUFSIZE)) !=
					    NULL) {
						skb_queue_tail(&card->hbpool.queue,
							       new_hb);
						card->hbpool.count++;
					}
				}
				NS_PRV_BUFTYPE(hb) = BUF_NONE;
			} else if (--card->hbpool.count < card->hbnr.min) {
				struct sk_buff *new_hb;
				if ((new_hb =
				     dev_alloc_skb(NS_HBUFSIZE)) != NULL) {
					NS_PRV_BUFTYPE(new_hb) = BUF_NONE;
					skb_queue_tail(&card->hbpool.queue,
						       new_hb);
					card->hbpool.count++;
				}
				if (card->hbpool.count < card->hbnr.min) {
					if ((new_hb =
					     dev_alloc_skb(NS_HBUFSIZE)) !=
					    NULL) {
						NS_PRV_BUFTYPE(new_hb) =
						    BUF_NONE;
						skb_queue_tail(&card->hbpool.queue,
							       new_hb);
						card->hbpool.count++;
					}
				}
			}

			iov = (struct iovec *)iovb->data;

			if (!atm_charge(vcc, hb->truesize)) {
				recycle_iovec_rx_bufs(card, iov,
						      NS_PRV_IOVCNT(iovb));
				/* Return the unused huge buffer to the pool
				 * if there is room, otherwise free it. */
				if (card->hbpool.count < card->hbnr.max) {
					skb_queue_tail(&card->hbpool.queue, hb);
					card->hbpool.count++;
				} else
					dev_kfree_skb_any(hb);
				atomic_inc(&vcc->stats->rx_drop);
			} else {
				/* Copy the small buffer to the huge buffer */
				sb = (struct sk_buff *)iov->iov_base;
				skb_copy_from_linear_data(sb, hb->data,
							  iov->iov_len);
				skb_put(hb, iov->iov_len);
				remaining = len - iov->iov_len;
				iov++;
				/* Free the small buffer */
				push_rxbufs(card, sb);

				/* Copy all large buffers to the huge buffer
				   and free them */
				for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) {
					lb = (struct sk_buff *)iov->iov_base;
					tocopy =
					    min_t(int, remaining, iov->iov_len);
					skb_copy_from_linear_data(lb,
								  skb_tail_pointer
								  (hb), tocopy);
					skb_put(hb, tocopy);
					iov++;
					remaining -= tocopy;
					push_rxbufs(card, lb);
				}
#ifdef EXTRA_DEBUG
				if (remaining != 0 || hb->len != len)
					printk
					    ("nicstar%d: Huge buffer len mismatch.\n",
					     card->index);
#endif /* EXTRA_DEBUG */
				ATM_SKB(hb)->vcc = vcc;
#ifdef NS_USE_DESTRUCTORS
				hb->destructor = ns_hb_destructor;
#endif /* NS_USE_DESTRUCTORS */
				__net_timestamp(hb);
				vcc->push(vcc, hb);
				atomic_inc(&vcc->stats->rx);
			}
		}

		vc->rx_iov = NULL;
		recycle_iov_buf(card, iovb);
	}
}

#ifdef NS_USE_DESTRUCTORS

static void
ns_sb_destructor(struct sk_buff *sb)
{
	ns_dev *card;
	u32 stat;

	card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
	stat = readl(card->membase + STAT);
	card->sbfqc = ns_stat_sfbqc_get(stat);
	card->lbfqc = ns_stat_lfbqc_get(stat);

	/* Refill the small-buffer pool up to its minimum level. */
	do {
		sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
		if (sb == NULL)
			break;
		NS_PRV_BUFTYPE(sb) = BUF_SM;
		skb_queue_tail(&card->sbpool.queue, sb);
		skb_reserve(sb, NS_AAL0_HEADER);
		push_rxbufs(card, sb);
	} while (card->sbfqc < card->sbnr.min);
}

static void ns_lb_destructor(struct sk_buff *lb)
{
	ns_dev *card;
	u32 stat;

	card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
	stat = readl(card->membase + STAT);
	card->sbfqc = ns_stat_sfbqc_get(stat);
	card->lbfqc = ns_stat_lfbqc_get(stat);

	/* Refill the large-buffer pool up to its minimum level. */
	do {
		lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
		if (lb == NULL)
			break;
		NS_PRV_BUFTYPE(lb) = BUF_LG;
		skb_queue_tail(&card->lbpool.queue, lb);
		skb_reserve(lb, NS_SMBUFSIZE);
		push_rxbufs(card, lb);
	} while (card->lbfqc < card->lbnr.min);
}

static void ns_hb_destructor(struct sk_buff *hb)
{
	ns_dev *card;

	card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;

	/* Top the huge-buffer pool back up to its initial level. */
	while (card->hbpool.count < card->hbnr.init) {
		hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
		if (hb == NULL)
			break;
		NS_PRV_BUFTYPE(hb) = BUF_NONE;
		skb_queue_tail(&card->hbpool.queue, hb);
		card->hbpool.count++;
	}
}

#endif /* NS_USE_DESTRUCTORS */

/* Return an rx buffer to the hardware pool; BUF_NONE buffers cannot be
 * pushed back and are simply freed. */
static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb)
{
	if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) {
		printk("nicstar%d: What kind of rx buffer is this?\n",
		       card->index);
		dev_kfree_skb_any(skb);
	} else
		push_rxbufs(card, skb);
}

/* Recycle the sk_buffs referenced by the first count iovec entries. */
static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count)
{
	while (count-- > 0)
		recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base);
}

/* Return an iovec buffer to the pool, or free it if the pool is full. */
static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb)
{
	if (card->iovpool.count < card->iovnr.max) {
		skb_queue_tail(&card->iovpool.queue, iovb);
		card->iovpool.count++;
	} else
		dev_kfree_skb_any(iovb);
}

/* Remove a small buffer from the pool, replenishing if below init level. */
static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
{
skb_unlink(sb, &card->sbpool.queue); #ifdef NS_USE_DESTRUCTORS if (card->sbfqc < card->sbnr.min) #else if (card->sbfqc < card->sbnr.init) { struct sk_buff *new_sb; if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { NS_PRV_BUFTYPE(new_sb) = BUF_SM; skb_queue_tail(&card->sbpool.queue, new_sb); skb_reserve(new_sb, NS_AAL0_HEADER); push_rxbufs(card, new_sb); } } if (card->sbfqc < card->sbnr.init) #endif /* NS_USE_DESTRUCTORS */ { struct sk_buff *new_sb; if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { NS_PRV_BUFTYPE(new_sb) = BUF_SM; skb_queue_tail(&card->sbpool.queue, new_sb); skb_reserve(new_sb, NS_AAL0_HEADER); push_rxbufs(card, new_sb); } } } static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb) { skb_unlink(lb, &card->lbpool.queue); #ifdef NS_USE_DESTRUCTORS if (card->lbfqc < card->lbnr.min) #else if (card->lbfqc < card->lbnr.init) { struct sk_buff *new_lb; if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { NS_PRV_BUFTYPE(new_lb) = BUF_LG; skb_queue_tail(&card->lbpool.queue, new_lb); skb_reserve(new_lb, NS_SMBUFSIZE); push_rxbufs(card, new_lb); } } if (card->lbfqc < card->lbnr.init) #endif /* NS_USE_DESTRUCTORS */ { struct sk_buff *new_lb; if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { NS_PRV_BUFTYPE(new_lb) = BUF_LG; skb_queue_tail(&card->lbpool.queue, new_lb); skb_reserve(new_lb, NS_SMBUFSIZE); push_rxbufs(card, new_lb); } } } static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page) { u32 stat; ns_dev *card; int left; left = (int)*pos; card = (ns_dev *) dev->dev_data; stat = readl(card->membase + STAT); if (!left--) return sprintf(page, "Pool count min init max \n"); if (!left--) return sprintf(page, "Small %5d %5d %5d %5d \n", ns_stat_sfbqc_get(stat), card->sbnr.min, card->sbnr.init, card->sbnr.max); if (!left--) return sprintf(page, "Large %5d %5d %5d %5d \n", ns_stat_lfbqc_get(stat), card->lbnr.min, card->lbnr.init, card->lbnr.max); if (!left--) return sprintf(page, "Huge %5d %5d %5d %5d \n", card->hbpool.count, 
card->hbnr.min, card->hbnr.init, card->hbnr.max); if (!left--) return sprintf(page, "Iovec %5d %5d %5d %5d \n", card->iovpool.count, card->iovnr.min, card->iovnr.init, card->iovnr.max); if (!left--) { int retval; retval = sprintf(page, "Interrupt counter: %u \n", card->intcnt); card->intcnt = 0; return retval; } #if 0 /* Dump 25.6 Mbps PHY registers */ /* Now there's a 25.6 Mbps PHY driver this code isn't needed. I left it here just in case it's needed for debugging. */ if (card->max_pcr == ATM_25_PCR && !left--) { u32 phy_regs[4]; u32 i; for (i = 0; i < 4; i++) { while (CMD_BUSY(card)) ; writel(NS_CMD_READ_UTILITY | 0x00000200 | i, card->membase + CMD); while (CMD_BUSY(card)) ; phy_regs[i] = readl(card->membase + DR0) & 0x000000FF; } return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n", phy_regs[0], phy_regs[1], phy_regs[2], phy_regs[3]); } #endif /* 0 - Dump 25.6 Mbps PHY registers */ #if 0 /* Dump TST */ if (left-- < NS_TST_NUM_ENTRIES) { if (card->tste2vc[left + 1] == NULL) return sprintf(page, "%5d - VBR/UBR \n", left + 1); else return sprintf(page, "%5d - %d %d \n", left + 1, card->tste2vc[left + 1]->tx_vcc->vpi, card->tste2vc[left + 1]->tx_vcc->vci); } #endif /* 0 */ return 0; } static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg) { ns_dev *card; pool_levels pl; long btype; unsigned long flags; card = dev->dev_data; switch (cmd) { case NS_GETPSTAT: if (get_user (pl.buftype, &((pool_levels __user *) arg)->buftype)) return -EFAULT; switch (pl.buftype) { case NS_BUFTYPE_SMALL: pl.count = ns_stat_sfbqc_get(readl(card->membase + STAT)); pl.level.min = card->sbnr.min; pl.level.init = card->sbnr.init; pl.level.max = card->sbnr.max; break; case NS_BUFTYPE_LARGE: pl.count = ns_stat_lfbqc_get(readl(card->membase + STAT)); pl.level.min = card->lbnr.min; pl.level.init = card->lbnr.init; pl.level.max = card->lbnr.max; break; case NS_BUFTYPE_HUGE: pl.count = card->hbpool.count; pl.level.min = card->hbnr.min; pl.level.init = 
card->hbnr.init; pl.level.max = card->hbnr.max; break; case NS_BUFTYPE_IOVEC: pl.count = card->iovpool.count; pl.level.min = card->iovnr.min; pl.level.init = card->iovnr.init; pl.level.max = card->iovnr.max; break; default: return -ENOIOCTLCMD; } if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl))) return (sizeof(pl)); else return -EFAULT; case NS_SETBUFLEV: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl))) return -EFAULT; if (pl.level.min >= pl.level.init || pl.level.init >= pl.level.max) return -EINVAL; if (pl.level.min == 0) return -EINVAL; switch (pl.buftype) { case NS_BUFTYPE_SMALL: if (pl.level.max > TOP_SB) return -EINVAL; card->sbnr.min = pl.level.min; card->sbnr.init = pl.level.init; card->sbnr.max = pl.level.max; break; case NS_BUFTYPE_LARGE: if (pl.level.max > TOP_LB) return -EINVAL; card->lbnr.min = pl.level.min; card->lbnr.init = pl.level.init; card->lbnr.max = pl.level.max; break; case NS_BUFTYPE_HUGE: if (pl.level.max > TOP_HB) return -EINVAL; card->hbnr.min = pl.level.min; card->hbnr.init = pl.level.init; card->hbnr.max = pl.level.max; break; case NS_BUFTYPE_IOVEC: if (pl.level.max > TOP_IOVB) return -EINVAL; card->iovnr.min = pl.level.min; card->iovnr.init = pl.level.init; card->iovnr.max = pl.level.max; break; default: return -EINVAL; } return 0; case NS_ADJBUFLEV: if (!capable(CAP_NET_ADMIN)) return -EPERM; btype = (long)arg; /* a long is the same size as a pointer or bigger */ switch (btype) { case NS_BUFTYPE_SMALL: while (card->sbfqc < card->sbnr.init) { struct sk_buff *sb; sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); if (sb == NULL) return -ENOMEM; NS_PRV_BUFTYPE(sb) = BUF_SM; skb_queue_tail(&card->sbpool.queue, sb); skb_reserve(sb, NS_AAL0_HEADER); push_rxbufs(card, sb); } break; case NS_BUFTYPE_LARGE: while (card->lbfqc < card->lbnr.init) { struct sk_buff *lb; lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); if (lb == NULL) return -ENOMEM; NS_PRV_BUFTYPE(lb) = BUF_LG; 
skb_queue_tail(&card->lbpool.queue, lb); skb_reserve(lb, NS_SMBUFSIZE); push_rxbufs(card, lb); } break; case NS_BUFTYPE_HUGE: while (card->hbpool.count > card->hbnr.init) { struct sk_buff *hb; spin_lock_irqsave(&card->int_lock, flags); hb = skb_dequeue(&card->hbpool.queue); card->hbpool.count--; spin_unlock_irqrestore(&card->int_lock, flags); if (hb == NULL) printk ("nicstar%d: huge buffer count inconsistent.\n", card->index); else dev_kfree_skb_any(hb); } while (card->hbpool.count < card->hbnr.init) { struct sk_buff *hb; hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); if (hb == NULL) return -ENOMEM; NS_PRV_BUFTYPE(hb) = BUF_NONE; spin_lock_irqsave(&card->int_lock, flags); skb_queue_tail(&card->hbpool.queue, hb); card->hbpool.count++; spin_unlock_irqrestore(&card->int_lock, flags); } break; case NS_BUFTYPE_IOVEC: while (card->iovpool.count > card->iovnr.init) { struct sk_buff *iovb; spin_lock_irqsave(&card->int_lock, flags); iovb = skb_dequeue(&card->iovpool.queue); card->iovpool.count--; spin_unlock_irqrestore(&card->int_lock, flags); if (iovb == NULL) printk ("nicstar%d: iovec buffer count inconsistent.\n", card->index); else dev_kfree_skb_any(iovb); } while (card->iovpool.count < card->iovnr.init) { struct sk_buff *iovb; iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); if (iovb == NULL) return -ENOMEM; NS_PRV_BUFTYPE(iovb) = BUF_NONE; spin_lock_irqsave(&card->int_lock, flags); skb_queue_tail(&card->iovpool.queue, iovb); card->iovpool.count++; spin_unlock_irqrestore(&card->int_lock, flags); } break; default: return -EINVAL; } return 0; default: if (dev->phy && dev->phy->ioctl) { return dev->phy->ioctl(dev, cmd, arg); } else { printk("nicstar%d: %s == NULL \n", card->index, dev->phy ? 
"dev->phy->ioctl" : "dev->phy"); return -ENOIOCTLCMD; } } } #ifdef EXTRA_DEBUG static void which_list(ns_dev * card, struct sk_buff *skb) { printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb)); } #endif /* EXTRA_DEBUG */ static void ns_poll(unsigned long arg) { int i; ns_dev *card; unsigned long flags; u32 stat_r, stat_w; PRINTK("nicstar: Entering ns_poll().\n"); for (i = 0; i < num_cards; i++) { card = cards[i]; if (spin_is_locked(&card->int_lock)) { /* Probably it isn't worth spinning */ continue; } spin_lock_irqsave(&card->int_lock, flags); stat_w = 0; stat_r = readl(card->membase + STAT); if (stat_r & NS_STAT_TSIF) stat_w |= NS_STAT_TSIF; if (stat_r & NS_STAT_EOPDU) stat_w |= NS_STAT_EOPDU; process_tsq(card); process_rsq(card); writel(stat_w, card->membase + STAT); spin_unlock_irqrestore(&card->int_lock, flags); } mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD); PRINTK("nicstar: Leaving ns_poll().\n"); } static void ns_phy_put(struct atm_dev *dev, unsigned char value, unsigned long addr) { ns_dev *card; unsigned long flags; card = dev->dev_data; spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; writel((u32) value, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF), card->membase + CMD); spin_unlock_irqrestore(&card->res_lock, flags); } static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr) { ns_dev *card; unsigned long flags; u32 data; card = dev->dev_data; spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF), card->membase + CMD); while (CMD_BUSY(card)) ; data = readl(card->membase + DR0) & 0x000000FF; spin_unlock_irqrestore(&card->res_lock, flags); return (unsigned char)data; } module_init(nicstar_init); module_exit(nicstar_cleanup);
gpl-2.0
RacerMod/android_kernel_zte_msm7x27-caf
arch/microblaze/kernel/cpu/cpuinfo.c
850
2108
/* * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2007 John Williams <john.williams@petalogix.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <asm/cpuinfo.h> #include <asm/pvr.h> const struct cpu_ver_key cpu_ver_lookup[] = { /* These key value are as per MBV field in PVR0 */ {"5.00.a", 0x01}, {"5.00.b", 0x02}, {"5.00.c", 0x03}, {"6.00.a", 0x04}, {"6.00.b", 0x06}, {"7.00.a", 0x05}, {"7.00.b", 0x07}, {"7.10.a", 0x08}, {"7.10.b", 0x09}, {"7.10.c", 0x0a}, {"7.10.d", 0x0b}, {"7.20.a", 0x0c}, {"7.20.b", 0x0d}, {"7.20.c", 0x0e}, {"7.20.d", 0x0f}, {"7.30.a", 0x10}, {NULL, 0}, }; /* * FIXME Not sure if the actual key is defined by Xilinx in the PVR */ const struct family_string_key family_string_lookup[] = { {"virtex2", 0x4}, {"virtex2pro", 0x5}, {"spartan3", 0x6}, {"virtex4", 0x7}, {"virtex5", 0x8}, {"spartan3e", 0x9}, {"spartan3a", 0xa}, {"spartan3an", 0xb}, {"spartan3adsp", 0xc}, {"spartan6", 0xd}, {"virtex6", 0xe}, /* FIXME There is no key code defined for spartan2 */ {"spartan2", 0xf0}, {NULL, 0}, }; struct cpuinfo cpuinfo; void __init setup_cpuinfo(void) { struct device_node *cpu = NULL; cpu = (struct device_node *) of_find_node_by_type(NULL, "cpu"); if (!cpu) printk(KERN_ERR "You don't have cpu!!!\n"); printk(KERN_INFO "%s: initialising\n", __func__); switch (cpu_has_pvr()) { case 0: printk(KERN_WARNING "%s: No PVR support. 
Using static CPU info from FDT\n", __func__); set_cpuinfo_static(&cpuinfo, cpu); break; /* FIXME I found weird behavior with MB 7.00.a/b 7.10.a * please do not use FULL PVR with MMU */ case 1: printk(KERN_INFO "%s: Using full CPU PVR support\n", __func__); set_cpuinfo_static(&cpuinfo, cpu); set_cpuinfo_pvr_full(&cpuinfo, cpu); break; default: printk(KERN_WARNING "%s: Unsupported PVR setting\n", __func__); set_cpuinfo_static(&cpuinfo, cpu); } }
gpl-2.0
virtuous/kernel-vivow-gingerbread-v2
arch/arm/mach-clps711x/p720t.c
1618
3079
/* * linux/arch/arm/mach-clps711x/p720t.c * * Copyright (C) 2000-2001 Deep Blue Solutions Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/setup.h> #include <asm/sizes.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/syspld.h> #include "common.h" /* * Map the P720T system PLD. It occupies two address spaces: * SYSPLD_PHYS_BASE and SYSPLD_PHYS_BASE + 0x00400000 * We map both here. */ static struct map_desc p720t_io_desc[] __initdata = { { .virtual = SYSPLD_VIRT_BASE, .pfn = __phys_to_pfn(SYSPLD_PHYS_BASE), .length = SZ_1M, .type = MT_DEVICE }, { .virtual = 0xfe400000, .pfn = __phys_to_pfn(0x10400000), .length = SZ_1M, .type = MT_DEVICE } }; static void __init fixup_p720t(struct machine_desc *desc, struct tag *tag, char **cmdline, struct meminfo *mi) { /* * Our bootloader doesn't setup any tags (yet). 
*/ if (tag->hdr.tag != ATAG_CORE) { tag->hdr.tag = ATAG_CORE; tag->hdr.size = tag_size(tag_core); tag->u.core.flags = 0; tag->u.core.pagesize = PAGE_SIZE; tag->u.core.rootdev = 0x0100; tag = tag_next(tag); tag->hdr.tag = ATAG_MEM; tag->hdr.size = tag_size(tag_mem32); tag->u.mem.size = 4096; tag->u.mem.start = PHYS_OFFSET; tag = tag_next(tag); tag->hdr.tag = ATAG_NONE; tag->hdr.size = 0; } } static void __init p720t_map_io(void) { clps711x_map_io(); iotable_init(p720t_io_desc, ARRAY_SIZE(p720t_io_desc)); } MACHINE_START(P720T, "ARM-Prospector720T") /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */ .phys_io = 0x80000000, .io_pg_offst = ((0xff000000) >> 18) & 0xfffc, .boot_params = 0xc0000100, .fixup = fixup_p720t, .map_io = p720t_map_io, .init_irq = clps711x_init_irq, .timer = &clps711x_timer, MACHINE_END static int p720t_hw_init(void) { /* * Power down as much as possible in case we don't * have the drivers loaded. */ PLD_LCDEN = 0; PLD_PWR &= ~(PLD_S4_ON|PLD_S3_ON|PLD_S2_ON|PLD_S1_ON); PLD_KBD = 0; PLD_IO = 0; PLD_IRDA = 0; PLD_CODEC = 0; PLD_TCH = 0; PLD_SPI = 0; #ifndef CONFIG_DEBUG_LL PLD_COM2 = 0; PLD_COM1 = 0; #endif return 0; } __initcall(p720t_hw_init);
gpl-2.0
isogai/test
third-party/zlib-1.2.3/trees.c
1874
44027
/* trees.c -- output deflated data using Huffman coding * Copyright (C) 1995-2005 Jean-loup Gailly * For conditions of distribution and use, see copyright notice in zlib.h */ /* * ALGORITHM * * The "deflation" process uses several Huffman trees. The more * common source values are represented by shorter bit sequences. * * Each code tree is stored in a compressed form which is itself * a Huffman encoding of the lengths of all the code strings (in * ascending order by source values). The actual code strings are * reconstructed from the lengths in the inflate process, as described * in the deflate specification. * * REFERENCES * * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification". * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc * * Storer, James A. * Data Compression: Methods and Theory, pp. 49-50. * Computer Science Press, 1988. ISBN 0-7167-8156-5. * * Sedgewick, R. * Algorithms, p290. * Addison-Wesley, 1983. ISBN 0-201-06672-6. */ /* @(#) $Id$ */ /* #define GEN_TREES_H */ #include "deflate.h" #ifdef DEBUG # include <ctype.h> #endif /* =========================================================================== * Constants */ #define MAX_BL_BITS 7 /* Bit length codes must not exceed MAX_BL_BITS bits */ #define END_BLOCK 256 /* end of block literal code */ #define REP_3_6 16 /* repeat previous bit length 3-6 times (2 bits of repeat count) */ #define REPZ_3_10 17 /* repeat a zero length 3-10 times (3 bits of repeat count) */ #define REPZ_11_138 18 /* repeat a zero length 11-138 times (7 bits of repeat count) */ local const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0}; local const int extra_dbits[D_CODES] /* extra bits for each distance code */ = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; local const int extra_blbits[BL_CODES]/* extra bits for each bit length code */ = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7}; local const uch 
bl_order[BL_CODES] = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15}; /* The lengths of the bit length codes are sent in order of decreasing * probability, to avoid transmitting the lengths for unused bit length codes. */ #define Buf_size (8 * 2*sizeof(char)) /* Number of bits used within bi_buf. (bi_buf might be implemented on * more than 16 bits on some systems.) */ /* =========================================================================== * Local data. These are initialized only once. */ #define DIST_CODE_LEN 512 /* see definition of array dist_code below */ #if defined(GEN_TREES_H) || !defined(STDC) /* non ANSI compilers may not accept trees.h */ local ct_data static_ltree[L_CODES+2]; /* The static literal tree. Since the bit lengths are imposed, there is no * need for the L_CODES extra codes used during heap construction. However * The codes 286 and 287 are needed to build a canonical tree (see _tr_init * below). */ local ct_data static_dtree[D_CODES]; /* The static distance tree. (Actually a trivial tree since all codes use * 5 bits.) */ uch _dist_code[DIST_CODE_LEN]; /* Distance codes. The first 256 values correspond to the distances * 3 .. 258, the last 256 values correspond to the top 8 bits of * the 15 bit distances. 
*/ uch _length_code[MAX_MATCH-MIN_MATCH+1]; /* length code for each normalized match length (0 == MIN_MATCH) */ local int base_length[LENGTH_CODES]; /* First normalized length for each code (0 = MIN_MATCH) */ local int base_dist[D_CODES]; /* First normalized distance for each code (0 = distance of 1) */ #else # include "trees.h" #endif /* GEN_TREES_H */ struct static_tree_desc_s { const ct_data *static_tree; /* static tree or NULL */ const intf *extra_bits; /* extra bits for each code or NULL */ int extra_base; /* base index for extra_bits */ int elems; /* max number of elements in the tree */ int max_length; /* max bit length for the codes */ }; local static_tree_desc static_l_desc = {static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; local static_tree_desc static_d_desc = {static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; local static_tree_desc static_bl_desc = {(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; /* =========================================================================== * Local (static) routines in this file. 
*/ local void tr_static_init OF((void)); local void init_block OF((deflate_state *s)); local void pqdownheap OF((deflate_state *s, ct_data *tree, int k)); local void gen_bitlen OF((deflate_state *s, tree_desc *desc)); local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count)); local void build_tree OF((deflate_state *s, tree_desc *desc)); local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code)); local void send_tree OF((deflate_state *s, ct_data *tree, int max_code)); local int build_bl_tree OF((deflate_state *s)); local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes, int blcodes)); local void compress_block OF((deflate_state *s, ct_data *ltree, ct_data *dtree)); local void set_data_type OF((deflate_state *s)); local unsigned bi_reverse OF((unsigned value, int length)); local void bi_windup OF((deflate_state *s)); local void bi_flush OF((deflate_state *s)); local void copy_block OF((deflate_state *s, charf *buf, unsigned len, int header)); #ifdef GEN_TREES_H local void gen_trees_header OF((void)); #endif #ifndef DEBUG # define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len) /* Send a code of the given tree. c and tree must not have side effects */ #else /* DEBUG */ # define send_code(s, c, tree) \ { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \ send_bits(s, tree[c].Code, tree[c].Len); } #endif /* =========================================================================== * Output a short LSB first on the stream. * IN assertion: there is enough room in pendingBuf. */ #define put_short(s, w) { \ put_byte(s, (uch)((w) & 0xff)); \ put_byte(s, (uch)((ush)(w) >> 8)); \ } /* =========================================================================== * Send a value on a given number of bits. * IN assertion: length <= 16 and value fits in length bits. 
*/ #ifdef DEBUG local void send_bits OF((deflate_state *s, int value, int length)); local void send_bits(s, value, length) deflate_state *s; int value; /* value to send */ int length; /* number of bits */ { Tracevv((stderr," l %2d v %4x ", length, value)); Assert(length > 0 && length <= 15, "invalid length"); s->bits_sent += (ulg)length; /* If not enough room in bi_buf, use (valid) bits from bi_buf and * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid)) * unused bits in value. */ if (s->bi_valid > (int)Buf_size - length) { s->bi_buf |= (value << s->bi_valid); put_short(s, s->bi_buf); s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); s->bi_valid += length - Buf_size; } else { s->bi_buf |= value << s->bi_valid; s->bi_valid += length; } } #else /* !DEBUG */ #define send_bits(s, value, length) \ { int len = length;\ if (s->bi_valid > (int)Buf_size - len) {\ int val = value;\ s->bi_buf |= (val << s->bi_valid);\ put_short(s, s->bi_buf);\ s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\ s->bi_valid += len - Buf_size;\ } else {\ s->bi_buf |= (value) << s->bi_valid;\ s->bi_valid += len;\ }\ } #endif /* DEBUG */ /* the arguments must not have side effects */ /* =========================================================================== * Initialize the various 'constant' tables. 
*/ local void tr_static_init() { #if defined(GEN_TREES_H) || !defined(STDC) static int static_init_done = 0; int n; /* iterates over tree elements */ int bits; /* bit counter */ int length; /* length value */ int code; /* code value */ int dist; /* distance index */ ush bl_count[MAX_BITS+1]; /* number of codes at each bit length for an optimal tree */ if (static_init_done) return; /* For some embedded targets, global variables are not initialized: */ static_l_desc.static_tree = static_ltree; static_l_desc.extra_bits = extra_lbits; static_d_desc.static_tree = static_dtree; static_d_desc.extra_bits = extra_dbits; static_bl_desc.extra_bits = extra_blbits; /* Initialize the mapping length (0..255) -> length code (0..28) */ length = 0; for (code = 0; code < LENGTH_CODES-1; code++) { base_length[code] = length; for (n = 0; n < (1<<extra_lbits[code]); n++) { _length_code[length++] = (uch)code; } } Assert (length == 256, "tr_static_init: length != 256"); /* Note that the length 255 (match length 258) can be represented * in two different ways: code 284 + 5 bits or code 285, so we * overwrite length_code[255] to use the best encoding: */ _length_code[length-1] = (uch)code; /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ dist = 0; for (code = 0 ; code < 16; code++) { base_dist[code] = dist; for (n = 0; n < (1<<extra_dbits[code]); n++) { _dist_code[dist++] = (uch)code; } } Assert (dist == 256, "tr_static_init: dist != 256"); dist >>= 7; /* from now on, all distances are divided by 128 */ for ( ; code < D_CODES; code++) { base_dist[code] = dist << 7; for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) { _dist_code[256 + dist++] = (uch)code; } } Assert (dist == 256, "tr_static_init: 256+dist != 512"); /* Construct the codes of the static literal tree */ for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0; n = 0; while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++; while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++; while (n <= 279) 
static_ltree[n++].Len = 7, bl_count[7]++; while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++; /* Codes 286 and 287 do not exist, but we must include them in the * tree construction to get a canonical Huffman tree (longest code * all ones) */ gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count); /* The static distance tree is trivial: */ for (n = 0; n < D_CODES; n++) { static_dtree[n].Len = 5; static_dtree[n].Code = bi_reverse((unsigned)n, 5); } static_init_done = 1; # ifdef GEN_TREES_H gen_trees_header(); # endif #endif /* defined(GEN_TREES_H) || !defined(STDC) */ } /* =========================================================================== * Genererate the file trees.h describing the static trees. */ #ifdef GEN_TREES_H # ifndef DEBUG # include <stdio.h> # endif # define SEPARATOR(i, last, width) \ ((i) == (last)? "\n};\n\n" : \ ((i) % (width) == (width)-1 ? ",\n" : ", ")) void gen_trees_header() { FILE *header = fopen("trees.h", "w"); int i; Assert (header != NULL, "Can't open trees.h"); fprintf(header, "/* header created automatically with -DGEN_TREES_H */\n\n"); fprintf(header, "local const ct_data static_ltree[L_CODES+2] = {\n"); for (i = 0; i < L_CODES+2; i++) { fprintf(header, "{{%3u},{%3u}}%s", static_ltree[i].Code, static_ltree[i].Len, SEPARATOR(i, L_CODES+1, 5)); } fprintf(header, "local const ct_data static_dtree[D_CODES] = {\n"); for (i = 0; i < D_CODES; i++) { fprintf(header, "{{%2u},{%2u}}%s", static_dtree[i].Code, static_dtree[i].Len, SEPARATOR(i, D_CODES-1, 5)); } fprintf(header, "const uch _dist_code[DIST_CODE_LEN] = {\n"); for (i = 0; i < DIST_CODE_LEN; i++) { fprintf(header, "%2u%s", _dist_code[i], SEPARATOR(i, DIST_CODE_LEN-1, 20)); } fprintf(header, "const uch _length_code[MAX_MATCH-MIN_MATCH+1]= {\n"); for (i = 0; i < MAX_MATCH-MIN_MATCH+1; i++) { fprintf(header, "%2u%s", _length_code[i], SEPARATOR(i, MAX_MATCH-MIN_MATCH, 20)); } fprintf(header, "local const int base_length[LENGTH_CODES] = {\n"); for (i = 0; i < LENGTH_CODES; i++) 
{ fprintf(header, "%1u%s", base_length[i], SEPARATOR(i, LENGTH_CODES-1, 20)); } fprintf(header, "local const int base_dist[D_CODES] = {\n"); for (i = 0; i < D_CODES; i++) { fprintf(header, "%5u%s", base_dist[i], SEPARATOR(i, D_CODES-1, 10)); } fclose(header); } #endif /* GEN_TREES_H */ /* =========================================================================== * Initialize the tree data structures for a new zlib stream. */ void _tr_init(s) deflate_state *s; { tr_static_init(); s->l_desc.dyn_tree = s->dyn_ltree; s->l_desc.stat_desc = &static_l_desc; s->d_desc.dyn_tree = s->dyn_dtree; s->d_desc.stat_desc = &static_d_desc; s->bl_desc.dyn_tree = s->bl_tree; s->bl_desc.stat_desc = &static_bl_desc; s->bi_buf = 0; s->bi_valid = 0; s->last_eob_len = 8; /* enough lookahead for inflate */ #ifdef DEBUG s->compressed_len = 0L; s->bits_sent = 0L; #endif /* Initialize the first block of the first file: */ init_block(s); } /* =========================================================================== * Initialize a new block. */ local void init_block(s) deflate_state *s; { int n; /* iterates over tree elements */ /* Initialize the trees. */ for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0; for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0; for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0; s->dyn_ltree[END_BLOCK].Freq = 1; s->opt_len = s->static_len = 0L; s->last_lit = s->matches = 0; } #define SMALLEST 1 /* Index within the heap array of least frequent node in the Huffman tree */ /* =========================================================================== * Remove the smallest element from the heap and recreate the heap with * one less element. Updates heap and heap_len. 
*/ #define pqremove(s, tree, top) \ {\ top = s->heap[SMALLEST]; \ s->heap[SMALLEST] = s->heap[s->heap_len--]; \ pqdownheap(s, tree, SMALLEST); \ } /* =========================================================================== * Compares to subtrees, using the tree depth as tie breaker when * the subtrees have equal frequency. This minimizes the worst case length. */ #define smaller(tree, n, m, depth) \ (tree[n].Freq < tree[m].Freq || \ (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m])) /* =========================================================================== * Restore the heap property by moving down the tree starting at node k, * exchanging a node with the smallest of its two sons if necessary, stopping * when the heap property is re-established (each father smaller than its * two sons). */ local void pqdownheap(s, tree, k) deflate_state *s; ct_data *tree; /* the tree to restore */ int k; /* node to move down */ { int v = s->heap[k]; int j = k << 1; /* left son of k */ while (j <= s->heap_len) { /* Set j to the smallest of the two sons: */ if (j < s->heap_len && smaller(tree, s->heap[j+1], s->heap[j], s->depth)) { j++; } /* Exit if v is smaller than both sons */ if (smaller(tree, v, s->heap[j], s->depth)) break; /* Exchange v with the smallest son */ s->heap[k] = s->heap[j]; k = j; /* And continue down the tree, setting j to the left son of k */ j <<= 1; } s->heap[k] = v; } /* =========================================================================== * Compute the optimal bit lengths for a tree and update the total bit length * for the current block. * IN assertion: the fields freq and dad are set, heap[heap_max] and * above are the tree nodes sorted by increasing frequency. * OUT assertions: the field len is set to the optimal bit length, the * array bl_count contains the frequencies for each bit length. * The length opt_len is updated; static_len is also updated if stree is * not null. 
*/ local void gen_bitlen(s, desc) deflate_state *s; tree_desc *desc; /* the tree descriptor */ { ct_data *tree = desc->dyn_tree; int max_code = desc->max_code; const ct_data *stree = desc->stat_desc->static_tree; const intf *extra = desc->stat_desc->extra_bits; int base = desc->stat_desc->extra_base; int max_length = desc->stat_desc->max_length; int h; /* heap index */ int n, m; /* iterate over the tree elements */ int bits; /* bit length */ int xbits; /* extra bits */ ush f; /* frequency */ int overflow = 0; /* number of elements with bit length too large */ for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0; /* In a first pass, compute the optimal bit lengths (which may * overflow in the case of the bit length tree). */ tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */ for (h = s->heap_max+1; h < HEAP_SIZE; h++) { n = s->heap[h]; bits = tree[tree[n].Dad].Len + 1; if (bits > max_length) bits = max_length, overflow++; tree[n].Len = (ush)bits; /* We overwrite tree[n].Dad which is no longer needed */ if (n > max_code) continue; /* not a leaf node */ s->bl_count[bits]++; xbits = 0; if (n >= base) xbits = extra[n-base]; f = tree[n].Freq; s->opt_len += (ulg)f * (bits + xbits); if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits); } if (overflow == 0) return; Trace((stderr,"\nbit length overflow\n")); /* This happens for example on obj2 and pic of the Calgary corpus */ /* Find the first bit length which could increase: */ do { bits = max_length-1; while (s->bl_count[bits] == 0) bits--; s->bl_count[bits]--; /* move one leaf down the tree */ s->bl_count[bits+1] += 2; /* move one overflow item as its brother */ s->bl_count[max_length]--; /* The brother of the overflow item also moves one step up, * but this does not affect bl_count[max_length] */ overflow -= 2; } while (overflow > 0); /* Now recompute all bit lengths, scanning in increasing frequency. * h is still equal to HEAP_SIZE. 
(It is simpler to reconstruct all * lengths instead of fixing only the wrong ones. This idea is taken * from 'ar' written by Haruhiko Okumura.) */ for (bits = max_length; bits != 0; bits--) { n = s->bl_count[bits]; while (n != 0) { m = s->heap[--h]; if (m > max_code) continue; if ((unsigned) tree[m].Len != (unsigned) bits) { Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); s->opt_len += ((long)bits - (long)tree[m].Len) *(long)tree[m].Freq; tree[m].Len = (ush)bits; } n--; } } } /* =========================================================================== * Generate the codes for a given tree and bit counts (which need not be * optimal). * IN assertion: the array bl_count contains the bit length statistics for * the given tree and the field len is set for all tree elements. * OUT assertion: the field code is set for all tree elements of non * zero code length. */ local void gen_codes (tree, max_code, bl_count) ct_data *tree; /* the tree to decorate */ int max_code; /* largest code with non zero frequency */ ushf *bl_count; /* number of codes at each bit length */ { ush next_code[MAX_BITS+1]; /* next code value for each bit length */ ush code = 0; /* running code value */ int bits; /* bit index */ int n; /* code index */ /* The distribution counts are first used to generate the code values * without bit reversal. */ for (bits = 1; bits <= MAX_BITS; bits++) { next_code[bits] = code = (code + bl_count[bits-1]) << 1; } /* Check that the bit counts in bl_count are consistent. The last code * must be all ones. */ Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1, "inconsistent bit counts"); Tracev((stderr,"\ngen_codes: max_code %d ", max_code)); for (n = 0; n <= max_code; n++) { int len = tree[n].Len; if (len == 0) continue; /* Now reverse the bits */ tree[n].Code = bi_reverse(next_code[len]++, len); Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ", n, (isgraph(n) ? 
n : ' '), len, tree[n].Code, next_code[len]-1)); } } /* =========================================================================== * Construct one Huffman tree and assigns the code bit strings and lengths. * Update the total bit length for the current block. * IN assertion: the field freq is set for all tree elements. * OUT assertions: the fields len and code are set to the optimal bit length * and corresponding code. The length opt_len is updated; static_len is * also updated if stree is not null. The field max_code is set. */ local void build_tree(s, desc) deflate_state *s; tree_desc *desc; /* the tree descriptor */ { ct_data *tree = desc->dyn_tree; const ct_data *stree = desc->stat_desc->static_tree; int elems = desc->stat_desc->elems; int n, m; /* iterate over heap elements */ int max_code = -1; /* largest code with non zero frequency */ int node; /* new node being created */ /* Construct the initial heap, with least frequent element in * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. * heap[0] is not used. */ s->heap_len = 0, s->heap_max = HEAP_SIZE; for (n = 0; n < elems; n++) { if (tree[n].Freq != 0) { s->heap[++(s->heap_len)] = max_code = n; s->depth[n] = 0; } else { tree[n].Len = 0; } } /* The pkzip format requires that at least one distance code exists, * and that at least one bit should be sent even if there is only one * possible code. So to avoid special checks later on we force at least * two codes of non zero frequency. */ while (s->heap_len < 2) { node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0); tree[node].Freq = 1; s->depth[node] = 0; s->opt_len--; if (stree) s->static_len -= stree[node].Len; /* node is 0 or 1 so it does not have extra bits */ } desc->max_code = max_code; /* The elements heap[heap_len/2+1 .. 
heap_len] are leaves of the tree, * establish sub-heaps of increasing lengths: */ for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n); /* Construct the Huffman tree by repeatedly combining the least two * frequent nodes. */ node = elems; /* next internal node of the tree */ do { pqremove(s, tree, n); /* n = node of least frequency */ m = s->heap[SMALLEST]; /* m = node of next least frequency */ s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */ s->heap[--(s->heap_max)] = m; /* Create a new node father of n and m */ tree[node].Freq = tree[n].Freq + tree[m].Freq; s->depth[node] = (uch)((s->depth[n] >= s->depth[m] ? s->depth[n] : s->depth[m]) + 1); tree[n].Dad = tree[m].Dad = (ush)node; #ifdef DUMP_BL_TREE if (tree == s->bl_tree) { fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)", node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq); } #endif /* and insert the new node in the heap */ s->heap[SMALLEST] = node++; pqdownheap(s, tree, SMALLEST); } while (s->heap_len >= 2); s->heap[--(s->heap_max)] = s->heap[SMALLEST]; /* At this point, the fields freq and dad are set. We can now * generate the bit lengths. */ gen_bitlen(s, (tree_desc *)desc); /* The field len is now set, we can generate the bit codes */ gen_codes ((ct_data *)tree, max_code, s->bl_count); } /* =========================================================================== * Scan a literal or distance tree to determine the frequencies of the codes * in the bit length tree. 
*/ local void scan_tree (s, tree, max_code) deflate_state *s; ct_data *tree; /* the tree to be scanned */ int max_code; /* and its largest code of non zero frequency */ { int n; /* iterates over all tree elements */ int prevlen = -1; /* last emitted length */ int curlen; /* length of current code */ int nextlen = tree[0].Len; /* length of next code */ int count = 0; /* repeat count of the current code */ int max_count = 7; /* max repeat count */ int min_count = 4; /* min repeat count */ if (nextlen == 0) max_count = 138, min_count = 3; tree[max_code+1].Len = (ush)0xffff; /* guard */ for (n = 0; n <= max_code; n++) { curlen = nextlen; nextlen = tree[n+1].Len; if (++count < max_count && curlen == nextlen) { continue; } else if (count < min_count) { s->bl_tree[curlen].Freq += count; } else if (curlen != 0) { if (curlen != prevlen) s->bl_tree[curlen].Freq++; s->bl_tree[REP_3_6].Freq++; } else if (count <= 10) { s->bl_tree[REPZ_3_10].Freq++; } else { s->bl_tree[REPZ_11_138].Freq++; } count = 0; prevlen = curlen; if (nextlen == 0) { max_count = 138, min_count = 3; } else if (curlen == nextlen) { max_count = 6, min_count = 3; } else { max_count = 7, min_count = 4; } } } /* =========================================================================== * Send a literal or distance tree in compressed form, using the codes in * bl_tree. 
*/ local void send_tree (s, tree, max_code) deflate_state *s; ct_data *tree; /* the tree to be scanned */ int max_code; /* and its largest code of non zero frequency */ { int n; /* iterates over all tree elements */ int prevlen = -1; /* last emitted length */ int curlen; /* length of current code */ int nextlen = tree[0].Len; /* length of next code */ int count = 0; /* repeat count of the current code */ int max_count = 7; /* max repeat count */ int min_count = 4; /* min repeat count */ /* tree[max_code+1].Len = -1; */ /* guard already set */ if (nextlen == 0) max_count = 138, min_count = 3; for (n = 0; n <= max_code; n++) { curlen = nextlen; nextlen = tree[n+1].Len; if (++count < max_count && curlen == nextlen) { continue; } else if (count < min_count) { do { send_code(s, curlen, s->bl_tree); } while (--count != 0); } else if (curlen != 0) { if (curlen != prevlen) { send_code(s, curlen, s->bl_tree); count--; } Assert(count >= 3 && count <= 6, " 3_6?"); send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2); } else if (count <= 10) { send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3); } else { send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7); } count = 0; prevlen = curlen; if (nextlen == 0) { max_count = 138, min_count = 3; } else if (curlen == nextlen) { max_count = 6, min_count = 3; } else { max_count = 7, min_count = 4; } } } /* =========================================================================== * Construct the Huffman tree for the bit lengths and return the index in * bl_order of the last bit length code to send. 
*/ local int build_bl_tree(s) deflate_state *s; { int max_blindex; /* index of last bit length code of non zero freq */ /* Determine the bit length frequencies for literal and distance trees */ scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code); scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code); /* Build the bit length tree: */ build_tree(s, (tree_desc *)(&(s->bl_desc))); /* opt_len now includes the length of the tree representations, except * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. */ /* Determine the number of bit length codes to send. The pkzip format * requires that at least 4 bit length codes be sent. (appnote.txt says * 3 but the actual value used is 4.) */ for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) { if (s->bl_tree[bl_order[max_blindex]].Len != 0) break; } /* Update opt_len to include the bit length tree and counts */ s->opt_len += 3*(max_blindex+1) + 5+5+4; Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", s->opt_len, s->static_len)); return max_blindex; } /* =========================================================================== * Send the header for a block using dynamic Huffman trees: the counts, the * lengths of the bit length codes, the literal tree and the distance tree. * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. 
*/ local void send_all_trees(s, lcodes, dcodes, blcodes) deflate_state *s; int lcodes, dcodes, blcodes; /* number of codes for each tree */ { int rank; /* index in bl_order */ Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, "too many codes"); Tracev((stderr, "\nbl counts: ")); send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */ send_bits(s, dcodes-1, 5); send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */ for (rank = 0; rank < blcodes; rank++) { Tracev((stderr, "\nbl code %2d ", bl_order[rank])); send_bits(s, s->bl_tree[bl_order[rank]].Len, 3); } Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */ Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */ Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); } /* =========================================================================== * Send a stored block */ void _tr_stored_block(s, buf, stored_len, eof) deflate_state *s; charf *buf; /* input block */ ulg stored_len; /* length of input block */ int eof; /* true if this is the last block for a file */ { send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */ #ifdef DEBUG s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L; s->compressed_len += (stored_len + 4) << 3; #endif copy_block(s, buf, (unsigned)stored_len, 1); /* with header */ } /* =========================================================================== * Send one empty static block to give enough lookahead for inflate. * This takes 10 bits, of which 7 may remain in the bit buffer. * The current inflate code requires 9 bits of lookahead. If the * last two codes for the previous block (real code plus EOB) were coded * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode * the last real code. 
In this case we send two empty static blocks instead * of one. (There are no problems if the previous block is stored or fixed.) * To simplify the code, we assume the worst case of last real code encoded * on one bit only. */ void _tr_align(s) deflate_state *s; { send_bits(s, STATIC_TREES<<1, 3); send_code(s, END_BLOCK, static_ltree); #ifdef DEBUG s->compressed_len += 10L; /* 3 for block type, 7 for EOB */ #endif bi_flush(s); /* Of the 10 bits for the empty block, we have already sent * (10 - bi_valid) bits. The lookahead for the last real code (before * the EOB of the previous block) was thus at least one plus the length * of the EOB plus what we have just sent of the empty static block. */ if (1 + s->last_eob_len + 10 - s->bi_valid < 9) { send_bits(s, STATIC_TREES<<1, 3); send_code(s, END_BLOCK, static_ltree); #ifdef DEBUG s->compressed_len += 10L; #endif bi_flush(s); } s->last_eob_len = 7; } /* =========================================================================== * Determine the best encoding for the current block: dynamic trees, static * trees or store, and output the encoded block to the zip file. 
*/ void _tr_flush_block(s, buf, stored_len, eof) deflate_state *s; charf *buf; /* input block, or NULL if too old */ ulg stored_len; /* length of input block */ int eof; /* true if this is the last block for a file */ { ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */ int max_blindex = 0; /* index of last bit length code of non zero freq */ /* Build the Huffman trees unless a stored block is forced */ if (s->level > 0) { /* Check if the file is binary or text */ if (stored_len > 0 && s->strm->data_type == Z_UNKNOWN) set_data_type(s); /* Construct the literal and distance trees */ build_tree(s, (tree_desc *)(&(s->l_desc))); Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, s->static_len)); build_tree(s, (tree_desc *)(&(s->d_desc))); Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, s->static_len)); /* At this point, opt_len and static_len are the total bit lengths of * the compressed block data, excluding the tree representations. */ /* Build the bit length tree for the above two trees, and get the index * in bl_order of the last bit length code to send. */ max_blindex = build_bl_tree(s); /* Determine the best encoding. Compute the block lengths in bytes. */ opt_lenb = (s->opt_len+3+7)>>3; static_lenb = (s->static_len+3+7)>>3; Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, s->last_lit)); if (static_lenb <= opt_lenb) opt_lenb = static_lenb; } else { Assert(buf != (char*)0, "lost buf"); opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ } #ifdef FORCE_STORED if (buf != (char*)0) { /* force stored block */ #else if (stored_len+4 <= opt_lenb && buf != (char*)0) { /* 4: two words for the lengths */ #endif /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. * Otherwise we can't have processed more than WSIZE input bytes since * the last block flush, because compression would have been * successful. 
If LIT_BUFSIZE <= WSIZE, it is never too late to * transform a block into a stored block. */ _tr_stored_block(s, buf, stored_len, eof); #ifdef FORCE_STATIC } else if (static_lenb >= 0) { /* force static trees */ #else } else if (s->strategy == Z_FIXED || static_lenb == opt_lenb) { #endif send_bits(s, (STATIC_TREES<<1)+eof, 3); compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree); #ifdef DEBUG s->compressed_len += 3 + s->static_len; #endif } else { send_bits(s, (DYN_TREES<<1)+eof, 3); send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1, max_blindex+1); compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree); #ifdef DEBUG s->compressed_len += 3 + s->opt_len; #endif } Assert (s->compressed_len == s->bits_sent, "bad compressed size"); /* The above check is made mod 2^32, for files larger than 512 MB * and uLong implemented on 32 bits. */ init_block(s); if (eof) { bi_windup(s); #ifdef DEBUG s->compressed_len += 7; /* align on byte boundary */ #endif } Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, s->compressed_len-7*eof)); } /* =========================================================================== * Save the match info and tally the frequency counts. Return true if * the current block must be flushed. 
*/ int _tr_tally (s, dist, lc) deflate_state *s; unsigned dist; /* distance of matched string */ unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ { s->d_buf[s->last_lit] = (ush)dist; s->l_buf[s->last_lit++] = (uch)lc; if (dist == 0) { /* lc is the unmatched char */ s->dyn_ltree[lc].Freq++; } else { s->matches++; /* Here, lc is the match length - MIN_MATCH */ dist--; /* dist = match distance - 1 */ Assert((ush)dist < (ush)MAX_DIST(s) && (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++; s->dyn_dtree[d_code(dist)].Freq++; } #ifdef TRUNCATE_BLOCK /* Try to guess if it is profitable to stop the current block here */ if ((s->last_lit & 0x1fff) == 0 && s->level > 2) { /* Compute an upper bound for the compressed length */ ulg out_length = (ulg)s->last_lit*8L; ulg in_length = (ulg)((long)s->strstart - s->block_start); int dcode; for (dcode = 0; dcode < D_CODES; dcode++) { out_length += (ulg)s->dyn_dtree[dcode].Freq * (5L+extra_dbits[dcode]); } out_length >>= 3; Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", s->last_lit, in_length, out_length, 100L - out_length*100L/in_length)); if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; } #endif return (s->last_lit == s->lit_bufsize-1); /* We avoid equality with lit_bufsize because of wraparound at 64K * on 16 bit machines and because stored blocks are restricted to * 64K-1 bytes. 
*/ } /* =========================================================================== * Send the block data compressed using the given Huffman trees */ local void compress_block(s, ltree, dtree) deflate_state *s; ct_data *ltree; /* literal tree */ ct_data *dtree; /* distance tree */ { unsigned dist; /* distance of matched string */ int lc; /* match length or unmatched char (if dist == 0) */ unsigned lx = 0; /* running index in l_buf */ unsigned code; /* the code to send */ int extra; /* number of extra bits to send */ if (s->last_lit != 0) do { dist = s->d_buf[lx]; lc = s->l_buf[lx++]; if (dist == 0) { send_code(s, lc, ltree); /* send a literal byte */ Tracecv(isgraph(lc), (stderr," '%c' ", lc)); } else { /* Here, lc is the match length - MIN_MATCH */ code = _length_code[lc]; send_code(s, code+LITERALS+1, ltree); /* send the length code */ extra = extra_lbits[code]; if (extra != 0) { lc -= base_length[code]; send_bits(s, lc, extra); /* send the extra length bits */ } dist--; /* dist is now the match distance - 1 */ code = d_code(dist); Assert (code < D_CODES, "bad d_code"); send_code(s, code, dtree); /* send the distance code */ extra = extra_dbits[code]; if (extra != 0) { dist -= base_dist[code]; send_bits(s, dist, extra); /* send the extra distance bits */ } } /* literal or match pair ? */ /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, "pendingBuf overflow"); } while (lx < s->last_lit); send_code(s, END_BLOCK, ltree); s->last_eob_len = ltree[END_BLOCK].Len; } /* =========================================================================== * Set the data type to BINARY or TEXT, using a crude approximation: * set it to Z_TEXT if all symbols are either printable characters (33 to 255) * or white spaces (9 to 13, or 32); or set it to Z_BINARY otherwise. * IN assertion: the fields Freq of dyn_ltree are set. 
*/ local void set_data_type(s) deflate_state *s; { int n; for (n = 0; n < 9; n++) if (s->dyn_ltree[n].Freq != 0) break; if (n == 9) for (n = 14; n < 32; n++) if (s->dyn_ltree[n].Freq != 0) break; s->strm->data_type = (n == 32) ? Z_TEXT : Z_BINARY; } /* =========================================================================== * Reverse the first len bits of a code, using straightforward code (a faster * method would use a table) * IN assertion: 1 <= len <= 15 */ local unsigned bi_reverse(code, len) unsigned code; /* the value to invert */ int len; /* its bit length */ { register unsigned res = 0; do { res |= code & 1; code >>= 1, res <<= 1; } while (--len > 0); return res >> 1; } /* =========================================================================== * Flush the bit buffer, keeping at most 7 bits in it. */ local void bi_flush(s) deflate_state *s; { if (s->bi_valid == 16) { put_short(s, s->bi_buf); s->bi_buf = 0; s->bi_valid = 0; } else if (s->bi_valid >= 8) { put_byte(s, (Byte)s->bi_buf); s->bi_buf >>= 8; s->bi_valid -= 8; } } /* =========================================================================== * Flush the bit buffer and align the output on a byte boundary */ local void bi_windup(s) deflate_state *s; { if (s->bi_valid > 8) { put_short(s, s->bi_buf); } else if (s->bi_valid > 0) { put_byte(s, (Byte)s->bi_buf); } s->bi_buf = 0; s->bi_valid = 0; #ifdef DEBUG s->bits_sent = (s->bits_sent+7) & ~7; #endif } /* =========================================================================== * Copy a stored block, storing first the length and its * one's complement if requested. 
*/ local void copy_block(s, buf, len, header) deflate_state *s; charf *buf; /* the input data */ unsigned len; /* its length */ int header; /* true if block header must be written */ { bi_windup(s); /* align on byte boundary */ s->last_eob_len = 8; /* enough lookahead for inflate */ if (header) { put_short(s, (ush)len); put_short(s, (ush)~len); #ifdef DEBUG s->bits_sent += 2*16; #endif } #ifdef DEBUG s->bits_sent += (ulg)len<<3; #endif while (len--) { put_byte(s, *buf++); } }
gpl-2.0
leyarx/android_kernel_wexler_qc750
arch/mips/pci/msi-octeon.c
2898
12537
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2005-2009, 2010 Cavium Networks */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/msi.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-npi-defs.h> #include <asm/octeon/cvmx-pci-defs.h> #include <asm/octeon/cvmx-npei-defs.h> #include <asm/octeon/cvmx-pexp-defs.h> #include <asm/octeon/pci-octeon.h> /* * Each bit in msi_free_irq_bitmask represents a MSI interrupt that is * in use. */ static u64 msi_free_irq_bitmask[4]; /* * Each bit in msi_multiple_irq_bitmask tells that the device using * this bit in msi_free_irq_bitmask is also using the next bit. This * is used so we can disable all of the MSI interrupts when a device * uses multiple. */ static u64 msi_multiple_irq_bitmask[4]; /* * This lock controls updates to msi_free_irq_bitmask and * msi_multiple_irq_bitmask. */ static DEFINE_SPINLOCK(msi_free_irq_bitmask_lock); /* * Number of MSI IRQs used. This variable is set up in * the module init time. */ static int msi_irq_size; /** * Called when a driver request MSI interrupts instead of the * legacy INT A-D. This routine will allocate multiple interrupts * for MSI devices that support them. A device can override this by * programming the MSI control bits [6:4] before calling * pci_enable_msi(). * * @dev: Device requesting MSI interrupts * @desc: MSI descriptor * * Returns 0 on success. */ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) { struct msi_msg msg; u16 control; int configured_private_bits; int request_private_bits; int irq = 0; int irq_step; u64 search_mask; int index; /* * Read the MSI config to figure out how many IRQs this device * wants. Most devices only want 1, which will give * configured_private_bits and request_private_bits equal 0. 
*/ pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &control); /* * If the number of private bits has been configured then use * that value instead of the requested number. This gives the * driver the chance to override the number of interrupts * before calling pci_enable_msi(). */ configured_private_bits = (control & PCI_MSI_FLAGS_QSIZE) >> 4; if (configured_private_bits == 0) { /* Nothing is configured, so use the hardware requested size */ request_private_bits = (control & PCI_MSI_FLAGS_QMASK) >> 1; } else { /* * Use the number of configured bits, assuming the * driver wanted to override the hardware request * value. */ request_private_bits = configured_private_bits; } /* * The PCI 2.3 spec mandates that there are at most 32 * interrupts. If this device asks for more, only give it one. */ if (request_private_bits > 5) request_private_bits = 0; try_only_one: /* * The IRQs have to be aligned on a power of two based on the * number being requested. */ irq_step = 1 << request_private_bits; /* Mask with one bit for each IRQ */ search_mask = (1 << irq_step) - 1; /* * We're going to search msi_free_irq_bitmask_lock for zero * bits. This represents an MSI interrupt number that isn't in * use. 
*/ spin_lock(&msi_free_irq_bitmask_lock); for (index = 0; index < msi_irq_size/64; index++) { for (irq = 0; irq < 64; irq += irq_step) { if ((msi_free_irq_bitmask[index] & (search_mask << irq)) == 0) { msi_free_irq_bitmask[index] |= search_mask << irq; msi_multiple_irq_bitmask[index] |= (search_mask >> 1) << irq; goto msi_irq_allocated; } } } msi_irq_allocated: spin_unlock(&msi_free_irq_bitmask_lock); /* Make sure the search for available interrupts didn't fail */ if (irq >= 64) { if (request_private_bits) { pr_err("arch_setup_msi_irq: Unable to find %d free interrupts, trying just one", 1 << request_private_bits); request_private_bits = 0; goto try_only_one; } else panic("arch_setup_msi_irq: Unable to find a free MSI interrupt"); } /* MSI interrupts start at logical IRQ OCTEON_IRQ_MSI_BIT0 */ irq += index*64; irq += OCTEON_IRQ_MSI_BIT0; switch (octeon_dma_bar_type) { case OCTEON_DMA_BAR_TYPE_SMALL: /* When not using big bar, Bar 0 is based at 128MB */ msg.address_lo = ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff; msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32; case OCTEON_DMA_BAR_TYPE_BIG: /* When using big bar, Bar 0 is based at 0 */ msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff; msg.address_hi = (0 + CVMX_PCI_MSI_RCV) >> 32; break; case OCTEON_DMA_BAR_TYPE_PCIE: /* When using PCIe, Bar 0 is based at 0 */ /* FIXME CVMX_NPEI_MSI_RCV* other than 0? 
*/ msg.address_lo = (0 + CVMX_NPEI_PCIE_MSI_RCV) & 0xffffffff; msg.address_hi = (0 + CVMX_NPEI_PCIE_MSI_RCV) >> 32; break; default: panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type\n"); } msg.data = irq - OCTEON_IRQ_MSI_BIT0; /* Update the number of IRQs the device has available to it */ control &= ~PCI_MSI_FLAGS_QSIZE; control |= request_private_bits << 4; pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, control); irq_set_msi_desc(irq, desc); write_msi_msg(irq, &msg); return 0; } int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { struct msi_desc *entry; int ret; /* * MSI-X is not supported. */ if (type == PCI_CAP_ID_MSIX) return -EINVAL; /* * If an architecture wants to support multiple MSI, it needs to * override arch_setup_msi_irqs() */ if (type == PCI_CAP_ID_MSI && nvec > 1) return 1; list_for_each_entry(entry, &dev->msi_list, list) { ret = arch_setup_msi_irq(dev, entry); if (ret < 0) return ret; if (ret > 0) return -ENOSPC; } return 0; } /** * Called when a device no longer needs its MSI interrupts. All * MSI interrupts for the device are freed. * * @irq: The devices first irq number. There may be multple in sequence. */ void arch_teardown_msi_irq(unsigned int irq) { int number_irqs; u64 bitmask; int index = 0; int irq0; if ((irq < OCTEON_IRQ_MSI_BIT0) || (irq > msi_irq_size + OCTEON_IRQ_MSI_BIT0)) panic("arch_teardown_msi_irq: Attempted to teardown illegal " "MSI interrupt (%d)", irq); irq -= OCTEON_IRQ_MSI_BIT0; index = irq / 64; irq0 = irq % 64; /* * Count the number of IRQs we need to free by looking at the * msi_multiple_irq_bitmask. Each bit set means that the next * IRQ is also owned by this device. 
*/ number_irqs = 0; while ((irq0 + number_irqs < 64) && (msi_multiple_irq_bitmask[index] & (1ull << (irq0 + number_irqs)))) number_irqs++; number_irqs++; /* Mask with one bit for each IRQ */ bitmask = (1 << number_irqs) - 1; /* Shift the mask to the correct bit location */ bitmask <<= irq0; if ((msi_free_irq_bitmask[index] & bitmask) != bitmask) panic("arch_teardown_msi_irq: Attempted to teardown MSI " "interrupt (%d) not in use", irq); /* Checks are done, update the in use bitmask */ spin_lock(&msi_free_irq_bitmask_lock); msi_free_irq_bitmask[index] &= ~bitmask; msi_multiple_irq_bitmask[index] &= ~bitmask; spin_unlock(&msi_free_irq_bitmask_lock); } static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock); static u64 msi_rcv_reg[4]; static u64 mis_ena_reg[4]; static void octeon_irq_msi_enable_pcie(struct irq_data *data) { u64 en; unsigned long flags; int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0; int irq_index = msi_number >> 6; int irq_bit = msi_number & 0x3f; raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags); en = cvmx_read_csr(mis_ena_reg[irq_index]); en |= 1ull << irq_bit; cvmx_write_csr(mis_ena_reg[irq_index], en); cvmx_read_csr(mis_ena_reg[irq_index]); raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags); } static void octeon_irq_msi_disable_pcie(struct irq_data *data) { u64 en; unsigned long flags; int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0; int irq_index = msi_number >> 6; int irq_bit = msi_number & 0x3f; raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags); en = cvmx_read_csr(mis_ena_reg[irq_index]); en &= ~(1ull << irq_bit); cvmx_write_csr(mis_ena_reg[irq_index], en); cvmx_read_csr(mis_ena_reg[irq_index]); raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags); } static struct irq_chip octeon_irq_chip_msi_pcie = { .name = "MSI", .irq_enable = octeon_irq_msi_enable_pcie, .irq_disable = octeon_irq_msi_disable_pcie, }; static void octeon_irq_msi_enable_pci(struct irq_data *data) { /* * Octeon PCI doesn't have the ability to mask/unmask MSI * interrupts 
individually. Instead of masking/unmasking them * in groups of 16, we simple assume MSI devices are well * behaved. MSI interrupts are always enable and the ACK is * assumed to be enough */ } static void octeon_irq_msi_disable_pci(struct irq_data *data) { /* See comment in enable */ } static struct irq_chip octeon_irq_chip_msi_pci = { .name = "MSI", .irq_enable = octeon_irq_msi_enable_pci, .irq_disable = octeon_irq_msi_disable_pci, }; /* * Called by the interrupt handling code when an MSI interrupt * occurs. */ static irqreturn_t __octeon_msi_do_interrupt(int index, u64 msi_bits) { int irq; int bit; bit = fls64(msi_bits); if (bit) { bit--; /* Acknowledge it first. */ cvmx_write_csr(msi_rcv_reg[index], 1ull << bit); irq = bit + OCTEON_IRQ_MSI_BIT0 + 64 * index; do_IRQ(irq); return IRQ_HANDLED; } return IRQ_NONE; } #define OCTEON_MSI_INT_HANDLER_X(x) \ static irqreturn_t octeon_msi_interrupt##x(int cpl, void *dev_id) \ { \ u64 msi_bits = cvmx_read_csr(msi_rcv_reg[(x)]); \ return __octeon_msi_do_interrupt((x), msi_bits); \ } /* * Create octeon_msi_interrupt{0-3} function body */ OCTEON_MSI_INT_HANDLER_X(0); OCTEON_MSI_INT_HANDLER_X(1); OCTEON_MSI_INT_HANDLER_X(2); OCTEON_MSI_INT_HANDLER_X(3); /* * Initializes the MSI interrupt handling code */ int __init octeon_msi_initialize(void) { int irq; struct irq_chip *msi; if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; msi_rcv_reg[3] = CVMX_PEXP_NPEI_MSI_RCV3; mis_ena_reg[0] = CVMX_PEXP_NPEI_MSI_ENB0; mis_ena_reg[1] = CVMX_PEXP_NPEI_MSI_ENB1; mis_ena_reg[2] = CVMX_PEXP_NPEI_MSI_ENB2; mis_ena_reg[3] = CVMX_PEXP_NPEI_MSI_ENB3; msi = &octeon_irq_chip_msi_pcie; } else { msi_rcv_reg[0] = CVMX_NPI_NPI_MSI_RCV; #define INVALID_GENERATE_ADE 0x8700000000000000ULL; msi_rcv_reg[1] = INVALID_GENERATE_ADE; msi_rcv_reg[2] = INVALID_GENERATE_ADE; msi_rcv_reg[3] = INVALID_GENERATE_ADE; mis_ena_reg[0] = 
INVALID_GENERATE_ADE; mis_ena_reg[1] = INVALID_GENERATE_ADE; mis_ena_reg[2] = INVALID_GENERATE_ADE; mis_ena_reg[3] = INVALID_GENERATE_ADE; msi = &octeon_irq_chip_msi_pci; } for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_LAST; irq++) irq_set_chip_and_handler(irq, msi, handle_simple_irq); if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0, 0, "MSI[0:63]", octeon_msi_interrupt0)) panic("request_irq(OCTEON_IRQ_PCI_MSI0) failed"); if (request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt1, 0, "MSI[64:127]", octeon_msi_interrupt1)) panic("request_irq(OCTEON_IRQ_PCI_MSI1) failed"); if (request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt2, 0, "MSI[127:191]", octeon_msi_interrupt2)) panic("request_irq(OCTEON_IRQ_PCI_MSI2) failed"); if (request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt3, 0, "MSI[192:255]", octeon_msi_interrupt3)) panic("request_irq(OCTEON_IRQ_PCI_MSI3) failed"); msi_irq_size = 256; } else if (octeon_is_pci_host()) { if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0, 0, "MSI[0:15]", octeon_msi_interrupt0)) panic("request_irq(OCTEON_IRQ_PCI_MSI0) failed"); if (request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt0, 0, "MSI[16:31]", octeon_msi_interrupt0)) panic("request_irq(OCTEON_IRQ_PCI_MSI1) failed"); if (request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt0, 0, "MSI[32:47]", octeon_msi_interrupt0)) panic("request_irq(OCTEON_IRQ_PCI_MSI2) failed"); if (request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt0, 0, "MSI[48:63]", octeon_msi_interrupt0)) panic("request_irq(OCTEON_IRQ_PCI_MSI3) failed"); msi_irq_size = 64; } return 0; } subsys_initcall(octeon_msi_initialize);
gpl-2.0
neohackt/android_kernel_xiaomi_armani
mm/swap.c
3154
21023
/*
 * linux/mm/swap.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Per-cpu staging queues used to batch LRU manipulation under one lock grab. */
static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		/* Detach the page from its zone's LRU list under the lru_lock. */
		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru_list(zone, page, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}

/* Free a single (order-0) page: pull it off the LRU, then hand it back. */
static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, 0);
}

/* Free a compound page via its registered destructor. */
static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

/*
 * Drop a reference on a (possibly tail) compound page.  The tail-page path
 * must cope with a concurrent __split_huge_page_refcount() turning the tail
 * into an independent page; the compound_lock on the head serializes that.
 */
static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = compound_trans_head(page);

		if (likely(page != page_head &&
			   get_page_unless_zero(page_head))) {
			unsigned long flags;
			/*
			 * page_head wasn't a dangling pointer but it
			 * may not be a head page anymore by the time
			 * we obtain the lock. That is ok as long as it
			 * can't be freed from under us.
			 */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount run before us */
				compound_unlock_irqrestore(page_head, flags);
				VM_BUG_ON(PageHead(page_head));
				/* Drop the extra head reference we took above. */
				if (put_page_testzero(page_head))
					__put_single_page(page_head);
			out_put_single:
				/* page is now an ordinary page; drop caller's ref. */
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on
			 * the compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(page_mapcount(page) <= 0);
			/* Tail refs are carried in _mapcount; drop one. */
			atomic_dec(&page->_mapcount);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			VM_BUG_ON(atomic_read(&page->_count) != 0);
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}

/* Generic page reference drop; dispatches on compound vs. single pages. */
void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
	/*
	 * This takes care of get_page() if run on a tail page
	 * returned by one of the get_user_pages/follow_page variants.
	 * get_user_pages/follow_page itself doesn't need the compound
	 * lock because it runs __get_page_tail_foll() under the
	 * proper PT lock that already serializes against
	 * split_huge_page().
	 */
	unsigned long flags;
	bool got = false;
	struct page *page_head = compound_trans_head(page);

	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		/*
		 * page_head wasn't a dangling pointer but it
		 * may not be a head page anymore by the time
		 * we obtain the lock. That is ok as long as it
		 * can't be freed from under us.
		 */
		flags = compound_lock_irqsave(page_head);
		/* here __split_huge_page_refcount won't run anymore */
		if (likely(PageTail(page))) {
			__get_page_tail_foll(page, false);
			got = true;
		}
		compound_unlock_irqrestore(page_head, flags);
		if (unlikely(!got))
			put_page(page_head);
	}
	return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

/*
 * Apply @move_fn to every page in @pvec, taking each zone's lru_lock only
 * when the zone changes between consecutive pages, then drop the page
 * references and reset the pagevec.
 */
static void pagevec_lru_move_fn(struct pagevec *pvec,
				void (*move_fn)(struct page *page, void *arg),
				void *arg)
{
	int i;
	struct zone *zone = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		/* Re-lock only when we cross into a different zone. */
		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		(*move_fn)(page, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/* Move an inactive, evictable LRU page to the tail of its list; count it. */
static void pagevec_move_tail_fn(struct page *page, void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		struct lruvec *lruvec;

		lruvec = mem_cgroup_lru_move_lists(page_zone(page),
						   page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

/*
 * Bump the zone's (and, if present, the memcg's) recent_scanned counter for
 * the page's file/anon type; also bump recent_rotated when @rotated is set.
 */
static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
	struct zone_reclaim_stat *memcg_reclaim_stat;

	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;

	if (!memcg_reclaim_stat)
		return;

	memcg_reclaim_stat->recent_scanned[file]++;
	if (rotated)
		memcg_reclaim_stat->recent_rotated[file]++;
}

/* Promote an inactive, evictable LRU page to its zone's active list. */
static void __activate_page(struct page *page, void *arg)
{
	struct zone *zone = page_zone(page);

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);
		del_page_from_lru_list(zone, page, lru);

		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);

		update_page_reclaim_stat(zone, page, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

/* Flush @cpu's pending activations through __activate_page(). */
static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

/* SMP: queue the activation in a per-cpu pagevec to batch lru_lock work. */
void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

/* UP: no batching needed, activate directly under the zone lock. */
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);

/* Queue @page for addition to the @lru list via the per-cpu pagevec. */
void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	/* Clear stale state flags; the target list is dictated by @lru. */
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could reclaim asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In 4, the page is moved to the inactive list's head because the VM
 * expects it to be written out by flusher threads, as this is much more
 * effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, void *arg)
{
	int lru, file;
	bool active;
	struct zone *zone = page_zone(page);

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);
	del_page_from_lru_list(zone, page, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(zone, page, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim could be raced with end_page_writeback
		 * It can make readahead confusing.  But race window
		 * is _really_ small and  it's non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		struct lruvec *lruvec;
		/*
		 * The page's writeback has already ended; move the page
		 * to the tail of the inactive list so it is reclaimed next.
		 */
		lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(zone, page, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			__pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages such as mprotect,
	 * unevictable page deactivation for accelerating reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}

/* Drain the current CPU's LRU pagevecs (preemption disabled via get_cpu). */
void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			/* Compound pages take their own slow path, unlocked. */
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru_list(zone, page, page_off_lru(page));
		}

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct zone* zone,
		       struct page *page, struct page *page_tail)
{
	int uninitialized_var(active);
	enum lru_list lru;
	const int file = 0;

	VM_BUG_ON(!PageHead(page));
	VM_BUG_ON(PageCompound(page_tail));
	VM_BUG_ON(PageLRU(page_tail));
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));

	SetPageLRU(page_tail);

	/* Tail inherits the head's active state; unevictable stays that way. */
	if (page_evictable(page_tail, NULL)) {
		if (PageActive(page)) {
			SetPageActive(page_tail);
			active = 1;
			lru = LRU_ACTIVE_ANON;
		} else {
			active = 0;
			lru = LRU_INACTIVE_ANON;
		}
	} else {
		SetPageUnevictable(page_tail);
		lru = LRU_UNEVICTABLE;
	}

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(zone, page_tail, lru);
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(zone, page_tail, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* Put one queued page onto the LRU list encoded in @arg. */
static void __pagevec_lru_add_fn(struct page *page, void *arg)
{
	enum lru_list lru = (enum lru_list)arg;
	struct zone *zone = page_zone(page);
	int file = is_file_lru(lru);
	int active = is_active_lru(lru);

	VM_BUG_ON(PageActive(page));
	VM_BUG_ON(PageUnevictable(page));
	VM_BUG_ON(PageLRU(page));

	SetPageLRU(page);
	if (active)
		SetPageActive(page);
	add_page_to_lru_list(zone, page, lru);
	update_page_reclaim_stat(zone, page, file, active);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	VM_BUG_ON(is_unevictable_lru(lru));

	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);

/* Like pagevec_lookup() but restricted to pages carrying radix-tree @tag. */
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system means that we
	 * _really_ don't want to cluster much more
	 */
}
gpl-2.0
ohadbc/hwspinlock-next
arch/x86/kernel/cpu/perf_event_intel_lbr.c
3922
4521
#ifdef CONFIG_CPU_SUP_INTEL

/* LBR record formats reported by the CPU (see x86_pmu.intel_cap.lbr_format). */
enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
};

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI
 * otherwise it becomes near impossible to get a reliable stack.
 */

/* Set the LBR and freeze-on-PMI bits in IA32_DEBUGCTL on this CPU. */
static void __intel_pmu_lbr_enable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

/* Clear the LBR and freeze-on-PMI bits in IA32_DEBUGCTL on this CPU. */
static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

/* 32-bit format: FROM and TO are packed into one MSR per entry. */
static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}

/* 64-bit formats: separate FROM and TO MSRs per entry. */
static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to + i, 0);
	}
}

/* Zero the whole LBR stack, dispatching on the CPU's record format. */
static void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}

/* Account an LBR user for @event; the actual MSR enable happens in *_enable_all(). */
static void intel_pmu_lbr_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	WARN_ON_ONCE(cpuc->enabled);

	/*
	 * Reset the LBR stack if we changed task context to
	 * avoid data leaks.
	 */
	if (event->ctx->task && cpuc->lbr_context != event->ctx) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = event->ctx;
	}

	cpuc->lbr_users++;
}

/* Drop one LBR user; turn LBRs off when the last user goes away. */
static void intel_pmu_lbr_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);

	if (cpuc->enabled && !cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

/* Re-enable LBRs if any event on this CPU is using them. */
static void intel_pmu_lbr_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable();
}

/* Disable LBRs if any event on this CPU was using them. */
static void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

/* Read the LBR top-of-stack index from its MSR. */
static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);

	return tos;
}

/* Snapshot the LBR stack (32-bit packed format) into cpuc->lbr_entries[]. */
static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		/* Walk backwards from TOS; the stack is a ring buffer. */
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64     lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from  = msr_lastbranch.from;
		cpuc->lbr_entries[i].to    = msr_lastbranch.to;
		cpuc->lbr_entries[i].flags = 0;
	}
	cpuc->lbr_stack.nr = i;
}

/* Bit 63 of the FROM MSR carries the mispredict flag in EIP_FLAGS format. */
#define LBR_FROM_FLAG_MISPRED  (1ULL << 63)

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, flags = 0;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
		rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);

		if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
			/* Extract the flag bit and sign-extend the address. */
			flags = !!(from & LBR_FROM_FLAG_MISPRED);
			from = (u64)((((s64)from) << 1) >> 1);
		}

		cpuc->lbr_entries[i].from  = from;
		cpuc->lbr_entries[i].to    = to;
		cpuc->lbr_entries[i].flags = flags;
	}
	cpuc->lbr_stack.nr = i;
}

/* Snapshot the LBR stack for this CPU if anyone is using LBRs. */
static void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);
}

/* Per-family MSR layout: 4 entries at FROM 0x40 / TO 0x60 (Core). */
static void intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr     = 4;
	x86_pmu.lbr_tos    = 0x01c9;
	x86_pmu.lbr_from   = 0x40;
	x86_pmu.lbr_to     = 0x60;
}

/* Per-family MSR layout: 16 entries at FROM 0x680 / TO 0x6c0 (Nehalem). */
static void intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr     = 16;
	x86_pmu.lbr_tos    = 0x01c9;
	x86_pmu.lbr_from   = 0x680;
	x86_pmu.lbr_to     = 0x6c0;
}

/* Per-family MSR layout: 8 entries at FROM 0x40 / TO 0x60 (Atom). */
static void intel_pmu_lbr_init_atom(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = 0x01c9;
	x86_pmu.lbr_from   = 0x40;
	x86_pmu.lbr_to	   = 0x60;
}

#endif /* CONFIG_CPU_SUP_INTEL */
gpl-2.0