repo_name
string
path
string
copies
string
size
string
content
string
license
string
benpye/buzz-kernel-2.6.35
arch/arm/mach-pxa/cpufreq-pxa3xx.c
769
6513
/* * linux/arch/arm/mach-pxa/cpufreq-pxa3xx.c * * Copyright (C) 2008 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/slab.h> #include <mach/pxa3xx-regs.h> #include "generic.h" #define HSS_104M (0) #define HSS_156M (1) #define HSS_208M (2) #define HSS_312M (3) #define SMCFS_78M (0) #define SMCFS_104M (2) #define SMCFS_208M (5) #define SFLFS_104M (0) #define SFLFS_156M (1) #define SFLFS_208M (2) #define SFLFS_312M (3) #define XSPCLK_156M (0) #define XSPCLK_NONE (3) #define DMCFS_26M (0) #define DMCFS_260M (3) struct pxa3xx_freq_info { unsigned int cpufreq_mhz; unsigned int core_xl : 5; unsigned int core_xn : 3; unsigned int hss : 2; unsigned int dmcfs : 2; unsigned int smcfs : 3; unsigned int sflfs : 2; unsigned int df_clkdiv : 3; int vcc_core; /* in mV */ int vcc_sram; /* in mV */ }; #define OP(cpufreq, _xl, _xn, _hss, _dmc, _smc, _sfl, _dfi, vcore, vsram) \ { \ .cpufreq_mhz = cpufreq, \ .core_xl = _xl, \ .core_xn = _xn, \ .hss = HSS_##_hss##M, \ .dmcfs = DMCFS_##_dmc##M, \ .smcfs = SMCFS_##_smc##M, \ .sflfs = SFLFS_##_sfl##M, \ .df_clkdiv = _dfi, \ .vcc_core = vcore, \ .vcc_sram = vsram, \ } static struct pxa3xx_freq_info pxa300_freqs[] = { /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */ OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */ OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */ OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */ OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */ }; static struct pxa3xx_freq_info pxa320_freqs[] = { /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */ OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 
1100), /* 104MHz */ OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */ OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */ OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */ OP(806, 31, 2, 208, 260, 208, 312, 3, 1400, 1400), /* 806MHz */ }; static unsigned int pxa3xx_freqs_num; static struct pxa3xx_freq_info *pxa3xx_freqs; static struct cpufreq_frequency_table *pxa3xx_freqs_table; static int setup_freqs_table(struct cpufreq_policy *policy, struct pxa3xx_freq_info *freqs, int num) { struct cpufreq_frequency_table *table; int i; table = kzalloc((num + 1) * sizeof(*table), GFP_KERNEL); if (table == NULL) return -ENOMEM; for (i = 0; i < num; i++) { table[i].index = i; table[i].frequency = freqs[i].cpufreq_mhz * 1000; } table[num].index = i; table[num].frequency = CPUFREQ_TABLE_END; pxa3xx_freqs = freqs; pxa3xx_freqs_num = num; pxa3xx_freqs_table = table; return cpufreq_frequency_table_cpuinfo(policy, table); } static void __update_core_freq(struct pxa3xx_freq_info *info) { uint32_t mask = ACCR_XN_MASK | ACCR_XL_MASK; uint32_t accr = ACCR; uint32_t xclkcfg; accr &= ~(ACCR_XN_MASK | ACCR_XL_MASK | ACCR_XSPCLK_MASK); accr |= ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl); /* No clock until core PLL is re-locked */ accr |= ACCR_XSPCLK(XSPCLK_NONE); xclkcfg = (info->core_xn == 2) ? 
0x3 : 0x2; /* turbo bit */ ACCR = accr; __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg)); while ((ACSR & mask) != (accr & mask)) cpu_relax(); } static void __update_bus_freq(struct pxa3xx_freq_info *info) { uint32_t mask; uint32_t accr = ACCR; mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK | ACCR_DMCFS_MASK; accr &= ~mask; accr |= ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) | ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs); ACCR = accr; while ((ACSR & mask) != (accr & mask)) cpu_relax(); } static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table); } static unsigned int pxa3xx_cpufreq_get(unsigned int cpu) { return get_clk_frequency_khz(0); } static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { struct pxa3xx_freq_info *next; struct cpufreq_freqs freqs; unsigned long flags; int idx; if (policy->cpu != 0) return -EINVAL; /* Lookup the next frequency */ if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table, target_freq, relation, &idx)) return -EINVAL; next = &pxa3xx_freqs[idx]; freqs.old = policy->cur; freqs.new = next->cpufreq_mhz * 1000; freqs.cpu = policy->cpu; pr_debug("CPU frequency from %d MHz to %d MHz%s\n", freqs.old / 1000, freqs.new / 1000, (freqs.old == freqs.new) ? " (skipped)" : ""); if (freqs.old == target_freq) return 0; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); local_irq_save(flags); __update_core_freq(next); __update_bus_freq(next); local_irq_restore(flags); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); return 0; } static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) { int ret = -EINVAL; /* set default policy and cpuinfo */ policy->cpuinfo.min_freq = 104000; policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 
806000 : 624000; policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ policy->cur = policy->min = policy->max = get_clk_frequency_khz(0); if (cpu_is_pxa300() || cpu_is_pxa310()) ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa300_freqs)); if (cpu_is_pxa320()) ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa320_freqs)); if (ret) { pr_err("failed to setup frequency table\n"); return ret; } pr_info("CPUFREQ support for PXA3xx initialized\n"); return 0; } static struct cpufreq_driver pxa3xx_cpufreq_driver = { .verify = pxa3xx_cpufreq_verify, .target = pxa3xx_cpufreq_set, .init = pxa3xx_cpufreq_init, .get = pxa3xx_cpufreq_get, .name = "pxa3xx-cpufreq", }; static int __init cpufreq_init(void) { if (cpu_is_pxa3xx()) return cpufreq_register_driver(&pxa3xx_cpufreq_driver); return 0; } module_init(cpufreq_init); static void __exit cpufreq_exit(void) { cpufreq_unregister_driver(&pxa3xx_cpufreq_driver); } module_exit(cpufreq_exit); MODULE_DESCRIPTION("CPU frequency scaling driver for PXA3xx"); MODULE_LICENSE("GPL");
gpl-2.0
mialwe/mngb
arch/x86/mm/highmem_32.c
1281
3283
#include <linux/highmem.h> #include <linux/module.h> #include <linux/swap.h> /* for totalram_pages */ void *kmap(struct page *page) { might_sleep(); if (!PageHighMem(page)) return page_address(page); return kmap_high(page); } void kunmap(struct page *page) { if (in_interrupt()) BUG(); if (!PageHighMem(page)) return; kunmap_high(page); } /* * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because * no global lock is needed and because the kmap code must perform a global TLB * invalidation when the kmap pool wraps. * * However when holding an atomic kmap it is not legal to sleep, so atomic * kmaps are appropriate for short, tight code paths only. */ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) { enum fixed_addresses idx; unsigned long vaddr; /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ pagefault_disable(); if (!PageHighMem(page)) return page_address(page); debug_kmap_atomic(type); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); BUG_ON(!pte_none(*(kmap_pte-idx))); set_pte(kmap_pte-idx, mk_pte(page, prot)); return (void *)vaddr; } void *kmap_atomic(struct page *page, enum km_type type) { return kmap_atomic_prot(page, type, kmap_prot); } void kunmap_atomic(void *kvaddr, enum km_type type) { unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); /* * Force other mappings to Oops if they'll try to access this pte * without first remap it. Keeping stale mappings around is a bad idea * also, in case the page changes cacheability attributes or becomes * a protected page in a hypervisor. 
*/ if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) kpte_clear_flush(kmap_pte-idx, vaddr); else { #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(vaddr < PAGE_OFFSET); BUG_ON(vaddr >= (unsigned long)high_memory); #endif } pagefault_enable(); } /* * This is the same as kmap_atomic() but can map memory that doesn't * have a struct page associated with it. */ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) { return kmap_atomic_prot_pfn(pfn, type, kmap_prot); } EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */ struct page *kmap_atomic_to_page(void *ptr) { unsigned long idx, vaddr = (unsigned long)ptr; pte_t *pte; if (vaddr < FIXADDR_START) return virt_to_page(ptr); idx = virt_to_fix(vaddr); pte = kmap_pte - (idx - FIX_KMAP_BEGIN); return pte_page(*pte); } EXPORT_SYMBOL(kmap); EXPORT_SYMBOL(kunmap); EXPORT_SYMBOL(kmap_atomic); EXPORT_SYMBOL(kunmap_atomic); EXPORT_SYMBOL(kmap_atomic_prot); EXPORT_SYMBOL(kmap_atomic_to_page); void __init set_highmem_pages_init(void) { struct zone *zone; int nid; for_each_zone(zone) { unsigned long zone_start_pfn, zone_end_pfn; if (!is_highmem(zone)) continue; zone_start_pfn = zone->zone_start_pfn; zone_end_pfn = zone_start_pfn + zone->spanned_pages; nid = zone_to_nid(zone); printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n", zone->name, nid, zone_start_pfn, zone_end_pfn); add_highpages_with_active_regions(nid, zone_start_pfn, zone_end_pfn); } totalram_pages += totalhigh_pages; }
gpl-2.0
JacobTech/lge_kernel_e400-
net/bridge/netfilter/ebt_ulog.c
2817
9011
/* * netfilter module for userspace bridged Ethernet frames logging daemons * * Authors: * Bart De Schuymer <bdschuym@pandora.be> * Harald Welte <laforge@netfilter.org> * * November, 2004 * * Based on ipt_ULOG.c, which is * (C) 2000-2002 by Harald Welte <laforge@netfilter.org> * * This module accepts two parameters: * * nlbufsiz: * The parameter specifies how big the buffer for each netlink multicast * group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will * get accumulated in the kernel until they are sent to userspace. It is * NOT possible to allocate more than 128kB, and it is strongly discouraged, * because atomically allocating 128kB inside the network rx softirq is not * reliable. Please also keep in mind that this buffer size is allocated for * each nlgroup you are using, so the total kernel memory usage increases * by that factor. * * flushtimeout: * Specify, after how many hundredths of a second the queue should be * flushed even if it is not full yet. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/socket.h> #include <linux/skbuff.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/netlink.h> #include <linux/netdevice.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_bridge/ebt_ulog.h> #include <net/netfilter/nf_log.h> #include <net/sock.h> #include "../br_private.h" static unsigned int nlbufsiz = NLMSG_GOODSIZE; module_param(nlbufsiz, uint, 0600); MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) " "(defaults to 4096)"); static unsigned int flushtimeout = 10; module_param(flushtimeout, uint, 0600); MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths ofa second) " "(defaults to 10)"); typedef struct { unsigned int qlen; /* number of nlmsgs' in the skb */ struct nlmsghdr *lastnlh; /* netlink header of last msg in skb */ struct sk_buff 
*skb; /* the pre-allocated skb */ struct timer_list timer; /* the timer function */ spinlock_t lock; /* the per-queue lock */ } ebt_ulog_buff_t; static ebt_ulog_buff_t ulog_buffers[EBT_ULOG_MAXNLGROUPS]; static struct sock *ebtulognl; /* send one ulog_buff_t to userspace */ static void ulog_send(unsigned int nlgroup) { ebt_ulog_buff_t *ub = &ulog_buffers[nlgroup]; if (timer_pending(&ub->timer)) del_timer(&ub->timer); if (!ub->skb) return; /* last nlmsg needs NLMSG_DONE */ if (ub->qlen > 1) ub->lastnlh->nlmsg_type = NLMSG_DONE; NETLINK_CB(ub->skb).dst_group = nlgroup + 1; netlink_broadcast(ebtulognl, ub->skb, 0, nlgroup + 1, GFP_ATOMIC); ub->qlen = 0; ub->skb = NULL; } /* timer function to flush queue in flushtimeout time */ static void ulog_timer(unsigned long data) { spin_lock_bh(&ulog_buffers[data].lock); if (ulog_buffers[data].skb) ulog_send(data); spin_unlock_bh(&ulog_buffers[data].lock); } static struct sk_buff *ulog_alloc_skb(unsigned int size) { struct sk_buff *skb; unsigned int n; n = max(size, nlbufsiz); skb = alloc_skb(n, GFP_ATOMIC); if (!skb) { pr_debug("cannot alloc whole buffer of size %ub!\n", n); if (n > size) { /* try to allocate only as much as we need for * current packet */ skb = alloc_skb(size, GFP_ATOMIC); if (!skb) pr_debug("cannot even allocate " "buffer of size %ub\n", size); } } return skb; } static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct ebt_ulog_info *uloginfo, const char *prefix) { ebt_ulog_packet_msg_t *pm; size_t size, copy_len; struct nlmsghdr *nlh; unsigned int group = uloginfo->nlgroup; ebt_ulog_buff_t *ub = &ulog_buffers[group]; spinlock_t *lock = &ub->lock; ktime_t kt; if ((uloginfo->cprange == 0) || (uloginfo->cprange > skb->len + ETH_HLEN)) copy_len = skb->len + ETH_HLEN; else copy_len = uloginfo->cprange; size = NLMSG_SPACE(sizeof(*pm) + copy_len); if (size > nlbufsiz) { pr_debug("Size %Zd needed, but nlbufsiz=%d\n", size, 
nlbufsiz); return; } spin_lock_bh(lock); if (!ub->skb) { if (!(ub->skb = ulog_alloc_skb(size))) goto alloc_failure; } else if (size > skb_tailroom(ub->skb)) { ulog_send(group); if (!(ub->skb = ulog_alloc_skb(size))) goto alloc_failure; } nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, 0, size - NLMSG_ALIGN(sizeof(*nlh))); ub->qlen++; pm = NLMSG_DATA(nlh); /* Fill in the ulog data */ pm->version = EBT_ULOG_VERSION; kt = ktime_get_real(); pm->stamp = ktime_to_timeval(kt); if (ub->qlen == 1) ub->skb->tstamp = kt; pm->data_len = copy_len; pm->mark = skb->mark; pm->hook = hooknr; if (uloginfo->prefix != NULL) strcpy(pm->prefix, uloginfo->prefix); else *(pm->prefix) = '\0'; if (in) { strcpy(pm->physindev, in->name); /* If in isn't a bridge, then physindev==indev */ if (br_port_exists(in)) /* rcu_read_lock()ed by nf_hook_slow */ strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name); else strcpy(pm->indev, in->name); } else pm->indev[0] = pm->physindev[0] = '\0'; if (out) { /* If out exists, then out is a bridge port */ strcpy(pm->physoutdev, out->name); /* rcu_read_lock()ed by nf_hook_slow */ strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name); } else pm->outdev[0] = pm->physoutdev[0] = '\0'; if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0) BUG(); if (ub->qlen > 1) ub->lastnlh->nlmsg_flags |= NLM_F_MULTI; ub->lastnlh = nlh; if (ub->qlen >= uloginfo->qthreshold) ulog_send(group); else if (!timer_pending(&ub->timer)) { ub->timer.expires = jiffies + flushtimeout * HZ / 100; add_timer(&ub->timer); } unlock: spin_unlock_bh(lock); return; nlmsg_failure: pr_debug("error during NLMSG_PUT. 
This should " "not happen, please report to author.\n"); goto unlock; alloc_failure: goto unlock; } /* this function is registered with the netfilter core */ static void ebt_log_packet(u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *li, const char *prefix) { struct ebt_ulog_info loginfo; if (!li || li->type != NF_LOG_TYPE_ULOG) { loginfo.nlgroup = EBT_ULOG_DEFAULT_NLGROUP; loginfo.cprange = 0; loginfo.qthreshold = EBT_ULOG_DEFAULT_QTHRESHOLD; loginfo.prefix[0] = '\0'; } else { loginfo.nlgroup = li->u.ulog.group; loginfo.cprange = li->u.ulog.copy_len; loginfo.qthreshold = li->u.ulog.qthreshold; strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix)); } ebt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix); } static unsigned int ebt_ulog_tg(struct sk_buff *skb, const struct xt_action_param *par) { ebt_ulog_packet(par->hooknum, skb, par->in, par->out, par->targinfo, NULL); return EBT_CONTINUE; } static int ebt_ulog_tg_check(const struct xt_tgchk_param *par) { struct ebt_ulog_info *uloginfo = par->targinfo; if (uloginfo->nlgroup > 31) return -EINVAL; uloginfo->prefix[EBT_ULOG_PREFIX_LEN - 1] = '\0'; if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN) uloginfo->qthreshold = EBT_ULOG_MAX_QLEN; return 0; } static struct xt_target ebt_ulog_tg_reg __read_mostly = { .name = "ulog", .revision = 0, .family = NFPROTO_BRIDGE, .target = ebt_ulog_tg, .checkentry = ebt_ulog_tg_check, .targetsize = sizeof(struct ebt_ulog_info), .me = THIS_MODULE, }; static struct nf_logger ebt_ulog_logger __read_mostly = { .name = "ebt_ulog", .logfn = &ebt_log_packet, .me = THIS_MODULE, }; static int __init ebt_ulog_init(void) { int ret; int i; if (nlbufsiz >= 128*1024) { pr_warning("Netlink buffer has to be <= 128kB," " please try a smaller nlbufsiz parameter.\n"); return -EINVAL; } /* initialize ulog_buffers */ for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) { setup_timer(&ulog_buffers[i].timer, ulog_timer, 
i); spin_lock_init(&ulog_buffers[i].lock); } ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, EBT_ULOG_MAXNLGROUPS, NULL, NULL, THIS_MODULE); if (!ebtulognl) ret = -ENOMEM; else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0) netlink_kernel_release(ebtulognl); if (ret == 0) nf_log_register(NFPROTO_BRIDGE, &ebt_ulog_logger); return ret; } static void __exit ebt_ulog_fini(void) { ebt_ulog_buff_t *ub; int i; nf_log_unregister(&ebt_ulog_logger); xt_unregister_target(&ebt_ulog_tg_reg); for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) { ub = &ulog_buffers[i]; if (timer_pending(&ub->timer)) del_timer(&ub->timer); spin_lock_bh(&ub->lock); if (ub->skb) { kfree_skb(ub->skb); ub->skb = NULL; } spin_unlock_bh(&ub->lock); } netlink_kernel_release(ebtulognl); } module_init(ebt_ulog_init); module_exit(ebt_ulog_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>"); MODULE_DESCRIPTION("Ebtables: Packet logging to netlink using ULOG");
gpl-2.0
EnJens/kernel_tf201_stock
drivers/staging/octeon/ethernet.c
3073
26430
/********************************************************************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2007 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. 
* Contact Cavium Networks for more information **********************************************************************/ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/phy.h> #include <linux/slab.h> #include <net/dst.h> #include <asm/octeon/octeon.h> #include "ethernet-defines.h" #include "octeon-ethernet.h" #include "ethernet-mem.h" #include "ethernet-rx.h" #include "ethernet-tx.h" #include "ethernet-mdio.h" #include "ethernet-util.h" #include "cvmx-pip.h" #include "cvmx-pko.h" #include "cvmx-fau.h" #include "cvmx-ipd.h" #include "cvmx-helper.h" #include "cvmx-gmxx-defs.h" #include "cvmx-smix-defs.h" #if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \ && CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS; #else int num_packet_buffers = 1024; #endif module_param(num_packet_buffers, int, 0444); MODULE_PARM_DESC(num_packet_buffers, "\n" "\tNumber of packet buffers to allocate and store in the\n" "\tFPA. By default, 1024 packet buffers are used unless\n" "\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined."); int pow_receive_group = 15; module_param(pow_receive_group, int, 0444); MODULE_PARM_DESC(pow_receive_group, "\n" "\tPOW group to receive packets from. All ethernet hardware\n" "\twill be configured to send incomming packets to this POW\n" "\tgroup. Also any other software can submit packets to this\n" "\tgroup for the kernel to process."); int pow_send_group = -1; module_param(pow_send_group, int, 0644); MODULE_PARM_DESC(pow_send_group, "\n" "\tPOW group to send packets to other software on. This\n" "\tcontrols the creation of the virtual device pow0.\n" "\talways_use_pow also depends on this value."); int always_use_pow; module_param(always_use_pow, int, 0444); MODULE_PARM_DESC(always_use_pow, "\n" "\tWhen set, always send to the pow group. 
This will cause\n" "\tpackets sent to real ethernet devices to be sent to the\n" "\tPOW group instead of the hardware. Unless some other\n" "\tapplication changes the config, packets will still be\n" "\treceived from the low level hardware. Use this option\n" "\tto allow a CVMX app to intercept all packets from the\n" "\tlinux kernel. You must specify pow_send_group along with\n" "\tthis option."); char pow_send_list[128] = ""; module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444); MODULE_PARM_DESC(pow_send_list, "\n" "\tComma separated list of ethernet devices that should use the\n" "\tPOW for transmit instead of the actual ethernet hardware. This\n" "\tis a per port version of always_use_pow. always_use_pow takes\n" "\tprecedence over this list. For example, setting this to\n" "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n" "\tusing the pow_send_group."); int max_rx_cpus = -1; module_param(max_rx_cpus, int, 0444); MODULE_PARM_DESC(max_rx_cpus, "\n" "\t\tThe maximum number of CPUs to use for packet reception.\n" "\t\tUse -1 to use all available CPUs."); int rx_napi_weight = 32; module_param(rx_napi_weight, int, 0444); MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter."); /* * The offset from mac_addr_base that should be used for the next port * that is configured. By convention, if any mgmt ports exist on the * chip, they get the first mac addresses, The ports controlled by * this driver are numbered sequencially following any mgmt addresses * that may exist. */ static unsigned int cvm_oct_mac_addr_offset; /** * cvm_oct_poll_queue - Workqueue for polling operations. */ struct workqueue_struct *cvm_oct_poll_queue; /** * cvm_oct_poll_queue_stopping - flag to indicate polling should stop. * * Set to one right before cvm_oct_poll_queue is destroyed. */ atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0); /** * Array of every ethernet device owned by this driver indexed by * the ipd input port number. 
*/ struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS]; u64 cvm_oct_tx_poll_interval; static void cvm_oct_rx_refill_worker(struct work_struct *work); static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker); static void cvm_oct_rx_refill_worker(struct work_struct *work) { /* * FPA 0 may have been drained, try to refill it if we need * more than num_packet_buffers / 2, otherwise normal receive * processing will refill it. If it were drained, no packets * could be received so cvm_oct_napi_poll would never be * invoked to do the refill. */ cvm_oct_rx_refill_pool(num_packet_buffers / 2); if (!atomic_read(&cvm_oct_poll_queue_stopping)) queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ); } static void cvm_oct_periodic_worker(struct work_struct *work) { struct octeon_ethernet *priv = container_of(work, struct octeon_ethernet, port_periodic_work.work); if (priv->poll) priv->poll(cvm_oct_device[priv->port]); cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(cvm_oct_device[priv->port]); if (!atomic_read(&cvm_oct_poll_queue_stopping)) queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ); } static __init void cvm_oct_configure_common_hw(void) { /* Setup the FPA */ cvmx_fpa_enable(); cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, num_packet_buffers); cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE, num_packet_buffers); if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL) cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL, CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128); if (USE_RED) cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8); } /** * cvm_oct_free_work- Free a work queue entry * * @work_queue_entry: Work queue entry to free * * Returns Zero on success, Negative on failure. 
*/ int cvm_oct_free_work(void *work_queue_entry) { cvmx_wqe_t *work = work_queue_entry; int segments = work->word2.s.bufs; union cvmx_buf_ptr segment_ptr = work->packet_ptr; while (segments--) { union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *) cvmx_phys_to_ptr(segment_ptr.s.addr - 8); if (unlikely(!segment_ptr.s.i)) cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr), segment_ptr.s.pool, DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE / 128)); segment_ptr = next_ptr; } cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1)); return 0; } EXPORT_SYMBOL(cvm_oct_free_work); /** * cvm_oct_common_get_stats - get the low level ethernet statistics * @dev: Device to get the statistics from * * Returns Pointer to the statistics */ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) { cvmx_pip_port_status_t rx_status; cvmx_pko_port_status_t tx_status; struct octeon_ethernet *priv = netdev_priv(dev); if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) { if (octeon_is_simulation()) { /* The simulator doesn't support statistics */ memset(&rx_status, 0, sizeof(rx_status)); memset(&tx_status, 0, sizeof(tx_status)); } else { cvmx_pip_get_port_status(priv->port, 1, &rx_status); cvmx_pko_get_port_status(priv->port, 1, &tx_status); } priv->stats.rx_packets += rx_status.inb_packets; priv->stats.tx_packets += tx_status.packets; priv->stats.rx_bytes += rx_status.inb_octets; priv->stats.tx_bytes += tx_status.octets; priv->stats.multicast += rx_status.multicast_packets; priv->stats.rx_crc_errors += rx_status.inb_errors; priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets; /* * The drop counter must be incremented atomically * since the RX tasklet also increments it. 
*/ #ifdef CONFIG_64BIT atomic64_add(rx_status.dropped_packets, (atomic64_t *)&priv->stats.rx_dropped); #else atomic_add(rx_status.dropped_packets, (atomic_t *)&priv->stats.rx_dropped); #endif } return &priv->stats; } /** * cvm_oct_common_change_mtu - change the link MTU * @dev: Device to change * @new_mtu: The new MTU * * Returns Zero on success */ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu) { struct octeon_ethernet *priv = netdev_priv(dev); int interface = INTERFACE(priv->port); int index = INDEX(priv->port); #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) int vlan_bytes = 4; #else int vlan_bytes = 0; #endif /* * Limit the MTU to make sure the ethernet packets are between * 64 bytes and 65535 bytes. */ if ((new_mtu + 14 + 4 + vlan_bytes < 64) || (new_mtu + 14 + 4 + vlan_bytes > 65392)) { pr_err("MTU must be between %d and %d.\n", 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes); return -EINVAL; } dev->mtu = new_mtu; if ((interface < 2) && (cvmx_helper_interface_get_mode(interface) != CVMX_HELPER_INTERFACE_MODE_SPI)) { /* Add ethernet header and FCS, and VLAN if configured. */ int max_packet = new_mtu + 14 + 4 + vlan_bytes; if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) { /* Signal errors on packets larger than the MTU */ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface), max_packet); } else { /* * Set the hardware to truncate packets larger * than the MTU and smaller the 64 bytes. */ union cvmx_pip_frm_len_chkx frm_len_chk; frm_len_chk.u64 = 0; frm_len_chk.s.minlen = 64; frm_len_chk.s.maxlen = max_packet; cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface), frm_len_chk.u64); } /* * Set the hardware to truncate packets larger than * the MTU. The jabber register must be set to a * multiple of 8 bytes, so round up. 
*/ cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface), (max_packet + 7) & ~7u); } return 0; } /** * cvm_oct_common_set_multicast_list - set the multicast list * @dev: Device to work on */ static void cvm_oct_common_set_multicast_list(struct net_device *dev) { union cvmx_gmxx_prtx_cfg gmx_cfg; struct octeon_ethernet *priv = netdev_priv(dev); int interface = INTERFACE(priv->port); int index = INDEX(priv->port); if ((interface < 2) && (cvmx_helper_interface_get_mode(interface) != CVMX_HELPER_INTERFACE_MODE_SPI)) { union cvmx_gmxx_rxx_adr_ctl control; control.u64 = 0; control.s.bcst = 1; /* Allow broadcast MAC addresses */ if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) || (dev->flags & IFF_PROMISC)) /* Force accept multicast packets */ control.s.mcst = 2; else /* Force reject multicat packets */ control.s.mcst = 1; if (dev->flags & IFF_PROMISC) /* * Reject matches if promisc. Since CAM is * shut off, should accept everything. */ control.s.cam_mode = 0; else /* Filter packets based on the CAM */ control.s.cam_mode = 1; gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64 & ~1ull); cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface), control.u64); if (dev->flags & IFF_PROMISC) cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN (index, interface), 0); else cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN (index, interface), 1); cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); } } /** * cvm_oct_common_set_mac_address - set the hardware MAC address for a device * @dev: The device in question. * @addr: Address structure to change it too. 
* Returns Zero on success */ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr) { struct octeon_ethernet *priv = netdev_priv(dev); union cvmx_gmxx_prtx_cfg gmx_cfg; int interface = INTERFACE(priv->port); int index = INDEX(priv->port); memcpy(dev->dev_addr, addr + 2, 6); if ((interface < 2) && (cvmx_helper_interface_get_mode(interface) != CVMX_HELPER_INTERFACE_MODE_SPI)) { int i; uint8_t *ptr = addr; uint64_t mac = 0; for (i = 0; i < 6; i++) mac = (mac << 8) | (uint64_t) (ptr[i + 2]); gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64 & ~1ull); cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac); cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), ptr[2]); cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), ptr[3]); cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), ptr[4]); cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), ptr[5]); cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), ptr[6]); cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), ptr[7]); cvm_oct_common_set_multicast_list(dev); cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); } return 0; } /** * cvm_oct_common_init - per network device initialization * @dev: Device to initialize * * Returns Zero on success */ int cvm_oct_common_init(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); struct sockaddr sa; u64 mac = ((u64)(octeon_bootinfo->mac_addr_base[0] & 0xff) << 40) | ((u64)(octeon_bootinfo->mac_addr_base[1] & 0xff) << 32) | ((u64)(octeon_bootinfo->mac_addr_base[2] & 0xff) << 24) | ((u64)(octeon_bootinfo->mac_addr_base[3] & 0xff) << 16) | ((u64)(octeon_bootinfo->mac_addr_base[4] & 0xff) << 8) | (u64)(octeon_bootinfo->mac_addr_base[5] & 0xff); mac += cvm_oct_mac_addr_offset; sa.sa_data[0] = (mac >> 40) & 0xff; sa.sa_data[1] = (mac >> 32) & 0xff; sa.sa_data[2] = (mac >> 24) & 0xff; sa.sa_data[3] = (mac >> 16) & 0xff; 
sa.sa_data[4] = (mac >> 8) & 0xff; sa.sa_data[5] = mac & 0xff; if (cvm_oct_mac_addr_offset >= octeon_bootinfo->mac_addr_count) printk(KERN_DEBUG "%s: Using MAC outside of the assigned range:" " %pM\n", dev->name, sa.sa_data); cvm_oct_mac_addr_offset++; /* * Force the interface to use the POW send if always_use_pow * was specified or it is in the pow send list. */ if ((pow_send_group != -1) && (always_use_pow || strstr(pow_send_list, dev->name))) priv->queue = -1; if (priv->queue != -1) { dev->features |= NETIF_F_SG; if (USE_HW_TCPUDP_CHECKSUM) dev->features |= NETIF_F_IP_CSUM; } /* We do our own locking, Linux doesn't need to */ dev->features |= NETIF_F_LLTX; SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops); cvm_oct_phy_setup_device(dev); dev->netdev_ops->ndo_set_mac_address(dev, &sa); dev->netdev_ops->ndo_change_mtu(dev, dev->mtu); /* * Zero out stats for port so we won't mistakenly show * counters from the bootloader. */ memset(dev->netdev_ops->ndo_get_stats(dev), 0, sizeof(struct net_device_stats)); return 0; } void cvm_oct_common_uninit(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); if (priv->phydev) phy_disconnect(priv->phydev); } static const struct net_device_ops cvm_oct_npi_netdev_ops = { .ndo_init = cvm_oct_common_init, .ndo_uninit = cvm_oct_common_uninit, .ndo_start_xmit = cvm_oct_xmit, .ndo_set_multicast_list = cvm_oct_common_set_multicast_list, .ndo_set_mac_address = cvm_oct_common_set_mac_address, .ndo_do_ioctl = cvm_oct_ioctl, .ndo_change_mtu = cvm_oct_common_change_mtu, .ndo_get_stats = cvm_oct_common_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cvm_oct_poll_controller, #endif }; static const struct net_device_ops cvm_oct_xaui_netdev_ops = { .ndo_init = cvm_oct_xaui_init, .ndo_uninit = cvm_oct_xaui_uninit, .ndo_open = cvm_oct_xaui_open, .ndo_stop = cvm_oct_xaui_stop, .ndo_start_xmit = cvm_oct_xmit, .ndo_set_multicast_list = cvm_oct_common_set_multicast_list, .ndo_set_mac_address = 
cvm_oct_common_set_mac_address, .ndo_do_ioctl = cvm_oct_ioctl, .ndo_change_mtu = cvm_oct_common_change_mtu, .ndo_get_stats = cvm_oct_common_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cvm_oct_poll_controller, #endif }; static const struct net_device_ops cvm_oct_sgmii_netdev_ops = { .ndo_init = cvm_oct_sgmii_init, .ndo_uninit = cvm_oct_sgmii_uninit, .ndo_open = cvm_oct_sgmii_open, .ndo_stop = cvm_oct_sgmii_stop, .ndo_start_xmit = cvm_oct_xmit, .ndo_set_multicast_list = cvm_oct_common_set_multicast_list, .ndo_set_mac_address = cvm_oct_common_set_mac_address, .ndo_do_ioctl = cvm_oct_ioctl, .ndo_change_mtu = cvm_oct_common_change_mtu, .ndo_get_stats = cvm_oct_common_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cvm_oct_poll_controller, #endif }; static const struct net_device_ops cvm_oct_spi_netdev_ops = { .ndo_init = cvm_oct_spi_init, .ndo_uninit = cvm_oct_spi_uninit, .ndo_start_xmit = cvm_oct_xmit, .ndo_set_multicast_list = cvm_oct_common_set_multicast_list, .ndo_set_mac_address = cvm_oct_common_set_mac_address, .ndo_do_ioctl = cvm_oct_ioctl, .ndo_change_mtu = cvm_oct_common_change_mtu, .ndo_get_stats = cvm_oct_common_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cvm_oct_poll_controller, #endif }; static const struct net_device_ops cvm_oct_rgmii_netdev_ops = { .ndo_init = cvm_oct_rgmii_init, .ndo_uninit = cvm_oct_rgmii_uninit, .ndo_open = cvm_oct_rgmii_open, .ndo_stop = cvm_oct_rgmii_stop, .ndo_start_xmit = cvm_oct_xmit, .ndo_set_multicast_list = cvm_oct_common_set_multicast_list, .ndo_set_mac_address = cvm_oct_common_set_mac_address, .ndo_do_ioctl = cvm_oct_ioctl, .ndo_change_mtu = cvm_oct_common_change_mtu, .ndo_get_stats = cvm_oct_common_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cvm_oct_poll_controller, #endif }; static const struct net_device_ops cvm_oct_pow_netdev_ops = { .ndo_init = cvm_oct_common_init, .ndo_start_xmit = cvm_oct_xmit_pow, .ndo_set_multicast_list = 
cvm_oct_common_set_multicast_list, .ndo_set_mac_address = cvm_oct_common_set_mac_address, .ndo_do_ioctl = cvm_oct_ioctl, .ndo_change_mtu = cvm_oct_common_change_mtu, .ndo_get_stats = cvm_oct_common_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cvm_oct_poll_controller, #endif }; extern void octeon_mdiobus_force_mod_depencency(void); static int __init cvm_oct_init_module(void) { int num_interfaces; int interface; int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE; int qos; octeon_mdiobus_force_mod_depencency(); pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION); if (OCTEON_IS_MODEL(OCTEON_CN52XX)) cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */ else if (OCTEON_IS_MODEL(OCTEON_CN56XX)) cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */ else cvm_oct_mac_addr_offset = 0; cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet"); if (cvm_oct_poll_queue == NULL) { pr_err("octeon-ethernet: Cannot create workqueue"); return -ENOMEM; } cvm_oct_configure_common_hw(); cvmx_helper_initialize_packet_io_global(); /* Change the input group for all ports before input is enabled */ num_interfaces = cvmx_helper_get_number_of_interfaces(); for (interface = 0; interface < num_interfaces; interface++) { int num_ports = cvmx_helper_ports_on_interface(interface); int port; for (port = cvmx_helper_get_ipd_port(interface, 0); port < cvmx_helper_get_ipd_port(interface, num_ports); port++) { union cvmx_pip_prt_tagx pip_prt_tagx; pip_prt_tagx.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(port)); pip_prt_tagx.s.grp = pow_receive_group; cvmx_write_csr(CVMX_PIP_PRT_TAGX(port), pip_prt_tagx.u64); } } cvmx_helper_ipd_and_packet_input_enable(); memset(cvm_oct_device, 0, sizeof(cvm_oct_device)); /* * Initialize the FAU used for counting packet buffers that * need to be freed. 
*/ cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); /* Initialize the FAU used for counting tx SKBs that need to be freed */ cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0); if ((pow_send_group != -1)) { struct net_device *dev; pr_info("\tConfiguring device for POW only access\n"); dev = alloc_etherdev(sizeof(struct octeon_ethernet)); if (dev) { /* Initialize the device private structure. */ struct octeon_ethernet *priv = netdev_priv(dev); dev->netdev_ops = &cvm_oct_pow_netdev_ops; priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED; priv->port = CVMX_PIP_NUM_INPUT_PORTS; priv->queue = -1; strcpy(dev->name, "pow%d"); for (qos = 0; qos < 16; qos++) skb_queue_head_init(&priv->tx_free_list[qos]); if (register_netdev(dev) < 0) { pr_err("Failed to register ethernet device for POW\n"); free_netdev(dev); } else { cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev; pr_info("%s: POW send group %d, receive group %d\n", dev->name, pow_send_group, pow_receive_group); } } else { pr_err("Failed to allocate ethernet device for POW\n"); } } num_interfaces = cvmx_helper_get_number_of_interfaces(); for (interface = 0; interface < num_interfaces; interface++) { cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(interface); int num_ports = cvmx_helper_ports_on_interface(interface); int port; for (port = cvmx_helper_get_ipd_port(interface, 0); port < cvmx_helper_get_ipd_port(interface, num_ports); port++) { struct octeon_ethernet *priv; struct net_device *dev = alloc_etherdev(sizeof(struct octeon_ethernet)); if (!dev) { pr_err("Failed to allocate ethernet device for port %d\n", port); continue; } /* Initialize the device private structure. 
*/ priv = netdev_priv(dev); INIT_DELAYED_WORK(&priv->port_periodic_work, cvm_oct_periodic_worker); priv->imode = imode; priv->port = port; priv->queue = cvmx_pko_get_base_queue(priv->port); priv->fau = fau - cvmx_pko_get_num_queues(port) * 4; for (qos = 0; qos < 16; qos++) skb_queue_head_init(&priv->tx_free_list[qos]); for (qos = 0; qos < cvmx_pko_get_num_queues(port); qos++) cvmx_fau_atomic_write32(priv->fau + qos * 4, 0); switch (priv->imode) { /* These types don't support ports to IPD/PKO */ case CVMX_HELPER_INTERFACE_MODE_DISABLED: case CVMX_HELPER_INTERFACE_MODE_PCIE: case CVMX_HELPER_INTERFACE_MODE_PICMG: break; case CVMX_HELPER_INTERFACE_MODE_NPI: dev->netdev_ops = &cvm_oct_npi_netdev_ops; strcpy(dev->name, "npi%d"); break; case CVMX_HELPER_INTERFACE_MODE_XAUI: dev->netdev_ops = &cvm_oct_xaui_netdev_ops; strcpy(dev->name, "xaui%d"); break; case CVMX_HELPER_INTERFACE_MODE_LOOP: dev->netdev_ops = &cvm_oct_npi_netdev_ops; strcpy(dev->name, "loop%d"); break; case CVMX_HELPER_INTERFACE_MODE_SGMII: dev->netdev_ops = &cvm_oct_sgmii_netdev_ops; strcpy(dev->name, "eth%d"); break; case CVMX_HELPER_INTERFACE_MODE_SPI: dev->netdev_ops = &cvm_oct_spi_netdev_ops; strcpy(dev->name, "spi%d"); break; case CVMX_HELPER_INTERFACE_MODE_RGMII: case CVMX_HELPER_INTERFACE_MODE_GMII: dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; strcpy(dev->name, "eth%d"); break; } if (!dev->netdev_ops) { free_netdev(dev); } else if (register_netdev(dev) < 0) { pr_err("Failed to register ethernet device " "for interface %d, port %d\n", interface, priv->port); free_netdev(dev); } else { cvm_oct_device[priv->port] = dev; fau -= cvmx_pko_get_num_queues(priv->port) * sizeof(uint32_t); queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ); } } } cvm_oct_tx_initialize(); cvm_oct_rx_initialize(); /* * 150 uS: about 10 1500-byte packtes at 1GE. 
*/ cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000); queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ); return 0; } static void __exit cvm_oct_cleanup_module(void) { int port; /* Disable POW interrupt */ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0); cvmx_ipd_disable(); /* Free the interrupt handler */ free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device); atomic_inc_return(&cvm_oct_poll_queue_stopping); cancel_delayed_work_sync(&cvm_oct_rx_refill_work); cvm_oct_rx_shutdown(); cvm_oct_tx_shutdown(); cvmx_pko_disable(); /* Free the ethernet devices */ for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { if (cvm_oct_device[port]) { struct net_device *dev = cvm_oct_device[port]; struct octeon_ethernet *priv = netdev_priv(dev); cancel_delayed_work_sync(&priv->port_periodic_work); cvm_oct_tx_shutdown_dev(dev); unregister_netdev(dev); free_netdev(dev); cvm_oct_device[port] = NULL; } } destroy_workqueue(cvm_oct_poll_queue); cvmx_pko_shutdown(); cvmx_ipd_free_ptr(); /* Free the HW pools */ cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, num_packet_buffers); cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE, num_packet_buffers); if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL) cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL, CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128); } MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>"); MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver."); module_init(cvm_oct_init_module); module_exit(cvm_oct_cleanup_module);
gpl-2.0
Daniil2017/HTC-desire-A8181-kernel
arch/powerpc/platforms/82xx/pq2.c
4097
2000
/*
 * Common PowerQUICC II code.
 *
 * Author: Scott Wood <scottwood@freescale.com>
 * Copyright (c) 2007 Freescale Semiconductor
 *
 * Based on code by Vitaly Bordug <vbordug@ru.mvista.com>
 * pq2_restart fix by Wade Farnsworth <wfarnsworth@mvista.com>
 * Copyright (c) 2006 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <asm/cpm2.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/system.h>

#include <platforms/82xx/pq2.h>

/*
 * Bit in the CPM2 reset mode register (RMR) set before forcing the
 * checkstop below; presumably "checkstop reset enable" so the deliberate
 * checkstop triggers a board reset -- TODO confirm against the CPM2 manual.
 */
#define RMR_CSRE 0x00000001

/*
 * Machine restart hook: arm the RMR, then deliberately checkstop the CPU
 * so the reset logic fires.  Never returns; panics if the reset fails.
 * @cmd is the standard restart-command argument and is unused here.
 */
void pq2_restart(char *cmd)
{
	local_irq_disable();
	setbits32(&cpm2_immr->im_clkrst.car_rmr, RMR_CSRE);

	/* Clear the ME,EE,IR & DR bits in MSR to cause checkstop */
	mtmsr(mfmsr() & ~(MSR_ME | MSR_EE | MSR_IR | MSR_DR));
	/* Read access to a clock/reset register; the access itself provokes
	 * the checkstop with translation disabled. */
	in_8(&cpm2_immr->im_clkrst.res[0]);
	panic("Restart failed\n");
}

#ifdef CONFIG_PCI
/*
 * Hide the host bridge itself (bus 0, slot 0) from PCI enumeration.
 * Returns PCIBIOS_DEVICE_NOT_FOUND for that slot, success for all others.
 */
static int pq2_pci_exclude_device(struct pci_controller *hose,
				  u_char bus, u8 devfn)
{
	if (bus == 0 && PCI_SLOT(devfn) == 0)
		return PCIBIOS_DEVICE_NOT_FOUND;
	else
		return PCIBIOS_SUCCESSFUL;
}

/*
 * Register one PCI host bridge described by device node @np.
 * The node's first "reg" resource must cover at least 0x10b bytes because
 * the indirect config-space address/data registers live at offsets
 * 0x100/0x104 within it (used in setup_indirect_pci below).
 */
static void __init pq2_pci_add_bridge(struct device_node *np)
{
	struct pci_controller *hose;
	struct resource r;

	if (of_address_to_resource(np, 0, &r) || r.end - r.start < 0x10b)
		goto err;

	ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);

	/* Silent return on allocation failure (no error path needed:
	 * nothing has been claimed yet). */
	hose = pcibios_alloc_controller(np);
	if (!hose)
		return;

	hose->dn = np;

	setup_indirect_pci(hose, r.start + 0x100, r.start + 0x104, 0);
	pci_process_bridge_OF_ranges(hose, np, 1);

	return;

err:
	printk(KERN_ERR "No valid PCI reg property in device tree\n");
}

/*
 * Board-level PCI init: install the exclude hook and register every
 * "fsl,pq2-pci" bridge found in the device tree.
 */
void __init pq2_init_pci(void)
{
	struct device_node *np = NULL;

	ppc_md.pci_exclude_device = pq2_pci_exclude_device;

	while ((np = of_find_compatible_node(np, NULL, "fsl,pq2-pci")))
		pq2_pci_add_bridge(np);
}
#endif
gpl-2.0
schqiushui/android_kk_kernel_htc_dlxj
drivers/hid/hid-roccat-isku.c
4865
12052
/*
 * Roccat Isku driver for Linux
 *
 * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

/*
 * Roccat Isku is a gamer keyboard with macro keys that can be configured in
 * 5 profiles.
 */

#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hid-roccat.h>
#include "hid-ids.h"
#include "hid-roccat-common.h"
#include "hid-roccat-isku.h"

/* Device class for the roccat char devices created in isku_init(). */
static struct class *isku_class;

/* Record @new_profile as the cached active profile (0-based). */
static void isku_profile_activated(struct isku_device *isku, uint new_profile)
{
	isku->actual_profile = new_profile;
}

/* Thin wrapper around the shared roccat receive helper. */
static int isku_receive(struct usb_device *usb_dev, uint command,
		void *buf, uint size)
{
	return roccat_common_receive(usb_dev, command, buf, size);
}

/*
 * Poll the device's control report every 50 ms until it reports OK.
 * Returns 0 on OK, a negative errno on transfer failure, -EINVAL on an
 * invalid/overload/unknown status.  Loops indefinitely while the device
 * answers STATUS_WAIT.
 */
static int isku_receive_control_status(struct usb_device *usb_dev)
{
	int retval;
	struct isku_control control;

	do {
		msleep(50);
		retval = isku_receive(usb_dev, ISKU_COMMAND_CONTROL,
				&control, sizeof(struct isku_control));

		if (retval)
			return retval;

		switch (control.value) {
		case ISKU_CONTROL_VALUE_STATUS_OK:
			return 0;
		case ISKU_CONTROL_VALUE_STATUS_WAIT:
			continue;
		case ISKU_CONTROL_VALUE_STATUS_INVALID:
		/* seems to be critical - replug necessary */
		case ISKU_CONTROL_VALUE_STATUS_OVERLOAD:
			return -EINVAL;
		default:
			/*
			 * NOTE(review): usb_dev is a struct usb_device *,
			 * while hid_err() is normally given a struct
			 * hid_device * -- confirm this logs against the
			 * intended device.
			 */
			hid_err(usb_dev, "isku_receive_control_status: "
					"unknown response value 0x%x\n",
					control.value);
			return -EINVAL;
		}

	} while (1);
}

/*
 * Send @buf to the device and then wait for the device to acknowledge the
 * command via the control report.  Returns 0 on success, negative errno
 * otherwise.
 */
static int isku_send(struct usb_device *usb_dev, uint command,
		void const *buf, uint size)
{
	int retval;

	retval = roccat_common_send(usb_dev, command, buf, size);
	if (retval)
		return retval;

	return isku_receive_control_status(usb_dev);
}

/*
 * Read the active profile from the device.
 * Returns the profile number (>= 0) or a negative errno.
 */
static int isku_get_actual_profile(struct usb_device *usb_dev)
{
	struct isku_actual_profile buf;
	int retval;

	retval = isku_receive(usb_dev, ISKU_COMMAND_ACTUAL_PROFILE, &buf,
			sizeof(struct isku_actual_profile));
	return retval ? retval : buf.actual_profile;
}

/* Program @new_profile as the device's active profile. */
static int isku_set_actual_profile(struct usb_device *usb_dev, int new_profile)
{
	struct isku_actual_profile buf;

	buf.command = ISKU_COMMAND_ACTUAL_PROFILE;
	buf.size = sizeof(struct isku_actual_profile);
	buf.actual_profile = new_profile;
	return isku_send(usb_dev, ISKU_COMMAND_ACTUAL_PROFILE, &buf,
			sizeof(struct isku_actual_profile));
}

/*
 * sysfs "actual_profile" show: reports the cached profile number.
 * dev is the sysfs device; the hid device is two parents up.
 */
static ssize_t isku_sysfs_show_actual_profile(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct isku_device *isku =
			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
	return snprintf(buf, PAGE_SIZE, "%d\n", isku->actual_profile);
}

/*
 * sysfs "actual_profile" store: parse a profile number (0..4, matching the
 * 5 hardware profiles), program the device under isku_lock, update the
 * cache, and broadcast a profile-change event to the roccat char device.
 */
static ssize_t isku_sysfs_set_actual_profile(struct device *dev,
		struct device_attribute *attr, char const *buf, size_t size)
{
	struct isku_device *isku;
	struct usb_device *usb_dev;
	unsigned long profile;
	int retval;
	struct isku_roccat_report roccat_report;

	dev = dev->parent->parent;
	isku = hid_get_drvdata(dev_get_drvdata(dev));
	usb_dev = interface_to_usbdev(to_usb_interface(dev));

	/* strict_strtoul is the pre-kstrtoul parsing API of this era. */
	retval = strict_strtoul(buf, 10, &profile);
	if (retval)
		return retval;

	if (profile > 4)
		return -EINVAL;

	mutex_lock(&isku->isku_lock);

	retval = isku_set_actual_profile(usb_dev, profile);
	if (retval) {
		mutex_unlock(&isku->isku_lock);
		return retval;
	}

	isku_profile_activated(isku, profile);

	/* Userspace event uses 1-based profile numbers. */
	roccat_report.event = ISKU_REPORT_BUTTON_EVENT_PROFILE;
	roccat_report.data1 = profile + 1;
	roccat_report.data2 = 0;
	roccat_report.profile = profile + 1;
	roccat_report_event(isku->chrdev_minor,
			(uint8_t const *)&roccat_report);

	mutex_unlock(&isku->isku_lock);

	return size;
}

/* Plain (non-binary) sysfs attributes installed via the device class. */
static struct device_attribute isku_attributes[] = {
	__ATTR(actual_profile, 0660,
			isku_sysfs_show_actual_profile,
			isku_sysfs_set_actual_profile),
	__ATTR_NULL
};

/*
 * Generic reader behind all binary sysfs attributes: fetch exactly
 * @real_size bytes for @command under the device lock.  Only whole-blob
 * reads from offset 0 are supported.
 */
static ssize_t isku_sysfs_read(struct file *fp, struct kobject *kobj,
		char *buf, loff_t off, size_t count, size_t real_size,
		uint command)
{
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct isku_device *isku = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev =
			interface_to_usbdev(to_usb_interface(dev));
	int retval;

	if (off >= real_size)
		return 0;

	if (off != 0 || count != real_size)
		return -EINVAL;

	mutex_lock(&isku->isku_lock);
	retval = isku_receive(usb_dev, command, buf, real_size);
	mutex_unlock(&isku->isku_lock);

	return retval ? retval : real_size;
}

/*
 * Generic writer behind all binary sysfs attributes: send exactly
 * @real_size bytes for @command under the device lock.  Only whole-blob
 * writes from offset 0 are supported.
 */
static ssize_t isku_sysfs_write(struct file *fp, struct kobject *kobj,
		void const *buf, loff_t off, size_t count, size_t real_size,
		uint command)
{
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct isku_device *isku = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev =
			interface_to_usbdev(to_usb_interface(dev));
	int retval;

	if (off != 0 || count != real_size)
		return -EINVAL;

	mutex_lock(&isku->isku_lock);
	retval = isku_send(usb_dev, command, (void *)buf, real_size);
	mutex_unlock(&isku->isku_lock);

	return retval ? retval : real_size;
}

/*
 * Macro factory: each ISKU_SYSFS_W/R(thingy, THINGY) pair stamps out a
 * binary-attribute accessor that forwards to the generic read/write helpers
 * with the size of struct isku_<thingy> and command ISKU_COMMAND_<THINGY>.
 */
#define ISKU_SYSFS_W(thingy, THINGY) \
static ssize_t isku_sysfs_write_ ## thingy(struct file *fp, struct kobject *kobj, \
		struct bin_attribute *attr, char *buf, \
		loff_t off, size_t count) \
{ \
	return isku_sysfs_write(fp, kobj, buf, off, count, \
			sizeof(struct isku_ ## thingy), ISKU_COMMAND_ ## THINGY); \
}

#define ISKU_SYSFS_R(thingy, THINGY) \
static ssize_t isku_sysfs_read_ ## thingy(struct file *fp, struct kobject *kobj, \
		struct bin_attribute *attr, char *buf, \
		loff_t off, size_t count) \
{ \
	return isku_sysfs_read(fp, kobj, buf, off, count, \
			sizeof(struct isku_ ## thingy), ISKU_COMMAND_ ## THINGY); \
}

#define ISKU_SYSFS_RW(thingy, THINGY) \
ISKU_SYSFS_R(thingy, THINGY) \
ISKU_SYSFS_W(thingy, THINGY)

/* Binary attribute initializers: RW = 0660, R = 0440, W = 0220. */
#define ISKU_BIN_ATTR_RW(thingy) \
{ \
	.attr = { .name = #thingy, .mode = 0660 }, \
	.size = sizeof(struct isku_ ## thingy), \
	.read = isku_sysfs_read_ ## thingy, \
	.write = isku_sysfs_write_ ## thingy \
}

#define ISKU_BIN_ATTR_R(thingy) \
{ \
	.attr = { .name = #thingy, .mode = 0440 }, \
	.size = sizeof(struct isku_ ## thingy), \
	.read = isku_sysfs_read_ ## thingy, \
}

#define ISKU_BIN_ATTR_W(thingy) \
{ \
	.attr = { .name = #thingy, .mode = 0220 }, \
	.size = sizeof(struct isku_ ## thingy), \
	.write = isku_sysfs_write_ ## thingy \
}

ISKU_SYSFS_RW(macro, MACRO)
ISKU_SYSFS_RW(keys_function, KEYS_FUNCTION)
ISKU_SYSFS_RW(keys_easyzone, KEYS_EASYZONE)
ISKU_SYSFS_RW(keys_media, KEYS_MEDIA)
ISKU_SYSFS_RW(keys_thumbster, KEYS_THUMBSTER)
ISKU_SYSFS_RW(keys_macro, KEYS_MACRO)
ISKU_SYSFS_RW(keys_capslock, KEYS_CAPSLOCK)
ISKU_SYSFS_RW(light, LIGHT)
ISKU_SYSFS_RW(key_mask, KEY_MASK)
ISKU_SYSFS_RW(last_set, LAST_SET)
ISKU_SYSFS_W(talk, TALK)
ISKU_SYSFS_R(info, INFO)
ISKU_SYSFS_W(control, CONTROL)

/* Binary attributes installed on the device class (dev_bin_attrs). */
static struct bin_attribute isku_bin_attributes[] = {
	ISKU_BIN_ATTR_RW(macro),
	ISKU_BIN_ATTR_RW(keys_function),
	ISKU_BIN_ATTR_RW(keys_easyzone),
	ISKU_BIN_ATTR_RW(keys_media),
	ISKU_BIN_ATTR_RW(keys_thumbster),
	ISKU_BIN_ATTR_RW(keys_macro),
	ISKU_BIN_ATTR_RW(keys_capslock),
	ISKU_BIN_ATTR_RW(light),
	ISKU_BIN_ATTR_RW(key_mask),
	ISKU_BIN_ATTR_RW(last_set),
	ISKU_BIN_ATTR_W(talk),
	ISKU_BIN_ATTR_R(info),
	ISKU_BIN_ATTR_W(control),
	__ATTR_NULL
};

/*
 * Initialize the per-device struct: set up the lock and seed the cached
 * profile from the hardware.  Returns 0 or a negative errno.
 */
static int isku_init_isku_device_struct(struct usb_device *usb_dev,
		struct isku_device *isku)
{
	int retval;

	mutex_init(&isku->isku_lock);

	retval = isku_get_actual_profile(usb_dev);
	if (retval < 0)
		return retval;
	isku_profile_activated(isku, retval);

	return 0;
}

/*
 * Allocate and wire up the driver-private state for the keyboard's
 * dedicated interface; other interfaces get NULL drvdata and are ignored.
 * A roccat char-dev connect failure is non-fatal (driver still works,
 * only the event channel is missing).
 */
static int isku_init_specials(struct hid_device *hdev)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct isku_device *isku;
	int retval;

	if (intf->cur_altsetting->desc.bInterfaceProtocol
			!= ISKU_USB_INTERFACE_PROTOCOL) {
		hid_set_drvdata(hdev, NULL);
		return 0;
	}

	isku = kzalloc(sizeof(*isku), GFP_KERNEL);
	if (!isku) {
		hid_err(hdev, "can't alloc device descriptor\n");
		return -ENOMEM;
	}
	hid_set_drvdata(hdev, isku);

	retval = isku_init_isku_device_struct(usb_dev, isku);
	if (retval) {
		hid_err(hdev, "couldn't init struct isku_device\n");
		goto exit_free;
	}

	retval = roccat_connect(isku_class, hdev,
			sizeof(struct isku_roccat_report));
	if (retval < 0) {
		hid_err(hdev, "couldn't init char dev\n");
	} else {
		isku->chrdev_minor = retval;
		isku->roccat_claimed = 1;
	}

	return 0;
exit_free:
	kfree(isku);
	return retval;
}

/* Tear down what isku_init_specials() set up (no-op on other interfaces). */
static void isku_remove_specials(struct hid_device *hdev)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct isku_device *isku;

	if (intf->cur_altsetting->desc.bInterfaceProtocol
			!= ISKU_USB_INTERFACE_PROTOCOL)
		return;

	isku = hid_get_drvdata(hdev);
	if (isku->roccat_claimed)
		roccat_disconnect(isku->chrdev_minor);
	kfree(isku);
}

/* HID probe: parse descriptors, start HW, then add the isku extras. */
static int isku_probe(struct hid_device *hdev,
		const struct hid_device_id *id)
{
	int retval;

	retval = hid_parse(hdev);
	if (retval) {
		hid_err(hdev, "parse failed\n");
		goto exit;
	}

	retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (retval) {
		hid_err(hdev, "hw start failed\n");
		goto exit;
	}

	retval = isku_init_specials(hdev);
	if (retval) {
		hid_err(hdev, "couldn't install keyboard\n");
		goto exit_stop;
	}

	return 0;

exit_stop:
	hid_hw_stop(hdev);
exit:
	return retval;
}

/* HID remove: reverse of isku_probe. */
static void isku_remove(struct hid_device *hdev)
{
	isku_remove_specials(hdev);
	hid_hw_stop(hdev);
}

/*
 * Mirror device-initiated profile switches into the cached state so sysfs
 * reads stay accurate.  data1 carries a 1-based profile number.
 */
static void isku_keep_values_up_to_date(struct isku_device *isku,
		u8 const *data)
{
	struct isku_report_button const *button_report;

	switch (data[0]) {
	case ISKU_REPORT_NUMBER_BUTTON:
		button_report = (struct isku_report_button const *)data;
		switch (button_report->event) {
		case ISKU_REPORT_BUTTON_EVENT_PROFILE:
			isku_profile_activated(isku, button_report->data1 - 1);
			break;
		}
		break;
	}
}

/* Forward button reports to the roccat char device for userspace tools. */
static void isku_report_to_chrdev(struct isku_device const *isku,
		u8 const *data)
{
	struct isku_roccat_report roccat_report;
	struct isku_report_button const *button_report;

	if (data[0] != ISKU_REPORT_NUMBER_BUTTON)
		return;

	button_report = (struct isku_report_button const *)data;

	roccat_report.event = button_report->event;
	roccat_report.data1 = button_report->data1;
	roccat_report.data2 = button_report->data2;
	roccat_report.profile = isku->actual_profile + 1;
	roccat_report_event(isku->chrdev_minor,
			(uint8_t const *)&roccat_report);
}

/*
 * Raw HID event hook: on the special interface, keep the profile cache in
 * sync and (if the char dev is up) forward the report.  Always returns 0
 * so normal HID processing continues.
 */
static int isku_raw_event(struct hid_device *hdev, struct hid_report *report,
		u8 *data, int size)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct isku_device *isku = hid_get_drvdata(hdev);

	if (intf->cur_altsetting->desc.bInterfaceProtocol
			!= ISKU_USB_INTERFACE_PROTOCOL)
		return 0;

	if (isku == NULL)
		return 0;

	isku_keep_values_up_to_date(isku, data);

	if (isku->roccat_claimed)
		isku_report_to_chrdev(isku, data);

	return 0;
}

static const struct hid_device_id isku_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
	{ }
};

MODULE_DEVICE_TABLE(hid, isku_devices);

static struct hid_driver isku_driver = {
	.name = "isku",
	.id_table = isku_devices,
	.probe = isku_probe,
	.remove = isku_remove,
	.raw_event = isku_raw_event
};

/*
 * Module init: create the "isku" class (which auto-installs the sysfs
 * attributes on every device added to it), then register the HID driver.
 */
static int __init isku_init(void)
{
	int retval;

	isku_class = class_create(THIS_MODULE, "isku");
	if (IS_ERR(isku_class))
		return PTR_ERR(isku_class);
	isku_class->dev_attrs = isku_attributes;
	isku_class->dev_bin_attrs = isku_bin_attributes;

	retval = hid_register_driver(&isku_driver);
	if (retval)
		class_destroy(isku_class);
	return retval;
}

static void __exit isku_exit(void)
{
	hid_unregister_driver(&isku_driver);
	class_destroy(isku_class);
}

module_init(isku_init);
module_exit(isku_exit);

MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat Isku driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
Smartandroidtech/platform_kernel_lge_hammerhead
arch/avr32/boards/favr-32/setup.c
6913
8046
/*
 * Favr-32 board-specific setup code.
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/bootmem.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/linkage.h>
#include <linux/gpio.h>
#include <linux/leds.h>
#include <linux/atmel-mci.h>
#include <linux/atmel-pwm-bl.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>

#include <sound/atmel-abdac.h>

#include <video/atmel_lcdc.h>

#include <asm/setup.h>

#include <mach/at32ap700x.h>
#include <mach/init.h>
#include <mach/board.h>
#include <mach/portmux.h>

/* Oscillator frequencies. These are board-specific */
unsigned long at32_board_osc_rates[3] = {
	[0] = 32768,	/* 32.768 kHz on RTC osc */
	[1] = 20000000,	/* 20 MHz on osc0 */
	[2] = 12000000,	/* 12 MHz on osc1 */
};

/* Initialized by bootloader-specific startup code. */
struct tag *bootloader_tags __initdata;

/* ABDAC platform data; empty -- driver defaults are used. */
static struct atmel_abdac_pdata __initdata abdac0_data = {
};

struct eth_addr {
	u8 addr[6];
};
/* MAC address(es) captured from the ATAG_ETHERNET boot tag. */
static struct eth_addr __initdata hw_addr[1];

static struct macb_platform_data __initdata eth_data[1] = {
	{
		/* Mask out all PHY addresses except address 1. */
		.phy_mask = ~(1U << 1),
	},
};

/* Pen-down line (PB3) is active-low. */
static int ads7843_get_pendown_state(void)
{
	return !gpio_get_value(GPIO_PIN_PB(3));
}

static struct ads7846_platform_data ads7843_data = {
	.model = 7843,
	.get_pendown_state = ads7843_get_pendown_state,
	.pressure_max = 255,
	/*
	 * Values below are for debounce filtering, these can be experimented
	 * with further.
	 */
	.debounce_max = 20,
	.debounce_rep = 4,
	.debounce_tol = 5,
	.keep_vref_on = true,
	.settle_delay_usecs = 500,
	.penirq_recheck_delay_usecs = 100,
};

/* .irq is filled in at runtime in favr32_init() from GPIO PB3. */
static struct spi_board_info __initdata spi1_board_info[] = {
	{
		/* ADS7843 touch controller */
		.modalias = "ads7846",
		.max_speed_hz = 2000000,
		.chip_select = 0,
		.bus_num = 1,
		.platform_data = &ads7843_data,
	},
};

/* MMC slot 0: 4-bit bus, no card-detect or write-protect lines wired. */
static struct mci_platform_data __initdata mci0_data = {
	.slot[0] = {
		.bus_width = 4,
		.detect_pin = -ENODEV,
		.wp_pin = -ENODEV,
	},
};

/* Timings for the LG LB104V03 panel. */
static struct fb_videomode __initdata lb104v03_modes[] = {
	{
		.name = "640x480 @ 50",
		.refresh = 50,
		.xres = 640,
		.yres = 480,
		.pixclock = KHZ2PICOS(25100),
		.left_margin = 90,
		.right_margin = 70,
		.upper_margin = 30,
		.lower_margin = 15,
		.hsync_len = 12,
		.vsync_len = 2,
		.sync = 0,
		.vmode = FB_VMODE_NONINTERLACED,
	},
};

static struct fb_monspecs __initdata favr32_default_monspecs = {
	.manufacturer = "LG",
	.monitor = "LB104V03",
	.modedb = lb104v03_modes,
	.modedb_len = ARRAY_SIZE(lb104v03_modes),
	.hfmin = 27273,
	.hfmax = 31111,
	.vfmin = 45,
	.vfmax = 60,
	.dclkmax = 28000000,
};

/* Non-static: referenced by the at32_add_device_lcdc() call below. */
struct atmel_lcdfb_info __initdata favr32_lcdc_data = {
	.default_bpp = 16,
	.default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
	.default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT
			    | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE
			    | ATMEL_LCDC_MEMOR_BIG),
	.default_monspecs = &favr32_default_monspecs,
	.guard_time = 2,
};

/* Both board LEDs are wired active-low. */
static struct gpio_led favr32_leds[] = {
	{
		.name = "green",
		.gpio = GPIO_PIN_PE(19),
		.default_trigger = "heartbeat",
		.active_low = 1,
	},
	{
		.name = "red",
		.gpio = GPIO_PIN_PE(20),
		.active_low = 1,
	},
};

static struct gpio_led_platform_data favr32_led_data = {
	.num_leds = ARRAY_SIZE(favr32_leds),
	.leds = favr32_leds,
};

static struct platform_device favr32_led_dev = {
	.name = "leds-gpio",
	.id = 0,
	.dev = {
		.platform_data = &favr32_led_data,
	},
};

/*
 * The next two functions should go away as the boot loader is
 * supposed to initialize the macb address registers with a valid
 * ethernet address. But we need to keep it around for a while until
 * we can be reasonably sure the boot loader does this.
 *
 * The phy_id is ignored as the driver will probe for it.
 */
static int __init parse_tag_ethernet(struct tag *tag)
{
	int i;

	i = tag->u.ethernet.mac_index;
	if (i < ARRAY_SIZE(hw_addr))
		memcpy(hw_addr[i].addr, tag->u.ethernet.hw_address,
		       sizeof(hw_addr[i].addr));

	return 0;
}
__tagtable(ATAG_ETHERNET, parse_tag_ethernet);

/*
 * Write the boot-tag MAC address into the MACB specific-address
 * registers (offsets 0x98/0x9c) of the ethernet device @pdev.
 * Silently does nothing if the resource, index or address is invalid.
 */
static void __init set_hw_addr(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	const u8 *addr;
	void __iomem *regs;
	struct clk *pclk;

	if (!res)
		return;
	if (pdev->id >= ARRAY_SIZE(hw_addr))
		return;

	addr = hw_addr[pdev->id].addr;
	if (!is_valid_ether_addr(addr))
		return;

	/*
	 * Since this is board-specific code, we'll cheat and use the
	 * physical address directly as we happen to know that it's
	 * the same as the virtual address.
	 */
	regs = (void __iomem __force *)res->start;
	pclk = clk_get(&pdev->dev, "pclk");
	if (IS_ERR(pclk))
		return;

	/* Peripheral clock must be running while touching the registers. */
	clk_enable(pclk);
	__raw_writel((addr[3] << 24) | (addr[2] << 16)
		     | (addr[1] << 8) | addr[0], regs + 0x98);
	__raw_writel((addr[5] << 8) | addr[4], regs + 0x9c);
	clk_disable(pclk);
	clk_put(pclk);
}

/* Claim the LED GPIOs as outputs and register the leds-gpio device. */
void __init favr32_setup_leds(void)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(favr32_leds); i++)
		at32_select_gpio(favr32_leds[i].gpio, AT32_GPIOF_OUTPUT);

	platform_device_register(&favr32_led_dev);
}

/* PWM backlight on channel 2; enable GPIO is PA28, active-high. */
static struct atmel_pwm_bl_platform_data atmel_pwm_bl_pdata = {
	.pwm_channel = 2,
	.pwm_frequency = 200000,
	.pwm_compare_max = 345,
	.pwm_duty_max = 345,
	.pwm_duty_min = 90,
	.pwm_active_low = 1,
	.gpio_on = GPIO_PIN_PA(28),
	.on_active_low = 0,
};

static struct platform_device atmel_pwm_bl_dev = {
	.name = "atmel-pwm-bl",
	.id = 0,
	.dev = {
		.platform_data = &atmel_pwm_bl_pdata,
	},
};

/* Register the backlight device and claim its enable GPIO. */
static void __init favr32_setup_atmel_pwm_bl(void)
{
	platform_device_register(&atmel_pwm_bl_dev);
	at32_select_gpio(atmel_pwm_bl_pdata.gpio_on, 0);
}

/* Early board setup: map the console USART. */
void __init setup_board(void)
{
	at32_map_usart(3, 0, 0);	/* USART 3 => /dev/ttyS0 */
	at32_setup_serial_console(0);
}

/*
 * Reparent PLL1 to osc1 and program it so the ABDAC sample clock can be
 * derived for the configured sample rate.  Returns 0 on success or a
 * negative errno; all clk references are released via the goto chain.
 */
static int __init set_abdac_rate(struct platform_device *pdev)
{
	int retval;
	struct clk *osc1;
	struct clk *pll1;
	struct clk *abdac;

	if (pdev == NULL)
		return -ENXIO;

	osc1 = clk_get(NULL, "osc1");
	if (IS_ERR(osc1)) {
		retval = PTR_ERR(osc1);
		goto out;
	}
	pll1 = clk_get(NULL, "pll1");
	if (IS_ERR(pll1)) {
		retval = PTR_ERR(pll1);
		goto out_osc1;
	}
	abdac = clk_get(&pdev->dev, "sample_clk");
	if (IS_ERR(abdac)) {
		retval = PTR_ERR(abdac);
		goto out_pll1;
	}

	retval = clk_set_parent(pll1, osc1);
	if (retval != 0)
		goto out_abdac;

	/*
	 * Rate is 32000 to 50000 and ABDAC oversamples 256x. Multiply, in
	 * power of 2, to a value above 80 MHz. Power of 2 so it is possible
	 * for the generic clock to divide it down again and 80 MHz is the
	 * lowest frequency for the PLL.
	 */
	retval = clk_round_rate(pll1,
			CONFIG_BOARD_FAVR32_ABDAC_RATE * 256 * 16);
	if (retval < 0)
		goto out_abdac;

	retval = clk_set_rate(pll1, retval);
	if (retval != 0)
		goto out_abdac;

	retval = clk_set_parent(abdac, pll1);
	if (retval != 0)
		goto out_abdac;

out_abdac:
	clk_put(abdac);
out_pll1:
	clk_put(pll1);
out_osc1:
	clk_put(osc1);
out:
	return retval;
}

/* Register all on-board peripherals. Runs at postcore_initcall time. */
static int __init favr32_init(void)
{
	/*
	 * Favr-32 uses 32-bit SDRAM interface. Reserve the SDRAM-specific
	 * pins so that nobody messes with them.
	 */
	at32_reserve_pin(GPIO_PIOE_BASE, ATMEL_EBI_PE_DATA_ALL);

	at32_select_gpio(GPIO_PIN_PB(3), 0);	/* IRQ from ADS7843 */

	at32_add_device_usart(0);
	set_hw_addr(at32_add_device_eth(0, &eth_data[0]));

	/* Touch controller IRQ must be resolved before the SPI bus is added. */
	spi1_board_info[0].irq = gpio_to_irq(GPIO_PIN_PB(3));

	set_abdac_rate(at32_add_device_abdac(0, &abdac0_data));

	at32_add_device_pwm(1 << atmel_pwm_bl_pdata.pwm_channel);
	at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info));
	at32_add_device_mci(0, &mci0_data);
	at32_add_device_usba(0, NULL);
	/* fbmem_start/fbmem_size come from the platform framebuffer setup. */
	at32_add_device_lcdc(0, &favr32_lcdc_data, fbmem_start,
			     fbmem_size, 0);

	favr32_setup_leds();

	favr32_setup_atmel_pwm_bl();

	return 0;
}
postcore_initcall(favr32_init);
gpl-2.0
whodunnit/AK-OnePlusOne-CM
arch/x86/platform/geode/net5501.c
7169
3435
/* * System Specific setup for Soekris net5501 * At the moment this means setup of GPIO control of LEDs and buttons * on net5501 boards. * * * Copyright (C) 2008-2009 Tower Technologies * Written by Alessandro Zummo <a.zummo@towertech.it> * * Copyright (C) 2008 Constantin Baranov <const@mimas.ru> * Copyright (C) 2011 Ed Wildgoose <kernel@wildgooses.com> * and Philip Prindeville <philipp@redfish-solutions.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/string.h> #include <linux/module.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <asm/geode.h> #define BIOS_REGION_BASE 0xffff0000 #define BIOS_REGION_SIZE 0x00010000 static struct gpio_keys_button net5501_gpio_buttons[] = { { .code = KEY_RESTART, .gpio = 24, .active_low = 1, .desc = "Reset button", .type = EV_KEY, .wakeup = 0, .debounce_interval = 100, .can_disable = 0, } }; static struct gpio_keys_platform_data net5501_buttons_data = { .buttons = net5501_gpio_buttons, .nbuttons = ARRAY_SIZE(net5501_gpio_buttons), .poll_interval = 20, }; static struct platform_device net5501_buttons_dev = { .name = "gpio-keys-polled", .id = 1, .dev = { .platform_data = &net5501_buttons_data, } }; static struct gpio_led net5501_leds[] = { { .name = "net5501:1", .gpio = 6, .default_trigger = "default-on", .active_low = 0, }, }; static struct gpio_led_platform_data net5501_leds_data = { .num_leds = ARRAY_SIZE(net5501_leds), .leds = net5501_leds, }; static struct platform_device net5501_leds_dev = { .name = "leds-gpio", .id = -1, .dev.platform_data = &net5501_leds_data, }; static struct __initdata platform_device *net5501_devs[] = { &net5501_buttons_dev, &net5501_leds_dev, }; static void __init 
register_net5501(void) { /* Setup LED control through leds-gpio driver */ platform_add_devices(net5501_devs, ARRAY_SIZE(net5501_devs)); } struct net5501_board { u16 offset; u16 len; char *sig; }; static struct net5501_board __initdata boards[] = { { 0xb7b, 7, "net5501" }, /* net5501 v1.33/1.33c */ { 0xb1f, 7, "net5501" }, /* net5501 v1.32i */ }; static bool __init net5501_present(void) { int i; unsigned char *rombase, *bios; bool found = false; rombase = ioremap(BIOS_REGION_BASE, BIOS_REGION_SIZE - 1); if (!rombase) { printk(KERN_ERR "%s: failed to get rombase\n", KBUILD_MODNAME); return found; } bios = rombase + 0x20; /* null terminated */ if (memcmp(bios, "comBIOS", 7)) goto unmap; for (i = 0; i < ARRAY_SIZE(boards); i++) { unsigned char *model = rombase + boards[i].offset; if (!memcmp(model, boards[i].sig, boards[i].len)) { printk(KERN_INFO "%s: system is recognized as \"%s\"\n", KBUILD_MODNAME, model); found = true; break; } } unmap: iounmap(rombase); return found; } static int __init net5501_init(void) { if (!is_geode()) return 0; if (!net5501_present()) return 0; register_net5501(); return 0; } module_init(net5501_init); MODULE_AUTHOR("Philip Prindeville <philipp@redfish-solutions.com>"); MODULE_DESCRIPTION("Soekris net5501 System Setup"); MODULE_LICENSE("GPL");
gpl-2.0
cmenard/android_kernel_samsung_espresso10
arch/sparc/kernel/us2e_cpufreq.c
7425
9847
/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support * * Copyright (C) 2003 David S. Miller (davem@redhat.com) * * Many thanks to Dominik Brodowski for fixing up the cpufreq * infrastructure in order to make this driver easier to implement. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/cpufreq.h> #include <linux/threads.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/init.h> #include <asm/asi.h> #include <asm/timer.h> static struct cpufreq_driver *cpufreq_us2e_driver; struct us2e_freq_percpu_info { struct cpufreq_frequency_table table[6]; }; /* Indexed by cpu number. */ static struct us2e_freq_percpu_info *us2e_freq_table; #define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL #define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL /* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled * in the ESTAR mode control register. */ #define ESTAR_MODE_DIV_1 0x0000000000000000UL #define ESTAR_MODE_DIV_2 0x0000000000000001UL #define ESTAR_MODE_DIV_4 0x0000000000000003UL #define ESTAR_MODE_DIV_6 0x0000000000000002UL #define ESTAR_MODE_DIV_8 0x0000000000000004UL #define ESTAR_MODE_DIV_MASK 0x0000000000000007UL #define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL #define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL #define MCTRL0_REFR_COUNT_SHIFT 8 #define MCTRL0_REFR_INTERVAL 7800 #define MCTRL0_REFR_CLKS_P_CNT 64 static unsigned long read_hbreg(unsigned long addr) { unsigned long ret; __asm__ __volatile__("ldxa [%1] %2, %0" : "=&r" (ret) : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); return ret; } static void write_hbreg(unsigned long addr, unsigned long val) { __asm__ __volatile__("stxa %0, [%1] %2\n\t" "membar #Sync" : /* no outputs */ : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) : "memory"); if (addr == HBIRD_ESTAR_MODE_ADDR) { /* Need to wait 16 clock cycles for the PLL to lock. 
*/ udelay(1); } } static void self_refresh_ctl(int enable) { unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); if (enable) mctrl |= MCTRL0_SREFRESH_ENAB; else mctrl &= ~MCTRL0_SREFRESH_ENAB; write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR); } static void frob_mem_refresh(int cpu_slowing_down, unsigned long clock_tick, unsigned long old_divisor, unsigned long divisor) { unsigned long old_refr_count, refr_count, mctrl; refr_count = (clock_tick * MCTRL0_REFR_INTERVAL); refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL); mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK) >> MCTRL0_REFR_COUNT_SHIFT; mctrl &= ~MCTRL0_REFR_COUNT_MASK; mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT; write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) { unsigned long usecs; /* We have to wait for both refresh counts (old * and new) to go to zero. */ usecs = (MCTRL0_REFR_CLKS_P_CNT * (refr_count + old_refr_count) * 1000000UL * old_divisor) / clock_tick; udelay(usecs + 1UL); } } static void us2e_transition(unsigned long estar, unsigned long new_bits, unsigned long clock_tick, unsigned long old_divisor, unsigned long divisor) { unsigned long flags; local_irq_save(flags); estar &= ~ESTAR_MODE_DIV_MASK; /* This is based upon the state transition diagram in the IIe manual. 
*/ if (old_divisor == 2 && divisor == 1) { self_refresh_ctl(0); write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); frob_mem_refresh(0, clock_tick, old_divisor, divisor); } else if (old_divisor == 1 && divisor == 2) { frob_mem_refresh(1, clock_tick, old_divisor, divisor); write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); self_refresh_ctl(1); } else if (old_divisor == 1 && divisor > 2) { us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick, 1, 2); us2e_transition(estar, new_bits, clock_tick, 2, divisor); } else if (old_divisor > 2 && divisor == 1) { us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick, old_divisor, 2); us2e_transition(estar, new_bits, clock_tick, 2, divisor); } else if (old_divisor < divisor) { frob_mem_refresh(0, clock_tick, old_divisor, divisor); write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); } else if (old_divisor > divisor) { write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); frob_mem_refresh(1, clock_tick, old_divisor, divisor); } else { BUG(); } local_irq_restore(flags); } static unsigned long index_to_estar_mode(unsigned int index) { switch (index) { case 0: return ESTAR_MODE_DIV_1; case 1: return ESTAR_MODE_DIV_2; case 2: return ESTAR_MODE_DIV_4; case 3: return ESTAR_MODE_DIV_6; case 4: return ESTAR_MODE_DIV_8; default: BUG(); } } static unsigned long index_to_divisor(unsigned int index) { switch (index) { case 0: return 1; case 1: return 2; case 2: return 4; case 3: return 6; case 4: return 8; default: BUG(); } } static unsigned long estar_to_divisor(unsigned long estar) { unsigned long ret; switch (estar & ESTAR_MODE_DIV_MASK) { case ESTAR_MODE_DIV_1: ret = 1; break; case ESTAR_MODE_DIV_2: ret = 2; break; case ESTAR_MODE_DIV_4: ret = 4; break; case ESTAR_MODE_DIV_6: ret = 6; break; case ESTAR_MODE_DIV_8: ret = 8; break; default: BUG(); } return ret; } static unsigned int us2e_freq_get(unsigned int cpu) { cpumask_t cpus_allowed; unsigned long clock_tick, estar; if (!cpu_online(cpu)) return 0; cpumask_copy(&cpus_allowed, 
tsk_cpus_allowed(current)); set_cpus_allowed_ptr(current, cpumask_of(cpu)); clock_tick = sparc64_get_clock_tick(cpu) / 1000; estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); set_cpus_allowed_ptr(current, &cpus_allowed); return clock_tick / estar_to_divisor(estar); } static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) { unsigned long new_bits, new_freq; unsigned long clock_tick, divisor, old_divisor, estar; cpumask_t cpus_allowed; struct cpufreq_freqs freqs; if (!cpu_online(cpu)) return; cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); set_cpus_allowed_ptr(current, cpumask_of(cpu)); new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; new_bits = index_to_estar_mode(index); divisor = index_to_divisor(index); new_freq /= divisor; estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); old_divisor = estar_to_divisor(estar); freqs.old = clock_tick / old_divisor; freqs.new = new_freq; freqs.cpu = cpu; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); if (old_divisor != divisor) us2e_transition(estar, new_bits, clock_tick * 1000, old_divisor, divisor); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); set_cpus_allowed_ptr(current, &cpus_allowed); } static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int new_index = 0; if (cpufreq_frequency_table_target(policy, &us2e_freq_table[policy->cpu].table[0], target_freq, relation, &new_index)) return -EINVAL; us2e_set_cpu_divider_index(policy->cpu, new_index); return 0; } static int us2e_freq_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &us2e_freq_table[policy->cpu].table[0]); } static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; struct cpufreq_frequency_table *table = &us2e_freq_table[cpu].table[0]; table[0].index = 0; table[0].frequency = clock_tick / 1; table[1].index = 1; 
table[1].frequency = clock_tick / 2; table[2].index = 2; table[2].frequency = clock_tick / 4; table[2].index = 3; table[2].frequency = clock_tick / 6; table[2].index = 4; table[2].frequency = clock_tick / 8; table[2].index = 5; table[3].frequency = CPUFREQ_TABLE_END; policy->cpuinfo.transition_latency = 0; policy->cur = clock_tick; return cpufreq_frequency_table_cpuinfo(policy, table); } static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) { if (cpufreq_us2e_driver) us2e_set_cpu_divider_index(policy->cpu, 0); return 0; } static int __init us2e_freq_init(void) { unsigned long manuf, impl, ver; int ret; if (tlb_type != spitfire) return -ENODEV; __asm__("rdpr %%ver, %0" : "=r" (ver)); manuf = ((ver >> 48) & 0xffff); impl = ((ver >> 32) & 0xffff); if (manuf == 0x17 && impl == 0x13) { struct cpufreq_driver *driver; ret = -ENOMEM; driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); if (!driver) goto err_out; us2e_freq_table = kzalloc( (NR_CPUS * sizeof(struct us2e_freq_percpu_info)), GFP_KERNEL); if (!us2e_freq_table) goto err_out; driver->init = us2e_freq_cpu_init; driver->verify = us2e_freq_verify; driver->target = us2e_freq_target; driver->get = us2e_freq_get; driver->exit = us2e_freq_cpu_exit; driver->owner = THIS_MODULE, strcpy(driver->name, "UltraSPARC-IIe"); cpufreq_us2e_driver = driver; ret = cpufreq_register_driver(driver); if (ret) goto err_out; return 0; err_out: if (driver) { kfree(driver); cpufreq_us2e_driver = NULL; } kfree(us2e_freq_table); us2e_freq_table = NULL; return ret; } return -ENODEV; } static void __exit us2e_freq_exit(void) { if (cpufreq_us2e_driver) { cpufreq_unregister_driver(cpufreq_us2e_driver); kfree(cpufreq_us2e_driver); cpufreq_us2e_driver = NULL; kfree(us2e_freq_table); us2e_freq_table = NULL; } } MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe"); MODULE_LICENSE("GPL"); module_init(us2e_freq_init); module_exit(us2e_freq_exit);
gpl-2.0
halaszk/SGS3
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
9217
4914
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/seq_file.h>

#include <asm/uaccess.h>

#include "ipoib.h"

/* sysfs "parent" attribute: report the name of the parent IPoIB device. */
static ssize_t show_parent(struct device *d, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	return sprintf(buf, "%s\n", priv->parent->name);
}
static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);

/*
 * ipoib_vlan_add - create a child ("vlan") interface on @pdev for @pkey.
 *
 * Returns 0 on success or a negative errno (-EPERM without CAP_NET_ADMIN,
 * -ENOTUNIQ if the pkey is already in use on this parent, -ENOMEM on
 * allocation failure, or the error from device init/registration).
 *
 * Locking: takes the RTNL (via trylock + restart_syscall() to avoid a
 * deadlock against anyone holding the RTNL while flushing us) and then
 * the parent's vlan_mutex — always in that order.
 */
int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
{
	struct ipoib_dev_priv *ppriv, *priv;
	char intf_name[IFNAMSIZ];
	int result;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ppriv = netdev_priv(pdev);

	if (!rtnl_trylock())
		return restart_syscall();
	mutex_lock(&ppriv->vlan_mutex);

	/*
	 * First ensure this isn't a duplicate. We check the parent device and
	 * then all of the child interfaces to make sure the Pkey doesn't match.
	 */
	if (ppriv->pkey == pkey) {
		result = -ENOTUNIQ;
		/* priv = NULL keeps the err: path from freeing a live child */
		priv = NULL;
		goto err;
	}

	list_for_each_entry(priv, &ppriv->child_intfs, list) {
		if (priv->pkey == pkey) {
			result = -ENOTUNIQ;
			priv = NULL;
			goto err;
		}
	}

	/* Child interface is named "<parent>.<pkey in hex>" */
	snprintf(intf_name, sizeof intf_name, "%s.%04x",
		 ppriv->dev->name, pkey);
	priv = ipoib_intf_alloc(intf_name);
	if (!priv) {
		result = -ENOMEM;
		goto err;
	}

	priv->max_ib_mtu = ppriv->max_ib_mtu;
	/* MTU will be reset when mcast join happens */
	priv->dev->mtu   = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);

	result = ipoib_set_dev_features(priv, ppriv->ca);
	if (result)
		goto err;

	priv->pkey = pkey;

	memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
	/* Patch the new pkey into bytes 8-9 of the broadcast GID. */
	priv->dev->broadcast[8] = pkey >> 8;
	priv->dev->broadcast[9] = pkey & 0xff;

	result = ipoib_dev_init(priv->dev, ppriv->ca, ppriv->port);
	if (result < 0) {
		ipoib_warn(ppriv, "failed to initialize subinterface: "
			   "device %s, port %d",
			   ppriv->ca->name, ppriv->port);
		goto err;
	}

	result = register_netdevice(priv->dev);
	if (result) {
		ipoib_warn(priv, "failed to initialize; error %i", result);
		goto register_failed;
	}

	priv->parent = ppriv->dev;

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;

	if (device_create_file(&priv->dev->dev, &dev_attr_parent))
		goto sysfs_failed;

	list_add_tail(&priv->list, &ppriv->child_intfs);

	mutex_unlock(&ppriv->vlan_mutex);
	rtnl_unlock();

	return 0;

	/* Unwind in reverse order of setup; labels fall through downward. */
sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdevice(priv->dev);

register_failed:
	ipoib_dev_cleanup(priv->dev);

err:
	mutex_unlock(&ppriv->vlan_mutex);
	rtnl_unlock();
	if (priv)
		free_netdev(priv->dev);

	return result;
}

/*
 * ipoib_vlan_delete - destroy the child interface of @pdev matching @pkey.
 *
 * Returns 0 if a matching child was found and freed, -ENODEV if none
 * matched, or -EPERM without CAP_NET_ADMIN.  Same lock ordering as
 * ipoib_vlan_add(); the net_device is freed only after both locks are
 * dropped.
 */
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
{
	struct ipoib_dev_priv *ppriv, *priv, *tpriv;
	struct net_device *dev = NULL;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ppriv = netdev_priv(pdev);

	if (!rtnl_trylock())
		return restart_syscall();
	mutex_lock(&ppriv->vlan_mutex);
	list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
		if (priv->pkey == pkey) {
			unregister_netdevice(priv->dev);
			ipoib_dev_cleanup(priv->dev);
			list_del(&priv->list);
			dev = priv->dev;
			break;
		}
	}
	mutex_unlock(&ppriv->vlan_mutex);
	rtnl_unlock();

	if (dev) {
		free_netdev(dev);
		return 0;
	}

	return -ENODEV;
}
gpl-2.0
glewarne/LG_G2-D802_StockMOD_Kernel
drivers/staging/wlan-ng/p80211wep.c
9217
9824
/* src/p80211/p80211wep.c * * WEP encode/decode for P80211. * * Copyright (C) 2002 AbsoluteValue Systems, Inc. All Rights Reserved. * -------------------------------------------------------------------- * * linux-wlan * * The contents of this file are subject to the Mozilla Public * License Version 1.1 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or * implied. See the License for the specific language governing * rights and limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU Public License version 2 (the "GPL"), in which * case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use * your version of this file under the MPL, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete * the provisions above, a recipient may use your version of this * file under either the MPL or the GPL. * * -------------------------------------------------------------------- * * Inquiries regarding the linux-wlan Open Source project can be * made directly to: * * AbsoluteValue Systems Inc. * info@linux-wlan.com * http://www.linux-wlan.com * * -------------------------------------------------------------------- * * Portions of the development of this software were funded by * Intersil Corporation as part of PRISM(R) chipset product development. 
* * -------------------------------------------------------------------- */ /*================================================================*/ /* System Includes */ #include <linux/netdevice.h> #include <linux/wireless.h> #include <linux/random.h> #include <linux/kernel.h> /* #define WEP_DEBUG */ #include "p80211hdr.h" #include "p80211types.h" #include "p80211msg.h" #include "p80211conv.h" #include "p80211netdev.h" #define WEP_KEY(x) (((x) & 0xC0) >> 6) static const u32 wep_crc32_table[256] = { 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L, 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, 
0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L, 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, 0x2d02ef8dL }; /* keylen in bytes! 
*/ int wep_change_key(wlandevice_t *wlandev, int keynum, u8 *key, int keylen) { if (keylen < 0) return -1; if (keylen >= MAX_KEYLEN) return -1; if (key == NULL) return -1; if (keynum < 0) return -1; if (keynum >= NUM_WEPKEYS) return -1; #ifdef WEP_DEBUG printk(KERN_DEBUG "WEP key %d len %d = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", keynum, keylen, key[0], key[1], key[2], key[3], key[4], key[5], key[6], key[7]); #endif wlandev->wep_keylens[keynum] = keylen; memcpy(wlandev->wep_keys[keynum], key, keylen); return 0; } /* 4-byte IV at start of buffer, 4-byte ICV at end of buffer. if successful, buf start is payload begin, length -= 8; */ int wep_decrypt(wlandevice_t *wlandev, u8 *buf, u32 len, int key_override, u8 *iv, u8 *icv) { u32 i, j, k, crc, keylen; u8 s[256], key[64], c_crc[4]; u8 keyidx; /* Needs to be at least 8 bytes of payload */ if (len <= 0) return -1; /* initialize the first bytes of the key from the IV */ key[0] = iv[0]; key[1] = iv[1]; key[2] = iv[2]; keyidx = WEP_KEY(iv[3]); if (key_override >= 0) keyidx = key_override; if (keyidx >= NUM_WEPKEYS) return -2; keylen = wlandev->wep_keylens[keyidx]; if (keylen == 0) return -3; /* copy the rest of the key over from the designated key */ memcpy(key + 3, wlandev->wep_keys[keyidx], keylen); keylen += 3; /* add in IV bytes */ #ifdef WEP_DEBUG printk(KERN_DEBUG "D %d: %02x %02x %02x (%d %d) %02x:%02x:%02x:%02x:%02x\n", len, key[0], key[1], key[2], keyidx, keylen, key[3], key[4], key[5], key[6], key[7]); #endif /* set up the RC4 state */ for (i = 0; i < 256; i++) s[i] = i; j = 0; for (i = 0; i < 256; i++) { j = (j + s[i] + key[i % keylen]) & 0xff; swap(i, j); } /* Apply the RC4 to the data, update the CRC32 */ crc = ~0; i = j = 0; for (k = 0; k < len; k++) { i = (i + 1) & 0xff; j = (j + s[i]) & 0xff; swap(i, j); buf[k] ^= s[(s[i] + s[j]) & 0xff]; crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8); } crc = ~crc; /* now let's check the crc */ c_crc[0] = crc; c_crc[1] = crc >> 8; c_crc[2] = crc >> 16; 
c_crc[3] = crc >> 24; for (k = 0; k < 4; k++) { i = (i + 1) & 0xff; j = (j + s[i]) & 0xff; swap(i, j); if ((c_crc[k] ^ s[(s[i] + s[j]) & 0xff]) != icv[k]) return -(4 | (k << 4)); /* ICV mismatch */ } return 0; } /* encrypts in-place. */ int wep_encrypt(wlandevice_t *wlandev, u8 *buf, u8 *dst, u32 len, int keynum, u8 *iv, u8 *icv) { u32 i, j, k, crc, keylen; u8 s[256], key[64]; /* no point in WEPping an empty frame */ if (len <= 0) return -1; /* we need to have a real key.. */ if (keynum >= NUM_WEPKEYS) return -2; keylen = wlandev->wep_keylens[keynum]; if (keylen <= 0) return -3; /* use a random IV. And skip known weak ones. */ get_random_bytes(iv, 3); while ((iv[1] == 0xff) && (iv[0] >= 3) && (iv[0] < keylen)) get_random_bytes(iv, 3); iv[3] = (keynum & 0x03) << 6; key[0] = iv[0]; key[1] = iv[1]; key[2] = iv[2]; /* copy the rest of the key over from the designated key */ memcpy(key + 3, wlandev->wep_keys[keynum], keylen); keylen += 3; /* add in IV bytes */ #ifdef WEP_DEBUG printk(KERN_DEBUG "E %d (%d/%d %d) %02x %02x %02x %02x:%02x:%02x:%02x:%02x\n", len, iv[3], keynum, keylen, key[0], key[1], key[2], key[3], key[4], key[5], key[6], key[7]); #endif /* set up the RC4 state */ for (i = 0; i < 256; i++) s[i] = i; j = 0; for (i = 0; i < 256; i++) { j = (j + s[i] + key[i % keylen]) & 0xff; swap(i, j); } /* Update CRC32 then apply RC4 to the data */ crc = ~0; i = j = 0; for (k = 0; k < len; k++) { crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8); i = (i + 1) & 0xff; j = (j + s[i]) & 0xff; swap(i, j); dst[k] = buf[k] ^ s[(s[i] + s[j]) & 0xff]; } crc = ~crc; /* now let's encrypt the crc */ icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; for (k = 0; k < 4; k++) { i = (i + 1) & 0xff; j = (j + s[i]) & 0xff; swap(i, j); icv[k] ^= s[(s[i] + s[j]) & 0xff]; } return 0; }
gpl-2.0
Team-Hydra/S5-AEL-Kernel
fs/quota/netlink.c
9217
2648
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/quotaops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/genetlink.h>

/* Netlink family structure for quota */
static struct genl_family quota_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "VFS_DQUOT",
	.version = 1,
	.maxattr = QUOTA_NL_A_MAX,
};

/**
 * quota_send_warning - Send warning to userspace about exceeded quota
 * @type: The quota type: USRQQUOTA, GRPQUOTA,...
 * @id: The user or group id of the quota that was exceeded
 * @dev: The device on which the fs is mounted (sb->s_dev)
 * @warntype: The type of the warning: QUOTA_NL_...
 *
 * This can be used by filesystems (including those which don't use
 * dquot) to send a message to userspace relating to quota limits.
 *
 * Failures are logged and swallowed: a lost warning must never fail
 * the filesystem operation that triggered it.
 */
void quota_send_warning(short type, unsigned int id, dev_t dev,
			const char warntype)
{
	static atomic_t seq;	/* monotonically increasing message sequence */
	struct sk_buff *skb;
	void *msg_head;
	int ret;
	/* 4 u32 attrs + 2 u64 attrs (see the nla_put_* calls below) */
	int msg_size = 4 * nla_total_size(sizeof(u32)) +
		       2 * nla_total_size(sizeof(u64));

	/* We have to allocate using GFP_NOFS as we are called from a
	 * filesystem performing write and thus further recursion into
	 * the fs to free some data could cause deadlocks. */
	skb = genlmsg_new(msg_size, GFP_NOFS);
	if (!skb) {
		printk(KERN_ERR
		       "VFS: Not enough memory to send quota warning.\n");
		return;
	}
	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
			       &quota_genl_family, 0, QUOTA_NL_C_WARNING);
	if (!msg_head) {
		printk(KERN_ERR
		       "VFS: Cannot store netlink header in quota warning.\n");
		goto err_out;
	}
	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
	if (ret)
		goto attr_err_out;
	genlmsg_end(skb, msg_head);

	/* genlmsg_multicast() consumes the skb on both success and failure */
	genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
	return;
attr_err_out:
	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
	kfree_skb(skb);
}
EXPORT_SYMBOL(quota_send_warning);

/* Register the quota genetlink family; registration failure is only
 * logged (quota warnings are best-effort), so always return 0.
 * (Also dropped the stray ';' that followed the function body.)
 */
static int __init quota_init(void)
{
	if (genl_register_family(&quota_genl_family) != 0)
		printk(KERN_ERR
		       "VFS: Failed to create quota netlink interface.\n");
	return 0;
}

module_init(quota_init);
gpl-2.0
FEDEVEL/imx6rex-linux-3.0.35
drivers/ide/siimage.c
9217
21477
/* * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2003 Red Hat * Copyright (C) 2007-2008 MontaVista Software, Inc. * Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz * * May be copied or modified under the terms of the GNU General Public License * * Documentation for CMD680: * http://gkernel.sourceforge.net/specs/sii/sii-0680a-v1.31.pdf.bz2 * * Documentation for SiI 3112: * http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2 * * Errata and other documentation only available under NDA. * * * FAQ Items: * If you are using Marvell SATA-IDE adapters with Maxtor drives * ensure the system is set up for ATA100/UDMA5, not UDMA6. * * If you are using WD drives with SATA bridges you must set the * drive to "Single". "Master" will hang. * * If you have strange problems with nVidia chipset systems please * see the SI support documentation and update your system BIOS * if necessary * * The Dell DRAC4 has some interesting features including effectively hot * unplugging/replugging the virtual CD interface when the DRAC is reset. * This often causes drivers/ide/siimage to panic but is ok with the rather * smarter code in libata. 
* * TODO: * - VDMA support */ #include <linux/types.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/ide.h> #include <linux/init.h> #include <linux/io.h> #define DRV_NAME "siimage" /** * pdev_is_sata - check if device is SATA * @pdev: PCI device to check * * Returns true if this is a SATA controller */ static int pdev_is_sata(struct pci_dev *pdev) { #ifdef CONFIG_BLK_DEV_IDE_SATA switch (pdev->device) { case PCI_DEVICE_ID_SII_3112: case PCI_DEVICE_ID_SII_1210SA: return 1; case PCI_DEVICE_ID_SII_680: return 0; } BUG(); #endif return 0; } /** * is_sata - check if hwif is SATA * @hwif: interface to check * * Returns true if this is a SATA controller */ static inline int is_sata(ide_hwif_t *hwif) { return pdev_is_sata(to_pci_dev(hwif->dev)); } /** * siimage_selreg - return register base * @hwif: interface * @r: config offset * * Turn a config register offset into the right address in either * PCI space or MMIO space to access the control register in question * Thankfully this is a configuration operation, so isn't performance * critical. */ static unsigned long siimage_selreg(ide_hwif_t *hwif, int r) { unsigned long base = (unsigned long)hwif->hwif_data; base += 0xA0 + r; if (hwif->host_flags & IDE_HFLAG_MMIO) base += hwif->channel << 6; else base += hwif->channel << 4; return base; } /** * siimage_seldev - return register base * @hwif: interface * @r: config offset * * Turn a config register offset into the right address in either * PCI space or MMIO space to access the control register in question * including accounting for the unit shift. 
*/ static inline unsigned long siimage_seldev(ide_drive_t *drive, int r) { ide_hwif_t *hwif = drive->hwif; unsigned long base = (unsigned long)hwif->hwif_data; u8 unit = drive->dn & 1; base += 0xA0 + r; if (hwif->host_flags & IDE_HFLAG_MMIO) base += hwif->channel << 6; else base += hwif->channel << 4; base |= unit << unit; return base; } static u8 sil_ioread8(struct pci_dev *dev, unsigned long addr) { struct ide_host *host = pci_get_drvdata(dev); u8 tmp = 0; if (host->host_priv) tmp = readb((void __iomem *)addr); else pci_read_config_byte(dev, addr, &tmp); return tmp; } static u16 sil_ioread16(struct pci_dev *dev, unsigned long addr) { struct ide_host *host = pci_get_drvdata(dev); u16 tmp = 0; if (host->host_priv) tmp = readw((void __iomem *)addr); else pci_read_config_word(dev, addr, &tmp); return tmp; } static void sil_iowrite8(struct pci_dev *dev, u8 val, unsigned long addr) { struct ide_host *host = pci_get_drvdata(dev); if (host->host_priv) writeb(val, (void __iomem *)addr); else pci_write_config_byte(dev, addr, val); } static void sil_iowrite16(struct pci_dev *dev, u16 val, unsigned long addr) { struct ide_host *host = pci_get_drvdata(dev); if (host->host_priv) writew(val, (void __iomem *)addr); else pci_write_config_word(dev, addr, val); } static void sil_iowrite32(struct pci_dev *dev, u32 val, unsigned long addr) { struct ide_host *host = pci_get_drvdata(dev); if (host->host_priv) writel(val, (void __iomem *)addr); else pci_write_config_dword(dev, addr, val); } /** * sil_udma_filter - compute UDMA mask * @drive: IDE device * * Compute the available UDMA speeds for the device on the interface. * * For the CMD680 this depends on the clocking mode (scsc), for the * SI3112 SATA controller life is a bit simpler. */ static u8 sil_pata_udma_filter(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned long base = (unsigned long)hwif->hwif_data; u8 scsc, mask = 0; base += (hwif->host_flags & IDE_HFLAG_MMIO) ? 
0x4A : 0x8A; scsc = sil_ioread8(dev, base); switch (scsc & 0x30) { case 0x10: /* 133 */ mask = ATA_UDMA6; break; case 0x20: /* 2xPCI */ mask = ATA_UDMA6; break; case 0x00: /* 100 */ mask = ATA_UDMA5; break; default: /* Disabled ? */ BUG(); } return mask; } static u8 sil_sata_udma_filter(ide_drive_t *drive) { char *m = (char *)&drive->id[ATA_ID_PROD]; return strstr(m, "Maxtor") ? ATA_UDMA5 : ATA_UDMA6; } /** * sil_set_pio_mode - set host controller for PIO mode * @hwif: port * @drive: drive * * Load the timing settings for this device mode into the * controller. */ static void sil_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { static const u16 tf_speed[] = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 }; static const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; struct pci_dev *dev = to_pci_dev(hwif->dev); ide_drive_t *pair = ide_get_pair_dev(drive); u32 speedt = 0; u16 speedp = 0; unsigned long addr = siimage_seldev(drive, 0x04); unsigned long tfaddr = siimage_selreg(hwif, 0x02); unsigned long base = (unsigned long)hwif->hwif_data; const u8 pio = drive->pio_mode - XFER_PIO_0; u8 tf_pio = pio; u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; u8 addr_mask = hwif->channel ? (mmio ? 0xF4 : 0x84) : (mmio ? 0xB4 : 0x80); u8 mode = 0; u8 unit = drive->dn & 1; /* trim *taskfile* PIO to the slowest of the master/slave */ if (pair) { u8 pair_pio = pair->pio_mode - XFER_PIO_0; if (pair_pio < tf_pio) tf_pio = pair_pio; } /* cheat for now and use the docs */ speedp = data_speed[pio]; speedt = tf_speed[tf_pio]; sil_iowrite16(dev, speedp, addr); sil_iowrite16(dev, speedt, tfaddr); /* now set up IORDY */ speedp = sil_ioread16(dev, tfaddr - 2); speedp &= ~0x200; mode = sil_ioread8(dev, base + addr_mask); mode &= ~(unit ? 0x30 : 0x03); if (ide_pio_need_iordy(drive, pio)) { speedp |= 0x200; mode |= unit ? 
0x10 : 0x01; } sil_iowrite16(dev, speedp, tfaddr - 2); sil_iowrite8(dev, mode, base + addr_mask); } /** * sil_set_dma_mode - set host controller for DMA mode * @hwif: port * @drive: drive * * Tune the SiI chipset for the desired DMA mode. */ static void sil_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { static const u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }; static const u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 }; static const u16 dma[] = { 0x2208, 0x10C2, 0x10C1 }; struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned long base = (unsigned long)hwif->hwif_data; u16 ultra = 0, multi = 0; u8 mode = 0, unit = drive->dn & 1; u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; u8 scsc = 0, addr_mask = hwif->channel ? (mmio ? 0xF4 : 0x84) : (mmio ? 0xB4 : 0x80); unsigned long ma = siimage_seldev(drive, 0x08); unsigned long ua = siimage_seldev(drive, 0x0C); const u8 speed = drive->dma_mode; scsc = sil_ioread8 (dev, base + (mmio ? 0x4A : 0x8A)); mode = sil_ioread8 (dev, base + addr_mask); multi = sil_ioread16(dev, ma); ultra = sil_ioread16(dev, ua); mode &= ~(unit ? 0x30 : 0x03); ultra &= ~0x3F; scsc = ((scsc & 0x30) == 0x00) ? 0 : 1; scsc = is_sata(hwif) ? 1 : scsc; if (speed >= XFER_UDMA_0) { multi = dma[2]; ultra |= scsc ? ultra6[speed - XFER_UDMA_0] : ultra5[speed - XFER_UDMA_0]; mode |= unit ? 0x30 : 0x03; } else { multi = dma[speed - XFER_MW_DMA_0]; mode |= unit ? 0x20 : 0x02; } sil_iowrite8 (dev, mode, base + addr_mask); sil_iowrite16(dev, multi, ma); sil_iowrite16(dev, ultra, ua); } static int sil_test_irq(ide_hwif_t *hwif) { struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned long addr = siimage_selreg(hwif, 1); u8 val = sil_ioread8(dev, addr); /* Return 1 if INTRQ asserted */ return (val & 8) ? 1 : 0; } /** * siimage_mmio_dma_test_irq - check we caused an IRQ * @drive: drive we are testing * * Check if we caused an IDE DMA interrupt. We may also have caused * SATA status interrupts, if so we clean them up and continue. 
*/ static int siimage_mmio_dma_test_irq(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; void __iomem *sata_error_addr = (void __iomem *)hwif->sata_scr[SATA_ERROR_OFFSET]; if (sata_error_addr) { unsigned long base = (unsigned long)hwif->hwif_data; u32 ext_stat = readl((void __iomem *)(base + 0x10)); u8 watchdog = 0; if (ext_stat & ((hwif->channel) ? 0x40 : 0x10)) { u32 sata_error = readl(sata_error_addr); writel(sata_error, sata_error_addr); watchdog = (sata_error & 0x00680000) ? 1 : 0; printk(KERN_WARNING "%s: sata_error = 0x%08x, " "watchdog = %d, %s\n", drive->name, sata_error, watchdog, __func__); } else watchdog = (ext_stat & 0x8000) ? 1 : 0; ext_stat >>= 16; if (!(ext_stat & 0x0404) && !watchdog) return 0; } /* return 1 if INTR asserted */ if (readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)) & 4) return 1; return 0; } static int siimage_dma_test_irq(ide_drive_t *drive) { if (drive->hwif->host_flags & IDE_HFLAG_MMIO) return siimage_mmio_dma_test_irq(drive); else return ide_dma_test_irq(drive); } /** * sil_sata_reset_poll - wait for SATA reset * @drive: drive we are resetting * * Poll the SATA phy and see whether it has come back from the dead * yet. 
*/ static int sil_sata_reset_poll(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; void __iomem *sata_status_addr = (void __iomem *)hwif->sata_scr[SATA_STATUS_OFFSET]; if (sata_status_addr) { /* SATA Status is available only when in MMIO mode */ u32 sata_stat = readl(sata_status_addr); if ((sata_stat & 0x03) != 0x03) { printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n", hwif->name, sata_stat); return -ENXIO; } } return 0; } /** * sil_sata_pre_reset - reset hook * @drive: IDE device being reset * * For the SATA devices we need to handle recalibration/geometry * differently */ static void sil_sata_pre_reset(ide_drive_t *drive) { if (drive->media == ide_disk) { drive->special_flags &= ~(IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE); } } /** * init_chipset_siimage - set up an SI device * @dev: PCI device * * Perform the initial PCI set up for this device. Attempt to switch * to 133 MHz clocking if the system isn't already set up to do it. */ static int init_chipset_siimage(struct pci_dev *dev) { struct ide_host *host = pci_get_drvdata(dev); void __iomem *ioaddr = host->host_priv; unsigned long base, scsc_addr; u8 rev = dev->revision, tmp; pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, rev ? 1 : 255); if (ioaddr) pci_set_master(dev); base = (unsigned long)ioaddr; if (ioaddr && pdev_is_sata(dev)) { u32 tmp32, irq_mask; /* make sure IDE0/1 interrupts are not masked */ irq_mask = (1 << 22) | (1 << 23); tmp32 = readl(ioaddr + 0x48); if (tmp32 & irq_mask) { tmp32 &= ~irq_mask; writel(tmp32, ioaddr + 0x48); readl(ioaddr + 0x48); /* flush */ } writel(0, ioaddr + 0x148); writel(0, ioaddr + 0x1C8); } sil_iowrite8(dev, 0, base ? (base + 0xB4) : 0x80); sil_iowrite8(dev, 0, base ? (base + 0xF4) : 0x84); scsc_addr = base ? 
(base + 0x4A) : 0x8A; tmp = sil_ioread8(dev, scsc_addr); switch (tmp & 0x30) { case 0x00: /* On 100 MHz clocking, try and switch to 133 MHz */ sil_iowrite8(dev, tmp | 0x10, scsc_addr); break; case 0x30: /* Clocking is disabled, attempt to force 133MHz clocking. */ sil_iowrite8(dev, tmp & ~0x20, scsc_addr); case 0x10: /* On 133Mhz clocking. */ break; case 0x20: /* On PCIx2 clocking. */ break; } tmp = sil_ioread8(dev, scsc_addr); sil_iowrite8 (dev, 0x72, base + 0xA1); sil_iowrite16(dev, 0x328A, base + 0xA2); sil_iowrite32(dev, 0x62DD62DD, base + 0xA4); sil_iowrite32(dev, 0x43924392, base + 0xA8); sil_iowrite32(dev, 0x40094009, base + 0xAC); sil_iowrite8 (dev, 0x72, base ? (base + 0xE1) : 0xB1); sil_iowrite16(dev, 0x328A, base ? (base + 0xE2) : 0xB2); sil_iowrite32(dev, 0x62DD62DD, base ? (base + 0xE4) : 0xB4); sil_iowrite32(dev, 0x43924392, base ? (base + 0xE8) : 0xB8); sil_iowrite32(dev, 0x40094009, base ? (base + 0xEC) : 0xBC); if (base && pdev_is_sata(dev)) { writel(0xFFFF0000, ioaddr + 0x108); writel(0xFFFF0000, ioaddr + 0x188); writel(0x00680000, ioaddr + 0x148); writel(0x00680000, ioaddr + 0x1C8); } /* report the clocking mode of the controller */ if (!pdev_is_sata(dev)) { static const char *clk_str[] = { "== 100", "== 133", "== 2X PCI", "DISABLED!" }; tmp >>= 4; printk(KERN_INFO DRV_NAME " %s: BASE CLOCK %s\n", pci_name(dev), clk_str[tmp & 3]); } return 0; } /** * init_mmio_iops_siimage - set up the iops for MMIO * @hwif: interface to set up * * The basic setup here is fairly simple, we can use standard MMIO * operations. However we do have to set the taskfile register offsets * by hand as there isn't a standard defined layout for them this time. * * The hardware supports buffered taskfiles and also some rather nice * extended PRD tables. 
For better SI3112 support use the libata driver */ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif) { struct pci_dev *dev = to_pci_dev(hwif->dev); struct ide_host *host = pci_get_drvdata(dev); void *addr = host->host_priv; u8 ch = hwif->channel; struct ide_io_ports *io_ports = &hwif->io_ports; unsigned long base; /* * Fill in the basic hwif bits */ hwif->host_flags |= IDE_HFLAG_MMIO; hwif->hwif_data = addr; /* * Now set up the hw. We have to do this ourselves as the * MMIO layout isn't the same as the standard port based I/O. */ memset(io_ports, 0, sizeof(*io_ports)); base = (unsigned long)addr; if (ch) base += 0xC0; else base += 0x80; /* * The buffered task file doesn't have status/control, so we * can't currently use it sanely since we want to use LBA48 mode. */ io_ports->data_addr = base; io_ports->error_addr = base + 1; io_ports->nsect_addr = base + 2; io_ports->lbal_addr = base + 3; io_ports->lbam_addr = base + 4; io_ports->lbah_addr = base + 5; io_ports->device_addr = base + 6; io_ports->status_addr = base + 7; io_ports->ctl_addr = base + 10; if (pdev_is_sata(dev)) { base = (unsigned long)addr; if (ch) base += 0x80; hwif->sata_scr[SATA_STATUS_OFFSET] = base + 0x104; hwif->sata_scr[SATA_ERROR_OFFSET] = base + 0x108; hwif->sata_scr[SATA_CONTROL_OFFSET] = base + 0x100; } hwif->irq = dev->irq; hwif->dma_base = (unsigned long)addr + (ch ? 0x08 : 0x00); } static int is_dev_seagate_sata(ide_drive_t *drive) { const char *s = (const char *)&drive->id[ATA_ID_PROD]; unsigned len = strnlen(s, ATA_ID_PROD_LEN); if ((len > 4) && (!memcmp(s, "ST", 2))) if ((!memcmp(s + len - 2, "AS", 2)) || (!memcmp(s + len - 3, "ASL", 3))) { printk(KERN_INFO "%s: applying pessimistic Seagate " "errata fix\n", drive->name); return 1; } return 0; } /** * sil_quirkproc - post probe fixups * @drive: drive * * Called after drive probe we use this to decide whether the * Seagate fixup must be applied. 
This used to be in init_iops but * that can occur before we know what drives are present. */ static void sil_quirkproc(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; /* Try and rise the rqsize */ if (!is_sata(hwif) || !is_dev_seagate_sata(drive)) hwif->rqsize = 128; } /** * init_iops_siimage - set up iops * @hwif: interface to set up * * Do the basic setup for the SIIMAGE hardware interface * and then do the MMIO setup if we can. This is the first * look in we get for setting up the hwif so that we * can get the iops right before using them. */ static void __devinit init_iops_siimage(ide_hwif_t *hwif) { struct pci_dev *dev = to_pci_dev(hwif->dev); struct ide_host *host = pci_get_drvdata(dev); hwif->hwif_data = NULL; /* Pessimal until we finish probing */ hwif->rqsize = 15; if (host->host_priv) init_mmio_iops_siimage(hwif); } /** * sil_cable_detect - cable detection * @hwif: interface to check * * Check for the presence of an ATA66 capable cable on the interface. */ static u8 sil_cable_detect(ide_hwif_t *hwif) { struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned long addr = siimage_selreg(hwif, 0); u8 ata66 = sil_ioread8(dev, addr); return (ata66 & 0x01) ? 
ATA_CBL_PATA80 : ATA_CBL_PATA40; } static const struct ide_port_ops sil_pata_port_ops = { .set_pio_mode = sil_set_pio_mode, .set_dma_mode = sil_set_dma_mode, .quirkproc = sil_quirkproc, .test_irq = sil_test_irq, .udma_filter = sil_pata_udma_filter, .cable_detect = sil_cable_detect, }; static const struct ide_port_ops sil_sata_port_ops = { .set_pio_mode = sil_set_pio_mode, .set_dma_mode = sil_set_dma_mode, .reset_poll = sil_sata_reset_poll, .pre_reset = sil_sata_pre_reset, .quirkproc = sil_quirkproc, .test_irq = sil_test_irq, .udma_filter = sil_sata_udma_filter, .cable_detect = sil_cable_detect, }; static const struct ide_dma_ops sil_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = ide_dma_start, .dma_end = ide_dma_end, .dma_test_irq = siimage_dma_test_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_lost_irq = ide_dma_lost_irq, .dma_sff_read_status = ide_dma_sff_read_status, }; #define DECLARE_SII_DEV(p_ops) \ { \ .name = DRV_NAME, \ .init_chipset = init_chipset_siimage, \ .init_iops = init_iops_siimage, \ .port_ops = p_ops, \ .dma_ops = &sil_dma_ops, \ .pio_mask = ATA_PIO4, \ .mwdma_mask = ATA_MWDMA2, \ .udma_mask = ATA_UDMA6, \ } static const struct ide_port_info siimage_chipsets[] __devinitdata = { /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops), /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops) }; /** * siimage_init_one - PCI layer discovery entry * @dev: PCI device * @id: ident table entry * * Called by the PCI code when it finds an SiI680 or SiI3112 controller. * We then use the IDE PCI generic helper to do most of the work. 
*/ static int __devinit siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id) { void __iomem *ioaddr = NULL; resource_size_t bar5 = pci_resource_start(dev, 5); unsigned long barsize = pci_resource_len(dev, 5); int rc; struct ide_port_info d; u8 idx = id->driver_data; u8 BA5_EN; d = siimage_chipsets[idx]; if (idx) { static int first = 1; if (first) { printk(KERN_INFO DRV_NAME ": For full SATA support you " "should use the libata sata_sil module.\n"); first = 0; } d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA; } rc = pci_enable_device(dev); if (rc) return rc; pci_read_config_byte(dev, 0x8A, &BA5_EN); if ((BA5_EN & 0x01) || bar5) { /* * Drop back to PIO if we can't map the MMIO. Some systems * seem to get terminally confused in the PCI spaces. */ if (!request_mem_region(bar5, barsize, d.name)) { printk(KERN_WARNING DRV_NAME " %s: MMIO ports not " "available\n", pci_name(dev)); } else { ioaddr = pci_ioremap_bar(dev, 5); if (ioaddr == NULL) release_mem_region(bar5, barsize); } } rc = ide_pci_init_one(dev, &d, ioaddr); if (rc) { if (ioaddr) { iounmap(ioaddr); release_mem_region(bar5, barsize); } pci_disable_device(dev); } return rc; } static void __devexit siimage_remove(struct pci_dev *dev) { struct ide_host *host = pci_get_drvdata(dev); void __iomem *ioaddr = host->host_priv; ide_pci_remove(dev); if (ioaddr) { resource_size_t bar5 = pci_resource_start(dev, 5); unsigned long barsize = pci_resource_len(dev, 5); iounmap(ioaddr); release_mem_region(bar5, barsize); } pci_disable_device(dev); } static const struct pci_device_id siimage_pci_tbl[] = { { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), 0 }, #ifdef CONFIG_BLK_DEV_IDE_SATA { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_3112), 1 }, { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_1210SA), 1 }, #endif { 0, }, }; MODULE_DEVICE_TABLE(pci, siimage_pci_tbl); static struct pci_driver siimage_pci_driver = { .name = "SiI_IDE", .id_table = siimage_pci_tbl, .probe = siimage_init_one, .remove = __devexit_p(siimage_remove), .suspend = 
ide_pci_suspend, .resume = ide_pci_resume, }; static int __init siimage_ide_init(void) { return ide_pci_register_driver(&siimage_pci_driver); } static void __exit siimage_ide_exit(void) { pci_unregister_driver(&siimage_pci_driver); } module_init(siimage_ide_init); module_exit(siimage_ide_exit); MODULE_AUTHOR("Andre Hedrick, Alan Cox"); MODULE_DESCRIPTION("PCI driver module for SiI IDE"); MODULE_LICENSE("GPL");
gpl-2.0
asymingt/Kronux
arch/powerpc/platforms/pasemi/idle.c
13569
2410
/* * Copyright (C) 2006-2007 PA Semi, Inc * * Maintained by: Olof Johansson <olof@lixom.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #undef DEBUG #include <linux/kernel.h> #include <linux/string.h> #include <linux/irq.h> #include <asm/machdep.h> #include <asm/reg.h> #include <asm/smp.h> #include "pasemi.h" struct sleep_mode { char *name; void (*entry)(void); }; static struct sleep_mode modes[] = { { .name = "spin", .entry = &idle_spin }, { .name = "doze", .entry = &idle_doze }, }; static int current_mode = 0; static int pasemi_system_reset_exception(struct pt_regs *regs) { /* If we were woken up from power savings, we need to return * to the calling function, since nip is not saved across * all modes. 
*/ if (regs->msr & SRR1_WAKEMASK) regs->nip = regs->link; switch (regs->msr & SRR1_WAKEMASK) { case SRR1_WAKEEE: do_IRQ(regs); break; case SRR1_WAKEDEC: timer_interrupt(regs); break; default: /* do system reset */ return 0; } /* Set higher astate since we come out of power savings at 0 */ restore_astate(hard_smp_processor_id()); /* everything handled */ regs->msr |= MSR_RI; return 1; } static int __init pasemi_idle_init(void) { #ifndef CONFIG_PPC_PASEMI_CPUFREQ printk(KERN_WARNING "No cpufreq driver, powersavings modes disabled\n"); current_mode = 0; #endif ppc_md.system_reset_exception = pasemi_system_reset_exception; ppc_md.power_save = modes[current_mode].entry; printk(KERN_INFO "Using PA6T idle loop (%s)\n", modes[current_mode].name); return 0; } machine_late_initcall(pasemi, pasemi_idle_init); static int __init idle_param(char *p) { int i; for (i = 0; i < ARRAY_SIZE(modes); i++) { if (!strcmp(modes[i].name, p)) { current_mode = i; break; } } return 0; } early_param("idle", idle_param);
gpl-2.0
kzlin129/tt-gpl
go12/linux-2.6.28.10/drivers/video/samsung/s3cfb_fimd4x.c
2
45559
/* * drivers/video/samsung//s3cfb_fimd4x.c * * $Id: s3cfb_fimd4x.c,v 1.2 2008/11/17 23:44:28 jsgood Exp $ * * Copyright (C) 2008 Jinsung Yang <jsgood.yang@samsung.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * S3C Frame Buffer Driver * based on skeletonfb.c, sa1100fb.h, s3c2410fb.c */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/string.h> #include <linux/ioctl.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <asm/io.h> #include <asm/uaccess.h> #include <plat/gpio-cfg.h> #include <plat/regs-clock.h> #include <plat/regs-lcd.h> #include <plat/regs-gpio.h> #include <mach/map.h> #if defined(CONFIG_PM) #include <plat/pm.h> #endif #include "s3cfb.h" s3cfb_fimd_info_t s3cfb_fimd = { .vidcon0 = S3C_VIDCON0_INTERLACE_F_PROGRESSIVE | S3C_VIDCON0_VIDOUT_RGB_IF | S3C_VIDCON0_L1_DATA16_SUB_16_MODE | \ S3C_VIDCON0_L0_DATA16_MAIN_16_MODE | S3C_VIDCON0_PNRMODE_RGB_P | \ S3C_VIDCON0_CLKVALUP_ALWAYS | S3C_VIDCON0_CLKDIR_DIVIDED | S3C_VIDCON0_CLKSEL_F_HCLK | \ S3C_VIDCON0_ENVID_DISABLE | S3C_VIDCON0_ENVID_F_DISABLE, .dithmode = (S3C_DITHMODE_RDITHPOS_5BIT | S3C_DITHMODE_GDITHPOS_6BIT | S3C_DITHMODE_BDITHPOS_5BIT ) & S3C_DITHMODE_DITHERING_DISABLE, #if defined (CONFIG_FB_S3C_BPP_8) .wincon0 = S3C_WINCONx_BYTSWP_ENABLE | S3C_WINCONx_BURSTLEN_4WORD | S3C_WINCONx_BPPMODE_F_8BPP_PAL, .wincon1 = S3C_WINCONx_HAWSWP_ENABLE | S3C_WINCONx_BURSTLEN_4WORD | S3C_WINCONx_BPPMODE_F_16BPP_565 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1, .bpp = S3CFB_PIXEL_BPP_8, .bytes_per_pixel = 1, .wpalcon = S3C_WPALCON_W0PAL_16BIT, #elif defined (CONFIG_FB_S3C_BPP_16) .wincon0 = 
S3C_WINCONx_ENLOCAL_DMA | S3C_WINCONx_BUFSEL_1 | S3C_WINCONx_BUFAUTOEN_DISABLE | \ S3C_WINCONx_BITSWP_DISABLE | S3C_WINCONx_BYTSWP_DISABLE | S3C_WINCONx_HAWSWP_ENABLE | \ S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_16BPP_565 | S3C_WINCONx_ENWIN_F_DISABLE, .wincon1 = S3C_WINCONx_ENLOCAL_DMA | S3C_WINCONx_BUFSEL_0 | S3C_WINCONx_BUFAUTOEN_DISABLE | \ S3C_WINCONx_BITSWP_DISABLE | S3C_WINCONx_BYTSWP_DISABLE | S3C_WINCONx_HAWSWP_ENABLE | \ S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_BPPMODE_F_16BPP_565 | \ S3C_WINCONx_ALPHA_SEL_1 | S3C_WINCONx_ENWIN_F_DISABLE, .wincon2 = S3C_WINCONx_ENLOCAL_DMA | S3C_WINCONx_BITSWP_DISABLE | S3C_WINCONx_BYTSWP_DISABLE | \ S3C_WINCONx_HAWSWP_ENABLE | S3C_WINCONx_BURSTLEN_4WORD | S3C_WINCONx_BURSTLEN_16WORD | \ S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_BPPMODE_F_16BPP_565 | S3C_WINCONx_ALPHA_SEL_1 | S3C_WINCONx_ENWIN_F_DISABLE, .wincon3 = S3C_WINCONx_BITSWP_DISABLE | S3C_WINCONx_BYTSWP_DISABLE | S3C_WINCONx_HAWSWP_ENABLE | \ S3C_WINCONx_BURSTLEN_4WORD | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BLD_PIX_PLANE | \ S3C_WINCONx_BPPMODE_F_16BPP_565 | S3C_WINCONx_ALPHA_SEL_1 | S3C_WINCONx_ENWIN_F_DISABLE, .wincon4 = S3C_WINCONx_BITSWP_DISABLE | S3C_WINCONx_BYTSWP_DISABLE | S3C_WINCONx_HAWSWP_ENABLE | \ S3C_WINCONx_BURSTLEN_4WORD | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_BPPMODE_F_16BPP_565 | S3C_WINCONx_ALPHA_SEL_1 | S3C_WINCONx_ENWIN_F_DISABLE, .bpp = S3CFB_PIXEL_BPP_16, .bytes_per_pixel = 2, .wpalcon = S3C_WPALCON_W0PAL_16BIT, #elif defined (CONFIG_FB_S3C_BPP_24) .wincon0 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_24BPP_888, .wincon1 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_24BPP_888 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1, .wincon2 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_24BPP_888 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1, .wincon3 = 
S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_24BPP_888 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1, .wincon4 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_24BPP_888 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1, .bpp = S3CFB_PIXEL_BPP_24, .bytes_per_pixel = 4, .wpalcon = S3C_WPALCON_W0PAL_24BIT, #elif defined (CONFIG_FB_S3C_BPP_28) .wincon0 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_24BPP_888, .wincon1 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_28BPP_A888 | S3C_WINCONx_BLD_PIX_PIXEL | S3C_WINCONx_ALPHA_SEL_1, .wincon2 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_28BPP_A888 | S3C_WINCONx_BLD_PIX_PIXEL | S3C_WINCONx_ALPHA_SEL_1, .wincon3 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_28BPP_A888 | S3C_WINCONx_BLD_PIX_PIXEL | S3C_WINCONx_ALPHA_SEL_1, .wincon4 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_28BPP_A888 | S3C_WINCONx_BLD_PIX_PIXEL | S3C_WINCONx_ALPHA_SEL_1, .bpp = S3CFB_PIXEL_BPP_28, .bytes_per_pixel = 4, .wpalcon = S3C_WPALCON_W0PAL_24BIT, #endif .vidosd1c = S3C_VIDOSDxC_ALPHA1_B(S3CFB_MAX_ALPHA_LEVEL) | S3C_VIDOSDxC_ALPHA1_G(S3CFB_MAX_ALPHA_LEVEL) | S3C_VIDOSDxC_ALPHA1_R(S3CFB_MAX_ALPHA_LEVEL), .vidosd2c = S3C_VIDOSDxC_ALPHA1_B(S3CFB_MAX_ALPHA_LEVEL) | S3C_VIDOSDxC_ALPHA1_G(S3CFB_MAX_ALPHA_LEVEL) | S3C_VIDOSDxC_ALPHA1_R(S3CFB_MAX_ALPHA_LEVEL), .vidosd3c = S3C_VIDOSDxC_ALPHA1_B(S3CFB_MAX_ALPHA_LEVEL) | S3C_VIDOSDxC_ALPHA1_G(S3CFB_MAX_ALPHA_LEVEL) | S3C_VIDOSDxC_ALPHA1_R(S3CFB_MAX_ALPHA_LEVEL), .vidosd4c = S3C_VIDOSDxC_ALPHA1_B(S3CFB_MAX_ALPHA_LEVEL) | S3C_VIDOSDxC_ALPHA1_G(S3CFB_MAX_ALPHA_LEVEL) | S3C_VIDOSDxC_ALPHA1_R(S3CFB_MAX_ALPHA_LEVEL), .vidintcon0 = S3C_VIDINTCON0_FRAMESEL0_VSYNC | S3C_VIDINTCON0_FRAMESEL1_NONE | S3C_VIDINTCON0_INTFRMEN_DISABLE | \ 
S3C_VIDINTCON0_FIFOSEL_WIN0 | S3C_VIDINTCON0_FIFOLEVEL_25 | S3C_VIDINTCON0_INTFIFOEN_DISABLE | S3C_VIDINTCON0_INTEN_ENABLE, .vidintcon1 = 0, .xoffset = 0, .yoffset = 0, .w1keycon0 = S3C_WxKEYCON0_KEYBLEN_DISABLE | S3C_WxKEYCON0_KEYEN_F_DISABLE | S3C_WxKEYCON0_DIRCON_MATCH_FG_IMAGE | S3C_WxKEYCON0_COMPKEY(0x0), .w1keycon1 = S3C_WxKEYCON1_COLVAL(0xffffff), .w2keycon0 = S3C_WxKEYCON0_KEYBLEN_DISABLE | S3C_WxKEYCON0_KEYEN_F_DISABLE | S3C_WxKEYCON0_DIRCON_MATCH_FG_IMAGE | S3C_WxKEYCON0_COMPKEY(0x0), .w2keycon1 = S3C_WxKEYCON1_COLVAL(0xffffff), .w3keycon0 = S3C_WxKEYCON0_KEYBLEN_DISABLE | S3C_WxKEYCON0_KEYEN_F_DISABLE | S3C_WxKEYCON0_DIRCON_MATCH_FG_IMAGE | S3C_WxKEYCON0_COMPKEY(0x0), .w3keycon1 = S3C_WxKEYCON1_COLVAL(0xffffff), .w4keycon0 = S3C_WxKEYCON0_KEYBLEN_DISABLE | S3C_WxKEYCON0_KEYEN_F_DISABLE | S3C_WxKEYCON0_DIRCON_MATCH_FG_IMAGE | S3C_WxKEYCON0_COMPKEY(0x0), .w4keycon1 = S3C_WxKEYCON1_COLVAL(0xffffff), .sync = 0, .cmap_static = 1, .vs_offset = S3CFB_DEFAULT_DISPLAY_OFFSET, .brightness = S3CFB_DEFAULT_BRIGHTNESS, .backlight_level = S3CFB_DEFAULT_BACKLIGHT_LEVEL, .backlight_power = 1, .lcd_power = 1, }; #if defined(CONFIG_S3C6410_PWM) void s3cfb_set_brightness(int val) { int channel = 1; /* must use channel-1 */ int usec = 0; /* don't care value */ unsigned long tcnt = 1000; unsigned long tcmp = 0; if (val < 0) val = 0; if (val > S3CFB_MAX_BRIGHTNESS) val = S3CFB_MAX_BRIGHTNESS; s3cfb_fimd.brightness = val; tcmp = val * 5; s3c6410_timer_setup (channel, usec, tcnt, tcmp); } #endif #if defined(CONFIG_FB_S3C_DOUBLE_BUFFERING) static void s3cfb_change_buff(int req_win, int req_fb) { switch (req_win) { case 0: if (req_fb == 0) s3cfb_fimd.wincon0 &= ~S3C_WINCONx_BUFSEL_MASK; else s3cfb_fimd.wincon0 |= S3C_WINCONx_BUFSEL_1; writel(s3cfb_fimd.wincon0 | S3C_WINCONx_ENWIN_F_ENABLE, S3C_WINCON0); break; case 1: if (req_fb == 0) s3cfb_fimd.wincon1 &= ~S3C_WINCONx_BUFSEL_MASK; else s3cfb_fimd.wincon1 |= S3C_WINCONx_BUFSEL_1; writel(s3cfb_fimd.wincon1 | 
S3C_WINCONx_ENWIN_F_ENABLE, S3C_WINCON1); break; default: break; } } #endif #if defined(CONFIG_FB_S3C_VIRTUAL_SCREEN) static int s3cfb_set_vs_registers(int vs_cmd) { int page_width, offset; int shift_value; page_width = s3cfb_fimd.xres * s3cfb_fimd.bytes_per_pixel; offset = (s3cfb_fimd.xres_virtual - s3cfb_fimd.xres) * s3cfb_fimd.bytes_per_pixel; switch (vs_cmd){ case S3CFB_VS_SET: /* size of buffer */ s3cfb_fimd.vidw00add2 = S3C_VIDWxxADD2_OFFSIZE_F(offset) | S3C_VIDWxxADD2_PAGEWIDTH_F(page_width); writel(s3cfb_fimd.vidw00add2, S3C_VIDW00ADD2); break; case S3CFB_VS_MOVE_LEFT: if (s3cfb_fimd.xoffset < s3cfb_fimd.vs_offset) shift_value = s3cfb_fimd.xoffset; else shift_value = s3cfb_fimd.vs_offset; s3cfb_fimd.xoffset -= shift_value; /* For buffer start address */ s3cfb_fimd.vidw00add0b0 = s3cfb_fimd.vidw00add0b0 - (s3cfb_fimd.bytes_per_pixel * shift_value); s3cfb_fimd.vidw00add0b1 = s3cfb_fimd.vidw00add0b1 - (s3cfb_fimd.bytes_per_pixel * shift_value); break; case S3CFB_VS_MOVE_RIGHT: if ((s3cfb_fimd.vs_info.v_width - (s3cfb_fimd.xoffset + s3cfb_fimd.vs_info.width)) < (s3cfb_fimd.vs_offset)) shift_value = s3cfb_fimd.vs_info.v_width - (s3cfb_fimd.xoffset + s3cfb_fimd.vs_info.width); else shift_value = s3cfb_fimd.vs_offset; s3cfb_fimd.xoffset += shift_value; /* For buffer start address */ s3cfb_fimd.vidw00add0b0 = s3cfb_fimd.vidw00add0b0 + (s3cfb_fimd.bytes_per_pixel * shift_value); s3cfb_fimd.vidw00add0b1 = s3cfb_fimd.vidw00add0b1 + (s3cfb_fimd.bytes_per_pixel * shift_value); break; case S3CFB_VS_MOVE_UP: if (s3cfb_fimd.yoffset < s3cfb_fimd.vs_offset) shift_value = s3cfb_fimd.yoffset; else shift_value = s3cfb_fimd.vs_offset; s3cfb_fimd.yoffset -= shift_value; /* For buffer start address */ s3cfb_fimd.vidw00add0b0 = s3cfb_fimd.vidw00add0b0 - (s3cfb_fimd.xres_virtual * s3cfb_fimd.bytes_per_pixel * shift_value); s3cfb_fimd.vidw00add0b1 = s3cfb_fimd.vidw00add0b1 - (s3cfb_fimd.xres_virtual * s3cfb_fimd.bytes_per_pixel * shift_value); break; case S3CFB_VS_MOVE_DOWN: if 
((s3cfb_fimd.vs_info.v_height - (s3cfb_fimd.yoffset + s3cfb_fimd.vs_info.height)) < (s3cfb_fimd.vs_offset)) shift_value = s3cfb_fimd.vs_info.v_height - (s3cfb_fimd.yoffset + s3cfb_fimd.vs_info.height); else shift_value = s3cfb_fimd.vs_offset; s3cfb_fimd.yoffset += shift_value; /* For buffer start address */ s3cfb_fimd.vidw00add0b0 = s3cfb_fimd.vidw00add0b0 + (s3cfb_fimd.xres_virtual * s3cfb_fimd.bytes_per_pixel * shift_value); s3cfb_fimd.vidw00add0b1 = s3cfb_fimd.vidw00add0b1 + (s3cfb_fimd.xres_virtual * s3cfb_fimd.bytes_per_pixel * shift_value); break; default: return -EINVAL; } /* End address */ s3cfb_fimd.vidw00add1b0 = S3C_VIDWxxADD1_VBASEL_F(s3cfb_fimd.vidw00add0b0 + (page_width + offset) * (s3cfb_fimd.yres)); s3cfb_fimd.vidw00add1b1 = S3C_VIDWxxADD1_VBASEL_F(s3cfb_fimd.vidw00add0b1 + (page_width + offset) * (s3cfb_fimd.yres)); writel(s3cfb_fimd.vidw00add0b0, S3C_VIDW00ADD0B0); writel(s3cfb_fimd.vidw00add0b1, S3C_VIDW00ADD0B1); writel(s3cfb_fimd.vidw00add1b0, S3C_VIDW00ADD1B0); writel(s3cfb_fimd.vidw00add1b1, S3C_VIDW00ADD1B1); return 0; } #endif void s3cfb_write_palette(s3cfb_info_t *fbi) { unsigned int i; unsigned long ent; unsigned int win_num = fbi->win_id; fbi->palette_ready = 0; writel((s3cfb_fimd.wpalcon | S3C_WPALCON_PALUPDATEEN), S3C_WPALCON); for (i = 0; i < 256; i++) { if ((ent = fbi->palette_buffer[i]) == S3CFB_PALETTE_BUFF_CLEAR) continue; writel(ent, S3C_TFTPAL0(i) + 0x400 * win_num); /* it seems the only way to know exactly * if the palette wrote ok, is to check * to see if the value verifies ok */ if (readl(S3C_TFTPAL0(i) + 0x400 * win_num) == ent) { fbi->palette_buffer[i] = S3CFB_PALETTE_BUFF_CLEAR; } else { fbi->palette_ready = 1; /* retry */ printk("Retry writing into the palette\n"); } } writel(s3cfb_fimd.wpalcon, S3C_WPALCON); } irqreturn_t s3cfb_irq(int irqno, void *param) { unsigned long buffer_size = 0; unsigned int i; unsigned int buffer_page_offset, buffer_page_width; unsigned int fb_start_address, fb_end_address; if 
(s3cfb_info[s3cfb_fimd.palette_win].palette_ready) s3cfb_write_palette(&s3cfb_info[s3cfb_fimd.palette_win]); for (i = 0; i < CONFIG_FB_S3C_NUM; i++) { if (s3cfb_info[i].next_fb_info_change_req) { /* fb variable setting */ s3cfb_info[i].fb.fix.smem_start = s3cfb_info[i].next_fb_info.phy_start_addr; s3cfb_info[i].fb.fix.line_length = s3cfb_info[i].next_fb_info.xres_virtual * s3cfb_fimd.bytes_per_pixel; s3cfb_info[i].fb.fix.smem_len = s3cfb_info[i].next_fb_info.xres_virtual * s3cfb_info[i].next_fb_info.yres_virtual * s3cfb_fimd.bytes_per_pixel; s3cfb_info[i].fb.var.xres = s3cfb_info[i].next_fb_info.xres; s3cfb_info[i].fb.var.yres = s3cfb_info[i].next_fb_info.yres; s3cfb_info[i].fb.var.xres_virtual = s3cfb_info[i].next_fb_info.xres_virtual; s3cfb_info[i].fb.var.yres_virtual= s3cfb_info[i].next_fb_info.yres_virtual; s3cfb_info[i].fb.var.xoffset = s3cfb_info[i].next_fb_info.xoffset; s3cfb_info[i].fb.var.yoffset = s3cfb_info[i].next_fb_info.yoffset; s3cfb_info[i].lcd_offset_x= s3cfb_info[i].next_fb_info.lcd_offset_x; s3cfb_info[i].lcd_offset_y= s3cfb_info[i].next_fb_info.lcd_offset_y; /* fb start / end address setting */ fb_start_address = s3cfb_info[i].next_fb_info.phy_start_addr + s3cfb_info[i].fb.fix.line_length * s3cfb_info[i].next_fb_info.yoffset + s3cfb_info[i].next_fb_info.xoffset * s3cfb_fimd.bytes_per_pixel; fb_end_address = fb_start_address + s3cfb_info[i].fb.fix.line_length * s3cfb_info[i].next_fb_info.yres; writel(fb_start_address, S3C_VIDW00ADD0B0 + 0x8 * i); writel(S3C_VIDWxxADD1_VBASEL_F(fb_end_address), S3C_VIDW00ADD1B0 + 0x8 * i); /* fb virtual / visible size setting */ buffer_page_width = s3cfb_info[i].next_fb_info.xres * s3cfb_fimd.bytes_per_pixel; buffer_page_offset = (s3cfb_info[i].next_fb_info.xres_virtual - s3cfb_info[i].next_fb_info.xres) * s3cfb_fimd.bytes_per_pixel; buffer_size = S3C_VIDWxxADD2_OFFSIZE_F(buffer_page_offset) | (S3C_VIDWxxADD2_PAGEWIDTH_F(buffer_page_width)); writel(buffer_size, S3C_VIDW00ADD2 + 0x04 * i); /* LCD position setting 
*/
			/* Window position: left-top (VIDOSDxA) and
			 * right-bottom (VIDOSDxB) corners on the panel. */
			writel(S3C_VIDOSDxA_OSD_LTX_F(s3cfb_info[i].next_fb_info.lcd_offset_x) |
				S3C_VIDOSDxA_OSD_LTY_F(s3cfb_info[i].next_fb_info.lcd_offset_y),
				S3C_VIDOSD0A+(0x10 * i));
			writel(S3C_VIDOSDxB_OSD_RBX_F(s3cfb_info[i].next_fb_info.lcd_offset_x - 1 + s3cfb_info[i].next_fb_info.xres) |
				S3C_VIDOSDxB_OSD_RBY_F(s3cfb_info[i].next_fb_info.lcd_offset_y - 1 + s3cfb_info[i].next_fb_info.yres),
				S3C_VIDOSD0B + (0x10 * i));

			/* fb size setting */
			if (i == 0)
				writel(S3C_VIDOSD0C_OSDSIZE(s3cfb_info[i].next_fb_info.xres * s3cfb_info[i].next_fb_info.yres), S3C_VIDOSD0C);
			else if (i == 1)
				writel(S3C_VIDOSD0C_OSDSIZE(s3cfb_info[i].next_fb_info.xres * s3cfb_info[i].next_fb_info.yres), S3C_VIDOSD1D);
			else if (i == 2)
				writel(S3C_VIDOSD0C_OSDSIZE(s3cfb_info[i].next_fb_info.xres * s3cfb_info[i].next_fb_info.yres), S3C_VIDOSD2D);

			s3cfb_info[i].next_fb_info_change_req = 0;
		}
	}

	/* for clearing the interrupt source */
	writel(readl(S3C_VIDINTCON1), S3C_VIDINTCON1);

	s3cfb_fimd.vsync_info.count++;
	wake_up_interruptible(&s3cfb_fimd.vsync_info.wait_queue);

	return IRQ_HANDLED;
}

/*
 * Busy-poll until the controller's line counter (VIDCON1 bits [26:16])
 * reads zero, i.e. the panel scan-out has gone idle.  The loop bound is
 * an iteration budget, not wall time.  If the counter never clears,
 * warn and force video output back on so the controller is not left in
 * a half-disabled state.
 */
static void s3cfb_check_line_count(void)
{
	int timeout = 30 * 5300;
	unsigned int cfg;
	int i;

	i = 0;
	do {
		if (!(readl(S3C_VIDCON1) & 0x7ff0000))
			break;
		i++;
	} while (i < timeout);

	if (i == timeout) {
		printk(KERN_WARNING "line count mismatch\n");
		cfg = readl(S3C_VIDCON0);
		cfg |= (S3C_VIDCON0_ENVID_F_ENABLE | S3C_VIDCON0_ENVID_ENABLE);
		writel(cfg, S3C_VIDCON0);
	}
}

/*
 * Switch window 0 input to the local (on-chip) path.  The window is
 * disabled before the source bits are reprogrammed, then re-enabled
 * together with ENLOCAL; INRGB_YUV is set when the local input carries
 * YUV rather than RGB data.
 */
static void s3cfb_enable_local0(int in_yuv)
{
	unsigned int value;

	s3cfb_fimd.wincon0 = readl(S3C_WINCON0);
	/* window off before changing its input source */
	s3cfb_fimd.wincon0 &= ~S3C_WINCONx_ENWIN_F_ENABLE;
	writel(s3cfb_fimd.wincon0, S3C_WINCON0);

	s3cfb_fimd.wincon0 &= ~(S3C_WINCONx_ENLOCAL_MASK | S3C_WINCONx_INRGB_MASK);
	value = S3C_WINCONx_ENLOCAL | S3C_WINCONx_ENWIN_F_ENABLE;

	if (in_yuv)
		value |= S3C_WINCONx_INRGB_YUV;

	writel(s3cfb_fimd.wincon0 | value, S3C_WINCON0);
}

/*
 * As s3cfb_enable_local0() but for window 1.  'sel' additionally
 * chooses which local source feeds the window (LOCALSEL field).
 */
static void s3cfb_enable_local1(int in_yuv, int sel)
{
	unsigned int value;

	s3cfb_fimd.wincon1 = readl(S3C_WINCON1);
	/* window off before changing its input source */
	s3cfb_fimd.wincon1 &= ~S3C_WINCONx_ENWIN_F_ENABLE;
	writel(s3cfb_fimd.wincon1, S3C_WINCON1);

	s3cfb_fimd.wincon1 &= ~(S3C_WINCONx_ENLOCAL_MASK | S3C_WINCONx_INRGB_MASK);
	s3cfb_fimd.wincon1 &= ~(S3C_WINCON1_LOCALSEL_MASK);
	value = sel | S3C_WINCONx_ENLOCAL | S3C_WINCONx_ENWIN_F_ENABLE;

	if (in_yuv)
		value |= S3C_WINCONx_INRGB_YUV;

	writel(s3cfb_fimd.wincon1 | value, S3C_WINCON1);
}

/*
 * As s3cfb_enable_local1() but for window 2 (uses the WINCON2 LOCALSEL
 * mask; note LOCALSEL is cleared before the window-disable write here,
 * unlike the window-1 variant).
 */
static void s3cfb_enable_local2(int in_yuv, int sel)
{
	unsigned int value;

	s3cfb_fimd.wincon2 = readl(S3C_WINCON2);
	s3cfb_fimd.wincon2 &= ~S3C_WINCONx_ENWIN_F_ENABLE;
	s3cfb_fimd.wincon2 &= ~S3C_WINCON2_LOCALSEL_MASK;
	writel(s3cfb_fimd.wincon2, S3C_WINCON2);

	s3cfb_fimd.wincon2 &= ~(S3C_WINCONx_ENLOCAL_MASK | S3C_WINCONx_INRGB_MASK);
	value = sel | S3C_WINCONx_ENLOCAL | S3C_WINCONx_ENWIN_F_ENABLE;

	if (in_yuv)
		value |= S3C_WINCONx_INRGB_YUV;

	writel(s3cfb_fimd.wincon2 | value, S3C_WINCON2);
}

/* Route window 0 back to DMA (framebuffer memory) input, enabled. */
static void s3cfb_enable_dma0(void)
{
	u32 value;

	s3cfb_fimd.wincon0 &= ~(S3C_WINCONx_ENLOCAL_MASK | S3C_WINCONx_INRGB_MASK);
	value = S3C_WINCONx_ENLOCAL_DMA | S3C_WINCONx_ENWIN_F_ENABLE;
	writel(s3cfb_fimd.wincon0 | value, S3C_WINCON0);
}

/* Route window 1 back to DMA (framebuffer memory) input, enabled. */
static void s3cfb_enable_dma1(void)
{
	u32 value;

	s3cfb_fimd.wincon1 &= ~(S3C_WINCONx_ENLOCAL_MASK | S3C_WINCONx_INRGB_MASK);
	value = S3C_WINCONx_ENLOCAL_DMA | S3C_WINCONx_ENWIN_F_ENABLE;
	writel(s3cfb_fimd.wincon1 | value, S3C_WINCON1);
}

/* Route window 2 back to DMA (framebuffer memory) input, enabled. */
static void s3cfb_enable_dma2(void)
{
	u32 value;

	s3cfb_fimd.wincon2 &= ~(S3C_WINCONx_ENLOCAL_MASK | S3C_WINCONx_INRGB_MASK);
	value = S3C_WINCONx_ENLOCAL_DMA | S3C_WINCONx_ENWIN_F_ENABLE;
	writel(s3cfb_fimd.wincon2 | value, S3C_WINCON2);
}

/*
 * Public entry: switch window 'win' (0..2) to the local input path.
 * Waits for the scan line counter to go idle first.
 */
void s3cfb_enable_local(int win, int in_yuv, int sel)
{
	s3cfb_check_line_count();

	switch (win) {
	case 0:
		s3cfb_enable_local0(in_yuv);
		break;
	case 1:
		s3cfb_enable_local1(in_yuv, sel);
		break;
	case 2:
		s3cfb_enable_local2(in_yuv, sel);
		break;
	default:
		break;
	}
}

/*
 * Public entry: switch window 'win' (0..2) back to DMA input.  The LCD
 * is stopped around the change.  (Function body continues on the next
 * original line.)
 */
void s3cfb_enable_dma(int win)
{
	s3cfb_stop_lcd();

	switch (win) {
	case 0:
		s3cfb_enable_dma0();
		break;
	case 1:
		s3cfb_enable_dma1();
		break;
	case 2:
		s3cfb_enable_dma2();
		break;
default: break; } s3cfb_start_lcd(); } EXPORT_SYMBOL(s3cfb_enable_local); EXPORT_SYMBOL(s3cfb_enable_dma); int s3cfb_init_registers(s3cfb_info_t *fbi) { struct clk *lcd_clock; struct fb_var_screeninfo *var = &fbi->fb.var; unsigned long flags = 0, page_width = 0, offset = 0; unsigned long video_phy_temp_f1 = fbi->screen_dma_f1; unsigned long video_phy_temp_f2 = fbi->screen_dma_f2; int win_num = fbi->win_id; /* Initialise LCD with values from hare */ local_irq_save(flags); page_width = var->xres * s3cfb_fimd.bytes_per_pixel; offset = (var->xres_virtual - var->xres) * s3cfb_fimd.bytes_per_pixel; if (win_num == 0) { s3cfb_fimd.vidcon0 = s3cfb_fimd.vidcon0 & ~(S3C_VIDCON0_ENVID_ENABLE | S3C_VIDCON0_ENVID_F_ENABLE); writel(s3cfb_fimd.vidcon0, S3C_VIDCON0); lcd_clock = clk_get(NULL, "lcd"); s3cfb_fimd.vidcon0 |= S3C_VIDCON0_CLKVAL_F((int) ((clk_get_rate(lcd_clock) / s3cfb_fimd.pixclock) - 1)); #if defined(CONFIG_FB_S3C_VIRTUAL_SCREEN) offset = 0; s3cfb_fimd.vidw00add0b0 = video_phy_temp_f1; s3cfb_fimd.vidw00add0b1 = video_phy_temp_f2; s3cfb_fimd.vidw00add1b0 = S3C_VIDWxxADD1_VBASEL_F((unsigned long) video_phy_temp_f1 + (page_width + offset) * (var->yres)); s3cfb_fimd.vidw00add1b1 = S3C_VIDWxxADD1_VBASEL_F((unsigned long) video_phy_temp_f2 + (page_width + offset) * (var->yres)); #endif } writel(video_phy_temp_f1, S3C_VIDW00ADD0B0 + (0x08 * win_num)); writel(S3C_VIDWxxADD1_VBASEL_F((unsigned long) video_phy_temp_f1 + (page_width + offset) * (var->yres)), S3C_VIDW00ADD1B0 + (0x08 * win_num)); writel(S3C_VIDWxxADD2_OFFSIZE_F(offset) | (S3C_VIDWxxADD2_PAGEWIDTH_F(page_width)), S3C_VIDW00ADD2 + (0x04 * win_num)); if (win_num < 2) { writel(video_phy_temp_f2, S3C_VIDW00ADD0B1 + (0x08 * win_num)); writel(S3C_VIDWxxADD1_VBASEL_F((unsigned long) video_phy_temp_f2 + (page_width + offset) * (var->yres)), S3C_VIDW00ADD1B1 + (0x08 * win_num)); } switch (win_num) { case 0: writel(s3cfb_fimd.wincon0, S3C_WINCON0); writel(s3cfb_fimd.vidcon0, S3C_VIDCON0); writel(s3cfb_fimd.vidcon1, 
S3C_VIDCON1); writel(s3cfb_fimd.vidtcon0, S3C_VIDTCON0); writel(s3cfb_fimd.vidtcon1, S3C_VIDTCON1); writel(s3cfb_fimd.vidtcon2, S3C_VIDTCON2); writel(s3cfb_fimd.dithmode, S3C_DITHMODE); writel(s3cfb_fimd.vidintcon0, S3C_VIDINTCON0); writel(s3cfb_fimd.vidintcon1, S3C_VIDINTCON1); writel(s3cfb_fimd.vidosd0a, S3C_VIDOSD0A); writel(s3cfb_fimd.vidosd0b, S3C_VIDOSD0B); writel(s3cfb_fimd.vidosd0c, S3C_VIDOSD0C); writel(s3cfb_fimd.wpalcon, S3C_WPALCON); s3cfb_onoff_win(fbi, ON); break; case 1: writel(s3cfb_fimd.wincon1, S3C_WINCON1); writel(s3cfb_fimd.vidosd1a, S3C_VIDOSD1A); writel(s3cfb_fimd.vidosd1b, S3C_VIDOSD1B); writel(s3cfb_fimd.vidosd1c, S3C_VIDOSD1C); writel(s3cfb_fimd.vidosd1d, S3C_VIDOSD1D); writel(s3cfb_fimd.wpalcon, S3C_WPALCON); s3cfb_onoff_win(fbi, OFF); break; case 2: writel(s3cfb_fimd.wincon2, S3C_WINCON2); writel(s3cfb_fimd.vidosd2a, S3C_VIDOSD2A); writel(s3cfb_fimd.vidosd2b, S3C_VIDOSD2B); writel(s3cfb_fimd.vidosd2c, S3C_VIDOSD2C); writel(s3cfb_fimd.vidosd2d, S3C_VIDOSD2D); writel(s3cfb_fimd.wpalcon, S3C_WPALCON); s3cfb_onoff_win(fbi, OFF); break; case 3: writel(s3cfb_fimd.wincon3, S3C_WINCON3); writel(s3cfb_fimd.vidosd3a, S3C_VIDOSD3A); writel(s3cfb_fimd.vidosd3b, S3C_VIDOSD3B); writel(s3cfb_fimd.vidosd3c, S3C_VIDOSD3C); writel(s3cfb_fimd.wpalcon, S3C_WPALCON); s3cfb_onoff_win(fbi, OFF); break; case 4: writel(s3cfb_fimd.wincon4, S3C_WINCON4); writel(s3cfb_fimd.vidosd4a, S3C_VIDOSD4A); writel(s3cfb_fimd.vidosd4b, S3C_VIDOSD4B); writel(s3cfb_fimd.vidosd4c, S3C_VIDOSD4C); writel(s3cfb_fimd.wpalcon, S3C_WPALCON); s3cfb_onoff_win(fbi, OFF); break; } local_irq_restore(flags); return 0; } void s3cfb_activate_var(s3cfb_info_t *fbi, struct fb_var_screeninfo *var) { DPRINTK("%s: var->bpp = %d\n", __FUNCTION__, var->bits_per_pixel); switch (var->bits_per_pixel) { case 8: s3cfb_fimd.wincon0 = S3C_WINCONx_BYTSWP_ENABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_8BPP_PAL; s3cfb_fimd.wincon1 = S3C_WINCONx_HAWSWP_ENABLE | S3C_WINCONx_BURSTLEN_16WORD | 
S3C_WINCONx_BPPMODE_F_16BPP_565 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.wincon2 = S3C_WINCONx_HAWSWP_ENABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_16BPP_565 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.wincon3 = S3C_WINCONx_HAWSWP_ENABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_16BPP_565 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.wincon4 = S3C_WINCONx_HAWSWP_ENABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_16BPP_565 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.bpp = S3CFB_PIXEL_BPP_8; s3cfb_fimd.bytes_per_pixel = 1; s3cfb_fimd.wpalcon = S3C_WPALCON_W0PAL_16BIT; break; case 16: s3cfb_fimd.wincon0 = S3C_WINCONx_HAWSWP_ENABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_16BPP_565; s3cfb_fimd.wincon1 = S3C_WINCONx_HAWSWP_ENABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_16BPP_565 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.wincon2 = S3C_WINCONx_HAWSWP_ENABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_16BPP_565 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.wincon3 = S3C_WINCONx_HAWSWP_ENABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_16BPP_565 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.wincon4 = S3C_WINCONx_HAWSWP_ENABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_16BPP_565 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.bpp = S3CFB_PIXEL_BPP_16; s3cfb_fimd.bytes_per_pixel = 2; break; case 24: s3cfb_fimd.wincon0 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_24BPP_888; s3cfb_fimd.wincon1 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_24BPP_888 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.wincon2 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_24BPP_888 | S3C_WINCONx_BLD_PIX_PLANE | 
S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.wincon3 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_24BPP_888 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.wincon4 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_24BPP_888 | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.bpp = S3CFB_PIXEL_BPP_24; s3cfb_fimd.bytes_per_pixel = 4; break; case 28: s3cfb_fimd.wincon0 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_24BPP_888; s3cfb_fimd.wincon1 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_28BPP_A888 | S3C_WINCONx_BLD_PIX_PIXEL | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.wincon2 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_28BPP_A888 | S3C_WINCONx_BLD_PIX_PIXEL | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.wincon3 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_28BPP_A888 | S3C_WINCONx_BLD_PIX_PIXEL | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.wincon4 = S3C_WINCONx_HAWSWP_DISABLE | S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BPPMODE_F_28BPP_A888 | S3C_WINCONx_BLD_PIX_PIXEL | S3C_WINCONx_ALPHA_SEL_1; s3cfb_fimd.bpp = S3CFB_PIXEL_BPP_28; s3cfb_fimd.bytes_per_pixel = 4; if((fbi->win_id == 0) && (fbi->fb.var.bits_per_pixel == 28) ) fbi->fb.var.bits_per_pixel = 24; break; case 32: s3cfb_fimd.bytes_per_pixel = 4; break; } /* write new registers */ /* FIXME: temporary fixing for pm by jsgood */ #if 1 writel(s3cfb_fimd.wincon0, S3C_WINCON0); writel(s3cfb_fimd.wincon1, S3C_WINCON1); writel(s3cfb_fimd.wincon2, S3C_WINCON2); writel(s3cfb_fimd.wincon3, S3C_WINCON3); writel(s3cfb_fimd.wincon4, S3C_WINCON4); writel(s3cfb_fimd.wpalcon, S3C_WPALCON); writel(s3cfb_fimd.wincon0 | S3C_WINCONx_ENWIN_F_ENABLE, S3C_WINCON0); writel(s3cfb_fimd.vidcon0 | S3C_VIDCON0_ENVID_ENABLE | S3C_VIDCON0_ENVID_F_ENABLE, S3C_VIDCON0); #else writel(readl(S3C_WINCON0) | 
S3C_WINCONx_ENWIN_F_ENABLE, S3C_WINCON0); writel(readl(S3C_VIDCON0) | S3C_VIDCON0_ENVID_ENABLE | S3C_VIDCON0_ENVID_F_ENABLE, S3C_VIDCON0); #endif } /* JJNAHM comment. * We had some problems related to frame buffer address. * We used 2 frame buffers (FB0 and FB1) and GTK used FB1. * When GTK launched, GTK set FB0's address to FB1's address. * (GTK calls s3c_fb_pan_display() and then it calls this s3c_fb_set_lcdaddr()) * Even though fbi->win_id is not 0, above original codes set ONLY FB0's address. * So, I modified the codes like below. * It works by fbi->win_id value. * Below codes are not verified yet * and there are nothing about Double buffering features */ void s3cfb_set_fb_addr(s3cfb_info_t *fbi) { unsigned long video_phy_temp_f1 = fbi->screen_dma_f1; unsigned long start_address, end_address; unsigned int start; start = fbi->fb.fix.line_length * fbi->fb.var.yoffset; /* for buffer start address and end address */ start_address = video_phy_temp_f1 + start; end_address = start_address + (fbi->fb.fix.line_length * fbi->fb.var.yres); switch (fbi->win_id) { case 0: s3cfb_fimd.vidw00add0b0 = start_address; s3cfb_fimd.vidw00add1b0 = end_address; __raw_writel(s3cfb_fimd.vidw00add0b0, S3C_VIDW00ADD0B0); __raw_writel(s3cfb_fimd.vidw00add1b0, S3C_VIDW00ADD1B0); break; case 1: s3cfb_fimd.vidw01add0b0 = start_address; s3cfb_fimd.vidw01add1b0 = end_address; __raw_writel(s3cfb_fimd.vidw01add0b0, S3C_VIDW01ADD0B0); __raw_writel(s3cfb_fimd.vidw01add1b0, S3C_VIDW01ADD1B0); break; case 2: s3cfb_fimd.vidw02add0 = start_address; s3cfb_fimd.vidw02add1 = end_address; __raw_writel(s3cfb_fimd.vidw02add0, S3C_VIDW02ADD0); __raw_writel(s3cfb_fimd.vidw02add1, S3C_VIDW02ADD1); break; case 3: s3cfb_fimd.vidw03add0 = start_address; s3cfb_fimd.vidw03add1 = end_address; __raw_writel(s3cfb_fimd.vidw03add0, S3C_VIDW03ADD0); __raw_writel(s3cfb_fimd.vidw03add1, S3C_VIDW03ADD1); break; case 4: s3cfb_fimd.vidw04add0 = start_address; s3cfb_fimd.vidw04add1 = end_address; 
/* (tail of s3cfb_set_fb_addr: program window 4 buffer start/end) */
__raw_writel(s3cfb_fimd.vidw04add0, S3C_VIDW04ADD0);
		__raw_writel(s3cfb_fimd.vidw04add1, S3C_VIDW04ADD1);
		break;
	}
}

/*
 * Program one of the two alpha values for this window into the R/G/B
 * nibbles of its VIDOSDxC register: ALPHA0 when alpha_index == 0,
 * ALPHA1 otherwise.  The same 4-bit 'level' is written to all three
 * color components.  Window 0 cannot blend; returns -1 for it, else 0.
 * Note: 'level' is not clamped here — callers pass values already
 * limited to S3CFB_MAX_ALPHA_LEVEL (see s3cfb_ioctl).
 */
static int s3cfb_set_alpha_level(s3cfb_info_t *fbi, unsigned int level, unsigned int alpha_index)
{
	unsigned long alpha_val;
	int win_num = fbi->win_id;

	if (win_num == 0) {
		printk("WIN0 do not support alpha blending.\n");
		return -1;
	}

	alpha_val = readl(S3C_VIDOSD0C+(0x10 * win_num));

	if (alpha_index == 0) {
		/* replace the ALPHA0 R/G/B fields, keep everything else */
		alpha_val &= ~(S3C_VIDOSDxC_ALPHA0_B(0xf) | S3C_VIDOSDxC_ALPHA0_G(0xf) | S3C_VIDOSDxC_ALPHA0_R(0xf));
		alpha_val |= S3C_VIDOSDxC_ALPHA0_B(level) | S3C_VIDOSDxC_ALPHA0_G(level) | S3C_VIDOSDxC_ALPHA0_R(level);
	} else {
		/* replace the ALPHA1 R/G/B fields, keep everything else */
		alpha_val &= ~(S3C_VIDOSDxC_ALPHA1_B(0xf) | S3C_VIDOSDxC_ALPHA1_G(0xf) | S3C_VIDOSDxC_ALPHA1_R(0xf));
		alpha_val |= S3C_VIDOSDxC_ALPHA1_B(level) | S3C_VIDOSDxC_ALPHA1_G(level) | S3C_VIDOSDxC_ALPHA1_R(level);
	}

	writel(alpha_val, S3C_VIDOSD0C + (0x10 * win_num));

	return 0;
}

/*
 * Select the blending mode for this window's WINCON register:
 * plane blending (one alpha for the whole window) or per-pixel
 * blending with chroma key.  Window 0 cannot blend; returns -1
 * for it, else 0.  Unknown 'mode' values leave the register
 * untouched but still return 0.
 */
int s3cfb_set_alpha_mode(s3cfb_info_t *fbi, int mode)
{
	unsigned long alpha_mode;
	int win_num = fbi->win_id;

	if (win_num == 0) {
		printk("WIN0 do not support alpha blending.\n");
		return -1;
	}

	alpha_mode = readl(S3C_WINCON0 + (0x04 * win_num));
	alpha_mode &= ~(S3C_WINCONx_BLD_PIX_PIXEL | S3C_WINCONx_ALPHA_SEL_1);

	switch (mode) {
	case S3CFB_ALPHA_MODE_PLANE:	/* Plane Blending */
		writel(alpha_mode | S3C_WINCONx_BLD_PIX_PLANE | S3C_WINCONx_ALPHA_SEL_1, S3C_WINCON0 + (0x04 * win_num));
		break;

	case S3CFB_ALPHA_MODE_PIXEL:	/* Pixel Blending & chroma(color) key */
		writel(alpha_mode | S3C_WINCONx_BLD_PIX_PIXEL | S3C_WINCONx_ALPHA_SEL_0, S3C_WINCON0 + (0x04 * win_num));
		break;
	}

	return 0;
}

/*
 * Move this window's on-screen rectangle: program the left-top
 * (VIDOSDxA) and right-bottom (VIDOSDxB) corners and mirror the new
 * position into the fb_var xoffset/yoffset.  Always returns 0.
 */
int s3cfb_set_win_position(s3cfb_info_t *fbi, int left_x, int top_y, int width, int height)
{
	struct fb_var_screeninfo *var= &fbi->fb.var;
	int win_num = fbi->win_id;

	writel(S3C_VIDOSDxA_OSD_LTX_F(left_x) | S3C_VIDOSDxA_OSD_LTY_F(top_y), S3C_VIDOSD0A + (0x10 * win_num));
	writel(S3C_VIDOSDxB_OSD_RBX_F(width - 1 + left_x) | S3C_VIDOSDxB_OSD_RBY_F(height - 1 + top_y), S3C_VIDOSD0B + (0x10 * win_num));

	var->xoffset = left_x;
	var->yoffset = top_y;

	return 0;
}

/*
 * Set this window's pixel-count size register (only windows 1 and 2
 * have one here) and mirror width/height into the fb_var geometry.
 * Always returns 0.
 */
int s3cfb_set_win_size(s3cfb_info_t *fbi, int width, int height)
{
	struct fb_var_screeninfo *var= &fbi->fb.var;
	int win_num = fbi->win_id;

	if (win_num == 1)
		writel(S3C_VIDOSD0C_OSDSIZE(width * height), S3C_VIDOSD1D);
	else if (win_num == 2)
		writel(S3C_VIDOSD0C_OSDSIZE(width * height), S3C_VIDOSD2D);

	var->xres = width;
	var->yres = height;
	var->xres_virtual = width;
	var->yres_virtual = height;

	return 0;
}

/*
 * Recompute this window's buffer end address (VIDWxxADD1) and line
 * stride/page width (VIDWxxADD2) registers from the current fb_var
 * geometry.  The start address already in VIDWxxADD0 is read back and
 * used as the base.  Always returns 0.
 */
int s3cfb_set_fb_size(s3cfb_info_t *fbi)
{
	struct fb_var_screeninfo *var= &fbi->fb.var;
	int win_num = fbi->win_id;
	unsigned long offset = 0;
	unsigned long page_width = 0;
	unsigned long fb_size = 0;

	page_width = var->xres * s3cfb_fimd.bytes_per_pixel;
	offset = (var->xres_virtual - var->xres) * s3cfb_fimd.bytes_per_pixel;

#if defined(CONFIG_FB_S3C_VIRTUAL_SCREEN)
	/* virtual-screen mode manages window 0's offset elsewhere */
	if (win_num == 0)
		offset=0;
#endif

	/* end = start + (visible line + gap) * lines */
	writel(S3C_VIDWxxADD1_VBASEL_F((unsigned long) readl(S3C_VIDW00ADD0B0 + (0x08 * win_num)) + (page_width + offset) * (var->yres)), S3C_VIDW00ADD1B0 + (0x08 * win_num));

	if (win_num == 1)
		writel(S3C_VIDWxxADD1_VBASEL_F((unsigned long) readl(S3C_VIDW00ADD0B1 + (0x08 * win_num)) + (page_width + offset) * (var->yres)), S3C_VIDW00ADD1B1 + (0x08 * win_num));

	/* size of frame buffer */
	fb_size = S3C_VIDWxxADD2_OFFSIZE_F(offset) | (S3C_VIDWxxADD2_PAGEWIDTH_F(page_width));
	writel(fb_size, S3C_VIDW00ADD2 + (0x04 * win_num));

	return 0;
}

/*
 * Select the display controller output path in VIDCON0 (e.g. RGB LCD
 * vs. TV encoder).  Interlace is cleared for the TV path per the
 * original comment below.
 */
void s3cfb_set_output_path(int out)
{
	unsigned int tmp;

	tmp = readl(S3C_VIDCON0);

	/* if output mode is LCD mode, Scan mode always should be progressive mode */
	if (out == S3CFB_OUTPUT_TV)
		tmp &= ~S3C_VIDCON0_INTERLACE_F_MASK;

	tmp &= ~S3C_VIDCON0_VIDOUT_MASK;
	tmp |= S3C_VIDCON0_VIDOUT(out);
	writel(tmp, S3C_VIDCON0);
}

EXPORT_SYMBOL(s3cfb_set_output_path);

/*
 * Configure VIDCON2: when 'on', set Cb/Cr-first YUV component ordering
 * bits; otherwise clear the register.
 */
void s3cfb_enable_rgbport(int on)
{
	if (on)
		writel(S3C_VIDCON2_ORGYUV_CBCRY | S3C_VIDCON2_YUVORD_CRCB, S3C_VIDCON2);
	else
		writel(0, S3C_VIDCON2);
}

EXPORT_SYMBOL(s3cfb_enable_rgbport);

/* fbdev ioctl entry point (body continues on the following original lines). */
int s3cfb_ioctl(struct fb_info *info, unsigned int
cmd, unsigned long arg) { s3cfb_info_t *fbi = container_of(info, s3cfb_info_t, fb); s3cfb_win_info_t win_info; s3cfb_color_key_info_t colkey_info; s3cfb_color_val_info_t colval_info; s3cfb_dma_info_t dma_info; s3cfb_next_info_t next_fb_info; struct fb_var_screeninfo *var= &fbi->fb.var; unsigned int crt, alpha_level, alpha_mode; #if defined(CONFIG_S3C6410_PWM) int brightness; #endif #if defined(CONFIG_FB_S3C_DOUBLE_BUFFERING) unsigned int f_num_val; #endif #if defined(CONFIG_FB_S3C_VIRTUAL_SCREEN) s3cfb_vs_info_t vs_info; #endif switch(cmd){ case S3CFB_GET_INFO: dma_info.map_dma_f1 = fbi->map_dma_f1; dma_info.map_dma_f2 = fbi->map_dma_f2; if(copy_to_user((void *) arg, (const void *) &dma_info, sizeof(s3cfb_dma_info_t))) return -EFAULT; break; case S3CFB_OSD_SET_INFO: if (copy_from_user(&win_info, (s3cfb_win_info_t *) arg, sizeof(s3cfb_win_info_t))) return -EFAULT; s3cfb_init_win(fbi, win_info.bpp, win_info.left_x, win_info.top_y, win_info.width, win_info.height, OFF); break; case S3CFB_OSD_START: s3cfb_onoff_win(fbi, ON); break; case S3CFB_OSD_STOP: s3cfb_onoff_win(fbi, OFF); break; case S3CFB_OSD_ALPHA_UP: alpha_level = readl(S3C_VIDOSD0C + (0x10 * fbi->win_id)) & 0xf; if (alpha_level < S3CFB_MAX_ALPHA_LEVEL) alpha_level++; s3cfb_set_alpha_level(fbi, alpha_level, 1); break; case S3CFB_OSD_ALPHA_DOWN: alpha_level = readl(S3C_VIDOSD0C + (0x10 * fbi->win_id)) & 0xf; if (alpha_level > 0) alpha_level--; s3cfb_set_alpha_level(fbi, alpha_level, 1); break; case S3CFB_OSD_ALPHA0_SET: alpha_level = (unsigned int) arg; if (alpha_level > S3CFB_MAX_ALPHA_LEVEL) alpha_level = S3CFB_MAX_ALPHA_LEVEL; s3cfb_set_alpha_level(fbi, alpha_level, 0); break; case S3CFB_OSD_ALPHA1_SET: alpha_level = (unsigned int) arg; if (alpha_level > S3CFB_MAX_ALPHA_LEVEL) alpha_level = S3CFB_MAX_ALPHA_LEVEL; s3cfb_set_alpha_level(fbi, alpha_level, 1); break; case S3CFB_OSD_ALPHA_MODE: alpha_mode = (unsigned int) arg; s3cfb_set_alpha_mode(fbi, alpha_mode); break; case S3CFB_OSD_MOVE_LEFT: if 
(var->xoffset > 0) var->xoffset--; s3cfb_set_win_position(fbi, var->xoffset, var->yoffset, var->xres, var->yres); break; case S3CFB_OSD_MOVE_RIGHT: if (var->xoffset < (s3cfb_fimd.width - var->xres)) var->xoffset++; s3cfb_set_win_position(fbi, var->xoffset, var->yoffset, var->xres, var->yres); break; case S3CFB_OSD_MOVE_UP: if (var->yoffset > 0) var->yoffset--; s3cfb_set_win_position(fbi, var->xoffset, var->yoffset, var->xres, var->yres); break; case S3CFB_OSD_MOVE_DOWN: if (var->yoffset < (s3cfb_fimd.height - var->yres)) var->yoffset++; s3cfb_set_win_position(fbi, var->xoffset, var->yoffset, var->xres, var->yres); break; case FBIO_WAITFORVSYNC: if (get_user(crt, (unsigned int __user *)arg)) return -EFAULT; return s3cfb_wait_for_vsync(); case S3CFB_COLOR_KEY_START: s3cfb_onoff_color_key(fbi, ON); break; case S3CFB_COLOR_KEY_STOP: s3cfb_onoff_color_key(fbi, OFF); break; case S3CFB_COLOR_KEY_ALPHA_START: s3cfb_onoff_color_key_alpha(fbi, ON); break; case S3CFB_COLOR_KEY_ALPHA_STOP: s3cfb_onoff_color_key_alpha(fbi, OFF); break; case S3CFB_COLOR_KEY_SET_INFO: if (copy_from_user(&colkey_info, (s3cfb_color_key_info_t *) arg, sizeof(s3cfb_color_key_info_t))) return -EFAULT; s3cfb_set_color_key_registers(fbi, colkey_info); break; case S3CFB_COLOR_KEY_VALUE: if (copy_from_user(&colval_info, (s3cfb_color_val_info_t *) arg, sizeof(s3cfb_color_val_info_t))) return -EFAULT; s3cfb_set_color_value(fbi, colval_info); break; case S3CFB_SET_VSYNC_INT: s3cfb_fimd.vidintcon0 &= ~S3C_VIDINTCON0_FRAMESEL0_MASK; s3cfb_fimd.vidintcon0 |= S3C_VIDINTCON0_FRAMESEL0_VSYNC; if (arg) s3cfb_fimd.vidintcon0 |= S3C_VIDINTCON0_INTFRMEN_ENABLE; else s3cfb_fimd.vidintcon0 &= ~S3C_VIDINTCON0_INTFRMEN_ENABLE; writel(s3cfb_fimd.vidintcon0, S3C_VIDINTCON0); break; case S3CFB_SET_NEXT_FB_INFO: if (copy_from_user(&next_fb_info, (s3cfb_next_info_t *) arg, sizeof(s3cfb_next_info_t))) return -EFAULT; /* check arguments */ if ((next_fb_info.xres + next_fb_info.xoffset) > next_fb_info.xres_virtual || 
(next_fb_info.yres + next_fb_info.yoffset) > next_fb_info.yres_virtual || (next_fb_info.xres + next_fb_info.lcd_offset_x ) > s3cfb_fimd.width || (next_fb_info.yres + next_fb_info.lcd_offset_y ) > s3cfb_fimd.height) { printk("Error : S3CFB_SET_NEXT_FB_INFO\n"); return -EINVAL; } fbi->next_fb_info = next_fb_info; fbi->next_fb_info_change_req = 1; break; case S3CFB_GET_CURR_FB_INFO: next_fb_info.phy_start_addr = fbi->fb.fix.smem_start; next_fb_info.xres = fbi->fb.var.xres; next_fb_info.yres = fbi->fb.var.yres; next_fb_info.xres_virtual = fbi->fb.var.xres_virtual; next_fb_info.yres_virtual = fbi->fb.var.yres_virtual; next_fb_info.xoffset = fbi->fb.var.xoffset; next_fb_info.yoffset = fbi->fb.var.yoffset; next_fb_info.lcd_offset_x = fbi->lcd_offset_x; next_fb_info.lcd_offset_y = fbi->lcd_offset_y; if (copy_to_user((void *)arg, (s3cfb_next_info_t *) &next_fb_info, sizeof(s3cfb_next_info_t))) return -EFAULT; break; case S3CFB_GET_BRIGHTNESS: if (copy_to_user((void *)arg, (const void *) &s3cfb_fimd.brightness, sizeof(int))) return -EFAULT; break; #if defined(CONFIG_S3C6410_PWM) case S3CFB_SET_BRIGHTNESS: if (copy_from_user(&brightness, (int *) arg, sizeof(int))) return -EFAULT; s3cfb_set_brightness(brightness); break; #endif #if defined(CONFIG_FB_S3C_VIRTUAL_SCREEN) case S3CFB_VS_START: s3cfb_fimd.wincon0 &= ~(S3C_WINCONx_ENWIN_F_ENABLE); writel(s3cfb_fimd.wincon0 | S3C_WINCONx_ENWIN_F_ENABLE, S3C_WINCON0); fbi->fb.var.xoffset = s3cfb_fimd.xoffset; fbi->fb.var.yoffset = s3cfb_fimd.yoffset; break; case S3CFB_VS_STOP: s3cfb_fimd.vidw00add0b0 = fbi->screen_dma_f1; s3cfb_fimd.vidw00add0b1 = fbi->screen_dma_f2; fbi->fb.var.xoffset = 0; fbi->fb.var.yoffset = 0; writel(s3cfb_fimd.vidw00add0b0, S3C_VIDW00ADD0B0); writel(s3cfb_fimd.vidw00add0b1, S3C_VIDW00ADD0B1); break; case S3CFB_VS_SET_INFO: if (copy_from_user(&vs_info, (s3cfb_vs_info_t *) arg, sizeof(s3cfb_vs_info_t))) return -EFAULT; if (s3cfb_set_vs_info(vs_info)) { printk("Error S3CFB_VS_SET_INFO\n"); return -EINVAL; } 
s3cfb_set_vs_registers(S3CFB_VS_SET); fbi->fb.var.xoffset = s3cfb_fimd.xoffset; fbi->fb.var.yoffset = s3cfb_fimd.yoffset; break; case S3CFB_VS_MOVE: s3cfb_set_vs_registers(arg); fbi->fb.var.xoffset = s3cfb_fimd.xoffset; fbi->fb.var.yoffset = s3cfb_fimd.yoffset; break; #endif #if defined(CONFIG_FB_S3C_DOUBLE_BUFFERING) case S3CFB_GET_NUM: if (copy_from_user((void *)&f_num_val, (const void *)arg, sizeof(u_int))) return -EFAULT; if (copy_to_user((void *)arg, (const void *) &f_num_val, sizeof(u_int))) return -EFAULT; break; case S3CFB_CHANGE_REQ: s3cfb_change_buff(0, (int) arg); break; #endif default: return -EINVAL; } return 0; } void s3cfb_pre_init(void) { /* initialize the fimd specific */ s3cfb_fimd.vidintcon0 &= ~S3C_VIDINTCON0_FRAMESEL0_MASK; s3cfb_fimd.vidintcon0 |= S3C_VIDINTCON0_FRAMESEL0_VSYNC; s3cfb_fimd.vidintcon0 |= S3C_VIDINTCON0_INTFRMEN_ENABLE; writel(s3cfb_fimd.vidintcon0, S3C_VIDINTCON0); } int s3cfb_set_gpio(void) { unsigned long val; int i, err; /* Must be '0' for Normal-path instead of By-pass */ writel(0x0, S3C_HOSTIFB_MIFPCON); /* enable clock to LCD */ val = readl(S3C_HCLK_GATE); val |= S3C_CLKCON_HCLK_LCD; writel(val, S3C_HCLK_GATE); /* select TFT LCD type (RGB I/F) */ val = readl(S3C64XX_SPC_BASE); val &= ~0x3; val |= (1 << 0); writel(val, S3C64XX_SPC_BASE); /* VD */ for (i = 0; i < 16; i++) s3c_gpio_cfgpin(S3C64XX_GPI(i), S3C_GPIO_SFN(2)); for (i = 0; i < 12; i++) s3c_gpio_cfgpin(S3C64XX_GPJ(i), S3C_GPIO_SFN(2)); #ifndef CONFIG_BACKLIGHT_PWM /* backlight ON */ if (gpio_is_valid(S3C64XX_GPF(15))) { err = gpio_request(S3C64XX_GPF(15), "GPF"); if (err) { printk(KERN_ERR "failed to request GPF for " "lcd backlight control\n"); return err; } gpio_direction_output(S3C64XX_GPF(15), 1); } #endif /* module reset */ if (gpio_is_valid(S3C64XX_GPN(5))) { err = gpio_request(S3C64XX_GPN(5), "GPN"); if (err) { printk(KERN_ERR "failed to request GPN for " "lcd reset control\n"); return err; } gpio_direction_output(S3C64XX_GPN(5), 1); } mdelay(100); 
gpio_set_value(S3C64XX_GPN(5), 0); mdelay(10); gpio_set_value(S3C64XX_GPN(5), 1); mdelay(10); #ifndef CONFIG_BACKLIGHT_PWM gpio_free(S3C64XX_GPF(15)); #endif gpio_free(S3C64XX_GPN(5)); return 0; } #if defined(CONFIG_PM) static struct sleep_save s3c_lcd_save[] = { SAVE_ITEM(S3C_VIDCON0), SAVE_ITEM(S3C_VIDCON1), SAVE_ITEM(S3C_VIDTCON0), SAVE_ITEM(S3C_VIDTCON1), SAVE_ITEM(S3C_VIDTCON2), SAVE_ITEM(S3C_VIDTCON3), SAVE_ITEM(S3C_WINCON0), SAVE_ITEM(S3C_WINCON1), SAVE_ITEM(S3C_WINCON2), SAVE_ITEM(S3C_WINCON3), SAVE_ITEM(S3C_WINCON4), SAVE_ITEM(S3C_VIDOSD0A), SAVE_ITEM(S3C_VIDOSD0B), SAVE_ITEM(S3C_VIDOSD0C), SAVE_ITEM(S3C_VIDOSD1A), SAVE_ITEM(S3C_VIDOSD1B), SAVE_ITEM(S3C_VIDOSD1C), SAVE_ITEM(S3C_VIDOSD1D), SAVE_ITEM(S3C_VIDOSD2A), SAVE_ITEM(S3C_VIDOSD2B), SAVE_ITEM(S3C_VIDOSD2C), SAVE_ITEM(S3C_VIDOSD2D), SAVE_ITEM(S3C_VIDOSD3A), SAVE_ITEM(S3C_VIDOSD3B), SAVE_ITEM(S3C_VIDOSD3C), SAVE_ITEM(S3C_VIDOSD4A), SAVE_ITEM(S3C_VIDOSD4B), SAVE_ITEM(S3C_VIDOSD4C), SAVE_ITEM(S3C_VIDW00ADD0B0), SAVE_ITEM(S3C_VIDW00ADD0B1), SAVE_ITEM(S3C_VIDW01ADD0B0), SAVE_ITEM(S3C_VIDW01ADD0B1), SAVE_ITEM(S3C_VIDW02ADD0), SAVE_ITEM(S3C_VIDW03ADD0), SAVE_ITEM(S3C_VIDW04ADD0), SAVE_ITEM(S3C_VIDW00ADD1B0), SAVE_ITEM(S3C_VIDW00ADD1B1), SAVE_ITEM(S3C_VIDW01ADD1B0), SAVE_ITEM(S3C_VIDW01ADD1B1), SAVE_ITEM(S3C_VIDW02ADD1), SAVE_ITEM(S3C_VIDW03ADD1), SAVE_ITEM(S3C_VIDW04ADD1), SAVE_ITEM(S3C_VIDW00ADD2), SAVE_ITEM(S3C_VIDW01ADD2), SAVE_ITEM(S3C_VIDW02ADD2), SAVE_ITEM(S3C_VIDW03ADD2), SAVE_ITEM(S3C_VIDW04ADD2), SAVE_ITEM(S3C_VIDINTCON0), SAVE_ITEM(S3C_VIDINTCON1), SAVE_ITEM(S3C_W1KEYCON0), SAVE_ITEM(S3C_W1KEYCON1), SAVE_ITEM(S3C_W2KEYCON0), SAVE_ITEM(S3C_W2KEYCON1), SAVE_ITEM(S3C_W3KEYCON0), SAVE_ITEM(S3C_W3KEYCON1), SAVE_ITEM(S3C_W4KEYCON0), SAVE_ITEM(S3C_W4KEYCON1), SAVE_ITEM(S3C_DITHMODE), SAVE_ITEM(S3C_WIN0MAP), SAVE_ITEM(S3C_WIN1MAP), SAVE_ITEM(S3C_WIN2MAP), SAVE_ITEM(S3C_WIN3MAP), SAVE_ITEM(S3C_WIN4MAP), SAVE_ITEM(S3C_WPALCON), SAVE_ITEM(S3C_TRIGCON), SAVE_ITEM(S3C_I80IFCONA0), SAVE_ITEM(S3C_I80IFCONA1), 
SAVE_ITEM(S3C_I80IFCONB0),
	SAVE_ITEM(S3C_I80IFCONB1),
	SAVE_ITEM(S3C_LDI_CMDCON0),
	SAVE_ITEM(S3C_LDI_CMDCON1),
	SAVE_ITEM(S3C_SIFCCON0),
	SAVE_ITEM(S3C_SIFCCON1),
	SAVE_ITEM(S3C_SIFCCON2),

	/* i80 LDI command registers */
	SAVE_ITEM(S3C_LDI_CMD0),
	SAVE_ITEM(S3C_LDI_CMD1),
	SAVE_ITEM(S3C_LDI_CMD2),
	SAVE_ITEM(S3C_LDI_CMD3),
	SAVE_ITEM(S3C_LDI_CMD4),
	SAVE_ITEM(S3C_LDI_CMD5),
	SAVE_ITEM(S3C_LDI_CMD6),
	SAVE_ITEM(S3C_LDI_CMD7),
	SAVE_ITEM(S3C_LDI_CMD8),
	SAVE_ITEM(S3C_LDI_CMD9),
	SAVE_ITEM(S3C_LDI_CMD10),
	SAVE_ITEM(S3C_LDI_CMD11),

	/* window 2/3/4 palette data */
	SAVE_ITEM(S3C_W2PDATA01),
	SAVE_ITEM(S3C_W2PDATA23),
	SAVE_ITEM(S3C_W2PDATA45),
	SAVE_ITEM(S3C_W2PDATA67),
	SAVE_ITEM(S3C_W2PDATA89),
	SAVE_ITEM(S3C_W2PDATAAB),
	SAVE_ITEM(S3C_W2PDATACD),
	SAVE_ITEM(S3C_W2PDATAEF),
	SAVE_ITEM(S3C_W3PDATA01),
	SAVE_ITEM(S3C_W3PDATA23),
	SAVE_ITEM(S3C_W3PDATA45),
	SAVE_ITEM(S3C_W3PDATA67),
	SAVE_ITEM(S3C_W3PDATA89),
	SAVE_ITEM(S3C_W3PDATAAB),
	SAVE_ITEM(S3C_W3PDATACD),
	SAVE_ITEM(S3C_W3PDATAEF),
	SAVE_ITEM(S3C_W4PDATA01),
	SAVE_ITEM(S3C_W4PDATA23),
};

/*
 * Suspend
 */
/* Stop scan-out, snapshot the full FIMD register file into
 * s3c_lcd_save, then gate the LCD clock. */
int s3cfb_suspend(struct platform_device *dev, pm_message_t state)
{
	struct fb_info *fbinfo = platform_get_drvdata(dev);
	s3cfb_info_t *info = fbinfo->par;

	s3cfb_stop_lcd();
	s3c6410_pm_do_save(s3c_lcd_save, ARRAY_SIZE(s3c_lcd_save));

	/* sleep before disabling the clock, we need to ensure
	 * the LCD DMA engine is not going to get back on the bus
	 * before the clock goes off again (bjd) */
	msleep(1);
	clk_disable(info->clk);

	return 0;
}

/*
 * Resume
 */
/* Re-enable the clock, restore the saved register file, reconfigure
 * the LCD pins via s3cfb_set_gpio() and restart scan-out. */
int s3cfb_resume(struct platform_device *dev)
{
	struct fb_info *fbinfo = platform_get_drvdata(dev);
	s3cfb_info_t *info = fbinfo->par;

	clk_enable(info->clk);
	s3c6410_pm_do_restore(s3c_lcd_save, ARRAY_SIZE(s3c_lcd_save));

	s3cfb_set_gpio();
	s3cfb_start_lcd();

	return 0;
}

#else

/* CONFIG_PM disabled: suspend/resume become no-ops. */
int s3cfb_suspend(struct platform_device *dev, pm_message_t state)
{
	return 0;
}

int s3cfb_resume(struct platform_device *dev)
{
	return 0;
}

#endif
gpl-2.0
h4ck3rm1k3/gcc
gcc/tree-streamer.c
2
9070
/* Miscellaneous utilities for tree streaming.  Things that are used in
   both input and output are here.

   Copyright 2011 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@google.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "streamer-hooks.h"
#include "tree-streamer.h"

/* Check that all the TS_* structures handled by the streamer_write_*
   and streamer_read_* routines are exactly ALL the structures defined
   in treestruct.def.  This is a build-time sanity check: it aborts
   (via gcc_assert) as soon as a TS_* code exists that the streamer
   neither handles nor explicitly ignores.  */

void
streamer_check_handled_ts_structures (void)
{
  bool handled_p[LAST_TS_ENUM];
  unsigned i;

  memset (&handled_p, 0, sizeof (handled_p));

  /* These are the TS_* structures that are either handled or
     explicitly ignored by the streamer routines.  */
  handled_p[TS_BASE] = true;
  handled_p[TS_TYPED] = true;
  handled_p[TS_COMMON] = true;
  handled_p[TS_INT_CST] = true;
  handled_p[TS_REAL_CST] = true;
  handled_p[TS_FIXED_CST] = true;
  handled_p[TS_VECTOR] = true;
  handled_p[TS_STRING] = true;
  handled_p[TS_COMPLEX] = true;
  handled_p[TS_IDENTIFIER] = true;
  handled_p[TS_DECL_MINIMAL] = true;
  handled_p[TS_DECL_COMMON] = true;
  handled_p[TS_DECL_WRTL] = true;
  handled_p[TS_DECL_NON_COMMON] = true;
  handled_p[TS_DECL_WITH_VIS] = true;
  handled_p[TS_FIELD_DECL] = true;
  handled_p[TS_VAR_DECL] = true;
  handled_p[TS_PARM_DECL] = true;
  handled_p[TS_LABEL_DECL] = true;
  handled_p[TS_RESULT_DECL] = true;
  handled_p[TS_CONST_DECL] = true;
  handled_p[TS_TYPE_DECL] = true;
  handled_p[TS_FUNCTION_DECL] = true;
  handled_p[TS_TYPE_COMMON] = true;
  handled_p[TS_TYPE_WITH_LANG_SPECIFIC] = true;
  handled_p[TS_TYPE_NON_COMMON] = true;
  handled_p[TS_LIST] = true;
  handled_p[TS_VEC] = true;
  handled_p[TS_EXP] = true;
  handled_p[TS_SSA_NAME] = true;
  handled_p[TS_BLOCK] = true;
  handled_p[TS_BINFO] = true;
  handled_p[TS_STATEMENT_LIST] = true;
  handled_p[TS_CONSTRUCTOR] = true;
  handled_p[TS_OMP_CLAUSE] = true;
  handled_p[TS_OPTIMIZATION] = true;
  handled_p[TS_TARGET_OPTION] = true;
  handled_p[TS_TRANSLATION_UNIT_DECL] = true;

  /* Anything not marked above will trigger the following assertion.
     If this assertion triggers, it means that there is a new TS_*
     structure that should be handled by the streamer.  */
  for (i = 0; i < LAST_TS_ENUM; i++)
    gcc_assert (handled_p[i]);
}


/* Helper for streamer_tree_cache_insert_1.  Add T to CACHE->NODES at
   slot IX.  IX must be either an existing slot (replace) or exactly
   one past the end (append); anything else trips the assertion.  */

static void
streamer_tree_cache_add_to_node_array (struct streamer_tree_cache_d *cache,
				       unsigned ix, tree t)
{
  /* Make sure we're either replacing an old element or
     appending consecutively.  */
  gcc_assert (ix <= VEC_length (tree, cache->nodes));

  if (ix == VEC_length (tree, cache->nodes))
    VEC_safe_push (tree, heap, cache->nodes, t);
  else
    VEC_replace (tree, cache->nodes, ix, t);
}


/* Helper for streamer_tree_cache_insert
   and streamer_tree_cache_insert_at.
   CACHE, T, and IX_P are as in streamer_tree_cache_insert.

   If INSERT_AT_NEXT_SLOT_P is true, T is inserted at the next
   available slot in the cache.  Otherwise, T is inserted at the
   position indicated in *IX_P.

   If T already existed in CACHE, return true.  Otherwise,
   return false.  */

static bool
streamer_tree_cache_insert_1 (struct streamer_tree_cache_d *cache,
			      tree t, unsigned *ix_p,
			      bool insert_at_next_slot_p)
{
  void **slot;
  unsigned ix;
  bool existed_p;

  gcc_assert (t);

  slot = pointer_map_insert (cache->node_map, t);
  if (!*slot)
    {
      /* Determine the next slot to use in the cache.  */
      if (insert_at_next_slot_p)
	ix = VEC_length (tree, cache->nodes);
      else
	ix = *ix_p;
      /* Slot values are biased by 1 so that an empty pointer-map slot
	 (NULL) is distinguishable from a node cached at index 0.  */
       *slot = (void *)(size_t) (ix + 1);

      streamer_tree_cache_add_to_node_array (cache, ix, t);

      /* Indicate that the item was not present in the cache.  */
      existed_p = false;
    }
  else
    {
      /* Undo the 1-bias applied when the slot was filled above.  */
      ix = (size_t) *slot - 1;

      if (!insert_at_next_slot_p && ix != *ix_p)
	{
	  /* If the caller wants to insert T at a specific slot
	     location, and ENTRY->TO does not match *IX_P, add T to
	     the requested location slot.  */
	  ix = *ix_p;
	  streamer_tree_cache_add_to_node_array (cache, ix, t);
	}

      /* Indicate that T was already in the cache.  */
      existed_p = true;
    }

  if (ix_p)
    *ix_p = ix;

  return existed_p;
}


/* Insert tree node T in CACHE.  If T already existed in the cache
   return true.  Otherwise, return false.

   If IX_P is non-null, update it with the index into the cache where
   T has been stored.  */

bool
streamer_tree_cache_insert (struct streamer_tree_cache_d *cache, tree t,
			    unsigned *ix_p)
{
  return streamer_tree_cache_insert_1 (cache, t, ix_p, true);
}


/* Insert tree node T in CACHE at slot IX.  If T already
   existed in the cache return true.  Otherwise, return false.  */

bool
streamer_tree_cache_insert_at (struct streamer_tree_cache_d *cache,
			       tree t, unsigned ix)
{
  return streamer_tree_cache_insert_1 (cache, t, &ix, false);
}


/* Appends tree node T to CACHE, even if T already existed in it.
   Note: passes the address of a local as IX_P, so the pointer map
   entry for T is not advanced to the new slot (see
   streamer_tree_cache_insert_1).  */

void
streamer_tree_cache_append (struct streamer_tree_cache_d *cache, tree t)
{
  unsigned ix = VEC_length (tree, cache->nodes);
  streamer_tree_cache_insert_1 (cache, t, &ix, false);
}

/* Return true if tree node T exists in CACHE, otherwise false.  If IX_P is
   not NULL, write to *IX_P the index into the cache where T is stored
   ((unsigned)-1 if T is not found).  */

bool
streamer_tree_cache_lookup (struct streamer_tree_cache_d *cache, tree t,
			    unsigned *ix_p)
{
  void **slot;
  bool retval;
  unsigned ix;

  gcc_assert (t);

  slot = pointer_map_contains  (cache->node_map, t);
  if (slot == NULL)
    {
      retval = false;
      /* Sentinel: wraps to (unsigned)-1 for "not found".  */
      ix = -1;
    }
  else
    {
      retval = true;
      /* Undo the 1-bias stored by streamer_tree_cache_insert_1.  */
      ix = (size_t) *slot - 1;
    }

  if (ix_p)
    *ix_p = ix;

  return retval;
}


/* Record NODE in CACHE.  Recurses into component types (pointee,
   element, complex part) and, for RECORD_TYPEs, into the field list,
   so that those nodes are preloaded in a frontend-independent order.  */

static void
record_common_node (struct streamer_tree_cache_d *cache, tree node)
{
  /* We have to make sure to fill exactly the same number of
     elements for all frontends.  That can include NULL trees.
     As our hash table can't deal with zero entries we'll simply stream
     a random other tree.  A NULL tree never will be looked up so it
     doesn't matter which tree we replace it with, just to be sure
     use error_mark_node.  */
  if (!node)
    node = error_mark_node;

  streamer_tree_cache_append (cache, node);

  if (POINTER_TYPE_P (node)
      || TREE_CODE (node) == COMPLEX_TYPE
      || TREE_CODE (node) == ARRAY_TYPE)
    record_common_node (cache, TREE_TYPE (node));
  else if (TREE_CODE (node) == RECORD_TYPE)
    {
      /* The FIELD_DECLs of structures should be shared, so that every
	 COMPONENT_REF uses the same tree node when referencing a field.
	 Pointer equality between FIELD_DECLs is used by the alias
	 machinery to compute overlapping memory references (See
	 nonoverlapping_component_refs_p).  */
      tree f;
      for (f = TYPE_FIELDS (node); f; f = TREE_CHAIN (f))
	record_common_node (cache, f);
    }
}


/* Preload common nodes into CACHE and make sure they are merged
   properly according to the gimple type table.  */

static void
preload_common_nodes (struct streamer_tree_cache_d *cache)
{
  unsigned i;

  for (i = 0; i < itk_none; i++)
    /* Skip itk_char.  char_type_node is dependent on -f[un]signed-char.  */
    if (i != itk_char)
      record_common_node (cache, integer_types[i]);

  for (i = 0; i < stk_type_kind_last; i++)
    record_common_node (cache, sizetype_tab[i]);

  for (i = 0; i < TI_MAX; i++)
    /* Skip boolean type and constants, they are frontend dependent.  */
    if (i != TI_BOOLEAN_TYPE
	&& i != TI_BOOLEAN_FALSE
	&& i != TI_BOOLEAN_TRUE)
      record_common_node (cache, global_trees[i]);
}


/* Create a cache of pickled nodes.  The returned cache owns both the
   pointer map and the node vector; release it with
   streamer_tree_cache_delete.  */

struct streamer_tree_cache_d *
streamer_tree_cache_create (void)
{
  struct streamer_tree_cache_d *cache;

  cache = XCNEW (struct streamer_tree_cache_d);

  cache->node_map = pointer_map_create ();

  /* Load all the well-known tree nodes that are always created by
     the compiler on startup.  This prevents writing them out
     unnecessarily.  */
  preload_common_nodes (cache);

  return cache;
}


/* Delete the streamer cache C.  Safe to call with C == NULL.  */

void
streamer_tree_cache_delete (struct streamer_tree_cache_d *c)
{
  if (c == NULL)
    return;

  pointer_map_destroy (c->node_map);
  VEC_free (tree, heap, c->nodes);
  free (c);
}
gpl-2.0
phiexz/ziproxy
src/simplelist.c
2
2542
/* simplelist.c * High level routines for dealing with text files (read-only) with an item per line. * * Ziproxy - the HTTP acceleration proxy * This code is under the following conditions: * * --------------------------------------------------------------------- * Copyright (c)2005-2013 Daniel Mealha Cabrita * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111 USA * --------------------------------------------------------------------- */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include "txtfiletools.h" #include "strtables.h" #include "simplelist.h" /* loads text file into memory and return struct to be used for queries returns NULL if unable to load file or create structure */ t_st_strtable *slist_create (const char* given_filename) { char *filedata; int filedata_len; t_st_strtable *slist_table; int linelen; char *curpos; if ((filedata = load_textfile_to_memory (given_filename)) != NULL) { filedata_len = strlen (filedata); fix_linebreaks_qp (filedata, filedata_len, filedata); remove_junk_data (filedata, filedata); if ((slist_table = st_create ()) != NULL) { curpos = filedata; while ((linelen = get_line_len (curpos))) { if (*(curpos + linelen - 1) == '\n') *(curpos + linelen - 1) = '\0'; if (strchr (curpos, '*') == NULL) st_insert_nometa (slist_table, curpos); else st_insert (slist_table, curpos); curpos += linelen; } /* 
finished, return */ free (filedata); return (slist_table); } free (filedata); } return (NULL); } void slist_destroy (t_st_strtable *slist_table) { st_destroy (slist_table); } /* if string is present in the list, returns !=0 otherwise returns 0. this function makes pattern-mathing (based on '*') */ int slist_check_if_matches (t_st_strtable *slist_table, const char *strdata) { return (st_check_if_matches (slist_table, strdata)); }
gpl-2.0
ZHAW-INES/rioxo-uClinux-dist
lib/libgmp/gmp-4.2.3/scanf/fscanffuns.c
2
1538
/* __gmp_fscanf_funs -- support for formatted input from a FILE. THE FUNCTIONS IN THIS FILE ARE FOR INTERNAL USE ONLY. THEY'RE ALMOST CERTAIN TO BE SUBJECT TO INCOMPATIBLE CHANGES OR DISAPPEAR COMPLETELY IN FUTURE GNU MP RELEASES. Copyright 2001 Free Software Foundation, Inc. This file is part of the GNU MP Library. The GNU MP Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. The GNU MP Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */ #include <stdio.h> #include "gmp.h" #include "gmp-impl.h" /* SunOS 4 stdio.h doesn't provide prototypes for these */ #if ! HAVE_DECL_FGETC int fgetc _PROTO ((FILE *fp)); #endif #if ! HAVE_DECL_FSCANF int fscanf _PROTO ((FILE *fp, const char *fmt, ...)); #endif #if ! HAVE_DECL_UNGETC int ungetc _PROTO ((int c, FILE *fp)); #endif static void step (FILE *fp, int n) { } const struct gmp_doscan_funs_t __gmp_fscanf_funs = { (gmp_doscan_scan_t) fscanf, (gmp_doscan_step_t) step, (gmp_doscan_get_t) fgetc, (gmp_doscan_unget_t) ungetc, };
gpl-2.0
jrfastab/Linux-Kernel-QOS
drivers/net/team/team.c
2
70103
/* * drivers/net/team/team.c - Network team device driver * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/rcupdate.h> #include <linux/errno.h> #include <linux/ctype.h> #include <linux/notifier.h> #include <linux/netdevice.h> #include <linux/netpoll.h> #include <linux/if_vlan.h> #include <linux/if_arp.h> #include <linux/socket.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <net/rtnetlink.h> #include <net/genetlink.h> #include <net/netlink.h> #include <net/sch_generic.h> #include <generated/utsrelease.h> #include <linux/if_team.h> #define DRV_NAME "team" /********** * Helpers **********/ #define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT) static struct team_port *team_port_get_rcu(const struct net_device *dev) { struct team_port *port = rcu_dereference(dev->rx_handler_data); return team_port_exists(dev) ? port : NULL; } static struct team_port *team_port_get_rtnl(const struct net_device *dev) { struct team_port *port = rtnl_dereference(dev->rx_handler_data); return team_port_exists(dev) ? 
port : NULL; } /* * Since the ability to change device address for open port device is tested in * team_port_add, this function can be called without control of return value */ static int __set_port_dev_addr(struct net_device *port_dev, const unsigned char *dev_addr) { struct sockaddr addr; memcpy(addr.sa_data, dev_addr, port_dev->addr_len); addr.sa_family = port_dev->type; return dev_set_mac_address(port_dev, &addr); } static int team_port_set_orig_dev_addr(struct team_port *port) { return __set_port_dev_addr(port->dev, port->orig.dev_addr); } static int team_port_set_team_dev_addr(struct team *team, struct team_port *port) { return __set_port_dev_addr(port->dev, team->dev->dev_addr); } int team_modeop_port_enter(struct team *team, struct team_port *port) { return team_port_set_team_dev_addr(team, port); } EXPORT_SYMBOL(team_modeop_port_enter); void team_modeop_port_change_dev_addr(struct team *team, struct team_port *port) { team_port_set_team_dev_addr(team, port); } EXPORT_SYMBOL(team_modeop_port_change_dev_addr); static void team_refresh_port_linkup(struct team_port *port) { port->linkup = port->user.linkup_enabled ? 
port->user.linkup : port->state.linkup; } /******************* * Options handling *******************/ struct team_option_inst { /* One for each option instance */ struct list_head list; struct list_head tmp_list; struct team_option *option; struct team_option_inst_info info; bool changed; bool removed; }; static struct team_option *__team_find_option(struct team *team, const char *opt_name) { struct team_option *option; list_for_each_entry(option, &team->option_list, list) { if (strcmp(option->name, opt_name) == 0) return option; } return NULL; } static void __team_option_inst_del(struct team_option_inst *opt_inst) { list_del(&opt_inst->list); kfree(opt_inst); } static void __team_option_inst_del_option(struct team *team, struct team_option *option) { struct team_option_inst *opt_inst, *tmp; list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) { if (opt_inst->option == option) __team_option_inst_del(opt_inst); } } static int __team_option_inst_add(struct team *team, struct team_option *option, struct team_port *port) { struct team_option_inst *opt_inst; unsigned int array_size; unsigned int i; int err; array_size = option->array_size; if (!array_size) array_size = 1; /* No array but still need one instance */ for (i = 0; i < array_size; i++) { opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL); if (!opt_inst) return -ENOMEM; opt_inst->option = option; opt_inst->info.port = port; opt_inst->info.array_index = i; opt_inst->changed = true; opt_inst->removed = false; list_add_tail(&opt_inst->list, &team->option_inst_list); if (option->init) { err = option->init(team, &opt_inst->info); if (err) return err; } } return 0; } static int __team_option_inst_add_option(struct team *team, struct team_option *option) { struct team_port *port; int err; if (!option->per_port) { err = __team_option_inst_add(team, option, NULL); if (err) goto inst_del_option; } list_for_each_entry(port, &team->port_list, list) { err = __team_option_inst_add(team, option, port); if 
(err) goto inst_del_option; } return 0; inst_del_option: __team_option_inst_del_option(team, option); return err; } static void __team_option_inst_mark_removed_option(struct team *team, struct team_option *option) { struct team_option_inst *opt_inst; list_for_each_entry(opt_inst, &team->option_inst_list, list) { if (opt_inst->option == option) { opt_inst->changed = true; opt_inst->removed = true; } } } static void __team_option_inst_del_port(struct team *team, struct team_port *port) { struct team_option_inst *opt_inst, *tmp; list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) { if (opt_inst->option->per_port && opt_inst->info.port == port) __team_option_inst_del(opt_inst); } } static int __team_option_inst_add_port(struct team *team, struct team_port *port) { struct team_option *option; int err; list_for_each_entry(option, &team->option_list, list) { if (!option->per_port) continue; err = __team_option_inst_add(team, option, port); if (err) goto inst_del_port; } return 0; inst_del_port: __team_option_inst_del_port(team, port); return err; } static void __team_option_inst_mark_removed_port(struct team *team, struct team_port *port) { struct team_option_inst *opt_inst; list_for_each_entry(opt_inst, &team->option_inst_list, list) { if (opt_inst->info.port == port) { opt_inst->changed = true; opt_inst->removed = true; } } } static int __team_options_register(struct team *team, const struct team_option *option, size_t option_count) { int i; struct team_option **dst_opts; int err; dst_opts = kzalloc(sizeof(struct team_option *) * option_count, GFP_KERNEL); if (!dst_opts) return -ENOMEM; for (i = 0; i < option_count; i++, option++) { if (__team_find_option(team, option->name)) { err = -EEXIST; goto alloc_rollback; } dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL); if (!dst_opts[i]) { err = -ENOMEM; goto alloc_rollback; } } for (i = 0; i < option_count; i++) { err = __team_option_inst_add_option(team, dst_opts[i]); if (err) goto 
inst_rollback; list_add_tail(&dst_opts[i]->list, &team->option_list); } kfree(dst_opts); return 0; inst_rollback: for (i--; i >= 0; i--) __team_option_inst_del_option(team, dst_opts[i]); i = option_count - 1; alloc_rollback: for (i--; i >= 0; i--) kfree(dst_opts[i]); kfree(dst_opts); return err; } static void __team_options_mark_removed(struct team *team, const struct team_option *option, size_t option_count) { int i; for (i = 0; i < option_count; i++, option++) { struct team_option *del_opt; del_opt = __team_find_option(team, option->name); if (del_opt) __team_option_inst_mark_removed_option(team, del_opt); } } static void __team_options_unregister(struct team *team, const struct team_option *option, size_t option_count) { int i; for (i = 0; i < option_count; i++, option++) { struct team_option *del_opt; del_opt = __team_find_option(team, option->name); if (del_opt) { __team_option_inst_del_option(team, del_opt); list_del(&del_opt->list); kfree(del_opt); } } } static void __team_options_change_check(struct team *team); int team_options_register(struct team *team, const struct team_option *option, size_t option_count) { int err; err = __team_options_register(team, option, option_count); if (err) return err; __team_options_change_check(team); return 0; } EXPORT_SYMBOL(team_options_register); void team_options_unregister(struct team *team, const struct team_option *option, size_t option_count) { __team_options_mark_removed(team, option, option_count); __team_options_change_check(team); __team_options_unregister(team, option, option_count); } EXPORT_SYMBOL(team_options_unregister); static int team_option_get(struct team *team, struct team_option_inst *opt_inst, struct team_gsetter_ctx *ctx) { if (!opt_inst->option->getter) return -EOPNOTSUPP; return opt_inst->option->getter(team, ctx); } static int team_option_set(struct team *team, struct team_option_inst *opt_inst, struct team_gsetter_ctx *ctx) { if (!opt_inst->option->setter) return -EOPNOTSUPP; return 
opt_inst->option->setter(team, ctx); } void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info) { struct team_option_inst *opt_inst; opt_inst = container_of(opt_inst_info, struct team_option_inst, info); opt_inst->changed = true; } EXPORT_SYMBOL(team_option_inst_set_change); void team_options_change_check(struct team *team) { __team_options_change_check(team); } EXPORT_SYMBOL(team_options_change_check); /**************** * Mode handling ****************/ static LIST_HEAD(mode_list); static DEFINE_SPINLOCK(mode_list_lock); struct team_mode_item { struct list_head list; const struct team_mode *mode; }; static struct team_mode_item *__find_mode(const char *kind) { struct team_mode_item *mitem; list_for_each_entry(mitem, &mode_list, list) { if (strcmp(mitem->mode->kind, kind) == 0) return mitem; } return NULL; } static bool is_good_mode_name(const char *name) { while (*name != '\0') { if (!isalpha(*name) && !isdigit(*name) && *name != '_') return false; name++; } return true; } int team_mode_register(const struct team_mode *mode) { int err = 0; struct team_mode_item *mitem; if (!is_good_mode_name(mode->kind) || mode->priv_size > TEAM_MODE_PRIV_SIZE) return -EINVAL; mitem = kmalloc(sizeof(*mitem), GFP_KERNEL); if (!mitem) return -ENOMEM; spin_lock(&mode_list_lock); if (__find_mode(mode->kind)) { err = -EEXIST; kfree(mitem); goto unlock; } mitem->mode = mode; list_add_tail(&mitem->list, &mode_list); unlock: spin_unlock(&mode_list_lock); return err; } EXPORT_SYMBOL(team_mode_register); void team_mode_unregister(const struct team_mode *mode) { struct team_mode_item *mitem; spin_lock(&mode_list_lock); mitem = __find_mode(mode->kind); if (mitem) { list_del_init(&mitem->list); kfree(mitem); } spin_unlock(&mode_list_lock); } EXPORT_SYMBOL(team_mode_unregister); static const struct team_mode *team_mode_get(const char *kind) { struct team_mode_item *mitem; const struct team_mode *mode = NULL; spin_lock(&mode_list_lock); mitem = __find_mode(kind); if (!mitem) 
{ spin_unlock(&mode_list_lock); request_module("team-mode-%s", kind); spin_lock(&mode_list_lock); mitem = __find_mode(kind); } if (mitem) { mode = mitem->mode; if (!try_module_get(mode->owner)) mode = NULL; } spin_unlock(&mode_list_lock); return mode; } static void team_mode_put(const struct team_mode *mode) { module_put(mode->owner); } static bool team_dummy_transmit(struct team *team, struct sk_buff *skb) { dev_kfree_skb_any(skb); return false; } static rx_handler_result_t team_dummy_receive(struct team *team, struct team_port *port, struct sk_buff *skb) { return RX_HANDLER_ANOTHER; } static const struct team_mode __team_no_mode = { .kind = "*NOMODE*", }; static bool team_is_mode_set(struct team *team) { return team->mode != &__team_no_mode; } static void team_set_no_mode(struct team *team) { team->user_carrier_enabled = false; team->mode = &__team_no_mode; } static void team_adjust_ops(struct team *team) { /* * To avoid checks in rx/tx skb paths, ensure here that non-null and * correct ops are always set. */ if (!team->en_port_count || !team_is_mode_set(team) || !team->mode->ops->transmit) team->ops.transmit = team_dummy_transmit; else team->ops.transmit = team->mode->ops->transmit; if (!team->en_port_count || !team_is_mode_set(team) || !team->mode->ops->receive) team->ops.receive = team_dummy_receive; else team->ops.receive = team->mode->ops->receive; } /* * We can benefit from the fact that it's ensured no port is present * at the time of mode change. Therefore no packets are in fly so there's no * need to set mode operations in any special way. 
*/ static int __team_change_mode(struct team *team, const struct team_mode *new_mode) { /* Check if mode was previously set and do cleanup if so */ if (team_is_mode_set(team)) { void (*exit_op)(struct team *team) = team->ops.exit; /* Clear ops area so no callback is called any longer */ memset(&team->ops, 0, sizeof(struct team_mode_ops)); team_adjust_ops(team); if (exit_op) exit_op(team); team_mode_put(team->mode); team_set_no_mode(team); /* zero private data area */ memset(&team->mode_priv, 0, sizeof(struct team) - offsetof(struct team, mode_priv)); } if (!new_mode) return 0; if (new_mode->ops->init) { int err; err = new_mode->ops->init(team); if (err) return err; } team->mode = new_mode; memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops)); team_adjust_ops(team); return 0; } static int team_change_mode(struct team *team, const char *kind) { const struct team_mode *new_mode; struct net_device *dev = team->dev; int err; if (!list_empty(&team->port_list)) { netdev_err(dev, "No ports can be present during mode change\n"); return -EBUSY; } if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) { netdev_err(dev, "Unable to change to the same mode the team is in\n"); return -EINVAL; } new_mode = team_mode_get(kind); if (!new_mode) { netdev_err(dev, "Mode \"%s\" not found\n", kind); return -EINVAL; } err = __team_change_mode(team, new_mode); if (err) { netdev_err(dev, "Failed to change to mode \"%s\"\n", kind); team_mode_put(new_mode); return err; } netdev_info(dev, "Mode changed to \"%s\"\n", kind); return 0; } /********************* * Peers notification *********************/ static void team_notify_peers_work(struct work_struct *work) { struct team *team; team = container_of(work, struct team, notify_peers.dw.work); if (!rtnl_trylock()) { schedule_delayed_work(&team->notify_peers.dw, 0); return; } call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev); rtnl_unlock(); if (!atomic_dec_and_test(&team->notify_peers.count_pending)) 
schedule_delayed_work(&team->notify_peers.dw, msecs_to_jiffies(team->notify_peers.interval)); } static void team_notify_peers(struct team *team) { if (!team->notify_peers.count || !netif_running(team->dev)) return; atomic_set(&team->notify_peers.count_pending, team->notify_peers.count); schedule_delayed_work(&team->notify_peers.dw, 0); } static void team_notify_peers_init(struct team *team) { INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work); } static void team_notify_peers_fini(struct team *team) { cancel_delayed_work_sync(&team->notify_peers.dw); } /******************************* * Send multicast group rejoins *******************************/ static void team_mcast_rejoin_work(struct work_struct *work) { struct team *team; team = container_of(work, struct team, mcast_rejoin.dw.work); if (!rtnl_trylock()) { schedule_delayed_work(&team->mcast_rejoin.dw, 0); return; } call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev); rtnl_unlock(); if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending)) schedule_delayed_work(&team->mcast_rejoin.dw, msecs_to_jiffies(team->mcast_rejoin.interval)); } static void team_mcast_rejoin(struct team *team) { if (!team->mcast_rejoin.count || !netif_running(team->dev)) return; atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count); schedule_delayed_work(&team->mcast_rejoin.dw, 0); } static void team_mcast_rejoin_init(struct team *team) { INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work); } static void team_mcast_rejoin_fini(struct team *team) { cancel_delayed_work_sync(&team->mcast_rejoin.dw); } /************************ * Rx path frame handler ************************/ /* note: already called with rcu_read_lock */ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; struct team_port *port; struct team *team; rx_handler_result_t res; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) return RX_HANDLER_CONSUMED; *pskb = skb; port = 
team_port_get_rcu(skb->dev); team = port->team; if (!team_port_enabled(port)) { /* allow exact match delivery for disabled ports */ res = RX_HANDLER_EXACT; } else { res = team->ops.receive(team, port, skb); } if (res == RX_HANDLER_ANOTHER) { struct team_pcpu_stats *pcpu_stats; pcpu_stats = this_cpu_ptr(team->pcpu_stats); u64_stats_update_begin(&pcpu_stats->syncp); pcpu_stats->rx_packets++; pcpu_stats->rx_bytes += skb->len; if (skb->pkt_type == PACKET_MULTICAST) pcpu_stats->rx_multicast++; u64_stats_update_end(&pcpu_stats->syncp); skb->dev = team->dev; } else { this_cpu_inc(team->pcpu_stats->rx_dropped); } return res; } /************************************* * Multiqueue Tx port select override *************************************/ static int team_queue_override_init(struct team *team) { struct list_head *listarr; unsigned int queue_cnt = team->dev->num_tx_queues - 1; unsigned int i; if (!queue_cnt) return 0; listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL); if (!listarr) return -ENOMEM; team->qom_lists = listarr; for (i = 0; i < queue_cnt; i++) INIT_LIST_HEAD(listarr++); return 0; } static void team_queue_override_fini(struct team *team) { kfree(team->qom_lists); } static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id) { return &team->qom_lists[queue_id - 1]; } /* * note: already called with rcu_read_lock */ static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb) { struct list_head *qom_list; struct team_port *port; if (!team->queue_override_enabled || !skb->queue_mapping) return false; qom_list = __team_get_qom_list(team, skb->queue_mapping); list_for_each_entry_rcu(port, qom_list, qom_list) { if (!team_dev_queue_xmit(team, port, skb)) return true; } return false; } static void __team_queue_override_port_del(struct team *team, struct team_port *port) { if (!port->queue_id) return; list_del_rcu(&port->qom_list); } static bool team_queue_override_port_has_gt_prio_than(struct team_port *port, struct 
team_port *cur) { if (port->priority < cur->priority) return true; if (port->priority > cur->priority) return false; if (port->index < cur->index) return true; return false; } static void __team_queue_override_port_add(struct team *team, struct team_port *port) { struct team_port *cur; struct list_head *qom_list; struct list_head *node; if (!port->queue_id) return; qom_list = __team_get_qom_list(team, port->queue_id); node = qom_list; list_for_each_entry(cur, qom_list, qom_list) { if (team_queue_override_port_has_gt_prio_than(port, cur)) break; node = &cur->qom_list; } list_add_tail_rcu(&port->qom_list, node); } static void __team_queue_override_enabled_check(struct team *team) { struct team_port *port; bool enabled = false; list_for_each_entry(port, &team->port_list, list) { if (port->queue_id) { enabled = true; break; } } if (enabled == team->queue_override_enabled) return; netdev_dbg(team->dev, "%s queue override\n", enabled ? "Enabling" : "Disabling"); team->queue_override_enabled = enabled; } static void team_queue_override_port_prio_changed(struct team *team, struct team_port *port) { if (!port->queue_id || team_port_enabled(port)) return; __team_queue_override_port_del(team, port); __team_queue_override_port_add(team, port); __team_queue_override_enabled_check(team); } static void team_queue_override_port_change_queue_id(struct team *team, struct team_port *port, u16 new_queue_id) { if (team_port_enabled(port)) { __team_queue_override_port_del(team, port); port->queue_id = new_queue_id; __team_queue_override_port_add(team, port); __team_queue_override_enabled_check(team); } else { port->queue_id = new_queue_id; } } static void team_queue_override_port_add(struct team *team, struct team_port *port) { __team_queue_override_port_add(team, port); __team_queue_override_enabled_check(team); } static void team_queue_override_port_del(struct team *team, struct team_port *port) { __team_queue_override_port_del(team, port); __team_queue_override_enabled_check(team); } 
/**************** * Port handling ****************/ static bool team_port_find(const struct team *team, const struct team_port *port) { struct team_port *cur; list_for_each_entry(cur, &team->port_list, list) if (cur == port) return true; return false; } /* * Enable/disable port by adding to enabled port hashlist and setting * port->index (Might be racy so reader could see incorrect ifindex when * processing a flying packet, but that is not a problem). Write guarded * by team->lock. */ static void team_port_enable(struct team *team, struct team_port *port) { if (team_port_enabled(port)) return; port->index = team->en_port_count++; hlist_add_head_rcu(&port->hlist, team_port_index_hash(team, port->index)); team_adjust_ops(team); team_queue_override_port_add(team, port); if (team->ops.port_enabled) team->ops.port_enabled(team, port); team_notify_peers(team); team_mcast_rejoin(team); } static void __reconstruct_port_hlist(struct team *team, int rm_index) { int i; struct team_port *port; for (i = rm_index + 1; i < team->en_port_count; i++) { port = team_get_port_by_index(team, i); hlist_del_rcu(&port->hlist); port->index--; hlist_add_head_rcu(&port->hlist, team_port_index_hash(team, port->index)); } } static void team_port_disable(struct team *team, struct team_port *port) { if (!team_port_enabled(port)) return; if (team->ops.port_disabled) team->ops.port_disabled(team, port); hlist_del_rcu(&port->hlist); __reconstruct_port_hlist(team, port->index); port->index = -1; team->en_port_count--; team_queue_override_port_del(team, port); team_adjust_ops(team); team_notify_peers(team); team_mcast_rejoin(team); } #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ NETIF_F_HIGHDMA | NETIF_F_LRO) static void __team_compute_features(struct team *team) { struct team_port *port; u32 vlan_features = TEAM_VLAN_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; 
/* (continuation of __team_compute_features()) */
	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);
		/* XMIT_DST_RELEASE stays set only if every port has it. */
		dst_release_flag &= port->dev->priv_flags;
		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hard_header_len = max_hard_header_len;

	flags = team->dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
	team->dev->priv_flags = flags | dst_release_flag;

	netdev_change_features(team->dev);
}

/* Locked wrapper around __team_compute_features(). */
static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	__team_compute_features(team);
	mutex_unlock(&team->lock);
}

/* Mark @port's netdev as a team port and give the mode's port_enter
 * hook a chance to veto; holds a reference on the team device. */
static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	port->dev->priv_flags |= IFF_TEAM_PORT;
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);

	return err;
}

/* Undo team_port_enter(). */
static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Attach a netpoll instance to @port if the team device has one. */
static int team_port_enable_netpoll(struct team *team, struct team_port *port,
				    gfp_t gfp)
{
	struct netpoll *np;
	int err;

	if (!team->dev->npinfo)
		return 0;

	np = kzalloc(sizeof(*np), gfp);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, port->dev, gfp);
	if (err) {
		kfree(np);
		return err;
	}
	port->np = np;
	return err;
}

static void team_port_disable_netpoll(struct team_port *port)
{
	struct netpoll *np = port->np;

	if (!np)
		return;
	port->np = NULL;

	/* Wait for transmitting packets to finish before freeing.
*/
	synchronize_rcu_bh();
	__netpoll_cleanup(np);
	kfree(np);
}
#else
/* !CONFIG_NET_POLL_CONTROLLER: netpoll hooks compile to no-ops. */
static int team_port_enable_netpoll(struct team *team, struct team_port *port,
				    gfp_t gfp)
{
	return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev);

/* Attach @port_dev to @team as a new port. Validates the candidate,
 * then acquires resources in order (mtu, mode enter, open, vlan ids,
 * netpoll, upper link, rx handler, per-port options); errors unwind in
 * reverse via the labels at the bottom. Caller holds team->lock. */
static int team_port_add(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK) {
		netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
			   portname);
		return -EINVAL;
	}

	if (team_port_exists(port_dev)) {
		netdev_err(dev, "Device %s is already a port "
				"of a team device\n", portname);
		return -EBUSY;
	}

	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
	    vlan_uses_dev(dev)) {
		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
			   portname);
		return -EPERM;
	}

	err = team_dev_type_check_change(dev, port_dev);
	if (err)
		return err;

	if (port_dev->flags & IFF_UP) {
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	/* Mode-private data is allocated right behind struct team_port. */
	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;
	INIT_LIST_HEAD(&port->qom_list);

	/* Remember the original MTU so it can be restored on removal. */
	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = team_port_enable_netpoll(team, port, GFP_KERNEL);
	if (err) {
		netdev_err(dev, "Failed to enable netpoll on device %s\n",
			   portname);
		goto err_enable_netpoll;
	}

	err = netdev_master_upper_dev_link(port_dev, dev);
	if (err) {
		netdev_err(dev, "Device %s failed to set upper link\n",
			   portname);
		goto err_set_upper_link;
	}

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = __team_option_inst_add_port(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	port->index = -1;
	list_add_tail_rcu(&port->list, &team->port_list);
	team_port_enable(team, port);
	__team_compute_features(team);
	__team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
	__team_options_change_check(team);

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

	/* Error unwind, reverse order of acquisition. */
err_option_port_add:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	netdev_upper_dev_unlink(port_dev, dev);

err_set_upper_link:
/* (continuation of team_port_add() error unwind) */
	team_port_disable_netpoll(port);

err_enable_netpoll:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_dev_addr(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}

static void __team_port_change_port_removed(struct team_port *port);

/* Detach @port_dev from @team, undoing everything team_port_add() did
 * and restoring the port's original dev addr and MTU. Caller holds
 * team->lock. */
static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	team_port_disable(team, port);
	list_del_rcu(&port->list);
	netdev_rx_handler_unregister(port_dev);
	netdev_upper_dev_unlink(port_dev, dev);
	team_port_disable_netpoll(port);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_uc_unsync(port_dev, dev);
	dev_mc_unsync(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);

	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
	__team_port_change_port_removed(port);

	team_port_set_orig_dev_addr(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	/* Port may still be referenced by RCU readers; free after grace
	 * period. */
	kfree_rcu(port, rcu);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}


/*****************
 * Net device ops
 *****************/

/* "mode" option: get/set the active team mode by name. */
static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
	return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.count;
	return 0;
}

static int team_notify_peers_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->notify_peers.count = ctx->data.u32_val;
/* (continuation of team_notify_peers_count_set()) */
	return 0;
}

static int team_notify_peers_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.interval;
	return 0;
}

static int team_notify_peers_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->notify_peers.interval = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.count;
	return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.count = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.interval;
	return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.interval = ctx->data.u32_val;
	return 0;
}

/* Per-port "enabled" option, backed by team_port_enable/disable(). */
static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}

/* Per-port user-forced link state. */
static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
/* (continuation of team_user_linkup_en_option_set()) */
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	return 0;
}

/* Per-port queue-override priority. */
static int team_priority_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.s32_val = port->priority;
	return 0;
}

static int team_priority_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	s32 priority = ctx->data.s32_val;

	if (port->priority == priority)
		return 0;
	port->priority = priority;
	team_queue_override_port_prio_changed(team, port);
	return 0;
}

/* Per-port tx queue id override. */
static int team_queue_id_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.u32_val = port->queue_id;
	return 0;
}

static int team_queue_id_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	u16 new_queue_id = ctx->data.u32_val;

	if (port->queue_id == new_queue_id)
		return 0;
	/* Reject ids beyond the device's current tx queue count. */
	if (new_queue_id >= team->dev->real_num_tx_queues)
		return -EINVAL;
	team_queue_override_port_change_queue_id(team, port, new_queue_id);
	return 0;
}

/* Option table wiring netlink option names/types to the getters and
 * setters above. */
static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "notify_peers_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_count_get,
		.setter = team_notify_peers_count_set,
	},
	{
		.name = "notify_peers_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_interval_get,
		.setter = team_notify_peers_interval_set,
	},
	{
		.name = "mcast_rejoin_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_count_get,
		.setter = team_mcast_rejoin_count_set,
	},
	{
		.name = "mcast_rejoin_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_interval_get,
		.setter = team_mcast_rejoin_interval_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port =
/* (continuation of the "enabled" option entry) */
			    true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
	{
		.name = "priority",
		.type = TEAM_OPTION_TYPE_S32,
		.per_port = true,
		.getter = team_priority_option_get,
		.setter = team_priority_option_set,
	},
	{
		.name = "queue_id",
		.type = TEAM_OPTION_TYPE_U32,
		.per_port = true,
		.getter = team_queue_id_option_get,
		.setter = team_queue_id_option_set,
	},
};

/* Dedicated lockdep class keys so the team device's locks are not
 * conflated with its ports' netdev locks. */
static struct lock_class_key team_netdev_xmit_lock_key;
static struct lock_class_key team_netdev_addr_lock_key;
static struct lock_class_key team_tx_busylock_key;

static void team_set_lockdep_class_one(struct net_device *dev,
				       struct netdev_queue *txq,
				       void *unused)
{
	lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
}

static void team_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
	dev->qdisc_tx_busylock = &team_tx_busylock_key;
}

/* ndo_init: set up per-cpu stats, port hash/list, queue override state
 * and register the option table. */
static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);
	team_set_no_mode(team);

	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);
	err = team_queue_override_init(team);
	if (err)
		goto err_team_queue_override_init;

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);

	team_notify_peers_init(team);
	team_mcast_rejoin_init(team);

	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto
/* (continuation of team_init()) */
		     err_options_register;

	netif_carrier_off(dev);

	team_set_lockdep_class(dev);

	return 0;

err_options_register:
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
err_team_queue_override_init:
	free_percpu(team->pcpu_stats);

	return err;
}

/* ndo_uninit: remove all remaining ports and tear down what
 * team_init() built. */
static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
	mutex_unlock(&team->lock);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
	free_netdev(dev);
}

static int team_open(struct net_device *dev)
{
	return 0;
}

static int team_close(struct net_device *dev)
{
	return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success;
	unsigned int len = skb->len;

	/* Queue override gets first shot; otherwise delegate to the
	 * active mode's transmit hook. */
	tx_success = team_queue_override_transmit(team, skb);
	if (!tx_success)
		tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}

static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	/*
	 * This helper function exists to help dev_pick_tx get the correct
	 * destination queue. Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the team driver.
*/
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/*
	 * Save the original txq to restore before passing to the driver
	 */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

	/* Fold an out-of-range queue into range by repeated subtraction. */
	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}

/* Propagate promiscuity/allmulti refcount changes to every port. */
static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

/* Sync the team device's uc/mc address lists down to every port. */
static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync_multiple(port->dev, dev);
		dev_mc_sync_multiple(port->dev, dev);
	}
	rcu_read_unlock();
}

/* Set the team device's hw address and let the mode update each port. */
static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		if (team->ops.port_change_dev_addr)
			team->ops.port_change_dev_addr(team, port);
	rcu_read_unlock();
	return 0;
}

/* Apply @new_mtu to every port; on failure roll back the ports already
 * changed before returning the error. */
static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock.
It's not possible to traverse list in reverse under rcu_read_lock */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu",
				   port->dev->name);
			goto unwind;
		}
	}
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	/* Restore the old MTU on every port already changed. */
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	mutex_unlock(&team->lock);

	return err;
}

/* Aggregate the per-cpu counters into @stats. */
static struct rtnl_link_stats64 *
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		/* Retry loop gives a consistent snapshot of the u64
		 * counters protected by syncp. */
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			rx_multicast = p->rx_multicast;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->multicast += rx_multicast;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/*
		 * rx_dropped & tx_dropped are u32, updated
		 * without syncp protection.
		 */
		rx_dropped += p->rx_dropped;
		tx_dropped += p->tx_dropped;
	}
	stats->rx_dropped = rx_dropped;
	stats->tx_dropped = tx_dropped;
	return stats;
}

/* Add VLAN @vid on every port; roll back on failure. */
static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock.
It's not possible to traverse list in reverse under rcu_read_lock */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, proto, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	/* Remove the vid from the ports it was already added to. */
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	rcu_read_unlock();

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void team_poll_controller(struct net_device *dev)
{
}

static void __team_netpoll_cleanup(struct team *team)
{
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list)
		team_port_disable_netpoll(port);
}

static void team_netpoll_cleanup(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	mutex_lock(&team->lock);
	__team_netpoll_cleanup(team);
	mutex_unlock(&team->lock);
}

/* Enable netpoll on every port; on failure tear it down everywhere. */
static int team_netpoll_setup(struct net_device *dev,
			      struct netpoll_info *npifo, gfp_t gfp)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err = 0;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = team_port_enable_netpoll(team, port, gfp);
		if (err) {
			__team_netpoll_cleanup(team);
			break;
		}
	}
	mutex_unlock(&team->lock);
	return err;
}
#endif

/* ndo_add_slave/ndo_del_slave: locked wrappers for port add/del. */
static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static
netdev_features_t team_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();
	return features;
}

/* Once userspace forces carrier, team->user_carrier_enabled stays set
 * and the requested state is applied directly. */
static int team_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct team *team = netdev_priv(dev);

	team->user_carrier_enabled = true;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_select_queue	= team_select_queue,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= team_poll_controller,
	.ndo_netpoll_setup	= team_netpoll_setup,
	.ndo_netpoll_cleanup	= team_netpoll_cleanup,
#endif
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
	.ndo_change_carrier	= team_change_carrier,
};


/***********************
 * ethtool interface
 ***********************/

static void team_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct ethtool_ops team_ethtool_ops = {
	.get_drvinfo		= team_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};


/***********************
 * rt netlink interface
***********************/

/* Clone link-layer identity (type, header ops, addresses, mtu) of
 * @port_dev into the team device. */
static void team_setup_by_port(struct net_device *dev,
			       struct net_device *port_dev)
{
	dev->header_ops	= port_dev->header_ops;
	dev->type = port_dev->type;
	dev->hard_header_len = port_dev->hard_header_len;
	dev->addr_len = port_dev->addr_len;
	dev->mtu = port_dev->mtu;
	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
	memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
}

/* If @port_dev has a different hardware type, morph the (still port-
 * less) team device into that type, with pre/post change notifiers.
 * Fails with -EBUSY once the team already has ports. */
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	char *portname = port_dev->name;
	int err;

	if (dev->type == port_dev->type)
		return 0;
	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "Device %s is of different type\n", portname);
		return -EBUSY;
	}
	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
	err = notifier_to_errno(err);
	if (err) {
		netdev_err(dev, "Refused to change device type\n");
		return err;
	}
	dev_uc_flush(dev);
	dev_mc_flush(dev);
	team_setup_by_port(dev, port_dev);
	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
	return 0;
}

/* rtnl ->setup: initialize the team net_device defaults. */
static void team_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &team_netdev_ops;
	dev->ethtool_ops = &team_ethtool_ops;
	dev->destructor	= team_destructor;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Let this up to underlay drivers.
*/
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;
	dev->hw_features = TEAM_VLAN_FEATURES |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
	dev->features |= dev->hw_features;
}

static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	/* No hardware address supplied: generate a random one. */
	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	err = register_netdevice(dev);
	if (err)
		return err;

	return 0;
}

/* rtnl ->validate: a supplied IFLA_ADDRESS must be a valid unicast
 * Ethernet address. */
static int team_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static unsigned int team_get_num_tx_queues(void)
{
	return TEAM_DEFAULT_NUM_TX_QUEUES;
}

static unsigned int team_get_num_rx_queues(void)
{
	return TEAM_DEFAULT_NUM_RX_QUEUES;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct team),
	.setup			= team_setup,
	.newlink		= team_newlink,
	.validate		= team_validate,
	.get_num_tx_queues	= team_get_num_tx_queues,
	.get_num_rx_queues	= team_get_num_rx_queues,
};


/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family = {
	.id		= GENL_ID_GENERATE,
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.netnsok	= true,
};

/* Top-level attribute policy for team genl commands. */
static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};

/* Policy for nested per-option attributes. */
static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]	= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME]		= { .type =
/* (continuation of team_nl_option_policy) */
					    NLA_STRING,
					    .len = TEAM_STRING_MAX_LEN, },
	[TEAM_ATTR_OPTION_CHANGED]	= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]		= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]		= { .type = NLA_BINARY },
};

/* TEAM_CMD_NOOP: reply with an empty message (liveness probe). */
static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}

/*
 * Netlink cmd functions should be locked by following two functions.
 * Since dev gets held here, that ensures dev won't disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	/* Only accept devices that are actually team devices. */
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}

typedef int team_nl_send_func_t(struct sk_buff *skb,
				struct team *team, u32 portid);

static int team_nl_send_unicast(struct sk_buff *skb, struct team *team,
				u32 portid)
{
	return genlmsg_unicast(dev_net(team->dev), skb, portid);
}

/* Emit one nested TEAM_ATTR_ITEM_OPTION attribute describing
 * @opt_inst's current value (body continues past this chunk). */
static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
				       struct team_option_inst *opt_inst)
{
	struct nlattr *option_item;
	struct team_option *option = opt_inst->option;
	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
	struct team_gsetter_ctx ctx;
	int err;

	ctx.info = opt_inst_info;
	err = team_option_get(team, opt_inst, &ctx);
	if (err)
		return err;

	option_item =
/* (continuation of team_nl_fill_one_option_get()) */
		      nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
	if (!option_item)
		return -EMSGSIZE;

	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
		goto nest_cancel;
	if (opt_inst_info->port &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
			opt_inst_info->port->dev->ifindex))
		goto nest_cancel;
	if (opt_inst->option->array_size &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
			opt_inst_info->array_index))
		goto nest_cancel;

	/* Encode the value according to the option's declared type. */
	switch (option->type) {
	case TEAM_OPTION_TYPE_U32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
			goto nest_cancel;
		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_STRING:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
			goto nest_cancel;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
				   ctx.data.str_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BINARY:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
			goto nest_cancel;
		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
			    ctx.data.bin_val.ptr))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BOOL:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
			goto nest_cancel;
		/* Bool true is a presence flag; false omits the attr. */
		if (ctx.data.bool_val &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_S32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
			goto nest_cancel;
		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
			goto nest_cancel;
		break;
	default:
		BUG();
	}
	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
		goto nest_cancel;
	if (opt_inst->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
			goto nest_cancel;
		opt_inst->changed = false;
	}
	nla_nest_end(skb, option_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, option_item);
	return -EMSGSIZE;
}

/* Flush a pending skb via @send_func (when non-NULL) and allocate a
 * fresh one into @*pskb. */
static int __send_and_alloc_skb(struct sk_buff **pskb,
				struct team *team, u32 portid,
				team_nl_send_func_t *send_func)
{
	int err;

	if (*pskb) {
		err = send_func(*pskb, team, portid);
		if (err)
			return err;
	}
	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE,
/* (continuation of __send_and_alloc_skb()) */
			    GFP_KERNEL);
	if (!*pskb)
		return -ENOMEM;
	return 0;
}

/* Dump @sel_opt_inst_list as one or more TEAM_CMD_OPTIONS_GET multipart
 * messages, restarting with a fresh skb whenever one fills up
 * (-EMSGSIZE), and finishing with NLMSG_DONE. */
static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
				    int flags, team_nl_send_func_t *send_func,
				    struct list_head *sel_opt_inst_list)
{
	struct nlattr *option_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_option_inst *opt_inst;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	opt_inst = list_first_entry(sel_opt_inst_list,
				    struct team_option_inst, tmp_list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_OPTIONS_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;
	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
		err = team_nl_fill_one_option_get(skb, team, opt_inst);
		if (err) {
			/* -EMSGSIZE with at least one option already in
			 * the skb means: send this one and continue in a
			 * new message from the current entry. */
			if (err == -EMSGSIZE) {
				if (!i)
					goto errout;
				incomplete = true;
				break;
			}
			goto errout;
		}
		i++;
	}

	nla_nest_end(skb, option_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			goto errout;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	genlmsg_cancel(skb, hdr);
	nlmsg_free(skb);
	return err;
}

/* TEAM_CMD_OPTIONS_GET handler: dump every option instance. */
static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	struct team_option_inst *opt_inst;
	int err;
	LIST_HEAD(sel_opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	list_for_each_entry(opt_inst, &team->option_inst_list, list)
		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
				       NLM_F_ACK, team_nl_send_unicast,
/* (continuation of team_nl_cmd_options_get()) */
				       &sel_opt_inst_list);

	team_nl_team_put(team);

	return err;
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list);

/* TEAM_CMD_OPTIONS_SET handler: parse each nested option attribute and
 * apply it to the matching registered option instance; changed
 * instances are collected and broadcast at the end. */
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;
	LIST_HEAD(opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = -EINVAL;
	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		enum team_option_type opt_type;
		int opt_port_ifindex = 0; /* != 0 for per-port options */
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		/* Map the wire NLA type onto the internal option type. */
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		case NLA_S32:
			opt_type = TEAM_OPTION_TYPE_S32;
			break;
		default:
			goto team_put;
		}

		/* Bool options use attribute presence as the value; all
		 * other types require a data attribute. */
		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		list_for_each_entry(opt_inst,
&team->option_inst_list, list) { struct team_option *option = opt_inst->option; struct team_gsetter_ctx ctx; struct team_option_inst_info *opt_inst_info; int tmp_ifindex; opt_inst_info = &opt_inst->info; tmp_ifindex = opt_inst_info->port ? opt_inst_info->port->dev->ifindex : 0; if (option->type != opt_type || strcmp(option->name, opt_name) || tmp_ifindex != opt_port_ifindex || (option->array_size && !opt_is_array) || opt_inst_info->array_index != opt_array_index) continue; opt_found = true; ctx.info = opt_inst_info; switch (opt_type) { case TEAM_OPTION_TYPE_U32: ctx.data.u32_val = nla_get_u32(attr_data); break; case TEAM_OPTION_TYPE_STRING: if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) { err = -EINVAL; goto team_put; } ctx.data.str_val = nla_data(attr_data); break; case TEAM_OPTION_TYPE_BINARY: ctx.data.bin_val.len = nla_len(attr_data); ctx.data.bin_val.ptr = nla_data(attr_data); break; case TEAM_OPTION_TYPE_BOOL: ctx.data.bool_val = attr_data ? true : false; break; case TEAM_OPTION_TYPE_S32: ctx.data.s32_val = nla_get_s32(attr_data); break; default: BUG(); } err = team_option_set(team, opt_inst, &ctx); if (err) goto team_put; opt_inst->changed = true; list_add(&opt_inst->tmp_list, &opt_inst_list); } if (!opt_found) { err = -ENOENT; goto team_put; } } err = team_nl_send_event_options_get(team, &opt_inst_list); team_put: team_nl_team_put(team); return err; } static int team_nl_fill_one_port_get(struct sk_buff *skb, struct team_port *port) { struct nlattr *port_item; port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); if (!port_item) goto nest_cancel; if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex)) goto nest_cancel; if (port->changed) { if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED)) goto nest_cancel; port->changed = false; } if ((port->removed && nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) || (port->state.linkup && nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) || nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) || nla_put_u8(skb, 
TEAM_ATTR_PORT_DUPLEX, port->state.duplex)) goto nest_cancel; nla_nest_end(skb, port_item); return 0; nest_cancel: nla_nest_cancel(skb, port_item); return -EMSGSIZE; } static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq, int flags, team_nl_send_func_t *send_func, struct team_port *one_port) { struct nlattr *port_list; struct nlmsghdr *nlh; void *hdr; struct team_port *port; int err; struct sk_buff *skb = NULL; bool incomplete; int i; port = list_first_entry_or_null(&team->port_list, struct team_port, list); start_again: err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) return err; hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI, TEAM_CMD_PORT_LIST_GET); if (!hdr) return -EMSGSIZE; if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) goto nla_put_failure; port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT); if (!port_list) goto nla_put_failure; i = 0; incomplete = false; /* If one port is selected, called wants to send port list containing * only this port. 
Otherwise go through all listed ports and send all */ if (one_port) { err = team_nl_fill_one_port_get(skb, one_port); if (err) goto errout; } else if (port) { list_for_each_entry_from(port, &team->port_list, list) { err = team_nl_fill_one_port_get(skb, port); if (err) { if (err == -EMSGSIZE) { if (!i) goto errout; incomplete = true; break; } goto errout; } i++; } } nla_nest_end(skb, port_list); genlmsg_end(skb, hdr); if (incomplete) goto start_again; send_done: nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI); if (!nlh) { err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) goto errout; goto send_done; } return send_func(skb, team, portid); nla_put_failure: err = -EMSGSIZE; errout: genlmsg_cancel(skb, hdr); nlmsg_free(skb); return err; } static int team_nl_cmd_port_list_get(struct sk_buff *skb, struct genl_info *info) { struct team *team; int err; team = team_nl_team_get(info); if (!team) return -EINVAL; err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq, NLM_F_ACK, team_nl_send_unicast, NULL); team_nl_team_put(team); return err; } static struct genl_ops team_nl_ops[] = { { .cmd = TEAM_CMD_NOOP, .doit = team_nl_cmd_noop, .policy = team_nl_policy, }, { .cmd = TEAM_CMD_OPTIONS_SET, .doit = team_nl_cmd_options_set, .policy = team_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = TEAM_CMD_OPTIONS_GET, .doit = team_nl_cmd_options_get, .policy = team_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = TEAM_CMD_PORT_LIST_GET, .doit = team_nl_cmd_port_list_get, .policy = team_nl_policy, .flags = GENL_ADMIN_PERM, }, }; static struct genl_multicast_group team_change_event_mcgrp = { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, }; static int team_nl_send_multicast(struct sk_buff *skb, struct team *team, u32 portid) { return genlmsg_multicast_netns(dev_net(team->dev), skb, 0, team_change_event_mcgrp.id, GFP_KERNEL); } static int team_nl_send_event_options_get(struct team *team, struct list_head *sel_opt_inst_list) { return 
team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast, sel_opt_inst_list); } static int team_nl_send_event_port_get(struct team *team, struct team_port *port) { return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast, port); } static int team_nl_init(void) { int err; err = genl_register_family_with_ops(&team_nl_family, team_nl_ops, ARRAY_SIZE(team_nl_ops)); if (err) return err; err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp); if (err) goto err_change_event_grp_reg; return 0; err_change_event_grp_reg: genl_unregister_family(&team_nl_family); return err; } static void team_nl_fini(void) { genl_unregister_family(&team_nl_family); } /****************** * Change checkers ******************/ static void __team_options_change_check(struct team *team) { int err; struct team_option_inst *opt_inst; LIST_HEAD(sel_opt_inst_list); list_for_each_entry(opt_inst, &team->option_inst_list, list) { if (opt_inst->changed) list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); } err = team_nl_send_event_options_get(team, &sel_opt_inst_list); if (err && err != -ESRCH) netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n", err); } /* rtnl lock is held */ static void __team_port_change_send(struct team_port *port, bool linkup) { int err; port->changed = true; port->state.linkup = linkup; team_refresh_port_linkup(port); if (linkup) { struct ethtool_cmd ecmd; err = __ethtool_get_settings(port->dev, &ecmd); if (!err) { port->state.speed = ethtool_cmd_speed(&ecmd); port->state.duplex = ecmd.duplex; goto send_event; } } port->state.speed = 0; port->state.duplex = 0; send_event: err = team_nl_send_event_port_get(port->team, port); if (err && err != -ESRCH) netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n", port->dev->name, err); } static void __team_carrier_check(struct team *team) { struct team_port *port; bool team_linkup; if (team->user_carrier_enabled) return; team_linkup = 
false; list_for_each_entry(port, &team->port_list, list) { if (port->linkup) { team_linkup = true; break; } } if (team_linkup) netif_carrier_on(team->dev); else netif_carrier_off(team->dev); } static void __team_port_change_check(struct team_port *port, bool linkup) { if (port->state.linkup != linkup) __team_port_change_send(port, linkup); __team_carrier_check(port->team); } static void __team_port_change_port_added(struct team_port *port, bool linkup) { __team_port_change_send(port, linkup); __team_carrier_check(port->team); } static void __team_port_change_port_removed(struct team_port *port) { port->removed = true; __team_port_change_send(port, false); __team_carrier_check(port->team); } static void team_port_change_check(struct team_port *port, bool linkup) { struct team *team = port->team; mutex_lock(&team->lock); __team_port_change_check(port, linkup); mutex_unlock(&team->lock); } /************************************ * Net device notifier event handler ************************************/ static int team_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct team_port *port; port = team_port_get_rtnl(dev); if (!port) return NOTIFY_DONE; switch (event) { case NETDEV_UP: if (netif_carrier_ok(dev)) team_port_change_check(port, true); case NETDEV_DOWN: team_port_change_check(port, false); case NETDEV_CHANGE: if (netif_running(port->dev)) team_port_change_check(port, !!netif_carrier_ok(port->dev)); break; case NETDEV_UNREGISTER: team_del_slave(port->team->dev, dev); break; case NETDEV_FEAT_CHANGE: team_compute_features(port->team); break; case NETDEV_CHANGEMTU: /* Forbid to change mtu of underlaying device */ return NOTIFY_BAD; case NETDEV_PRE_TYPE_CHANGE: /* Forbid to change type of underlaying device */ return NOTIFY_BAD; case NETDEV_RESEND_IGMP: /* Propagate to master device */ call_netdevice_notifiers(event, port->team->dev); break; } return NOTIFY_DONE; } static struct 
notifier_block team_notifier_block __read_mostly = { .notifier_call = team_device_event, }; /*********************** * Module init and exit ***********************/ static int __init team_module_init(void) { int err; register_netdevice_notifier(&team_notifier_block); err = rtnl_link_register(&team_link_ops); if (err) goto err_rtnl_reg; err = team_nl_init(); if (err) goto err_nl_init; return 0; err_nl_init: rtnl_link_unregister(&team_link_ops); err_rtnl_reg: unregister_netdevice_notifier(&team_notifier_block); return err; } static void __exit team_module_exit(void) { team_nl_fini(); rtnl_link_unregister(&team_link_ops); unregister_netdevice_notifier(&team_notifier_block); } module_init(team_module_init); module_exit(team_module_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>"); MODULE_DESCRIPTION("Ethernet team device driver"); MODULE_ALIAS_RTNL_LINK(DRV_NAME);
gpl-2.0
suxinde2009/wesnoth
src/game_events/conditional_wml.cpp
2
7490
/*
   Copyright (C) 2003 - 2016 by David White <dave@whitevine.net>
   Part of the Battle for Wesnoth Project http://www.wesnoth.org/

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License,
   or (at your option) any later version.
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY.

   See the COPYING file for more details.
*/

/**
 * @file
 * Implementations of conditional action WML tags.
 */

#include "global.hpp"
#include "conditional_wml.hpp"

#include "config.hpp"
#include "game_board.hpp"
#include "game_data.hpp"
#include "log.hpp"
#include "recall_list_manager.hpp"
#include "resources.hpp"
#include "scripting/game_lua_kernel.hpp"
#include "serialization/string_utils.hpp"
#include "team.hpp"
#include "terrain/filter.hpp"
#include "units/unit.hpp"
#include "units/filter.hpp"
#include "units/map.hpp"
#include "util.hpp"
#include "variable.hpp"

#include <boost/assign/list_of.hpp>

static lg::log_domain log_engine("engine");
#define WRN_NG LOG_STREAM(warn, log_engine)

// This file is in the game_events namespace.
namespace game_events {

namespace { // Support functions

	// Evaluates the non-boolean-operator parts of a conditional tag:
	// [true]/[false] shortcuts, [have_unit], [have_location], [variable],
	// and finally any custom Lua-registered conditional tags.
	// Boolean combinators ([and]/[or]/[not]) are handled by the caller,
	// conditional_passed().
	bool internal_conditional_passed(const vconfig& cond)
	{
		// An empty [true] child is an unconditional pass ...
		const vconfig::child_list& true_keyword = cond.get_children("true");
		if(!true_keyword.empty()) {
			return true;
		}

		// ... and an empty [false] child an unconditional fail.
		const vconfig::child_list& false_keyword = cond.get_children("false");
		if(!false_keyword.empty()) {
			return false;
		}

		// Default count= range used when a tag gives none: "at least one".
		static std::vector<std::pair<int,int> > default_counts = utils::parse_ranges("1-99999");

		// If the if statement requires we have a certain unit,
		// then check for that.
		const vconfig::child_list& have_unit = cond.get_children("have_unit");
		for(vconfig::child_list::const_iterator u = have_unit.begin(); u != have_unit.end(); ++u) {
			if(resources::units == nullptr)
				return false;
			std::vector<std::pair<int,int> > counts = (*u).has_attribute("count")
				? utils::parse_ranges((*u)["count"]) : default_counts;
			int match_count = 0;
			const unit_filter ufilt(*u, resources::filter_con);
			for (const unit &i : *resources::units)
			{
				if ( i.hitpoints() > 0 && ufilt(i) ) {
					++match_count;
					if(counts == default_counts) {
						// by default a single match is enough, so avoid extra work
						break;
					}
				}
			}
			if ((*u)["search_recall_list"].to_bool())
			{
				// Also count matching units sitting on every side's recall list.
				const unit_filter ufilt(*u, resources::filter_con);
				for(std::vector<team>::iterator team = resources::teams->begin();
						team!=resources::teams->end(); ++team)
				{
					if(counts == default_counts && match_count) {
						break;
					}
					for(size_t t = 0; t < team->recall_list().size(); ++t) {
						if(counts == default_counts && match_count) {
							break;
						}
						// Temporarily exposes the recall unit as $this_unit for the filter.
						scoped_recall_unit auto_store("this_unit",
							team->save_id(), t);
						if ( ufilt( *team->recall_list()[t] ) ) {
							++match_count;
						}
					}
				}
			}
			if(!in_ranges(match_count, counts)) {
				return false;
			}
		}

		// If the if statement requires we have a certain location,
		// then check for that.
		const vconfig::child_list& have_location = cond.get_children("have_location");
		for(vconfig::child_list::const_iterator v = have_location.begin(); v != have_location.end(); ++v) {
			std::set<map_location> res;
			terrain_filter(*v, resources::filter_con).get_locations(res);

			std::vector<std::pair<int,int> > counts = (*v).has_attribute("count")
				? utils::parse_ranges((*v)["count"]) : default_counts;
			if(!in_ranges<int>(res.size(), counts)) {
				return false;
			}
		}

		// Check against each variable statement,
		// to see if the variable matches the conditions or not.
		const vconfig::child_list& variables = cond.get_children("variable");
		for (const vconfig &values : variables)
		{
			const std::string name = values["name"];
			config::attribute_value value = resources::gamedata->get_variable_const(name);

			// NOTE: these macros deliberately capture the loop-locals
			// `values` and `value`; every listed comparison attribute
			// must hold for the [variable] tag to pass.
#define TEST_STR_ATTR(name, test) do { \
			if (values.has_attribute(name)) { \
				std::string attr_str = values[name].str(); \
				std::string str_value = value.str(); \
				if (!(test)) return false; \
			} \
			} while (0)

#define TEST_NUM_ATTR(name, test) do { \
			if (values.has_attribute(name)) { \
				double attr_num = values[name].to_double(); \
				double num_value = value.to_double(); \
				if (!(test)) return false; \
			} \
			} while (0)

#define TEST_BOL_ATTR(name, test) do { \
			if (values.has_attribute(name)) { \
				bool attr_bool = values[name].to_bool(); \
				bool bool_value = value.to_bool(); \
				if (!(test)) return false; \
			} \
			} while (0)

			TEST_STR_ATTR("equals",                str_value == attr_str);
			TEST_STR_ATTR("not_equals",            str_value != attr_str);
			TEST_NUM_ATTR("numerical_equals",      num_value == attr_num);
			TEST_NUM_ATTR("numerical_not_equals",  num_value != attr_num);
			TEST_NUM_ATTR("greater_than",          num_value >  attr_num);
			TEST_NUM_ATTR("less_than",             num_value <  attr_num);
			TEST_NUM_ATTR("greater_than_equal_to", num_value >= attr_num);
			TEST_NUM_ATTR("less_than_equal_to",    num_value <= attr_num);
			TEST_BOL_ATTR("boolean_equals",       bool_value == attr_bool);
			TEST_BOL_ATTR("boolean_not_equals",   bool_value != attr_bool);
			TEST_STR_ATTR("contains", str_value.find(attr_str) != std::string::npos);

#undef TEST_STR_ATTR
#undef TEST_NUM_ATTR
#undef TEST_BOL_ATTR
		}

		// Any remaining child tag that is not one of the hard-coded WML
		// conditionals is dispatched to the Lua kernel as a custom
		// conditional; all of them must pass.
		vconfig::all_children_iterator cond_end = cond.ordered_end();
		static const boost::container::flat_set<std::string> hard_coded =
			boost::assign::list_of("true")("false")("have_unit")("have_location")("variable")
			("then")("else")("elseif")("not")("and")("or")("do").convert_to_container<boost::container::flat_set<std::string> >();
		assert(resources::lua_kernel);
		for (vconfig::all_children_iterator it = cond.ordered_begin(); it != cond_end; ++it) {
			std::string key = it.get_key();
			if (std::find(hard_coded.begin(), hard_coded.end(), key) == hard_coded.end()) {
				bool result = resources::lua_kernel->run_wml_conditional(key, it.get_child());
				if (!result) {
					return false;
				}
			}
		}

		return true;
	}

} // end anonymous namespace (support functions)

// Evaluates a full conditional: the tag's own tests combined left-to-right
// with its [and]/[or]/[not] children (in document order, no precedence).
bool conditional_passed(const vconfig& cond)
{
	bool matches = internal_conditional_passed(cond);

	// Handle [and], [or], and [not] with in-order precedence
	vconfig::all_children_iterator cond_i = cond.ordered_begin();
	vconfig::all_children_iterator cond_end = cond.ordered_end();
	while(cond_i != cond_end)
	{
		const std::string& cond_name = cond_i.get_key();
		const vconfig& cond_filter = cond_i.get_child();

		// Handle [and]
		if(cond_name == "and")
		{
			matches = matches && conditional_passed(cond_filter);
		}
		// Handle [or]
		else if(cond_name == "or")
		{
			matches = matches || conditional_passed(cond_filter);
		}
		// Handle [not]
		else if(cond_name == "not")
		{
			matches = matches && !conditional_passed(cond_filter);
		}
		++cond_i;
	}
	return matches;
}

// Builds an attack from event data @a cfg and tests it against @a filter.
// An empty cfg (no attack data) fails loudly so the authoring error is visible.
bool matches_special_filter(const config &cfg, const vconfig& filter)
{
	if (!cfg)
	{
		WRN_NG << "attempt to filter attack for an event with no attack data." << std::endl;
		// better to not execute the event (so the problem is more obvious)
		return false;
	}
	const attack_type attack(cfg);
	return attack.matches_filter(filter.get_parsed_config());
}

} // end namespace game_events
gpl-2.0
Razish/CompJA
codemp/game/g_turret.c
2
24287
// Copyright (C) 1999-2000 Id Software, Inc.
//
#include "g_local.h"
#include "qcommon/q_shared.h"

void G_SetEnemy( gentity_t *self, gentity_t *enemy );
qboolean turret_base_spawn_top( gentity_t *base );
void ObjectDie (gentity_t *self, gentity_t *inflictor, gentity_t *attacker, int damage, int meansOfDeath );

//------------------------------------------------------------------------------------------------------------
// Pain handler for the turret top: mirrors health onto the linked base,
// stuns the gun briefly when hit by DEMP2, and acquires the attacker as
// enemy if we had none.
void TurretPain( gentity_t *self, gentity_t *attacker, int damage )
//------------------------------------------------------------------------------------------------------------
{
	if (self->target_ent)
	{
		// keep the paired entity's displayed health in sync
		self->target_ent->health = self->health;
		if (self->target_ent->maxHealth)
		{
			G_ScaleNetHealth(self->target_ent);
		}
	}

	if ( attacker->client && attacker->client->ps.weapon == WP_DEMP2 )
	{
		// DEMP2 temporarily disables/staggers the turret
		self->attackDebounceTime = level.time + 800 + random() * 500;
		self->painDebounceTime = self->attackDebounceTime;
	}
	if ( !self->enemy )
	{//react to being hit
		G_SetEnemy( self, attacker );
	}
}

//------------------------------------------------------------------------------------------------------------
// Pain handler for the base: forward the damage state to the top piece.
void TurretBasePain( gentity_t *self, gentity_t *attacker, int damage )
//------------------------------------------------------------------------------------------------------------
{
	if (self->target_ent)
	{
		self->target_ent->health = self->health;
		if (self->target_ent->maxHealth)
		{
			G_ScaleNetHealth(self->target_ent);
		}
		TurretPain(self->target_ent, attacker, damage);
	}
}

//------------------------------------------------------------------------------------------------------------
// Death handler for the turret top: disables the base, plays the explosion,
// applies splash damage, and either swaps to the damage model or fully
// removes via ObjectDie().
void auto_turret_die ( gentity_t *self, gentity_t *inflictor, gentity_t *attacker, int damage, int meansOfDeath )
//------------------------------------------------------------------------------------------------------------
{
	vec3_t	forward = { 0,0, 1 }, pos;

	// Turn off the thinking of the base & use its targets
	g_entities[self->r.ownerNum].think = NULL;
	g_entities[self->r.ownerNum].use = NULL;

	// clear my data
	self->die = NULL;
	self->takedamage = qfalse;
	self->s.health = self->health = 0;
	self->s.loopSound = 0;
	self->s.shouldtarget = qfalse;
	//self->s.owner = MAX_CLIENTS; //not owned by any client

	VectorCopy( self->r.currentOrigin, pos );
	pos[2] += self->r.maxs[2]*0.5f;
	G_PlayEffect( EFFECT_EXPLOSION_TURRET, pos, forward );
	G_PlayEffectID( G_EffectIndex( "turret/explode" ), pos, forward );

	if ( self->splashDamage > 0 && self->splashRadius > 0 )
	{
		G_RadiusDamage( self->r.currentOrigin, attacker, self->splashDamage, self->splashRadius, attacker, NULL, MOD_UNKNOWN );
	}

	self->s.weapon = 0; // crosshair code uses this to mark crosshair red

	if ( self->s.modelindex2 )
	{
		// switch to damage model if we should
		self->s.modelindex = self->s.modelindex2;
		if (self->target_ent && self->target_ent->s.modelindex2)
		{
			self->target_ent->s.modelindex = self->target_ent->s.modelindex2;
		}

		// freeze the head in place
		VectorCopy( self->r.currentAngles, self->s.apos.trBase );
		VectorClear( self->s.apos.trDelta );

		if ( self->target )
		{
			G_UseTargets( self, attacker );
		}
	}
	else
	{
		ObjectDie( self, inflictor, attacker, damage, meansOfDeath );
	}
}

//------------------------------------------------------------------------------------------------------------
// Death handler for the base: propagate to the top piece if it still lives.
void bottom_die ( gentity_t *self, gentity_t *inflictor, gentity_t *attacker, int damage, int meansOfDeath )
//------------------------------------------------------------------------------------------------------------
{
	if (self->target_ent && self->target_ent->health > 0)
	{
		self->target_ent->health = self->health;
		if (self->target_ent->maxHealth)
		{
			G_ScaleNetHealth(self->target_ent);
		}
		auto_turret_die(self->target_ent, inflictor, attacker, damage, meansOfDeath);
	}
}

// Muzzle stand-off distance along the aim vector, in units.
#define START_DIS 15

//----------------------------------------------------------------
// Spawns a single turret projectile from `start` along `dir`, using the
// per-entity custom effect indices stashed in genericValue13..15.
static void turret_fire ( gentity_t *ent, vec3_t start, vec3_t dir )
//----------------------------------------------------------------
{
	vec3_t		org;
	gentity_t	*bolt;

	// don't fire with the muzzle buried in something solid
	if ( (trap->PointContents( start, ent->s.number )&MASK_SHOT) )
	{
		return;
	}

	VectorMA( start, -START_DIS, dir, org );

	// dumb....
	G_PlayEffectID( ent->genericValue13, org, dir );

	bolt = G_Spawn();

	//use a custom shot effect
	bolt->s.otherEntityNum2 = ent->genericValue14;
	//use a custom impact effect
	bolt->s.emplacedOwner = ent->genericValue15;

	bolt->classname = "turret_proj";
	bolt->nextthink = level.time + 10000;	// self-destruct after 10s of flight
	bolt->think = G_FreeEntity;
	bolt->s.eType = ET_MISSILE;
	bolt->s.weapon = WP_EMPLACED_GUN;
	bolt->r.ownerNum = ent->s.number;
	bolt->damage = ent->damage;
	bolt->alliedTeam = ent->alliedTeam;
	bolt->teamnodmg = ent->teamnodmg;
	//bolt->dflags = DAMAGE_NO_KNOCKBACK;// | DAMAGE_HEAVY_WEAP_CLASS;		// Don't push them around, or else we are constantly re-aiming
	bolt->splashDamage = ent->damage;
	bolt->splashRadius = 100;
	bolt->methodOfDeath = MOD_TARGET_LASER;
	bolt->splashMethodOfDeath = MOD_TARGET_LASER;
	bolt->clipmask = MASK_SHOT | CONTENTS_LIGHTSABER;
	//bolt->trigger_formation = qfalse;		// don't draw tail on first frame

	VectorSet( bolt->r.maxs, 1.5, 1.5, 1.5 );
	VectorScale( bolt->r.maxs, -1, bolt->r.mins );
	bolt->s.pos.trType = TR_LINEAR;
	bolt->s.pos.trTime = level.time;
	VectorCopy( start, bolt->s.pos.trBase );
	// ent->mass is (re)used here as the projectile speed
	VectorScale( dir, ent->mass, bolt->s.pos.trDelta );
	SnapVector( bolt->s.pos.trDelta );		// save net bandwidth
	VectorCopy( start, bolt->r.currentOrigin);
	bolt->parent = ent;
}

//-----------------------------------------------------
// Per-frame fire logic for the turret head; the actual shot originates from
// the top entity (self->r.ownerNum).
void turret_head_think( gentity_t *self )
//-----------------------------------------------------
{
	gentity_t *top = &g_entities[self->r.ownerNum];
	if ( !top )
	{
		return;
	}

	if ( self->painDebounceTime > level.time )
	{
		// recently hit: spark and mostly hold fire
		vec3_t v_up;
		VectorSet( v_up, 0, 0, 1 );
		G_PlayEffect( EFFECT_SPARKS, self->r.currentOrigin, v_up );
		if ( Q_irand( 0, 3) )
		{//25% chance of still firing
			return;
		}
	}

	// if it's time to fire and we have an enemy, then gun 'em down!  pushDebounce time controls next fire time
	if ( self->enemy && self->setTime < level.time && self->attackDebounceTime < level.time )
	{
		vec3_t fwd, org;

		// set up our next fire time
		self->setTime = level.time + self->wait;

		/*
		mdxaBone_t	boltMatrix;
		// Getting the flash bolt here
		trap->G2API_GetBoltMatrix( self->ghoul2, self->playerModel, self->torsoBolt, &boltMatrix, self->r.currentAngles, self->r.currentOrigin, (cg.time?cg.time:level.time), NULL, self->s.modelScale );
		trap->G2API_GiveMeVectorFromMatrix( boltMatrix, ORIGIN, org );
		trap->G2API_GiveMeVectorFromMatrix( boltMatrix, POSITIVE_Y, fwd );
		*/
		// approximate the muzzle from the top entity's bounds and facing
		VectorCopy( top->r.currentOrigin, org );
		org[2] += top->r.maxs[2]-8;
		AngleVectors( top->r.currentAngles, fwd, NULL, NULL );
		VectorMA( org, START_DIS, fwd, org );

		turret_fire( top, org, fwd );
		self->fly_sound_debounce_time = level.time;//used as lastShotTime
	}
}

//-----------------------------------------------------
// Steers the turret top toward its target for this frame: jitters when in
// pain, tracks the enemy when one exists, otherwise sweeps idly. Pitch is
// clamped to +/-40 degrees and turn rate to the entity's speed.
static void turret_aim( gentity_t *self )
//-----------------------------------------------------
{
	vec3_t		enemyDir, org, org2;
	vec3_t		desiredAngles, setAngle;
	float		diffYaw = 0.0f, diffPitch = 0.0f, turnSpeed;
	const float	pitchCap = 40.0f;
	gentity_t	*top = &g_entities[self->r.ownerNum];
	if ( !top )
	{
		return;
	}

	// move our gun base yaw to where we should be at this time....
	BG_EvaluateTrajectory( &top->s.apos, level.time, top->r.currentAngles );
	top->r.currentAngles[YAW] = AngleNormalize180( top->r.currentAngles[YAW] );
	top->r.currentAngles[PITCH] = AngleNormalize180( top->r.currentAngles[PITCH] );

	turnSpeed = top->speed;

	if ( self->painDebounceTime > level.time )
	{
		// recently damaged: twitch randomly instead of tracking
		desiredAngles[YAW] = top->r.currentAngles[YAW]+flrand(-45,45);
		desiredAngles[PITCH] = top->r.currentAngles[PITCH]+flrand(-10,10);
		if (desiredAngles[PITCH] < -pitchCap)
		{
			desiredAngles[PITCH] = -pitchCap;
		}
		else if (desiredAngles[PITCH] > pitchCap)
		{
			desiredAngles[PITCH] = pitchCap;
		}
		diffYaw = AngleSubtract( desiredAngles[YAW], top->r.currentAngles[YAW] );
		diffPitch = AngleSubtract( desiredAngles[PITCH], top->r.currentAngles[PITCH] );
		turnSpeed = flrand( -5, 5 );
	}
	else if ( self->enemy )
	{
		// ...then we'll calculate what new aim adjustments we should attempt to make this frame
		// Aim at enemy
		VectorCopy( self->enemy->r.currentOrigin, org );
		org[2]+=self->enemy->r.maxs[2]*0.5f;
		if (self->enemy->s.eType == ET_NPC &&
			self->enemy->s.NPC_class == CLASS_VEHICLE &&
			self->enemy->m_pVehicle &&
			self->enemy->m_pVehicle->m_pVehicleInfo->type == VH_WALKER)
		{ //hack! aim higher at tall AT-ST walkers
			org[2] += 32.0f;
		}
		/*
		mdxaBone_t	boltMatrix;
		// Getting the "eye" here
		trap->G2API_GetBoltMatrix( self->ghoul2, self->playerModel, self->torsoBolt, &boltMatrix, self->r.currentAngles, self->s.origin, (cg.time?cg.time:level.time), NULL, self->s.modelScale );
		trap->G2API_GiveMeVectorFromMatrix( boltMatrix, ORIGIN, org2 );
		*/
		VectorCopy( top->r.currentOrigin, org2 );

		VectorSubtract( org, org2, enemyDir );
		vectoangles( enemyDir, desiredAngles );
		desiredAngles[PITCH] = AngleNormalize180(desiredAngles[PITCH]);
		if (desiredAngles[PITCH] < -pitchCap)
		{
			desiredAngles[PITCH] = -pitchCap;
		}
		else if (desiredAngles[PITCH] > pitchCap)
		{
			desiredAngles[PITCH] = pitchCap;
		}
		diffYaw = AngleSubtract( desiredAngles[YAW], top->r.currentAngles[YAW] );
		diffPitch = AngleSubtract( desiredAngles[PITCH], top->r.currentAngles[PITCH] );
	}
	else
	{//FIXME: Pan back and forth in original facing
		// no enemy, so make us slowly sweep back and forth as if searching for a new one
		desiredAngles[YAW] = sin( level.time * 0.0001f + top->count );
		desiredAngles[YAW] *= 60.0f;
		desiredAngles[YAW] += self->s.angles[YAW];
		desiredAngles[YAW] = AngleNormalize180( desiredAngles[YAW] );
		diffYaw = AngleSubtract( desiredAngles[YAW], top->r.currentAngles[YAW] );
		diffPitch = AngleSubtract( 0, top->r.currentAngles[PITCH] );
		turnSpeed = 1.0f;
	}

	if ( diffYaw )
	{
		// cap max speed....
		if ( fabs(diffYaw) > turnSpeed )
		{
			diffYaw = ( diffYaw >= 0 ? turnSpeed : -turnSpeed );
		}
	}

	if ( diffPitch )
	{
		if ( fabs(diffPitch) > turnSpeed )
		{
			// cap max speed
			diffPitch = (diffPitch > 0.0f ? turnSpeed : -turnSpeed );
		}
	}
	// ...then set up our desired yaw
	VectorSet( setAngle, diffPitch, diffYaw, 0 );
	VectorCopy( top->r.currentAngles, top->s.apos.trBase );
	VectorScale( setAngle, (1000/FRAMETIME), top->s.apos.trDelta );
	top->s.apos.trTime = level.time;
	top->s.apos.trType = TR_LINEAR_STOP;
	top->s.apos.trDuration = FRAMETIME;

	if ( diffYaw || diffPitch )
	{
		top->s.loopSound = G_SoundIndex( "sound/vehicles/weapons/hoth_turret/turn.wav" );
	}
	else
	{
		top->s.loopSound = 0;
	}
}

//-----------------------------------------------------
// Stops the head's rotation, silences it, and drops the current enemy.
static void turret_turnoff( gentity_t *self )
//-----------------------------------------------------
{
	gentity_t *top = &g_entities[self->r.ownerNum];
	if ( top != NULL )
	{//still have a top
		//stop it from rotating
		VectorCopy( top->r.currentAngles, top->s.apos.trBase );
		VectorClear( top->s.apos.trDelta );
		top->s.apos.trTime = level.time;
		top->s.apos.trType = TR_STATIONARY;
	}

	self->s.loopSound = 0;	// shut-down sound
	//G_Sound( self, CHAN_BODY, G_SoundIndex( "sound/chars/turret/shutdown.wav" ));

	// Clear enemy
	self->enemy = NULL;
}

//-----------------------------------------------------
// Enters the "alert but idle" state: plays the ping cue window for a few
// seconds (via aimDebounceTime) and clears the enemy.
static void turret_sleep( gentity_t *self )
//-----------------------------------------------------
{
	if ( self->enemy == NULL )
	{
		// we don't need to play sound
		return;
	}

	// make turret play ping sound for 5 seconds
	self->aimDebounceTime = level.time + 5000;

	// Clear enemy
	self->enemy = NULL;
}

//-----------------------------------------------------
// Scans for the closest valid client target in radius with a clear line of
// fire (AT-ST walkers are preferred over everything else). On success, sets
// the enemy and fires target2. Returns qtrue if a target was acquired.
static qboolean turret_find_enemies( gentity_t *self )
//-----------------------------------------------------
{
	qboolean	found = qfalse;
	int			i, count;
	float		bestDist = self->radius * self->radius;
	float		enemyDist;
	vec3_t		enemyDir, org, org2;
	gentity_t	*entity_list[MAX_GENTITIES], *target, *bestTarget = NULL;
	trace_t		tr;
	gentity_t	*top = &g_entities[self->r.ownerNum];
	if ( !top )
	{
		return qfalse;
	}

	if ( self->aimDebounceTime > level.time ) // time since we've been shut off
	{
		// We were active and alert, i.e. had an enemy in the last 3 secs
		if ( self->timestamp < level.time )
		{
			//G_Sound(self, CHAN_BODY, G_SoundIndex( "sound/chars/turret/ping.wav" ));
			self->timestamp = level.time + 1000;
		}
	}

	VectorCopy( top->r.currentOrigin, org2 );

	count = G_RadiusList( org2, self->radius, self, qtrue, entity_list );

	for ( i = 0; i < count; i++ )
	{
		target = entity_list[i];

		if ( !target->client )
		{
			// only attack clients
			continue;
		}
		if ( target == self || !target->takedamage || target->health <= 0 || ( target->flags & FL_NOTARGET ))
		{
			continue;
		}
		if ( target->client->sess.sessionTeam == TEAM_SPECTATOR )
		{
			continue;
		}
		if ( target->client->tempSpectate >= level.time )
		{
			continue;
		}
		if ( self->alliedTeam )
		{
			if ( target->client )
			{
				if ( target->client->sess.sessionTeam == self->alliedTeam )
				{
					// A bot/client/NPC we don't want to shoot
					continue;
				}
			}
			else if ( target->teamnodmg == self->alliedTeam )
			{
				// An ent we don't want to shoot
				continue;
			}
		}
		if ( !trap->InPVS( org2, target->r.currentOrigin ))
		{
			continue;
		}

		VectorCopy( target->r.currentOrigin, org );
		org[2] += target->r.maxs[2]*0.5f;

		trap->Trace( &tr, org2, NULL, NULL, org, self->s.number, MASK_SHOT, qfalse, 0, 0 );

		if ( !tr.allsolid && !tr.startsolid && ( tr.fraction == 1.0 || tr.entityNum == target->s.number ))
		{
			// Only acquire if have a clear shot, Is it in range and closer than our best?
			VectorSubtract( target->r.currentOrigin, top->r.currentOrigin, enemyDir );
			enemyDist = VectorLengthSquared( enemyDir );

			if ( enemyDist < bestDist	// all things equal, keep current
				|| (!Q_stricmp( "atst_vehicle", target->NPC_type )
					&& bestTarget
					&& Q_stricmp( "atst_vehicle", bestTarget->NPC_type ) ) )//target AT-STs over non-AT-STs...   FIXME: must be a better, easier way to tell this, no?
			{
				if ( self->attackDebounceTime < level.time )
				{
					// We haven't fired or acquired an enemy in the last 2 seconds-start-up sound
					//G_Sound( self, CHAN_BODY, G_SoundIndex( "sound/chars/turret/startup.wav" ));

					// Wind up turrets for a bit
					self->attackDebounceTime = level.time + 1400;
				}

				bestTarget = target;
				bestDist = enemyDist;
				found = qtrue;
			}
		}
	}

	if ( found )
	{
		G_SetEnemy( self, bestTarget );
		if ( VALIDSTRING( self->target2 ))
		{
			G_UseTargets2( self, self, self->target2 );
		}
	}

	return found;
}

//-----------------------------------------------------
// Main think function for the turret base: validates/acquires the enemy,
// fires via turret_head_think(), keeps the acquisition latched for ~2s
// (bounceCount) to avoid on/off flicker, and finally re-aims.
void turret_base_think( gentity_t *self )
//-----------------------------------------------------
{
	qboolean	turnOff = qtrue;
	float		enemyDist;
	vec3_t		enemyDir, org, org2;

	if ( self->spawnflags & 1 )
	{
		// not turned on
		turret_turnoff( self );
		// No target
		self->flags |= FL_NOTARGET;

		self->nextthink = -1;//never think again
		return;
	}
	else
	{
		// I'm all hot and bothered
		self->flags &= ~FL_NOTARGET;
		//remember to keep thinking!
		self->nextthink = level.time + FRAMETIME;
	}

	if ( !self->enemy )
	{
		if ( turret_find_enemies( self ))
		{
			turnOff = qfalse;
		}
	}
	else if ( self->enemy->client && self->enemy->client->sess.sessionTeam == TEAM_SPECTATOR )
	{//don't keep going after spectators
		self->enemy = NULL;
	}
	else if ( self->enemy->client && self->enemy->client->tempSpectate >= level.time )
	{//don't keep going after spectators
		self->enemy = NULL;
	}
	else
	{//FIXME: remain single-minded or look for a new enemy every now and then?
		if ( self->enemy->health > 0 )
		{
			// enemy is alive
			VectorSubtract( self->enemy->r.currentOrigin, self->r.currentOrigin, enemyDir );
			enemyDist = VectorLengthSquared( enemyDir );

			if ( enemyDist < (self->radius * self->radius) )
			{
				// was in valid radius
				if ( trap->InPVS( self->r.currentOrigin, self->enemy->r.currentOrigin ) )
				{
					// Every now and again, check to see if we can even trace to the enemy
					trace_t tr;

					if ( self->enemy->client )
					{
						VectorCopy( self->enemy->client->renderInfo.eyePoint, org );
					}
					else
					{
						VectorCopy( self->enemy->r.currentOrigin, org );
					}
					VectorCopy( self->r.currentOrigin, org2 );
					// spawnflag 2 presumably marks an inverted/ceiling mount - TODO confirm
					if ( self->spawnflags & 2 )
					{
						org2[2] += 10;
					}
					else
					{
						org2[2] -= 10;
					}

					trap->Trace( &tr, org2, NULL, NULL, org, self->s.number, MASK_SHOT, qfalse, 0, 0 );

					if ( !tr.allsolid && !tr.startsolid && tr.entityNum == self->enemy->s.number )
					{
						turnOff = qfalse;	// Can see our enemy
					}
				}
			}
		}

		turret_head_think( self );
	}

	if ( turnOff )
	{
		if ( self->bounceCount < level.time ) // bounceCount is used to keep the thing from ping-ponging from on to off
		{
			turret_sleep( self );
		}
	}
	else
	{
		// keep our enemy for a minimum of 2 seconds from now
		self->bounceCount = level.time + 2000 + random() * 150;
	}

	turret_aim( self );
}

//-----------------------------------------------------------------------------
// Use handler: toggles the turret's START_OFF spawnflag (on <-> off).
void turret_base_use( gentity_t *self, gentity_t *other, gentity_t *activator )
//-----------------------------------------------------------------------------
{
	// Toggle on and off
	self->spawnflags = (self->spawnflags ^ 1);

	/*
	if (( self->s.eFlags & EF_SHADER_ANIM ) && ( self->spawnflags & 1 )) // Start_Off
	{
		self->s.frame = 1; // black
	}
	else
	{
		self->s.frame = 0; // glow
	}
	*/
}

/*QUAKED misc_turret (1 0 0) (-48 -48 0) (48 48 144) START_OFF
Large 2-piece turbolaser turret

START_OFF - Starts off

radius - How far away an enemy can be for it to pick it up (default 1024)
wait - Time between shots (default 300 ms)
dmg - How much damage each shot does (default 100)
health - How much damage it can take
before exploding (default 3000) speed - how fast it turns (default 10) splashDamage - How much damage the explosion does (300) splashRadius - The radius of the explosion (128) shotspeed - speed at which projectiles will move targetname - Toggles it on/off target - What to use when destroyed target2 - What to use when it decides to start shooting at an enemy showhealth - set to 1 to show health bar on this entity when crosshair is over it teamowner - crosshair shows green for this team, red for opposite team 0 - none 1 - red 2 - blue alliedTeam - team that this turret won't target 0 - none 1 - red 2 - blue teamnodmg - team that turret does not take damage from 0 - none 1 - red 2 - blue "icon" - icon that represents the objective on the radar */ //----------------------------------------------------- void SP_misc_turret( gentity_t *base ) //----------------------------------------------------- { char* s; base->s.modelindex2 = G_ModelIndex( "models/map_objects/hoth/turret_bottom.md3" ); base->s.modelindex = G_ModelIndex( "models/map_objects/hoth/turret_base.md3" ); //base->playerModel = trap->G2API_InitGhoul2Model( base->ghoul2, "models/map_objects/imp_mine/turret_canon.glm", base->s.modelindex ); //base->s.radius = 80.0f; //trap->G2API_SetBoneAngles( &base->ghoul2[base->playerModel], "Bone_body", vec3_origin, BONE_ANGLES_POSTMULT, POSITIVE_Y, POSITIVE_Z, POSITIVE_X, NULL ); //base->torsoBolt = trap->G2API_AddBolt( &base->ghoul2[base->playerModel], "*flash03" ); G_SpawnString( "icon", "", &s ); if (s && s[0]) { // We have an icon, so index it now. We are reusing the genericenemyindex // variable rather than adding a new one to the entity state. 
base->s.genericenemyindex = G_IconIndex(s); } G_SetAngles( base, base->s.angles ); G_SetOrigin( base, base->s.origin ); base->r.contents = CONTENTS_BODY; VectorSet( base->r.maxs, 32.0f, 32.0f, 128.0f ); VectorSet( base->r.mins, -32.0f, -32.0f, 0.0f ); base->use = turret_base_use; base->think = turret_base_think; // don't start working right away base->nextthink = level.time + FRAMETIME * 5; trap->LinkEntity( (sharedEntity_t *)base ); if ( !turret_base_spawn_top( base ) ) { G_FreeEntity( base ); } } //----------------------------------------------------- qboolean turret_base_spawn_top( gentity_t *base ) { vec3_t org; int t; gentity_t *top = G_Spawn(); if ( !top ) { return qfalse; } top->s.modelindex = G_ModelIndex( "models/map_objects/hoth/turret_top_new.md3" ); top->s.modelindex2 = G_ModelIndex( "models/map_objects/hoth/turret_top.md3" ); G_SetAngles( top, base->s.angles ); VectorCopy( base->s.origin, org ); org[2] += 128; G_SetOrigin( top, org ); base->r.ownerNum = top->s.number; top->r.ownerNum = base->s.number; if ( base->team && base->team[0] && //level.gametype == GT_SIEGE && !base->teamnodmg) { base->teamnodmg = atoi(base->team); } base->team = NULL; top->teamnodmg = base->teamnodmg; top->alliedTeam = base->alliedTeam; base->s.eType = ET_GENERAL; // Set up our explosion effect for the ExplodeDeath code.... G_EffectIndex( "turret/explode" ); G_EffectIndex( "sparks/spark_exp_nosnd" ); G_EffectIndex( "turret/hoth_muzzle_flash" ); // this is really the pitch angle..... 
top->speed = 0; // this is a random time offset for the no-enemy-search-around-mode top->count = random() * 9000; if ( !base->health ) { base->health = 3000; } top->health = base->health; G_SpawnInt( "showhealth", "0", &t ); if (t) { //a non-0 maxhealth value will mean we want to show the health on the hud top->maxHealth = base->health; //acts as "maxhealth" G_ScaleNetHealth(top); base->maxHealth = base->health; G_ScaleNetHealth(base); } base->takedamage = qtrue; base->pain = TurretBasePain; base->die = bottom_die; //design specified shot speed G_SpawnFloat( "shotspeed", "1100", &base->mass ); top->mass = base->mass; //even if we don't want to show health, let's at least light the crosshair up properly over ourself if ( !top->s.teamowner ) { top->s.teamowner = top->alliedTeam; } base->alliedTeam = top->alliedTeam; base->s.teamowner = top->s.teamowner; base->s.shouldtarget = qtrue; top->s.shouldtarget = qtrue; //link them to each other base->target_ent = top; top->target_ent = base; //top->s.owner = MAX_CLIENTS; //not owned by any client // search radius if ( !base->radius ) { base->radius = 1024; } top->radius = base->radius; // How quickly to fire if ( !base->wait ) { base->wait = 300 + random() * 55; } top->wait = base->wait; if ( !base->splashDamage ) { base->splashDamage = 300; } top->splashDamage = base->splashDamage; if ( !base->splashRadius ) { base->splashRadius = 128; } top->splashRadius = base->splashRadius; // how much damage each shot does if ( !base->damage ) { base->damage = 100; } top->damage = base->damage; // how fast it turns if ( !base->speed ) { base->speed = 20; } top->speed = base->speed; VectorSet( top->r.maxs, 48.0f, 48.0f, 16.0f ); VectorSet( top->r.mins, -48.0f, -48.0f, 0.0f ); // Precache moving sounds //G_SoundIndex( "sound/chars/turret/startup.wav" ); //G_SoundIndex( "sound/chars/turret/shutdown.wav" ); //G_SoundIndex( "sound/chars/turret/ping.wav" ); G_SoundIndex( "sound/vehicles/weapons/hoth_turret/turn.wav" ); top->genericValue13 = 
G_EffectIndex( "turret/hoth_muzzle_flash" ); top->genericValue14 = G_EffectIndex( "turret/hoth_shot" ); top->genericValue15 = G_EffectIndex( "turret/hoth_impact" ); top->r.contents = CONTENTS_BODY; //base->max_health = base->health; top->takedamage = qtrue; top->pain = TurretPain; top->die = auto_turret_die; top->material = MAT_METAL; //base->r.svFlags |= SVF_NO_TELEPORT|SVF_NONNPC_ENEMY|SVF_SELF_ANIMATING; // Register this so that we can use it for the missile effect RegisterItem( BG_FindItemForWeapon( WP_EMPLACED_GUN )); // But set us as a turret so that we can be identified as a turret top->s.weapon = WP_EMPLACED_GUN; trap->LinkEntity( (sharedEntity_t *)top ); return qtrue; }
gpl-2.0
coreboot/coreboot
src/cpu/intel/model_67x/model_67x_init.c
2
1141
/* SPDX-License-Identifier: GPL-2.0-only */

#include <device/device.h>
#include <cpu/cpu.h>
#include <cpu/x86/mtrr.h>
#include <cpu/intel/microcode.h>
#include <cpu/x86/cache.h>
#include <cpu/intel/l2_cache.h>

/*
 * Per-core init for Intel model 067x (Pentium III family) CPUs:
 * load a microcode update, program the L2 cache, then enable
 * caching and configure the MTRRs.
 */
static void model_67x_init(struct device *dev)
{
	/* Apply a microcode update from CBFS, if one is available. */
	intel_update_microcode_from_cbfs();

	/* Program the P6-family L2 cache registers. */
	p6_configure_l2_cache();

	/* Make sure caching is enabled before touching the MTRRs. */
	enable_cache();

	/* Configure the memory type range registers and verify them. */
	x86_setup_mtrrs();
	x86_mtrr_check();
}

static struct device_operations cpu_dev_ops = {
	.init = model_67x_init,
};

/*
 * Intel Pentium III Processor Identification and Package Information
 * http://www.intel.com/design/pentiumiii/qit/update.pdf
 *
 * Intel Pentium III Processor Specification Update
 * http://download.intel.com/design/intarch/specupdt/24445358.pdf
 */
static const struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, 0x0671 },
	{ X86_VENDOR_INTEL, 0x0672 }, /* PIII, kB0 */
	{ X86_VENDOR_INTEL, 0x0673 }, /* PIII, kC0 */
	{ 0, 0 },
};

static const struct cpu_driver driver __cpu_driver = {
	.ops      = &cpu_dev_ops,
	.id_table = cpu_table,
};
gpl-2.0
evanphx/yoke
src/VBox/Main/src-server/darwin/iokit.cpp
2
70978
/* $Id: iokit.cpp $ */ /** @file * Main - Darwin IOKit Routines. * * Because IOKit makes use of COM like interfaces, it does not mix very * well with COM/XPCOM and must therefore be isolated from it using a * simpler C interface. */ /* * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; * you can redistribute it and/or modify it under the terms of the GNU * General Public License (GPL) as published by the Free Software * Foundation, in version 2 as it comes in the "COPYING" file of the * VirtualBox OSE distribution. VirtualBox OSE is distributed in the * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. */ /******************************************************************************* * Header Files * *******************************************************************************/ #define LOG_GROUP LOG_GROUP_MAIN #ifdef STANDALONE_TESTCASE # define VBOX_WITH_USB #endif #include <mach/mach.h> #include <Carbon/Carbon.h> #include <IOKit/IOKitLib.h> #include <IOKit/storage/IOStorageDeviceCharacteristics.h> #include <IOKit/scsi/SCSITaskLib.h> #include <SystemConfiguration/SystemConfiguration.h> #include <mach/mach_error.h> #ifdef VBOX_WITH_USB # include <IOKit/usb/IOUSBLib.h> # include <IOKit/IOCFPlugIn.h> #endif #include <VBox/log.h> #include <VBox/err.h> #include <iprt/mem.h> #include <iprt/string.h> #include <iprt/process.h> #include <iprt/assert.h> #include <iprt/thread.h> #include <iprt/uuid.h> #ifdef STANDALONE_TESTCASE # include <iprt/initterm.h> # include <iprt/stream.h> #endif #include "iokit.h" /* A small hack... 
*/ #ifdef STANDALONE_TESTCASE # define DarwinFreeUSBDeviceFromIOKit(a) do { } while (0) #endif /******************************************************************************* * Defined Constants And Macros * *******************************************************************************/ /** An attempt at catching reference leaks. */ #define MY_CHECK_CREFS(cRefs) do { AssertMsg(cRefs < 25, ("%ld\n", cRefs)); NOREF(cRefs); } while (0) /** Contains the pid of the current client. If 0, the kernel is the current client. */ #define VBOXUSB_CLIENT_KEY "VBoxUSB-Client" /** Contains the pid of the filter owner (i.e. the VBoxSVC pid). */ #define VBOXUSB_OWNER_KEY "VBoxUSB-Owner" /** The VBoxUSBDevice class name. */ #define VBOXUSBDEVICE_CLASS_NAME "org_virtualbox_VBoxUSBDevice" /******************************************************************************* * Global Variables * *******************************************************************************/ /** The IO Master Port. */ static mach_port_t g_MasterPort = NULL; /** * Lazily opens the master port. * * @returns true if the port is open, false on failure (very unlikely). */ static bool darwinOpenMasterPort(void) { if (!g_MasterPort) { kern_return_t krc = IOMasterPort(MACH_PORT_NULL, &g_MasterPort); AssertReturn(krc == KERN_SUCCESS, false); } return true; } /** * Checks whether the value exists. * * @returns true / false accordingly. * @param DictRef The dictionary. * @param KeyStrRef The key name. */ static bool darwinDictIsPresent(CFDictionaryRef DictRef, CFStringRef KeyStrRef) { return !!CFDictionaryGetValue(DictRef, KeyStrRef); } /** * Gets a boolean value. * * @returns Success indicator (true/false). * @param DictRef The dictionary. * @param KeyStrRef The key name. * @param pf Where to store the key value. 
*/ static bool darwinDictGetBool(CFDictionaryRef DictRef, CFStringRef KeyStrRef, bool *pf) { CFTypeRef BoolRef = CFDictionaryGetValue(DictRef, KeyStrRef); if ( BoolRef && CFGetTypeID(BoolRef) == CFBooleanGetTypeID()) { *pf = CFBooleanGetValue((CFBooleanRef)BoolRef); return true; } *pf = false; return false; } /** * Gets an unsigned 8-bit integer value. * * @returns Success indicator (true/false). * @param DictRef The dictionary. * @param KeyStrRef The key name. * @param pu8 Where to store the key value. */ static bool darwinDictGetU8(CFDictionaryRef DictRef, CFStringRef KeyStrRef, uint8_t *pu8) { CFTypeRef ValRef = CFDictionaryGetValue(DictRef, KeyStrRef); if (ValRef) { if (CFNumberGetValue((CFNumberRef)ValRef, kCFNumberSInt8Type, pu8)) return true; } *pu8 = 0; return false; } /** * Gets an unsigned 16-bit integer value. * * @returns Success indicator (true/false). * @param DictRef The dictionary. * @param KeyStrRef The key name. * @param pu16 Where to store the key value. */ static bool darwinDictGetU16(CFDictionaryRef DictRef, CFStringRef KeyStrRef, uint16_t *pu16) { CFTypeRef ValRef = CFDictionaryGetValue(DictRef, KeyStrRef); if (ValRef) { if (CFNumberGetValue((CFNumberRef)ValRef, kCFNumberSInt16Type, pu16)) return true; } *pu16 = 0; return false; } /** * Gets an unsigned 32-bit integer value. * * @returns Success indicator (true/false). * @param DictRef The dictionary. * @param KeyStrRef The key name. * @param pu32 Where to store the key value. */ static bool darwinDictGetU32(CFDictionaryRef DictRef, CFStringRef KeyStrRef, uint32_t *pu32) { CFTypeRef ValRef = CFDictionaryGetValue(DictRef, KeyStrRef); if (ValRef) { if (CFNumberGetValue((CFNumberRef)ValRef, kCFNumberSInt32Type, pu32)) return true; } *pu32 = 0; return false; } /** * Gets an unsigned 64-bit integer value. * * @returns Success indicator (true/false). * @param DictRef The dictionary. * @param KeyStrRef The key name. * @param pu64 Where to store the key value. 
*/ static bool darwinDictGetU64(CFDictionaryRef DictRef, CFStringRef KeyStrRef, uint64_t *pu64) { CFTypeRef ValRef = CFDictionaryGetValue(DictRef, KeyStrRef); if (ValRef) { if (CFNumberGetValue((CFNumberRef)ValRef, kCFNumberSInt64Type, pu64)) return true; } *pu64 = 0; return false; } /** * Gets a RTPROCESS value. * * @returns Success indicator (true/false). * @param DictRef The dictionary. * @param KeyStrRef The key name. * @param pProcess Where to store the key value. */ static bool darwinDictGetProcess(CFMutableDictionaryRef DictRef, CFStringRef KeyStrRef, PRTPROCESS pProcess) { switch (sizeof(*pProcess)) { case sizeof(uint16_t): return darwinDictGetU16(DictRef, KeyStrRef, (uint16_t *)pProcess); case sizeof(uint32_t): return darwinDictGetU32(DictRef, KeyStrRef, (uint32_t *)pProcess); case sizeof(uint64_t): return darwinDictGetU64(DictRef, KeyStrRef, (uint64_t *)pProcess); default: AssertMsgFailedReturn(("%d\n", sizeof(*pProcess)), false); } } /** * Gets string value, converted to UTF-8 and put in user buffer. * * @returns Success indicator (true/false). * @param DictRef The dictionary. * @param KeyStrRef The key name. * @param psz The string buffer. On failure this will be an empty string (""). * @param cch The size of the buffer. */ static bool darwinDictGetString(CFDictionaryRef DictRef, CFStringRef KeyStrRef, char *psz, size_t cch) { CFTypeRef ValRef = CFDictionaryGetValue(DictRef, KeyStrRef); if (ValRef) { if (CFStringGetCString((CFStringRef)ValRef, psz, cch, kCFStringEncodingUTF8)) return true; } Assert(cch > 0); *psz = '\0'; return false; } /** * Gets string value, converted to UTF-8 and put in a IPRT string buffer. * * @returns Success indicator (true/false). * @param DictRef The dictionary. * @param KeyStrRef The key name. * @param ppsz Where to store the key value. Free with RTStrFree. Set to NULL on failure. 
*/ static bool darwinDictDupString(CFDictionaryRef DictRef, CFStringRef KeyStrRef, char **ppsz) { char szBuf[512]; if (darwinDictGetString(DictRef, KeyStrRef, szBuf, sizeof(szBuf))) { *ppsz = RTStrDup(RTStrStrip(szBuf)); if (*ppsz) return true; } *ppsz = NULL; return false; } /** * Gets a byte string (data) of a specific size. * * @returns Success indicator (true/false). * @param DictRef The dictionary. * @param KeyStrRef The key name. * @param pvBuf The buffer to store the bytes in. * @param cbBuf The size of the buffer. This must exactly match the data size. */ static bool darwinDictGetData(CFDictionaryRef DictRef, CFStringRef KeyStrRef, void *pvBuf, size_t cbBuf) { CFTypeRef ValRef = CFDictionaryGetValue(DictRef, KeyStrRef); if (ValRef) { CFIndex cbActual = CFDataGetLength((CFDataRef)ValRef); if (cbActual >= 0 && cbBuf == (size_t)cbActual) { CFDataGetBytes((CFDataRef)ValRef, CFRangeMake(0, cbBuf), (uint8_t *)pvBuf); return true; } } memset(pvBuf, '\0', cbBuf); return false; } #if 1 && !defined(STANDALONE_TESTCASE) /* dumping disabled */ # define DARWIN_IOKIT_LOG(a) Log(a) # define DARWIN_IOKIT_LOG_FLUSH() do {} while (0) # define DARWIN_IOKIT_DUMP_OBJ(o) do {} while (0) #else # if defined(STANDALONE_TESTCASE) # include <iprt/stream.h> # define DARWIN_IOKIT_LOG(a) RTPrintf a # define DARWIN_IOKIT_LOG_FLUSH() RTStrmFlush(g_pStdOut) # else # define DARWIN_IOKIT_LOG(a) RTLogPrintf a # define DARWIN_IOKIT_LOG_FLUSH() RTLogFlush(NULL) # endif # define DARWIN_IOKIT_DUMP_OBJ(o) darwinDumpObj(o) /** * Callback for dumping a dictionary key. * * @param pvKey The key name. * @param pvValue The key value * @param pvUser The recursion depth. */ static void darwinDumpDictCallback(const void *pvKey, const void *pvValue, void *pvUser) { /* display the key name. 
*/ char *pszKey = (char *)RTMemTmpAlloc(1024); if (!CFStringGetCString((CFStringRef)pvKey, pszKey, 1024, kCFStringEncodingUTF8)) strcpy(pszKey, "CFStringGetCString failure"); DARWIN_IOKIT_LOG(("%+*s%s", (int)(uintptr_t)pvUser, "", pszKey)); RTMemTmpFree(pszKey); /* display the value type */ CFTypeID Type = CFGetTypeID(pvValue); DARWIN_IOKIT_LOG((" [%d-", Type)); /* display the value */ if (Type == CFDictionaryGetTypeID()) { DARWIN_IOKIT_LOG(("dictionary] =\n" "%-*s{\n", (int)(uintptr_t)pvUser, "")); CFDictionaryApplyFunction((CFDictionaryRef)pvValue, darwinDumpDictCallback, (void *)((uintptr_t)pvUser + 4)); DARWIN_IOKIT_LOG(("%-*s}\n", (int)(uintptr_t)pvUser, "")); } else if (Type == CFBooleanGetTypeID()) DARWIN_IOKIT_LOG(("bool] = %s\n", CFBooleanGetValue((CFBooleanRef)pvValue) ? "true" : "false")); else if (Type == CFNumberGetTypeID()) { union { SInt8 s8; SInt16 s16; SInt32 s32; SInt64 s64; Float32 rf32; Float64 rd64; char ch; short s; int i; long l; long long ll; float rf; double rd; CFIndex iCF; } u; memset(&u, 0, sizeof(u)); CFNumberType NumType = CFNumberGetType((CFNumberRef)pvValue); if (CFNumberGetValue((CFNumberRef)pvValue, NumType, &u)) { switch (CFNumberGetType((CFNumberRef)pvValue)) { case kCFNumberSInt8Type: DARWIN_IOKIT_LOG(("SInt8] = %RI8 (%#RX8)\n", NumType, u.s8, u.s8)); break; case kCFNumberSInt16Type: DARWIN_IOKIT_LOG(("SInt16] = %RI16 (%#RX16)\n", NumType, u.s16, u.s16)); break; case kCFNumberSInt32Type: DARWIN_IOKIT_LOG(("SInt32] = %RI32 (%#RX32)\n", NumType, u.s32, u.s32)); break; case kCFNumberSInt64Type: DARWIN_IOKIT_LOG(("SInt64] = %RI64 (%#RX64)\n", NumType, u.s64, u.s64)); break; case kCFNumberFloat32Type: DARWIN_IOKIT_LOG(("float32] = %#lx\n", NumType, u.l)); break; case kCFNumberFloat64Type: DARWIN_IOKIT_LOG(("float64] = %#llx\n", NumType, u.ll)); break; case kCFNumberFloatType: DARWIN_IOKIT_LOG(("float] = %#lx\n", NumType, u.l)); break; case kCFNumberDoubleType: DARWIN_IOKIT_LOG(("double] = %#llx\n", NumType, u.ll)); break; case 
kCFNumberCharType: DARWIN_IOKIT_LOG(("char] = %hhd (%hhx)\n", NumType, u.ch, u.ch)); break; case kCFNumberShortType: DARWIN_IOKIT_LOG(("short] = %hd (%hx)\n", NumType, u.s, u.s)); break; case kCFNumberIntType: DARWIN_IOKIT_LOG(("int] = %d (%#x)\n", NumType, u.i, u.i)); break; case kCFNumberLongType: DARWIN_IOKIT_LOG(("long] = %ld (%#lx)\n", NumType, u.l, u.l)); break; case kCFNumberLongLongType: DARWIN_IOKIT_LOG(("long long] = %lld (%#llx)\n", NumType, u.ll, u.ll)); break; case kCFNumberCFIndexType: DARWIN_IOKIT_LOG(("CFIndex] = %lld (%#llx)\n", NumType, (long long)u.iCF, (long long)u.iCF)); break; break; default: DARWIN_IOKIT_LOG(("%d?] = %lld (%llx)\n", NumType, u.ll, u.ll)); break; } } else DARWIN_IOKIT_LOG(("number] = CFNumberGetValue failed\n")); } else if (Type == CFBooleanGetTypeID()) DARWIN_IOKIT_LOG(("boolean] = %RTbool\n", CFBooleanGetValue((CFBooleanRef)pvValue))); else if (Type == CFStringGetTypeID()) { DARWIN_IOKIT_LOG(("string] = ")); char *pszValue = (char *)RTMemTmpAlloc(16*_1K); if (!CFStringGetCString((CFStringRef)pvValue, pszValue, 16*_1K, kCFStringEncodingUTF8)) strcpy(pszValue, "CFStringGetCString failure"); DARWIN_IOKIT_LOG(("\"%s\"\n", pszValue)); RTMemTmpFree(pszValue); } else if (Type == CFDataGetTypeID()) { CFIndex cb = CFDataGetLength((CFDataRef)pvValue); DARWIN_IOKIT_LOG(("%zu bytes] =", (size_t)cb)); void *pvData = RTMemTmpAlloc(cb + 8); CFDataGetBytes((CFDataRef)pvValue, CFRangeMake(0, cb), (uint8_t *)pvData); if (!cb) DARWIN_IOKIT_LOG((" \n")); else if (cb <= 32) DARWIN_IOKIT_LOG((" %.*Rhxs\n", cb, pvData)); else DARWIN_IOKIT_LOG(("\n%.*Rhxd\n", cb, pvData)); RTMemTmpFree(pvData); } else DARWIN_IOKIT_LOG(("??] = %p\n", pvValue)); } /** * Dumps a dictionary to the log. * * @param DictRef The dictionary to dump. 
*/ static void darwinDumpDict(CFDictionaryRef DictRef, unsigned cIndents) { CFDictionaryApplyFunction(DictRef, darwinDumpDictCallback, (void *)(uintptr_t)cIndents); DARWIN_IOKIT_LOG_FLUSH(); } /** * Dumps an I/O kit registry object and all it children. * @param Object The object to dump. * @param cIndents The number of indents to use. */ static void darwinDumpObjInt(io_object_t Object, unsigned cIndents) { static io_string_t s_szPath; kern_return_t krc = IORegistryEntryGetPath(Object, kIOServicePlane, s_szPath); if (krc != KERN_SUCCESS) strcpy(s_szPath, "IORegistryEntryGetPath failed"); DARWIN_IOKIT_LOG(("Dumping %p - %s:\n", (const void *)Object, s_szPath)); CFMutableDictionaryRef PropsRef = 0; krc = IORegistryEntryCreateCFProperties(Object, &PropsRef, kCFAllocatorDefault, kNilOptions); if (krc == KERN_SUCCESS) { darwinDumpDict(PropsRef, cIndents + 4); CFRelease(PropsRef); } /* * Children. */ io_iterator_t Children; krc = IORegistryEntryGetChildIterator(Object, kIOServicePlane, &Children); if (krc == KERN_SUCCESS) { io_object_t Child; while ((Child = IOIteratorNext(Children))) { darwinDumpObjInt(Child, cIndents + 4); IOObjectRelease(Child); } IOObjectRelease(Children); } else DARWIN_IOKIT_LOG(("IORegistryEntryGetChildIterator -> %#x\n", krc)); } /** * Dumps an I/O kit registry object and all it children. * @param Object The object to dump. */ static void darwinDumpObj(io_object_t Object) { darwinDumpObjInt(Object, 0); } #endif /* helpers for dumping registry dictionaries */ #ifdef VBOX_WITH_USB /** * Notification data created by DarwinSubscribeUSBNotifications, used by * the callbacks and finally freed by DarwinUnsubscribeUSBNotifications. */ typedef struct DARWINUSBNOTIFY { /** The notification port. * It's shared between the notification callbacks. */ IONotificationPortRef NotifyPort; /** The run loop source for NotifyPort. */ CFRunLoopSourceRef NotifyRLSrc; /** The attach notification iterator. 
*/ io_iterator_t AttachIterator; /** The 2nd attach notification iterator. */ io_iterator_t AttachIterator2; /** The detach notification iterator. */ io_iterator_t DetachIterator; } DARWINUSBNOTIFY, *PDARWINUSBNOTIFY; /** * Run thru an iterator. * * The docs says this is necessary to start getting notifications, * so this function is called in the callbacks and right after * registering the notification. * * @param pIterator The iterator reference. */ static void darwinDrainIterator(io_iterator_t pIterator) { io_object_t Object; while ((Object = IOIteratorNext(pIterator))) { DARWIN_IOKIT_DUMP_OBJ(Object); IOObjectRelease(Object); } } /** * Callback for the 1st attach notification. * * @param pvNotify Our data. * @param NotifyIterator The notification iterator. */ static void darwinUSBAttachNotification1(void *pvNotify, io_iterator_t NotifyIterator) { DARWIN_IOKIT_LOG(("USB Attach Notification1\n")); NOREF(pvNotify); //PDARWINUSBNOTIFY pNotify = (PDARWINUSBNOTIFY)pvNotify; darwinDrainIterator(NotifyIterator); } /** * Callback for the 2nd attach notification. * * @param pvNotify Our data. * @param NotifyIterator The notification iterator. */ static void darwinUSBAttachNotification2(void *pvNotify, io_iterator_t NotifyIterator) { DARWIN_IOKIT_LOG(("USB Attach Notification2\n")); NOREF(pvNotify); //PDARWINUSBNOTIFY pNotify = (PDARWINUSBNOTIFY)pvNotify; darwinDrainIterator(NotifyIterator); } /** * Callback for the detach notifications. * * @param pvNotify Our data. * @param NotifyIterator The notification iterator. */ static void darwinUSBDetachNotification(void *pvNotify, io_iterator_t NotifyIterator) { DARWIN_IOKIT_LOG(("USB Detach Notification\n")); NOREF(pvNotify); //PDARWINUSBNOTIFY pNotify = (PDARWINUSBNOTIFY)pvNotify; darwinDrainIterator(NotifyIterator); } /** * Subscribes the run loop to USB notification events relevant to * device attach/detach. 
* * The source mode for these events is defined as VBOX_IOKIT_MODE_STRING * so that the caller can listen to events from this mode only and * re-evalutate the list of attached devices whenever an event arrives. * * @returns opaque for passing to the unsubscribe function. If NULL * something unexpectedly failed during subscription. */ void *DarwinSubscribeUSBNotifications(void) { AssertReturn(darwinOpenMasterPort(), NULL); PDARWINUSBNOTIFY pNotify = (PDARWINUSBNOTIFY)RTMemAllocZ(sizeof(*pNotify)); AssertReturn(pNotify, NULL); /* * Create the notification port, bake it into a runloop source which we * then add to our run loop. */ pNotify->NotifyPort = IONotificationPortCreate(g_MasterPort); Assert(pNotify->NotifyPort); if (pNotify->NotifyPort) { pNotify->NotifyRLSrc = IONotificationPortGetRunLoopSource(pNotify->NotifyPort); Assert(pNotify->NotifyRLSrc); if (pNotify->NotifyRLSrc) { CFRunLoopRef RunLoopRef = CFRunLoopGetCurrent(); CFRetain(RunLoopRef); /* Workaround for crash when cleaning up the TLS / runloop((sub)mode). See @bugref{2807}. */ CFRunLoopAddSource(RunLoopRef, pNotify->NotifyRLSrc, CFSTR(VBOX_IOKIT_MODE_STRING)); /* * Create the notification callbacks. 
*/ kern_return_t rc = IOServiceAddMatchingNotification(pNotify->NotifyPort, kIOPublishNotification, IOServiceMatching(kIOUSBDeviceClassName), darwinUSBAttachNotification1, pNotify, &pNotify->AttachIterator); if (rc == KERN_SUCCESS) { darwinDrainIterator(pNotify->AttachIterator); rc = IOServiceAddMatchingNotification(pNotify->NotifyPort, kIOMatchedNotification, IOServiceMatching(kIOUSBDeviceClassName), darwinUSBAttachNotification2, pNotify, &pNotify->AttachIterator2); if (rc == KERN_SUCCESS) { darwinDrainIterator(pNotify->AttachIterator2); rc = IOServiceAddMatchingNotification(pNotify->NotifyPort, kIOTerminatedNotification, IOServiceMatching(kIOUSBDeviceClassName), darwinUSBDetachNotification, pNotify, &pNotify->DetachIterator); { darwinDrainIterator(pNotify->DetachIterator); return pNotify; } IOObjectRelease(pNotify->AttachIterator2); } IOObjectRelease(pNotify->AttachIterator); } CFRunLoopRemoveSource(RunLoopRef, pNotify->NotifyRLSrc, CFSTR(VBOX_IOKIT_MODE_STRING)); } IONotificationPortDestroy(pNotify->NotifyPort); } RTMemFree(pNotify); return NULL; } /** * Unsubscribe the run loop from USB notification subscribed to * by DarwinSubscribeUSBNotifications. * * @param pvOpaque The return value from DarwinSubscribeUSBNotifications. */ void DarwinUnsubscribeUSBNotifications(void *pvOpaque) { PDARWINUSBNOTIFY pNotify = (PDARWINUSBNOTIFY)pvOpaque; if (!pNotify) return; IOObjectRelease(pNotify->AttachIterator); pNotify->AttachIterator = NULL; IOObjectRelease(pNotify->AttachIterator2); pNotify->AttachIterator2 = NULL; IOObjectRelease(pNotify->DetachIterator); pNotify->DetachIterator = NULL; CFRunLoopRemoveSource(CFRunLoopGetCurrent(), pNotify->NotifyRLSrc, CFSTR(VBOX_IOKIT_MODE_STRING)); IONotificationPortDestroy(pNotify->NotifyPort); pNotify->NotifyRLSrc = NULL; pNotify->NotifyPort = NULL; RTMemFree(pNotify); } /** * Descends recursively into a IORegistry tree locating the first object of a given class. * * The search is performed depth first. 
* * @returns Object reference if found, NULL if not. * @param Object The current tree root. * @param pszClass The name of the class we're looking for. * @param pszNameBuf A scratch buffer for query the class name in to avoid * wasting 128 bytes on an io_name_t object for every recursion. */ static io_object_t darwinFindObjectByClass(io_object_t Object, const char *pszClass, io_name_t pszNameBuf) { io_iterator_t Children; kern_return_t krc = IORegistryEntryGetChildIterator(Object, kIOServicePlane, &Children); if (krc != KERN_SUCCESS) return NULL; io_object_t Child; while ((Child = IOIteratorNext(Children))) { krc = IOObjectGetClass(Child, pszNameBuf); if ( krc == KERN_SUCCESS && !strcmp(pszNameBuf, pszClass)) break; io_object_t GrandChild = darwinFindObjectByClass(Child, pszClass, pszNameBuf); IOObjectRelease(Child); if (GrandChild) { Child = GrandChild; break; } } IOObjectRelease(Children); return Child; } /** * Descends recursively into IOUSBMassStorageClass tree to check whether * the MSD is mounted or not. * * The current heuristic is to look for the IOMedia class. * * @returns true if mounted, false if not. * @param MSDObj The IOUSBMassStorageClass object. * @param pszNameBuf A scratch buffer for query the class name in to avoid * wasting 128 bytes on an io_name_t object for every recursion. */ static bool darwinIsMassStorageInterfaceInUse(io_object_t MSDObj, io_name_t pszNameBuf) { io_object_t MediaObj = darwinFindObjectByClass(MSDObj, "IOMedia", pszNameBuf); if (MediaObj) { /* more checks? */ IOObjectRelease(MediaObj); return true; } return false; } /** * Worker function for DarwinGetUSBDevices() that tries to figure out * what state the device is in and set enmState. * * This is mostly a matter of distinguishing between devices that nobody * uses, devices that can be seized and devices that cannot be grabbed. * * @param pCur The USB device data. * @param USBDevice The USB device object. * @param PropsRef The USB device properties. 
 */
static void darwinDeterminUSBDeviceState(PUSBDEVICE pCur, io_object_t USBDevice, CFMutableDictionaryRef /* PropsRef */)
{
    /*
     * Iterate the interfaces (among the children of the IOUSBDevice object).
     */
    io_iterator_t Interfaces;
    kern_return_t krc = IORegistryEntryGetChildIterator(USBDevice, kIOServicePlane, &Interfaces);
    if (krc != KERN_SUCCESS)
        return; /* Cannot inspect the device; pCur->enmState keeps the caller's default. */

    /* Facts gathered while walking the children; combined into enmState below. */
    bool fHaveOwner = false;            /* a VBoxUSBDevice child carried an owner property */
    RTPROCESS Owner = NIL_RTPROCESS;    /* the owner pid, if fHaveOwner */
    bool fHaveClient = false;           /* a VBoxUSBDevice child carried a client property */
    RTPROCESS Client = NIL_RTPROCESS;   /* the client pid, if fHaveClient */
    bool fUserClientOnly = true;        /* only IOUSBUserClientInit children seen so far */
    bool fConfigured = false;           /* at least one IOUSBInterface child seen */
    bool fInUse = false;                /* some in-kernel driver is attached to an interface */
    bool fSeizable = true;              /* no driver we know we can't seize the device from */
    io_object_t Interface;
    while ((Interface = IOIteratorNext(Interfaces)))
    {
        io_name_t szName;
        krc = IOObjectGetClass(Interface, szName);
        if (    krc == KERN_SUCCESS
            &&  !strcmp(szName, "IOUSBInterface"))
        {
            fConfigured = true;

            /*
             * Iterate the interface children looking for stuff other than
             * IOUSBUserClientInit objects.
             */
            io_iterator_t Children1;
            krc = IORegistryEntryGetChildIterator(Interface, kIOServicePlane, &Children1);
            if (krc == KERN_SUCCESS)
            {
                io_object_t Child1;
                while ((Child1 = IOIteratorNext(Children1)))
                {
                    krc = IOObjectGetClass(Child1, szName);
                    if (    krc == KERN_SUCCESS
                        &&  strcmp(szName, "IOUSBUserClientInit")) /* i.e. any class BUT IOUSBUserClientInit */
                    {
                        fUserClientOnly = false;

                        if (!strcmp(szName, "IOUSBMassStorageClass"))
                        {
                            /* Only permit capturing MSDs that aren't mounted, at least
                               until the GUI starts popping up warnings about data loss
                               and such when capturing a busy device. */
                            fSeizable = false;
                            fInUse |= darwinIsMassStorageInterfaceInUse(Child1, szName);
                        }
                        else if (   !strcmp(szName, "IOUSBHIDDriver")
                                 || !strcmp(szName, "AppleHIDMouse")
                                 /** @todo more? */)
                        {
                            /* For now, just assume that all HID devices are inaccessible
                               because of the greedy HID service. */
                            fSeizable = false;
                            fInUse = true;
                        }
                        else
                            fInUse = true;
                    }
                    IOObjectRelease(Child1);
                }
                IOObjectRelease(Children1);
            }
        }
        /*
         * Not an interface, could it be VBoxUSBDevice?
         * If it is, get the owner and client properties.
         */
        else if (    krc == KERN_SUCCESS
                 &&  !strcmp(szName, VBOXUSBDEVICE_CLASS_NAME))
        {
            CFMutableDictionaryRef PropsRef = 0;
            krc = IORegistryEntryCreateCFProperties(Interface, &PropsRef, kCFAllocatorDefault, kNilOptions);
            if (krc == KERN_SUCCESS)
            {
                fHaveOwner = darwinDictGetProcess(PropsRef, CFSTR(VBOXUSB_OWNER_KEY), &Owner);
                fHaveClient = darwinDictGetProcess(PropsRef, CFSTR(VBOXUSB_CLIENT_KEY), &Client);
                CFRelease(PropsRef);
            }
        }

        IOObjectRelease(Interface);
    }
    IOObjectRelease(Interfaces);

    /*
     * Calc the status.
     */
    if (fHaveOwner)
    {
        /* Our filter driver owns the device: either we merely hold it, or a client (VM) uses it. */
        if (Owner == RTProcSelf())
            pCur->enmState = !fHaveClient || Client == NIL_RTPROCESS || !Client
                           ? USBDEVICESTATE_HELD_BY_PROXY
                           : USBDEVICESTATE_USED_BY_GUEST;
        else
            pCur->enmState = USBDEVICESTATE_USED_BY_HOST;
    }
    else if (fUserClientOnly)
        /** @todo how to detect other user client?!? - Look for IOUSBUserClient! */
        pCur->enmState = !fConfigured
                       ? USBDEVICESTATE_UNUSED
                       : USBDEVICESTATE_USED_BY_HOST_CAPTURABLE;
    else if (!fInUse)
        pCur->enmState = USBDEVICESTATE_UNUSED;
    else
        pCur->enmState = fSeizable
                       ? USBDEVICESTATE_USED_BY_HOST_CAPTURABLE
                       : USBDEVICESTATE_USED_BY_HOST;
}


/**
 * Enumerate the USB devices returning a FIFO of them.
 *
 * @returns Pointer to the head.
 *          USBProxyService::freeDevice is expected to free each of the list elements.
 */
PUSBDEVICE DarwinGetUSBDevices(void)
{
    AssertReturn(darwinOpenMasterPort(), NULL);
    //DARWIN_IOKIT_LOG(("DarwinGetUSBDevices\n"));

    /*
     * Create a matching dictionary for searching for USB Devices in the IOKit.
     */
    CFMutableDictionaryRef RefMatchingDict = IOServiceMatching(kIOUSBDeviceClassName);
    AssertReturn(RefMatchingDict, NULL);

    /*
     * Perform the search and get a collection of USB Device back.
     */
    io_iterator_t USBDevices = NULL;
    IOReturn rc = IOServiceGetMatchingServices(g_MasterPort, RefMatchingDict, &USBDevices);
    AssertMsgReturn(rc == kIOReturnSuccess, ("rc=%d\n", rc), NULL);
    RefMatchingDict = NULL; /* the reference is consumed by IOServiceGetMatchingServices. */

    /*
     * Enumerate the USB Devices.
     */
    PUSBDEVICE pHead = NULL;
    PUSBDEVICE pTail = NULL;
    unsigned i = 0;
    io_object_t USBDevice;
    while ((USBDevice = IOIteratorNext(USBDevices)) != 0)
    {
        DARWIN_IOKIT_DUMP_OBJ(USBDevice);

        /*
         * Query the device properties from the registry.
         *
         * We could alternatively use the device and such, but that will be
         * slower and we would have to resort to the registry for the three
         * string anyway.
         */
        CFMutableDictionaryRef PropsRef = 0;
        kern_return_t krc = IORegistryEntryCreateCFProperties(USBDevice, &PropsRef, kCFAllocatorDefault, kNilOptions);
        if (krc == KERN_SUCCESS)
        {
            bool fOk = false;
            PUSBDEVICE pCur = (PUSBDEVICE)RTMemAllocZ(sizeof(*pCur));
            do /* loop for breaking out of on failure. */
            {
                AssertBreak(pCur);

                /*
                 * Mandatory
                 */
                pCur->bcdUSB = 0;                                           /* we've no idea. */
                pCur->enmState = USBDEVICESTATE_USED_BY_HOST_CAPTURABLE;    /* just a default, we'll try harder in a bit. */

                AssertBreak(darwinDictGetU8(PropsRef, CFSTR(kUSBDeviceClass), &pCur->bDeviceClass));
                /* skip hubs */
                if (pCur->bDeviceClass == 0x09 /* hub, find a define! */)
                    break;
                AssertBreak(darwinDictGetU8(PropsRef, CFSTR(kUSBDeviceSubClass), &pCur->bDeviceSubClass));
                AssertBreak(darwinDictGetU8(PropsRef, CFSTR(kUSBDeviceProtocol), &pCur->bDeviceProtocol));
                AssertBreak(darwinDictGetU16(PropsRef, CFSTR(kUSBVendorID), &pCur->idVendor));
                AssertBreak(darwinDictGetU16(PropsRef, CFSTR(kUSBProductID), &pCur->idProduct));
                AssertBreak(darwinDictGetU16(PropsRef, CFSTR(kUSBDeviceReleaseNumber), &pCur->bcdDevice));
                uint32_t u32LocationId;
                AssertBreak(darwinDictGetU32(PropsRef, CFSTR(kUSBDevicePropertyLocationID), &u32LocationId));
                uint64_t u64SessionId;
                AssertBreak(darwinDictGetU64(PropsRef, CFSTR("sessionID"), &u64SessionId));
                /* Synthesize the address string; the same format is parsed by
                   DarwinReEnumerateUSBDevice and the proxy device code. */
                char szAddress[64];
                RTStrPrintf(szAddress, sizeof(szAddress), "p=0x%04RX16;v=0x%04RX16;s=0x%016RX64;l=0x%08RX32",
                            pCur->idProduct, pCur->idVendor, u64SessionId, u32LocationId);
                pCur->pszAddress = RTStrDup(szAddress);
                AssertBreak(pCur->pszAddress);
                /* The top byte of the location id is used as the bus number here. */
                pCur->bBus = u32LocationId >> 24;
                AssertBreak(darwinDictGetU8(PropsRef, CFSTR("PortNum"), &pCur->bPort));
                uint8_t bSpeed;
                AssertBreak(darwinDictGetU8(PropsRef, CFSTR(kUSBDevicePropertySpeed), &bSpeed));
                Assert(bSpeed <= 2);
                pCur->enmSpeed = bSpeed == 2 ? USBDEVICESPEED_HIGH
                               : bSpeed == 1 ? USBDEVICESPEED_FULL
                               : bSpeed == 0 ? USBDEVICESPEED_LOW
                                             : USBDEVICESPEED_UNKNOWN;

                /*
                 * Optional.
                 * There are some nameless devices in the iMac, apply names to them.
                 */
                darwinDictDupString(PropsRef, CFSTR("USB Vendor Name"), (char **)&pCur->pszManufacturer);
                if (    !pCur->pszManufacturer
                    &&  pCur->idVendor == kIOUSBVendorIDAppleComputer)
                    pCur->pszManufacturer = RTStrDup("Apple Computer, Inc.");
                darwinDictDupString(PropsRef, CFSTR("USB Product Name"), (char **)&pCur->pszProduct);
                if (    !pCur->pszProduct
                    &&  pCur->bDeviceClass == 224 /* Wireless */
                    &&  pCur->bDeviceSubClass == 1 /* Radio Frequency */
                    &&  pCur->bDeviceProtocol == 1 /* Bluetooth */)
                    pCur->pszProduct = RTStrDup("Bluetooth");
                darwinDictDupString(PropsRef, CFSTR("USB Serial Number"), (char **)&pCur->pszSerialNumber);

#if 0 /* leave the remainder as zero for now. */
                /* NOTE(review): dead code - ppUSBDeviceInterface/cRefs below don't match
                   the declared ppUSBDevI/cReft; fix the identifiers before re-enabling. */
                /*
                 * Create a plugin interface for the service and query its USB Device interface.
                 */
                SInt32 Score = 0;
                IOCFPlugInInterface **ppPlugInInterface = NULL;
                rc = IOCreatePlugInInterfaceForService(USBDevice, kIOUSBDeviceUserClientTypeID,
                                                       kIOCFPlugInInterfaceID, &ppPlugInInterface, &Score);
                if (rc == kIOReturnSuccess)
                {
                    IOUSBDeviceInterface245 **ppUSBDevI = NULL;
                    HRESULT hrc = (*ppPlugInInterface)->QueryInterface(ppPlugInInterface,
                                                                      CFUUIDGetUUIDBytes(kIOUSBDeviceInterfaceID245),
                                                                      (LPVOID *)&ppUSBDevI);
                    rc = IODestroyPlugInInterface(ppPlugInInterface); Assert(rc == kIOReturnSuccess);
                    ppPlugInInterface = NULL;
                    if (hrc == S_OK)
                    {
                        /** @todo enumerate configurations and interfaces if we actually need them. */
                        //IOReturn (*GetNumberOfConfigurations)(void *self, UInt8 *numConfig);
                        //IOReturn (*GetConfigurationDescriptorPtr)(void *self, UInt8 configIndex, IOUSBConfigurationDescriptorPtr *desc);
                        //IOReturn (*CreateInterfaceIterator)(void *self, IOUSBFindInterfaceRequest *req, io_iterator_t *iter);
                    }
                    long cReft = (*ppUSBDeviceInterface)->Release(ppUSBDeviceInterface); MY_CHECK_CREFS(cRefs);
                }
#endif
                /*
                 * Try determine the state.
                 */
                darwinDeterminUSBDeviceState(pCur, USBDevice, PropsRef);

                /*
                 * We're good. Link the device.
                 */
                pCur->pPrev = pTail;
                if (pTail)
                    pTail = pTail->pNext = pCur;
                else
                    pTail = pHead = pCur;
                fOk = true;
            } while (0);

            /* cleanup on failure / skipped device. */
            if (!fOk && pCur)
                DarwinFreeUSBDeviceFromIOKit(pCur);

            CFRelease(PropsRef);
        }
        else
            AssertMsgFailed(("krc=%#x\n", krc));

        IOObjectRelease(USBDevice);
        i++;
    }

    IOObjectRelease(USBDevices);
    //DARWIN_IOKIT_LOG_FLUSH();

    /*
     * Some post processing. There are a couple of things we have to
     * make 100% sure about, and that is that the (Apple) keyboard
     * and mouse most likely to be in use by the user aren't available
     * for capturing. If there is no Apple mouse or keyboard we'll
     * take the first one from another vendor.
     */
    /* As it turns out, the HID service will take all keyboards and mice
       and we're not currently able to seize them. */
    PUSBDEVICE pMouse = NULL;
    PUSBDEVICE pKeyboard = NULL;
    for (PUSBDEVICE pCur = pHead; pCur; pCur = pCur->pNext)
        if (pCur->idVendor == kIOUSBVendorIDAppleComputer)
        {
            /*
             * This test is a bit rough, should check device class/protocol but
             * we don't have interface info yet so that might be a bit tricky.
             */
            if (    (   !pKeyboard
                     || pKeyboard->idVendor != kIOUSBVendorIDAppleComputer)
                &&  pCur->pszProduct
                &&  strstr(pCur->pszProduct, " Keyboard"))
                pKeyboard = pCur;
            else if (    (   !pMouse
                          || pMouse->idVendor != kIOUSBVendorIDAppleComputer)
                     &&  pCur->pszProduct
                     &&  strstr(pCur->pszProduct, " Mouse")
                    )
                pMouse = pCur;
        }
        else if (!pKeyboard || !pMouse)
        {
            if (    pCur->bDeviceClass == 3         /* HID */
                &&  pCur->bDeviceProtocol == 1      /* Keyboard */)
                pKeyboard = pCur;
            else if (    pCur->bDeviceClass == 3    /* HID */
                     &&  pCur->bDeviceProtocol == 2 /* Mouse */)
                pMouse = pCur;
            /** @todo examine interfaces */
        }

    if (pKeyboard)
        pKeyboard->enmState = USBDEVICESTATE_USED_BY_HOST;
    if (pMouse)
        pMouse->enmState = USBDEVICESTATE_USED_BY_HOST;

    return pHead;
}


/**
 * Triggers re-enumeration of a device.
 *
 * @returns VBox status code.
 * @param   pCur    The USBDEVICE structure for the device.
*/ int DarwinReEnumerateUSBDevice(PCUSBDEVICE pCur) { int vrc; const char *pszAddress = pCur->pszAddress; AssertPtrReturn(pszAddress, VERR_INVALID_POINTER); AssertReturn(darwinOpenMasterPort(), VERR_GENERAL_FAILURE); /* * This code is a short version of the Open method in USBProxyDevice-darwin.cpp stuff. * Fixes made to this code probably applies there too! */ CFMutableDictionaryRef RefMatchingDict = IOServiceMatching(kIOUSBDeviceClassName); AssertReturn(RefMatchingDict, NULL); uint64_t u64SessionId = 0; uint32_t u32LocationId = 0; const char *psz = pszAddress; do { const char chValue = *psz; AssertReleaseReturn(psz[1] == '=', VERR_INTERNAL_ERROR); uint64_t u64Value; int rc = RTStrToUInt64Ex(psz + 2, (char **)&psz, 0, &u64Value); AssertReleaseRCReturn(rc, rc); AssertReleaseReturn(!*psz || *psz == ';', rc); switch (chValue) { case 'l': u32LocationId = (uint32_t)u64Value; break; case 's': u64SessionId = u64Value; break; case 'p': case 'v': { #if 0 /* Guess what, this doesn't 'ing work either! */ SInt32 i32 = (int16_t)u64Value; CFNumberRef Num = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i32); AssertBreak(Num); CFDictionarySetValue(RefMatchingDict, chValue == 'p' ? CFSTR(kUSBProductID) : CFSTR(kUSBVendorID), Num); CFRelease(Num); #endif break; } default: AssertReleaseMsgFailedReturn(("chValue=%#x\n", chValue), VERR_INTERNAL_ERROR); } if (*psz == ';') psz++; } while (*psz); io_iterator_t USBDevices = NULL; IOReturn irc = IOServiceGetMatchingServices(g_MasterPort, RefMatchingDict, &USBDevices); AssertMsgReturn(irc == kIOReturnSuccess, ("irc=%#x\n", irc), NULL); RefMatchingDict = NULL; /* the reference is consumed by IOServiceGetMatchingServices. 
*/ unsigned cMatches = 0; io_object_t USBDevice; while ((USBDevice = IOIteratorNext(USBDevices))) { cMatches++; CFMutableDictionaryRef PropsRef = 0; kern_return_t krc = IORegistryEntryCreateCFProperties(USBDevice, &PropsRef, kCFAllocatorDefault, kNilOptions); if (krc == KERN_SUCCESS) { uint64_t u64CurSessionId; uint32_t u32CurLocationId; if ( ( !u64SessionId || ( darwinDictGetU64(PropsRef, CFSTR("sessionID"), &u64CurSessionId) && u64CurSessionId == u64SessionId)) && ( !u32LocationId || ( darwinDictGetU32(PropsRef, CFSTR(kUSBDevicePropertyLocationID), &u32CurLocationId) && u32CurLocationId == u32LocationId)) ) { CFRelease(PropsRef); break; } CFRelease(PropsRef); } IOObjectRelease(USBDevice); } IOObjectRelease(USBDevices); USBDevices = NULL; if (!USBDevice) { LogRel(("USB: Device '%s' not found (%d pid+vid matches)\n", pszAddress, cMatches)); IOObjectRelease(USBDevices); return VERR_VUSB_DEVICE_NAME_NOT_FOUND; } /* * Create a plugin interface for the device and query its IOUSBDeviceInterface. */ SInt32 Score = 0; IOCFPlugInInterface **ppPlugInInterface = NULL; irc = IOCreatePlugInInterfaceForService(USBDevice, kIOUSBDeviceUserClientTypeID, kIOCFPlugInInterfaceID, &ppPlugInInterface, &Score); if (irc == kIOReturnSuccess) { IOUSBDeviceInterface245 **ppDevI = NULL; HRESULT hrc = (*ppPlugInInterface)->QueryInterface(ppPlugInInterface, CFUUIDGetUUIDBytes(kIOUSBDeviceInterfaceID245), (LPVOID *)&ppDevI); irc = IODestroyPlugInInterface(ppPlugInInterface); Assert(irc == kIOReturnSuccess); ppPlugInInterface = NULL; if (hrc == S_OK) { /* * Try open the device for exclusive access. */ irc = (*ppDevI)->USBDeviceOpenSeize(ppDevI); if (irc == kIOReturnExclusiveAccess) { RTThreadSleep(20); irc = (*ppDevI)->USBDeviceOpenSeize(ppDevI); } if (irc == kIOReturnSuccess) { /* * Re-enumerate the device and bail out. 
*/ irc = (*ppDevI)->USBDeviceReEnumerate(ppDevI, 0); if (irc == kIOReturnSuccess) vrc = VINF_SUCCESS; else { LogRel(("USB: Failed to open device '%s', plug-in creation failed with irc=%#x.\n", pszAddress, irc)); vrc = RTErrConvertFromDarwinIO(irc); } (*ppDevI)->USBDeviceClose(ppDevI); } else if (irc == kIOReturnExclusiveAccess) { LogRel(("USB: Device '%s' is being used by another process\n", pszAddress)); vrc = VERR_SHARING_VIOLATION; } else { LogRel(("USB: Failed to open device '%s', irc=%#x.\n", pszAddress, irc)); vrc = VERR_OPEN_FAILED; } } else { LogRel(("USB: Failed to create plugin interface for device '%s', hrc=%#x.\n", pszAddress, hrc)); vrc = VERR_OPEN_FAILED; } (*ppDevI)->Release(ppDevI); } else { LogRel(("USB: Failed to open device '%s', plug-in creation failed with irc=%#x.\n", pszAddress, irc)); vrc = RTErrConvertFromDarwinIO(irc); } return vrc; } #endif /* VBOX_WITH_USB */ /** * Enumerate the DVD drives returning a FIFO of device name strings. * * @returns Pointer to the head. * The caller is responsible for calling RTMemFree() on each of the nodes. */ PDARWINDVD DarwinGetDVDDrives(void) { AssertReturn(darwinOpenMasterPort(), NULL); /* * Create a matching dictionary for searching for DVD services in the IOKit. * * [If I understand this correctly, plain CDROMs doesn't show up as * IODVDServices. Too keep things simple, we will only support DVDs * until somebody complains about it and we get hardware to test it on. * (Unless I'm much mistaken, there aren't any (orignal) intel macs with * plain cdroms.)] */ CFMutableDictionaryRef RefMatchingDict = IOServiceMatching("IODVDServices"); AssertReturn(RefMatchingDict, NULL); /* * Perform the search and get a collection of DVD services. */ io_iterator_t DVDServices = NULL; IOReturn rc = IOServiceGetMatchingServices(g_MasterPort, RefMatchingDict, &DVDServices); AssertMsgReturn(rc == kIOReturnSuccess, ("rc=%d\n", rc), NULL); RefMatchingDict = NULL; /* the reference is consumed by IOServiceGetMatchingServices. 
*/ /* * Enumerate the DVD services. * (This enumeration must be identical to the one performed in DrvHostBase.cpp.) */ PDARWINDVD pHead = NULL; PDARWINDVD pTail = NULL; unsigned i = 0; io_object_t DVDService; while ((DVDService = IOIteratorNext(DVDServices)) != 0) { DARWIN_IOKIT_DUMP_OBJ(DVDService); /* * Get the properties we use to identify the DVD drive. * * While there is a (weird 12 byte) GUID, it isn't persistent * across boots. So, we have to use a combination of the * vendor name and product name properties with an optional * sequence number for identification. */ CFMutableDictionaryRef PropsRef = 0; kern_return_t krc = IORegistryEntryCreateCFProperties(DVDService, &PropsRef, kCFAllocatorDefault, kNilOptions); if (krc == KERN_SUCCESS) { /* Get the Device Characteristics dictionary. */ CFDictionaryRef DevCharRef = (CFDictionaryRef)CFDictionaryGetValue(PropsRef, CFSTR(kIOPropertyDeviceCharacteristicsKey)); if (DevCharRef) { /* The vendor name. */ char szVendor[128]; char *pszVendor = &szVendor[0]; CFTypeRef ValueRef = CFDictionaryGetValue(DevCharRef, CFSTR(kIOPropertyVendorNameKey)); if ( ValueRef && CFGetTypeID(ValueRef) == CFStringGetTypeID() && CFStringGetCString((CFStringRef)ValueRef, szVendor, sizeof(szVendor), kCFStringEncodingUTF8)) pszVendor = RTStrStrip(szVendor); else *pszVendor = '\0'; /* The product name. */ char szProduct[128]; char *pszProduct = &szProduct[0]; ValueRef = CFDictionaryGetValue(DevCharRef, CFSTR(kIOPropertyProductNameKey)); if ( ValueRef && CFGetTypeID(ValueRef) == CFStringGetTypeID() && CFStringGetCString((CFStringRef)ValueRef, szProduct, sizeof(szProduct), kCFStringEncodingUTF8)) pszProduct = RTStrStrip(szProduct); else *pszProduct = '\0'; /* Construct the name and check for duplicates. */ char szName[256 + 32]; if (*pszVendor || *pszProduct) { if (*pszVendor && *pszProduct) RTStrPrintf(szName, sizeof(szName), "%s %s", pszVendor, pszProduct); else strcpy(szName, *pszVendor ? 
pszVendor : pszProduct); for (PDARWINDVD pCur = pHead; pCur; pCur = pCur->pNext) { if (!strcmp(szName, pCur->szName)) { if (*pszVendor && *pszProduct) RTStrPrintf(szName, sizeof(szName), "%s %s (#%u)", pszVendor, pszProduct, i); else RTStrPrintf(szName, sizeof(szName), "%s %s (#%u)", *pszVendor ? pszVendor : pszProduct, i); break; } } } else RTStrPrintf(szName, sizeof(szName), "(#%u)", i); /* Create the device. */ size_t cbName = strlen(szName) + 1; PDARWINDVD pNew = (PDARWINDVD)RTMemAlloc(RT_OFFSETOF(DARWINDVD, szName[cbName])); if (pNew) { pNew->pNext = NULL; memcpy(pNew->szName, szName, cbName); if (pTail) pTail = pTail->pNext = pNew; else pTail = pHead = pNew; } } CFRelease(PropsRef); } else AssertMsgFailed(("krc=%#x\n", krc)); IOObjectRelease(DVDService); i++; } IOObjectRelease(DVDServices); return pHead; } /** * Enumerate the ethernet capable network devices returning a FIFO of them. * * @returns Pointer to the head. */ PDARWINETHERNIC DarwinGetEthernetControllers(void) { AssertReturn(darwinOpenMasterPort(), NULL); /* * Create a matching dictionary for searching for ethernet controller * services in the IOKit. * * For some really stupid reason I don't get all the controllers if I look for * objects that are instances of IOEthernetController or its descendants (only * get the AirPort on my mac pro). But fortunately using IOEthernetInterface * seems to work. Weird s**t! */ //CFMutableDictionaryRef RefMatchingDict = IOServiceMatching("IOEthernetController"); - this doesn't work :-( CFMutableDictionaryRef RefMatchingDict = IOServiceMatching("IOEthernetInterface"); AssertReturn(RefMatchingDict, NULL); /* * Perform the search and get a collection of ethernet controller services. */ io_iterator_t EtherIfServices = NULL; IOReturn rc = IOServiceGetMatchingServices(g_MasterPort, RefMatchingDict, &EtherIfServices); AssertMsgReturn(rc == kIOReturnSuccess, ("rc=%d\n", rc), NULL); RefMatchingDict = NULL; /* the reference is consumed by IOServiceGetMatchingServices. 
*/ /* * Get a copy of the current network interfaces from the system configuration service. * We'll use this for looking up the proper interface names. */ CFArrayRef IfsRef = SCNetworkInterfaceCopyAll(); CFIndex cIfs = IfsRef ? CFArrayGetCount(IfsRef) : 0; /* * Get the current preferences and make a copy of the network services so we * can look up the right interface names. The IfsRef is just for fallback. */ CFArrayRef ServicesRef = NULL; CFIndex cServices = 0; SCPreferencesRef PrefsRef = SCPreferencesCreate(kCFAllocatorDefault, CFSTR("org.virtualbox.VBoxSVC"), NULL); if (PrefsRef) { SCNetworkSetRef SetRef = SCNetworkSetCopyCurrent(PrefsRef); CFRelease(PrefsRef); if (SetRef) { ServicesRef = SCNetworkSetCopyServices(SetRef); CFRelease(SetRef); cServices = ServicesRef ? CFArrayGetCount(ServicesRef) : 0; } } /* * Enumerate the ethernet controller services. */ PDARWINETHERNIC pHead = NULL; PDARWINETHERNIC pTail = NULL; io_object_t EtherIfService; while ((EtherIfService = IOIteratorNext(EtherIfServices)) != 0) { /* * Dig up the parent, meaning the IOEthernetController. */ io_object_t EtherNICService; kern_return_t krc = IORegistryEntryGetParentEntry(EtherIfService, kIOServicePlane, &EtherNICService); /*krc = IORegistryEntryGetChildEntry(EtherNICService, kIOServicePlane, &EtherIfService); */ if (krc == KERN_SUCCESS) { DARWIN_IOKIT_DUMP_OBJ(EtherNICService); /* * Get the properties we use to identify and name the Ethernet NIC. * We need the both the IOEthernetController and it's IONetworkInterface child. */ CFMutableDictionaryRef PropsRef = 0; krc = IORegistryEntryCreateCFProperties(EtherNICService, &PropsRef, kCFAllocatorDefault, kNilOptions); if (krc == KERN_SUCCESS) { CFMutableDictionaryRef IfPropsRef = 0; krc = IORegistryEntryCreateCFProperties(EtherIfService, &IfPropsRef, kCFAllocatorDefault, kNilOptions); if (krc == KERN_SUCCESS) { /* * Gather the required data. * We'll create a UUID from the MAC address and the BSD name. 
*/ char szTmp[256]; do { /* Check if airport (a bit heuristical - it's com.apple.driver.AirPortBrcm43xx here). */ darwinDictGetString(PropsRef, CFSTR("CFBundleIdentifier"), szTmp, sizeof(szTmp)); bool fWireless; bool fAirPort = fWireless = strstr(szTmp, ".AirPort") != NULL; /* Check if it's USB. */ darwinDictGetString(PropsRef, CFSTR("IOProviderClass"), szTmp, sizeof(szTmp)); bool fUSB = strstr(szTmp, "USB") != NULL; /* Is it builtin? */ bool fBuiltin; darwinDictGetBool(IfPropsRef, CFSTR("IOBuiltin"), &fBuiltin); /* Is it the primary interface */ bool fPrimaryIf; darwinDictGetBool(IfPropsRef, CFSTR("IOPrimaryInterface"), &fPrimaryIf); /* Get the MAC address. */ RTMAC Mac; AssertBreak(darwinDictGetData(PropsRef, CFSTR("IOMACAddress"), &Mac, sizeof(Mac))); /* The BSD Name from the interface dictionary. */ char szBSDName[RT_SIZEOFMEMB(DARWINETHERNIC, szBSDName)]; AssertBreak(darwinDictGetString(IfPropsRef, CFSTR("BSD Name"), szBSDName, sizeof(szBSDName))); /* Check if it's really wireless. */ if ( darwinDictIsPresent(IfPropsRef, CFSTR("IO80211CountryCode")) || darwinDictIsPresent(IfPropsRef, CFSTR("IO80211DriverVersion")) || darwinDictIsPresent(IfPropsRef, CFSTR("IO80211HardwareVersion")) || darwinDictIsPresent(IfPropsRef, CFSTR("IO80211Locale"))) fWireless = true; else fAirPort = fWireless = false; /** @todo IOPacketFilters / IONetworkFilterGroup? */ /* * Create the interface name. * * Note! The ConsoleImpl2.cpp code ASSUMES things about the name. It is also * stored in the VM config files. 
(really bright idea) */ strcpy(szTmp, szBSDName); char *psz = strchr(szTmp, '\0'); *psz++ = ':'; *psz++ = ' '; size_t cchLeft = sizeof(szTmp) - (psz - &szTmp[0]) - (sizeof(" (Wireless)") - 1); bool fFound = false; CFIndex i; /* look it up among the current services */ for (i = 0; i < cServices; i++) { SCNetworkServiceRef ServiceRef = (SCNetworkServiceRef)CFArrayGetValueAtIndex(ServicesRef, i); SCNetworkInterfaceRef IfRef = SCNetworkServiceGetInterface(ServiceRef); if (IfRef) { CFStringRef BSDNameRef = SCNetworkInterfaceGetBSDName(IfRef); if ( BSDNameRef && CFStringGetCString(BSDNameRef, psz, cchLeft, kCFStringEncodingUTF8) && !strcmp(psz, szBSDName)) { CFStringRef ServiceNameRef = SCNetworkServiceGetName(ServiceRef); if ( ServiceNameRef && CFStringGetCString(ServiceNameRef, psz, cchLeft, kCFStringEncodingUTF8)) { fFound = true; break; } } } } /* Look it up in the interface list. */ if (!fFound) for (i = 0; i < cIfs; i++) { SCNetworkInterfaceRef IfRef = (SCNetworkInterfaceRef)CFArrayGetValueAtIndex(IfsRef, i); CFStringRef BSDNameRef = SCNetworkInterfaceGetBSDName(IfRef); if ( BSDNameRef && CFStringGetCString(BSDNameRef, psz, cchLeft, kCFStringEncodingUTF8) && !strcmp(psz, szBSDName)) { CFStringRef DisplayNameRef = SCNetworkInterfaceGetLocalizedDisplayName(IfRef); if ( DisplayNameRef && CFStringGetCString(DisplayNameRef, psz, cchLeft, kCFStringEncodingUTF8)) { fFound = true; break; } } } /* Generate a half plausible name if we for some silly reason didn't find the interface. */ if (!fFound) RTStrPrintf(szTmp, sizeof(szTmp), "%s: %s%s(?)", szBSDName, fUSB ? "USB " : "", fWireless ? fAirPort ? "AirPort " : "Wireless" : "Ethernet"); /* If we did find it and it's wireless but without "AirPort" or "Wireless", fix it */ else if ( fWireless && !strstr(psz, "AirPort") && !strstr(psz, "Wireless")) strcat(szTmp, fAirPort ? " (AirPort)" : " (Wireless)"); /* * Create the list entry. 
*/ DARWIN_IOKIT_LOG(("Found: if=%s mac=%.6Rhxs fWireless=%RTbool fAirPort=%RTbool fBuiltin=%RTbool fPrimaryIf=%RTbool fUSB=%RTbool\n", szBSDName, &Mac, fWireless, fAirPort, fBuiltin, fPrimaryIf, fUSB)); size_t cchName = strlen(szTmp); PDARWINETHERNIC pNew = (PDARWINETHERNIC)RTMemAlloc(RT_OFFSETOF(DARWINETHERNIC, szName[cchName + 1])); if (pNew) { strncpy(pNew->szBSDName, szBSDName, sizeof(pNew->szBSDName)); /* the '\0' padding is intentional! */ RTUuidClear(&pNew->Uuid); memcpy(&pNew->Uuid, pNew->szBSDName, RT_MIN(sizeof(pNew->szBSDName), sizeof(pNew->Uuid))); pNew->Uuid.Gen.u8ClockSeqHiAndReserved = (pNew->Uuid.Gen.u8ClockSeqHiAndReserved & 0x3f) | 0x80; pNew->Uuid.Gen.u16TimeHiAndVersion = (pNew->Uuid.Gen.u16TimeHiAndVersion & 0x0fff) | 0x4000; pNew->Uuid.Gen.au8Node[0] = Mac.au8[0]; pNew->Uuid.Gen.au8Node[1] = Mac.au8[1]; pNew->Uuid.Gen.au8Node[2] = Mac.au8[2]; pNew->Uuid.Gen.au8Node[3] = Mac.au8[3]; pNew->Uuid.Gen.au8Node[4] = Mac.au8[4]; pNew->Uuid.Gen.au8Node[5] = Mac.au8[5]; pNew->Mac = Mac; pNew->fWireless = fWireless; pNew->fAirPort = fAirPort; pNew->fBuiltin = fBuiltin; pNew->fUSB = fUSB; pNew->fPrimaryIf = fPrimaryIf; memcpy(pNew->szName, szTmp, cchName + 1); /* * Link it into the list, keep the list sorted by fPrimaryIf and the BSD name. */ if (pTail) { PDARWINETHERNIC pPrev = pTail; if (strcmp(pNew->szBSDName, pPrev->szBSDName) < 0) { pPrev = NULL; for (PDARWINETHERNIC pCur = pHead; pCur; pPrev = pCur, pCur = pCur->pNext) if ( (int)pNew->fPrimaryIf - (int)pCur->fPrimaryIf > 0 || ( (int)pNew->fPrimaryIf - (int)pCur->fPrimaryIf == 0 && strcmp(pNew->szBSDName, pCur->szBSDName) >= 0)) break; } if (pPrev) { /* tail or in list. 
*/ pNew->pNext = pPrev->pNext; pPrev->pNext = pNew; if (pPrev == pTail) pTail = pNew; } else { /* head */ pNew->pNext = pHead; pHead = pNew; } } else { /* empty list */ pNew->pNext = NULL; pTail = pHead = pNew; } } } while (0); CFRelease(IfPropsRef); } CFRelease(PropsRef); } IOObjectRelease(EtherNICService); } else AssertMsgFailed(("krc=%#x\n", krc)); IOObjectRelease(EtherIfService); } IOObjectRelease(EtherIfServices); if (ServicesRef) CFRelease(ServicesRef); if (IfsRef) CFRelease(IfsRef); return pHead; } #ifdef STANDALONE_TESTCASE /** * This file can optionally be compiled into a testcase, this is the main function. * To build: * g++ -I ../../../../include -D IN_RING3 iokit.cpp ../../../../out/darwin.x86/debug/lib/RuntimeR3.a ../../../../out/darwin.x86/debug/lib/SUPR3.a ../../../../out/darwin.x86/debug/lib/RuntimeR3.a ../../../../out/darwin.x86/debug/lib/VBox-kStuff.a ../../../../out/darwin.x86/debug/lib/RuntimeR3.a -framework CoreFoundation -framework IOKit -framework SystemConfiguration -liconv -D STANDALONE_TESTCASE -o iokit -g && ./iokit */ int main(int argc, char **argv) { RTR3InitExe(argc, &argv, 0); if (1) { /* * Network preferences. */ RTPrintf("Preferences: Network Services\n"); SCPreferencesRef PrefsRef = SCPreferencesCreate(kCFAllocatorDefault, CFSTR("org.virtualbox.VBoxSVC"), NULL); if (PrefsRef) { CFDictionaryRef NetworkServiceRef = (CFDictionaryRef)SCPreferencesGetValue(PrefsRef, kSCPrefNetworkServices); darwinDumpDict(NetworkServiceRef, 4); CFRelease(PrefsRef); } } if (1) { /* * Network services interfaces in the current config. 
*/ RTPrintf("Preferences: Network Service Interfaces\n"); SCPreferencesRef PrefsRef = SCPreferencesCreate(kCFAllocatorDefault, CFSTR("org.virtualbox.VBoxSVC"), NULL); if (PrefsRef) { SCNetworkSetRef SetRef = SCNetworkSetCopyCurrent(PrefsRef); if (SetRef) { CFArrayRef ServicesRef = SCNetworkSetCopyServices(SetRef); CFIndex cServices = CFArrayGetCount(ServicesRef); for (CFIndex i = 0; i < cServices; i++) { SCNetworkServiceRef ServiceRef = (SCNetworkServiceRef)CFArrayGetValueAtIndex(ServicesRef, i); char szServiceName[128] = {0}; CFStringGetCString(SCNetworkServiceGetName(ServiceRef), szServiceName, sizeof(szServiceName), kCFStringEncodingUTF8); SCNetworkInterfaceRef IfRef = SCNetworkServiceGetInterface(ServiceRef); char szBSDName[16] = {0}; if (SCNetworkInterfaceGetBSDName(IfRef)) CFStringGetCString(SCNetworkInterfaceGetBSDName(IfRef), szBSDName, sizeof(szBSDName), kCFStringEncodingUTF8); char szDisplayName[128] = {0}; if (SCNetworkInterfaceGetLocalizedDisplayName(IfRef)) CFStringGetCString(SCNetworkInterfaceGetLocalizedDisplayName(IfRef), szDisplayName, sizeof(szDisplayName), kCFStringEncodingUTF8); RTPrintf(" #%u ServiceName=\"%s\" IfBSDName=\"%s\" IfDisplayName=\"%s\"\n", i, szServiceName, szBSDName, szDisplayName); } CFRelease(ServicesRef); CFRelease(SetRef); } CFRelease(PrefsRef); } } if (1) { /* * Network interfaces. 
*/ RTPrintf("Preferences: Network Interfaces\n"); CFArrayRef IfsRef = SCNetworkInterfaceCopyAll(); if (IfsRef) { CFIndex cIfs = CFArrayGetCount(IfsRef); for (CFIndex i = 0; i < cIfs; i++) { SCNetworkInterfaceRef IfRef = (SCNetworkInterfaceRef)CFArrayGetValueAtIndex(IfsRef, i); char szBSDName[16] = {0}; if (SCNetworkInterfaceGetBSDName(IfRef)) CFStringGetCString(SCNetworkInterfaceGetBSDName(IfRef), szBSDName, sizeof(szBSDName), kCFStringEncodingUTF8); char szDisplayName[128] = {0}; if (SCNetworkInterfaceGetLocalizedDisplayName(IfRef)) CFStringGetCString(SCNetworkInterfaceGetLocalizedDisplayName(IfRef), szDisplayName, sizeof(szDisplayName), kCFStringEncodingUTF8); RTPrintf(" #%u BSDName=\"%s\" DisplayName=\"%s\"\n", i, szBSDName, szDisplayName); } CFRelease(IfsRef); } } if (1) { /* * Get and display the ethernet controllers. */ RTPrintf("Ethernet controllers:\n"); PDARWINETHERNIC pEtherNICs = DarwinGetEthernetControllers(); for (PDARWINETHERNIC pCur = pEtherNICs; pCur; pCur = pCur->pNext) { RTPrintf("%s\n", pCur->szName); RTPrintf(" szBSDName=%s\n", pCur->szBSDName); RTPrintf(" UUID=%RTuuid\n", &pCur->Uuid); RTPrintf(" Mac=%.6Rhxs\n", &pCur->Mac); RTPrintf(" fWireless=%RTbool\n", pCur->fWireless); RTPrintf(" fAirPort=%RTbool\n", pCur->fAirPort); RTPrintf(" fBuiltin=%RTbool\n", pCur->fBuiltin); RTPrintf(" fUSB=%RTbool\n", pCur->fUSB); RTPrintf(" fPrimaryIf=%RTbool\n", pCur->fPrimaryIf); } } return 0; } #endif
gpl-2.0
bogzybodo/DarkBlade-Core
dep/mysqllite/mysys/my_winfile.c
2
17816
/* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* The purpose of this file is to provide implementation of file IO routines on Windows that can be thought as drop-in replacement for corresponding C runtime functionality. Compared to Windows CRT, this one - does not have the same file descriptor limitation (default is 16384 and can be increased further, whereas CRT poses a hard limit of 2048 file descriptors) - the file operations are not serialized - positional IO pread/pwrite is ported here. - no text mode for files, all IO is "binary" Naming convention: All routines are prefixed with my_win_, e.g Posix open() is implemented with my_win_open() Implemented are - POSIX routines(e.g open, read, lseek ...) - Some ANSI C stream routines (fopen, fdopen, fileno, fclose) - Windows CRT equvalients (my_get_osfhandle, open_osfhandle) Worth to note: - File descriptors used here are located in a range that is not compatible with CRT on purpose. Attempt to use a file descriptor from Windows CRT library range in my_win_* function will be punished with DBUG_ASSERT() - File streams (FILE *) are actually from the C runtime. 
  The routines provided here are useful only in scenarios that use
  low-level IO with my_win_fileno()
*/

#ifdef _WIN32

#include "mysys_priv.h"
#include <share.h>
#include <sys/stat.h>

/*
  Associates a file descriptor with an existing operating-system file handle.

  Scans the my_file_info table (under THR_LOCK_open) for a free slot at or
  above MY_FILE_MIN and records the handle and open flags there.

  RETURN VALUE
    The new descriptor (table index) on success,
    -1 with errno= EMFILE when the table is full.
*/
File my_open_osfhandle(HANDLE handle, int oflag)
{
  int offset= -1;
  uint i;
  DBUG_ENTER("my_open_osfhandle");

  mysql_mutex_lock(&THR_LOCK_open);
  for (i= MY_FILE_MIN; i < my_file_limit;i++)
  {
    if (my_file_info[i].fhandle == 0)
    {
      struct st_my_file_info *finfo= &(my_file_info[i]);
      finfo->type= FILE_BY_OPEN;
      finfo->fhandle= handle;
      finfo->oflag= oflag;
      offset= i;
      break;
    }
  }
  mysql_mutex_unlock(&THR_LOCK_open);
  if (offset == -1)
    errno= EMFILE; /* too many file handles open */
  DBUG_RETURN(offset);
}


/*
  Mark a descriptor slot as free again.
  NOTE(review): done without taking THR_LOCK_open - presumably callers
  guarantee exclusive access to the slot; confirm before relying on it.
*/
static void invalidate_fd(File fd)
{
  DBUG_ENTER("invalidate_fd");
  DBUG_ASSERT(fd >= MY_FILE_MIN && fd < (int)my_file_limit);
  my_file_info[fd].fhandle= 0;
  DBUG_VOID_RETURN;
}


/* Get Windows handle for a file descriptor */
HANDLE my_get_osfhandle(File fd)
{
  DBUG_ENTER("my_get_osfhandle");
  DBUG_ASSERT(fd >= MY_FILE_MIN && fd < (int)my_file_limit);
  DBUG_RETURN(my_file_info[fd].fhandle);
}


/* Get the open flags recorded for a file descriptor by my_open_osfhandle(). */
static int my_get_open_flags(File fd)
{
  DBUG_ENTER("my_get_open_flags");
  DBUG_ASSERT(fd >= MY_FILE_MIN && fd < (int)my_file_limit);
  DBUG_RETURN(my_file_info[fd].oflag);
}


/*
  Open a file with sharing. Similar to _sopen() from libc, but allows managing
  share delete on win32

  SYNOPSIS
  my_win_sopen()
  path    file name
  oflag   operation flags
  shflag  share flag
  pmode   permission flags

  RETURN VALUE
  File descriptor of opened file if success
  -1 and sets errno if fails.
*/ File my_win_sopen(const char *path, int oflag, int shflag, int pmode) { int fh; /* handle of opened file */ int mask; HANDLE osfh; /* OS handle of opened file */ DWORD fileaccess; /* OS file access (requested) */ DWORD fileshare; /* OS file sharing mode */ DWORD filecreate; /* OS method of opening/creating */ DWORD fileattrib; /* OS file attribute flags */ SECURITY_ATTRIBUTES SecurityAttributes; DBUG_ENTER("my_win_sopen"); if (check_if_legal_filename(path)) { errno= EACCES; DBUG_RETURN(-1); } SecurityAttributes.nLength= sizeof(SecurityAttributes); SecurityAttributes.lpSecurityDescriptor= NULL; SecurityAttributes.bInheritHandle= !(oflag & _O_NOINHERIT); /* decode the access flags */ switch (oflag & (_O_RDONLY | _O_WRONLY | _O_RDWR)) { case _O_RDONLY: /* read access */ fileaccess= GENERIC_READ; break; case _O_WRONLY: /* write access */ fileaccess= GENERIC_WRITE; break; case _O_RDWR: /* read and write access */ fileaccess= GENERIC_READ | GENERIC_WRITE; break; default: /* error, bad oflag */ errno= EINVAL; DBUG_RETURN(-1); } /* decode sharing flags */ switch (shflag) { case _SH_DENYRW: /* exclusive access except delete */ fileshare= FILE_SHARE_DELETE; break; case _SH_DENYWR: /* share read and delete access */ fileshare= FILE_SHARE_READ | FILE_SHARE_DELETE; break; case _SH_DENYRD: /* share write and delete access */ fileshare= FILE_SHARE_WRITE | FILE_SHARE_DELETE; break; case _SH_DENYNO: /* share read, write and delete access */ fileshare= FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE; break; case _SH_DENYRWD: /* exclusive access */ fileshare= 0L; break; case _SH_DENYWRD: /* share read access */ fileshare= FILE_SHARE_READ; break; case _SH_DENYRDD: /* share write access */ fileshare= FILE_SHARE_WRITE; break; case _SH_DENYDEL: /* share read and write access */ fileshare= FILE_SHARE_READ | FILE_SHARE_WRITE; break; default: /* error, bad shflag */ errno= EINVAL; DBUG_RETURN(-1); } /* decode open/create method flags */ switch (oflag & (_O_CREAT | _O_EXCL | 
_O_TRUNC)) { case 0: case _O_EXCL: /* ignore EXCL w/o CREAT */ filecreate= OPEN_EXISTING; break; case _O_CREAT: filecreate= OPEN_ALWAYS; break; case _O_CREAT | _O_EXCL: case _O_CREAT | _O_TRUNC | _O_EXCL: filecreate= CREATE_NEW; break; case _O_TRUNC: case _O_TRUNC | _O_EXCL: /* ignore EXCL w/o CREAT */ filecreate= TRUNCATE_EXISTING; break; case _O_CREAT | _O_TRUNC: filecreate= CREATE_ALWAYS; break; default: /* this can't happen ... all cases are covered */ errno= EINVAL; DBUG_RETURN(-1); } /* decode file attribute flags if _O_CREAT was specified */ fileattrib= FILE_ATTRIBUTE_NORMAL; /* default */ if (oflag & _O_CREAT) { _umask((mask= _umask(0))); if (!((pmode & ~mask) & _S_IWRITE)) fileattrib= FILE_ATTRIBUTE_READONLY; } /* Set temporary file (delete-on-close) attribute if requested. */ if (oflag & _O_TEMPORARY) { fileattrib|= FILE_FLAG_DELETE_ON_CLOSE; fileaccess|= DELETE; } /* Set temporary file (delay-flush-to-disk) attribute if requested.*/ if (oflag & _O_SHORT_LIVED) fileattrib|= FILE_ATTRIBUTE_TEMPORARY; /* Set sequential or random access attribute if requested. */ if (oflag & _O_SEQUENTIAL) fileattrib|= FILE_FLAG_SEQUENTIAL_SCAN; else if (oflag & _O_RANDOM) fileattrib|= FILE_FLAG_RANDOM_ACCESS; /* try to open/create the file */ if ((osfh= CreateFile(path, fileaccess, fileshare, &SecurityAttributes, filecreate, fileattrib, NULL)) == INVALID_HANDLE_VALUE) { /* OS call to open/create file failed! map the error, release the lock, and return -1. note that it's not necessary to call _free_osfhnd (it hasn't been used yet). 
*/ my_osmaperr(GetLastError()); /* map error */ DBUG_RETURN(-1); /* return error to caller */ } if ((fh= my_open_osfhandle(osfh, oflag & (_O_APPEND | _O_RDONLY | _O_TEXT))) == -1) { CloseHandle(osfh); } DBUG_RETURN(fh); /* return handle */ } File my_win_open(const char *path, int flags) { DBUG_ENTER("my_win_open"); DBUG_RETURN(my_win_sopen((char *) path, flags | _O_BINARY, _SH_DENYNO, _S_IREAD | S_IWRITE)); } int my_win_close(File fd) { DBUG_ENTER("my_win_close"); if (CloseHandle(my_get_osfhandle(fd))) { invalidate_fd(fd); DBUG_RETURN(0); } my_osmaperr(GetLastError()); DBUG_RETURN(-1); } size_t my_win_pread(File Filedes, uchar *Buffer, size_t Count, my_off_t offset) { DWORD nBytesRead; HANDLE hFile; OVERLAPPED ov= {0}; LARGE_INTEGER li; DBUG_ENTER("my_win_pread"); if (!Count) DBUG_RETURN(0); #ifdef _WIN64 if (Count > UINT_MAX) Count= UINT_MAX; #endif hFile= (HANDLE)my_get_osfhandle(Filedes); li.QuadPart= offset; ov.Offset= li.LowPart; ov.OffsetHigh= li.HighPart; if (!ReadFile(hFile, Buffer, (DWORD)Count, &nBytesRead, &ov)) { DWORD lastError= GetLastError(); /* ERROR_BROKEN_PIPE is returned when no more data coming through e.g. a command pipe in windows : see MSDN on ReadFile. */ if (lastError == ERROR_HANDLE_EOF || lastError == ERROR_BROKEN_PIPE) DBUG_RETURN(0); /*return 0 at EOF*/ my_osmaperr(lastError); DBUG_RETURN((size_t)-1); } DBUG_RETURN(nBytesRead); } size_t my_win_read(File Filedes, uchar *Buffer, size_t Count) { DWORD nBytesRead; HANDLE hFile; DBUG_ENTER("my_win_read"); if (!Count) DBUG_RETURN(0); #ifdef _WIN64 if (Count > UINT_MAX) Count= UINT_MAX; #endif hFile= (HANDLE)my_get_osfhandle(Filedes); if (!ReadFile(hFile, Buffer, (DWORD)Count, &nBytesRead, NULL)) { DWORD lastError= GetLastError(); /* ERROR_BROKEN_PIPE is returned when no more data coming through e.g. a command pipe in windows : see MSDN on ReadFile. 
*/ if (lastError == ERROR_HANDLE_EOF || lastError == ERROR_BROKEN_PIPE) DBUG_RETURN(0); /*return 0 at EOF*/ my_osmaperr(lastError); DBUG_RETURN((size_t)-1); } DBUG_RETURN(nBytesRead); } size_t my_win_pwrite(File Filedes, const uchar *Buffer, size_t Count, my_off_t offset) { DWORD nBytesWritten; HANDLE hFile; OVERLAPPED ov= {0}; LARGE_INTEGER li; DBUG_ENTER("my_win_pwrite"); DBUG_PRINT("my",("Filedes: %d, Buffer: %p, Count: %llu, offset: %llu", Filedes, Buffer, (ulonglong)Count, (ulonglong)offset)); if (!Count) DBUG_RETURN(0); #ifdef _WIN64 if (Count > UINT_MAX) Count= UINT_MAX; #endif hFile= (HANDLE)my_get_osfhandle(Filedes); li.QuadPart= offset; ov.Offset= li.LowPart; ov.OffsetHigh= li.HighPart; if (!WriteFile(hFile, Buffer, (DWORD)Count, &nBytesWritten, &ov)) { my_osmaperr(GetLastError()); DBUG_RETURN((size_t)-1); } else DBUG_RETURN(nBytesWritten); } my_off_t my_win_lseek(File fd, my_off_t pos, int whence) { LARGE_INTEGER offset; LARGE_INTEGER newpos; DBUG_ENTER("my_win_lseek"); /* Check compatibility of Windows and Posix seek constants */ compile_time_assert(FILE_BEGIN == SEEK_SET && FILE_CURRENT == SEEK_CUR && FILE_END == SEEK_END); offset.QuadPart= pos; if (!SetFilePointerEx(my_get_osfhandle(fd), offset, &newpos, whence)) { my_osmaperr(GetLastError()); newpos.QuadPart= -1; } DBUG_RETURN(newpos.QuadPart); } #ifndef FILE_WRITE_TO_END_OF_FILE #define FILE_WRITE_TO_END_OF_FILE 0xffffffff #endif size_t my_win_write(File fd, const uchar *Buffer, size_t Count) { DWORD nWritten; OVERLAPPED ov; OVERLAPPED *pov= NULL; HANDLE hFile; DBUG_ENTER("my_win_write"); DBUG_PRINT("my",("Filedes: %d, Buffer: %p, Count %llu", fd, Buffer, (ulonglong)Count)); if (!Count) DBUG_RETURN(0); #ifdef _WIN64 if (Count > UINT_MAX) Count= UINT_MAX; #endif if (my_get_open_flags(fd) & _O_APPEND) { /* Atomic append to the end of file is is done by special initialization of the OVERLAPPED structure. See MSDN WriteFile documentation for more info. 
*/ memset(&ov, 0, sizeof(ov)); ov.Offset= FILE_WRITE_TO_END_OF_FILE; ov.OffsetHigh= -1; pov= &ov; } hFile= my_get_osfhandle(fd); if (!WriteFile(hFile, Buffer, (DWORD)Count, &nWritten, pov)) { my_osmaperr(GetLastError()); DBUG_RETURN((size_t)-1); } DBUG_RETURN(nWritten); } int my_win_chsize(File fd, my_off_t newlength) { HANDLE hFile; LARGE_INTEGER length; DBUG_ENTER("my_win_chsize"); hFile= (HANDLE) my_get_osfhandle(fd); length.QuadPart= newlength; if (!SetFilePointerEx(hFile, length , NULL , FILE_BEGIN)) goto err; if (!SetEndOfFile(hFile)) goto err; DBUG_RETURN(0); err: my_osmaperr(GetLastError()); my_errno= errno; DBUG_RETURN(-1); } /* Get the file descriptor for stdin,stdout or stderr */ static File my_get_stdfile_descriptor(FILE *stream) { HANDLE hFile; DWORD nStdHandle; DBUG_ENTER("my_get_stdfile_descriptor"); if (stream == stdin) nStdHandle= STD_INPUT_HANDLE; else if (stream == stdout) nStdHandle= STD_OUTPUT_HANDLE; else if (stream == stderr) nStdHandle= STD_ERROR_HANDLE; else DBUG_RETURN(-1); hFile= GetStdHandle(nStdHandle); if (hFile != INVALID_HANDLE_VALUE) DBUG_RETURN(my_open_osfhandle(hFile, 0)); DBUG_RETURN(-1); } File my_win_fileno(FILE *file) { HANDLE hFile= (HANDLE)_get_osfhandle(fileno(file)); int retval= -1; uint i; DBUG_ENTER("my_win_fileno"); for (i= MY_FILE_MIN; i < my_file_limit; i++) { if (my_file_info[i].fhandle == hFile) { retval= i; break; } } if (retval == -1) /* try std stream */ DBUG_RETURN(my_get_stdfile_descriptor(file)); DBUG_RETURN(retval); } FILE *my_win_fopen(const char *filename, const char *type) { FILE *file; int flags= 0; DBUG_ENTER("my_win_open"); /* If we are not creating, then we need to use my_access to make sure the file exists since Windows doesn't handle files like "com1.sym" very well */ if (check_if_legal_filename(filename)) { errno= EACCES; DBUG_RETURN(NULL); } file= fopen(filename, type); if (!file) DBUG_RETURN(NULL); if (strchr(type,'a') != NULL) flags= O_APPEND; /* Register file handle in my_table_info. 
Necessary for my_fileno() */ if (my_open_osfhandle((HANDLE)_get_osfhandle(fileno(file)), flags) < 0) { fclose(file); DBUG_RETURN(NULL); } DBUG_RETURN(file); } FILE * my_win_fdopen(File fd, const char *type) { FILE *file; int crt_fd; int flags= 0; DBUG_ENTER("my_win_fdopen"); if (strchr(type,'a') != NULL) flags= O_APPEND; /* Convert OS file handle to CRT file descriptor and then call fdopen*/ crt_fd= _open_osfhandle((intptr_t)my_get_osfhandle(fd), flags); if (crt_fd < 0) file= NULL; else file= fdopen(crt_fd, type); DBUG_RETURN(file); } int my_win_fclose(FILE *file) { File fd; DBUG_ENTER("my_win_close"); fd= my_fileno(file); if (fd < 0) DBUG_RETURN(-1); if (fclose(file) < 0) DBUG_RETURN(-1); invalidate_fd(fd); DBUG_RETURN(0); } /* Quick and dirty my_fstat() implementation for Windows. Use CRT fstat on temporarily allocated file descriptor. Patch file size, because size that fstat returns is not reliable (may be outdated) */ int my_win_fstat(File fd, struct _stati64 *buf) { int crt_fd; int retval; HANDLE hFile, hDup; DBUG_ENTER("my_win_fstat"); hFile= my_get_osfhandle(fd); if (!DuplicateHandle( GetCurrentProcess(), hFile, GetCurrentProcess(), &hDup ,0,FALSE,DUPLICATE_SAME_ACCESS)) { my_osmaperr(GetLastError()); DBUG_RETURN(-1); } if ((crt_fd= _open_osfhandle((intptr_t)hDup,0)) < 0) DBUG_RETURN(-1); retval= _fstati64(crt_fd, buf); if (retval == 0) { /* File size returned by stat is not accurate (may be outdated), fix it*/ GetFileSizeEx(hDup, (PLARGE_INTEGER) (&(buf->st_size))); } _close(crt_fd); DBUG_RETURN(retval); } int my_win_stat( const char *path, struct _stati64 *buf) { DBUG_ENTER("my_win_stat"); if (_stati64( path, buf) == 0) { /* File size returned by stat is not accurate (may be outdated), fix it*/ WIN32_FILE_ATTRIBUTE_DATA data; if (GetFileAttributesEx(path, GetFileExInfoStandard, &data)) { LARGE_INTEGER li; li.LowPart= data.nFileSizeLow; li.HighPart= data.nFileSizeHigh; buf->st_size= li.QuadPart; } DBUG_RETURN(0); } DBUG_RETURN(-1); } int my_win_fsync(File 
fd) { DBUG_ENTER("my_win_fsync"); if (FlushFileBuffers(my_get_osfhandle(fd))) DBUG_RETURN(0); my_osmaperr(GetLastError()); DBUG_RETURN(-1); } int my_win_dup(File fd) { HANDLE hDup; DBUG_ENTER("my_win_dup"); if (DuplicateHandle(GetCurrentProcess(), my_get_osfhandle(fd), GetCurrentProcess(), &hDup, 0, FALSE, DUPLICATE_SAME_ACCESS)) { DBUG_RETURN(my_open_osfhandle(hDup, my_get_open_flags(fd))); } my_osmaperr(GetLastError()); DBUG_RETURN(-1); } #endif /*_WIN32*/
gpl-2.0
embecosm/avr-gcc
gcc/tree-vrp.c
2
290725
/* Support routines for Value Range Propagation (VRP). Copyright (C) 2005-2014 Free Software Foundation, Inc. Contributed by Diego Novillo <dnovillo@redhat.com>. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "flags.h" #include "tree.h" #include "stor-layout.h" #include "calls.h" #include "basic-block.h" #include "tree-ssa-alias.h" #include "internal-fn.h" #include "gimple-fold.h" #include "tree-eh.h" #include "gimple-expr.h" #include "is-a.h" #include "gimple.h" #include "gimple-iterator.h" #include "gimple-walk.h" #include "gimple-ssa.h" #include "tree-cfg.h" #include "tree-phinodes.h" #include "ssa-iterators.h" #include "stringpool.h" #include "tree-ssanames.h" #include "tree-ssa-loop-manip.h" #include "tree-ssa-loop-niter.h" #include "tree-ssa-loop.h" #include "tree-into-ssa.h" #include "tree-ssa.h" #include "tree-pass.h" #include "tree-dump.h" #include "gimple-pretty-print.h" #include "diagnostic-core.h" #include "intl.h" #include "cfgloop.h" #include "tree-scalar-evolution.h" #include "tree-ssa-propagate.h" #include "tree-chrec.h" #include "tree-ssa-threadupdate.h" #include "expr.h" #include "optabs.h" #include "tree-ssa-threadedge.h" #include "wide-int.h" /* Range of values that can be associated with an SSA_NAME after VRP has executed. */ struct value_range_d { /* Lattice value represented by this range. 
*/ enum value_range_type type; /* Minimum and maximum values represented by this range. These values should be interpreted as follows: - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must be NULL. - If TYPE == VR_RANGE then MIN holds the minimum value and MAX holds the maximum value of the range [MIN, MAX]. - If TYPE == ANTI_RANGE the variable is known to NOT take any values in the range [MIN, MAX]. */ tree min; tree max; /* Set of SSA names whose value ranges are equivalent to this one. This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */ bitmap equiv; }; typedef struct value_range_d value_range_t; #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL } /* Set of SSA names found live during the RPO traversal of the function for still active basic-blocks. */ static sbitmap *live; /* Return true if the SSA name NAME is live on the edge E. */ static bool live_on_edge (edge e, tree name) { return (live[e->dest->index] && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name))); } /* Local functions. */ static int compare_values (tree val1, tree val2); static int compare_values_warnv (tree val1, tree val2, bool *); static void vrp_meet (value_range_t *, value_range_t *); static void vrp_intersect_ranges (value_range_t *, value_range_t *); static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code, tree, tree, bool, bool *, bool *); /* Location information for ASSERT_EXPRs. Each instance of this structure describes an ASSERT_EXPR for an SSA name. Since a single SSA name may have more than one assertion associated with it, these locations are kept in a linked list attached to the corresponding SSA name. */ struct assert_locus_d { /* Basic block where the assertion would be inserted. */ basic_block bb; /* Some assertions need to be inserted on an edge (e.g., assertions generated by COND_EXPRs). In those cases, BB will be NULL. */ edge e; /* Pointer to the statement that generated this assertion. 
*/ gimple_stmt_iterator si; /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */ enum tree_code comp_code; /* Value being compared against. */ tree val; /* Expression to compare. */ tree expr; /* Next node in the linked list. */ struct assert_locus_d *next; }; typedef struct assert_locus_d *assert_locus_t; /* If bit I is present, it means that SSA name N_i has a list of assertions that should be inserted in the IL. */ static bitmap need_assert_for; /* Array of locations lists where to insert assertions. ASSERTS_FOR[I] holds a list of ASSERT_LOCUS_T nodes that describe where ASSERT_EXPRs for SSA name N_I should be inserted. */ static assert_locus_t *asserts_for; /* Value range array. After propagation, VR_VALUE[I] holds the range of values that SSA name N_I may take. */ static unsigned num_vr_values; static value_range_t **vr_value; static bool values_propagated; /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the number of executable edges we saw the last time we visited the node. */ static int *vr_phi_edge_counts; typedef struct { gimple stmt; tree vec; } switch_update; static vec<edge> to_remove_edges; static vec<switch_update> to_update_switch_stmts; /* Return the maximum value for TYPE. */ static inline tree vrp_val_max (const_tree type) { if (!INTEGRAL_TYPE_P (type)) return NULL_TREE; return TYPE_MAX_VALUE (type); } /* Return the minimum value for TYPE. */ static inline tree vrp_val_min (const_tree type) { if (!INTEGRAL_TYPE_P (type)) return NULL_TREE; return TYPE_MIN_VALUE (type); } /* Return whether VAL is equal to the maximum value of its type. This will be true for a positive overflow infinity. We can't do a simple equality comparison with TYPE_MAX_VALUE because C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE is not == to the integer constant with the same value in the type. 
*/ static inline bool vrp_val_is_max (const_tree val) { tree type_max = vrp_val_max (TREE_TYPE (val)); return (val == type_max || (type_max != NULL_TREE && operand_equal_p (val, type_max, 0))); } /* Return whether VAL is equal to the minimum value of its type. This will be true for a negative overflow infinity. */ static inline bool vrp_val_is_min (const_tree val) { tree type_min = vrp_val_min (TREE_TYPE (val)); return (val == type_min || (type_min != NULL_TREE && operand_equal_p (val, type_min, 0))); } /* Return whether TYPE should use an overflow infinity distinct from TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to represent a signed overflow during VRP computations. An infinity is distinct from a half-range, which will go from some number to TYPE_{MIN,MAX}_VALUE. */ static inline bool needs_overflow_infinity (const_tree type) { return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type); } /* Return whether TYPE can support our overflow infinity representation: we use the TREE_OVERFLOW flag, which only exists for constants. If TYPE doesn't support this, we don't optimize cases which would require signed overflow--we drop them to VARYING. */ static inline bool supports_overflow_infinity (const_tree type) { tree min = vrp_val_min (type), max = vrp_val_max (type); #ifdef ENABLE_CHECKING gcc_assert (needs_overflow_infinity (type)); #endif return (min != NULL_TREE && CONSTANT_CLASS_P (min) && max != NULL_TREE && CONSTANT_CLASS_P (max)); } /* VAL is the maximum or minimum value of a type. Return a corresponding overflow infinity. */ static inline tree make_overflow_infinity (tree val) { gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val)); val = copy_node (val); TREE_OVERFLOW (val) = 1; return val; } /* Return a negative overflow infinity for TYPE. 
*/ static inline tree negative_overflow_infinity (tree type) { gcc_checking_assert (supports_overflow_infinity (type)); return make_overflow_infinity (vrp_val_min (type)); } /* Return a positive overflow infinity for TYPE. */ static inline tree positive_overflow_infinity (tree type) { gcc_checking_assert (supports_overflow_infinity (type)); return make_overflow_infinity (vrp_val_max (type)); } /* Return whether VAL is a negative overflow infinity. */ static inline bool is_negative_overflow_infinity (const_tree val) { return (TREE_OVERFLOW_P (val) && needs_overflow_infinity (TREE_TYPE (val)) && vrp_val_is_min (val)); } /* Return whether VAL is a positive overflow infinity. */ static inline bool is_positive_overflow_infinity (const_tree val) { return (TREE_OVERFLOW_P (val) && needs_overflow_infinity (TREE_TYPE (val)) && vrp_val_is_max (val)); } /* Return whether VAL is a positive or negative overflow infinity. */ static inline bool is_overflow_infinity (const_tree val) { return (TREE_OVERFLOW_P (val) && needs_overflow_infinity (TREE_TYPE (val)) && (vrp_val_is_min (val) || vrp_val_is_max (val))); } /* Return whether STMT has a constant rhs that is_overflow_infinity. */ static inline bool stmt_overflow_infinity (gimple stmt) { if (is_gimple_assign (stmt) && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) == GIMPLE_SINGLE_RHS) return is_overflow_infinity (gimple_assign_rhs1 (stmt)); return false; } /* If VAL is now an overflow infinity, return VAL. Otherwise, return the same value with TREE_OVERFLOW clear. This can be used to avoid confusing a regular value with an overflow value. */ static inline tree avoid_overflow_infinity (tree val) { if (!is_overflow_infinity (val)) return val; if (vrp_val_is_max (val)) return vrp_val_max (TREE_TYPE (val)); else { gcc_checking_assert (vrp_val_is_min (val)); return vrp_val_min (TREE_TYPE (val)); } } /* Return true if ARG is marked with the nonnull attribute in the current function signature. 
*/ static bool nonnull_arg_p (const_tree arg) { tree t, attrs, fntype; unsigned HOST_WIDE_INT arg_num; gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg))); /* The static chain decl is always non null. */ if (arg == cfun->static_chain_decl) return true; fntype = TREE_TYPE (current_function_decl); for (attrs = TYPE_ATTRIBUTES (fntype); attrs; attrs = TREE_CHAIN (attrs)) { attrs = lookup_attribute ("nonnull", attrs); /* If "nonnull" wasn't specified, we know nothing about the argument. */ if (attrs == NULL_TREE) return false; /* If "nonnull" applies to all the arguments, then ARG is non-null. */ if (TREE_VALUE (attrs) == NULL_TREE) return true; /* Get the position number for ARG in the function signature. */ for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl); t; t = DECL_CHAIN (t), arg_num++) { if (t == arg) break; } gcc_assert (t == arg); /* Now see if ARG_NUM is mentioned in the nonnull list. */ for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t)) { if (compare_tree_int (TREE_VALUE (t), arg_num) == 0) return true; } } return false; } /* Set value range VR to VR_UNDEFINED. */ static inline void set_value_range_to_undefined (value_range_t *vr) { vr->type = VR_UNDEFINED; vr->min = vr->max = NULL_TREE; if (vr->equiv) bitmap_clear (vr->equiv); } /* Set value range VR to VR_VARYING. */ static inline void set_value_range_to_varying (value_range_t *vr) { vr->type = VR_VARYING; vr->min = vr->max = NULL_TREE; if (vr->equiv) bitmap_clear (vr->equiv); } /* Set value range VR to {T, MIN, MAX, EQUIV}. */ static void set_value_range (value_range_t *vr, enum value_range_type t, tree min, tree max, bitmap equiv) { #if defined ENABLE_CHECKING /* Check the validity of the range. 
*/ if (t == VR_RANGE || t == VR_ANTI_RANGE) { int cmp; gcc_assert (min && max); gcc_assert ((!TREE_OVERFLOW_P (min) || is_overflow_infinity (min)) && (!TREE_OVERFLOW_P (max) || is_overflow_infinity (max))); if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE) gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max)); cmp = compare_values (min, max); gcc_assert (cmp == 0 || cmp == -1 || cmp == -2); if (needs_overflow_infinity (TREE_TYPE (min))) gcc_assert (!is_overflow_infinity (min) || !is_overflow_infinity (max)); } if (t == VR_UNDEFINED || t == VR_VARYING) gcc_assert (min == NULL_TREE && max == NULL_TREE); if (t == VR_UNDEFINED || t == VR_VARYING) gcc_assert (equiv == NULL || bitmap_empty_p (equiv)); #endif vr->type = t; vr->min = min; vr->max = max; /* Since updating the equivalence set involves deep copying the bitmaps, only do it if absolutely necessary. */ if (vr->equiv == NULL && equiv != NULL) vr->equiv = BITMAP_ALLOC (NULL); if (equiv != vr->equiv) { if (equiv && !bitmap_empty_p (equiv)) bitmap_copy (vr->equiv, equiv); else bitmap_clear (vr->equiv); } } /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}. This means adjusting T, MIN and MAX representing the case of a wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX] as anti-rage ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges. In corner cases where MAX+1 or MIN-1 wraps this will fall back to varying. This routine exists to ease canonicalization in the case where we extract ranges from var + CST op limit. */ static void set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t, tree min, tree max, bitmap equiv) { /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */ if (t == VR_UNDEFINED) { set_value_range_to_undefined (vr); return; } else if (t == VR_VARYING) { set_value_range_to_varying (vr); return; } /* Nothing to canonicalize for symbolic ranges. 
*/ if (TREE_CODE (min) != INTEGER_CST || TREE_CODE (max) != INTEGER_CST) { set_value_range (vr, t, min, max, equiv); return; } /* Wrong order for min and max, to swap them and the VR type we need to adjust them. */ if (tree_int_cst_lt (max, min)) { tree one, tmp; /* For one bit precision if max < min, then the swapped range covers all values, so for VR_RANGE it is varying and for VR_ANTI_RANGE empty range, so drop to varying as well. */ if (TYPE_PRECISION (TREE_TYPE (min)) == 1) { set_value_range_to_varying (vr); return; } one = build_int_cst (TREE_TYPE (min), 1); tmp = int_const_binop (PLUS_EXPR, max, one); max = int_const_binop (MINUS_EXPR, min, one); min = tmp; /* There's one corner case, if we had [C+1, C] before we now have that again. But this represents an empty value range, so drop to varying in this case. */ if (tree_int_cst_lt (max, min)) { set_value_range_to_varying (vr); return; } t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE; } /* Anti-ranges that can be represented as ranges should be so. */ if (t == VR_ANTI_RANGE) { bool is_min = vrp_val_is_min (min); bool is_max = vrp_val_is_max (max); if (is_min && is_max) { /* We cannot deal with empty ranges, drop to varying. ??? This could be VR_UNDEFINED instead. */ set_value_range_to_varying (vr); return; } else if (TYPE_PRECISION (TREE_TYPE (min)) == 1 && (is_min || is_max)) { /* Non-empty boolean ranges can always be represented as a singleton range. */ if (is_min) min = max = vrp_val_max (TREE_TYPE (min)); else min = max = vrp_val_min (TREE_TYPE (min)); t = VR_RANGE; } else if (is_min /* As a special exception preserve non-null ranges. 
*/ && !(TYPE_UNSIGNED (TREE_TYPE (min)) && integer_zerop (max))) { tree one = build_int_cst (TREE_TYPE (max), 1); min = int_const_binop (PLUS_EXPR, max, one); max = vrp_val_max (TREE_TYPE (max)); t = VR_RANGE; } else if (is_max) { tree one = build_int_cst (TREE_TYPE (min), 1); max = int_const_binop (MINUS_EXPR, min, one); min = vrp_val_min (TREE_TYPE (min)); t = VR_RANGE; } } /* Drop [-INF(OVF), +INF(OVF)] to varying. */ if (needs_overflow_infinity (TREE_TYPE (min)) && is_overflow_infinity (min) && is_overflow_infinity (max)) { set_value_range_to_varying (vr); return; } set_value_range (vr, t, min, max, equiv); } /* Copy value range FROM into value range TO. */ static inline void copy_value_range (value_range_t *to, value_range_t *from) { set_value_range (to, from->type, from->min, from->max, from->equiv); } /* Set value range VR to a single value. This function is only called with values we get from statements, and exists to clear the TREE_OVERFLOW flag so that we don't think we have an overflow infinity when we shouldn't. */ static inline void set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv) { gcc_assert (is_gimple_min_invariant (val)); if (TREE_OVERFLOW_P (val)) val = drop_tree_overflow (val); set_value_range (vr, VR_RANGE, val, val, equiv); } /* Set value range VR to a non-negative range of type TYPE. OVERFLOW_INFINITY indicates whether to use an overflow infinity rather than TYPE_MAX_VALUE; this should be true if we determine that the range is nonnegative based on the assumption that signed overflow does not occur. */ static inline void set_value_range_to_nonnegative (value_range_t *vr, tree type, bool overflow_infinity) { tree zero; if (overflow_infinity && !supports_overflow_infinity (type)) { set_value_range_to_varying (vr); return; } zero = build_int_cst (type, 0); set_value_range (vr, VR_RANGE, zero, (overflow_infinity ? 
positive_overflow_infinity (type) : TYPE_MAX_VALUE (type)), vr->equiv); } /* Set value range VR to a non-NULL range of type TYPE. */ static inline void set_value_range_to_nonnull (value_range_t *vr, tree type) { tree zero = build_int_cst (type, 0); set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv); } /* Set value range VR to a NULL range of type TYPE. */ static inline void set_value_range_to_null (value_range_t *vr, tree type) { set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv); } /* Set value range VR to a range of a truthvalue of type TYPE. */ static inline void set_value_range_to_truthvalue (value_range_t *vr, tree type) { if (TYPE_PRECISION (type) == 1) set_value_range_to_varying (vr); else set_value_range (vr, VR_RANGE, build_int_cst (type, 0), build_int_cst (type, 1), vr->equiv); } /* If abs (min) < abs (max), set VR to [-max, max], if abs (min) >= abs (max), set VR to [-min, min]. */ static void abs_extent_range (value_range_t *vr, tree min, tree max) { int cmp; gcc_assert (TREE_CODE (min) == INTEGER_CST); gcc_assert (TREE_CODE (max) == INTEGER_CST); gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min))); gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min))); min = fold_unary (ABS_EXPR, TREE_TYPE (min), min); max = fold_unary (ABS_EXPR, TREE_TYPE (max), max); if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max)) { set_value_range_to_varying (vr); return; } cmp = compare_values (min, max); if (cmp == -1) min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max); else if (cmp == 0 || cmp == 1) { max = min; min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min); } else { set_value_range_to_varying (vr); return; } set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); } /* Return value range information for VAR. If we have no values ranges recorded (ie, VRP is not running), then return NULL. Otherwise create an empty range if none existed for VAR. 
*/ static value_range_t * get_value_range (const_tree var) { static const struct value_range_d vr_const_varying = { VR_VARYING, NULL_TREE, NULL_TREE, NULL }; value_range_t *vr; tree sym; unsigned ver = SSA_NAME_VERSION (var); /* If we have no recorded ranges, then return NULL. */ if (! vr_value) return NULL; /* If we query the range for a new SSA name return an unmodifiable VARYING. We should get here at most from the substitute-and-fold stage which will never try to change values. */ if (ver >= num_vr_values) return CONST_CAST (value_range_t *, &vr_const_varying); vr = vr_value[ver]; if (vr) return vr; /* After propagation finished do not allocate new value-ranges. */ if (values_propagated) return CONST_CAST (value_range_t *, &vr_const_varying); /* Create a default value range. */ vr_value[ver] = vr = XCNEW (value_range_t); /* Defer allocating the equivalence set. */ vr->equiv = NULL; /* If VAR is a default definition of a parameter, the variable can take any value in VAR's type. */ if (SSA_NAME_IS_DEFAULT_DEF (var)) { sym = SSA_NAME_VAR (var); if (TREE_CODE (sym) == PARM_DECL) { /* Try to use the "nonnull" attribute to create ~[0, 0] anti-ranges for pointers. Note that this is only valid with default definitions of PARM_DECLs. */ if (POINTER_TYPE_P (TREE_TYPE (sym)) && nonnull_arg_p (sym)) set_value_range_to_nonnull (vr, TREE_TYPE (sym)); else set_value_range_to_varying (vr); } else if (TREE_CODE (sym) == RESULT_DECL && DECL_BY_REFERENCE (sym)) set_value_range_to_nonnull (vr, TREE_TYPE (sym)); } return vr; } /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */ static inline bool vrp_operand_equal_p (const_tree val1, const_tree val2) { if (val1 == val2) return true; if (!val1 || !val2 || !operand_equal_p (val1, val2, 0)) return false; return is_overflow_infinity (val1) == is_overflow_infinity (val2); } /* Return true, if the bitmaps B1 and B2 are equal. 
*/

static inline bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  /* NULL and empty bitmaps are considered equal.  */
  return (b1 == b2
          || ((!b1 || bitmap_empty_p (b1))
              && (!b2 || bitmap_empty_p (b2)))
          || (b1 && b2
              && bitmap_equal_p (b1, b2)));
}

/* Update the value range and equivalence set for variable VAR to
   NEW_VR.  Return true if NEW_VR is different from VAR's previous
   value.

   NOTE: This function assumes that NEW_VR is a temporary value range
   object created for the sole purpose of updating VAR's range.  The
   storage used by the equivalence set from NEW_VR will be freed by
   this function.  Do not call update_value_range when NEW_VR
   is the range object associated with another SSA name.  */

static inline bool
update_value_range (const_tree var, value_range_t *new_vr)
{
  value_range_t *old_vr;
  bool is_new;

  /* Update the value range, if necessary.  */
  old_vr = get_value_range (var);
  is_new = old_vr->type != new_vr->type
           || !vrp_operand_equal_p (old_vr->min, new_vr->min)
           || !vrp_operand_equal_p (old_vr->max, new_vr->max)
           || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);

  if (is_new)
    {
      /* Do not allow transitions up the lattice.  The following
         is slightly more awkward than just new_vr->type < old_vr->type
         because VR_RANGE and VR_ANTI_RANGE need to be considered
         the same.  We may not have is_new when transitioning to
         UNDEFINED or from VARYING.  */
      if (new_vr->type == VR_UNDEFINED
          || old_vr->type == VR_VARYING)
        set_value_range_to_varying (old_vr);
      else
        set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
                         new_vr->equiv);
    }

  /* NEW_VR was a temporary; release its equivalence set.  */
  BITMAP_FREE (new_vr->equiv);

  return is_new;
}


/* Add VAR and VAR's equivalence set to EQUIV.  This is the central
   point where equivalence processing can be turned on/off.  */

static void
add_equivalence (bitmap *equiv, const_tree var)
{
  unsigned ver = SSA_NAME_VERSION (var);
  value_range_t *vr = vr_value[ver];

  if (*equiv == NULL)
    *equiv = BITMAP_ALLOC (NULL);
  bitmap_set_bit (*equiv, ver);
  /* Equivalence is transitive: also pull in VAR's own equivalences.  */
  if (vr && vr->equiv)
    bitmap_ior_into (*equiv, vr->equiv);
}


/* Return true if VR is ~[0, 0].
*/

static inline bool
range_is_nonnull (value_range_t *vr)
{
  return vr->type == VR_ANTI_RANGE
         && integer_zerop (vr->min)
         && integer_zerop (vr->max);
}


/* Return true if VR is [0, 0].  */

static inline bool
range_is_null (value_range_t *vr)
{
  return vr->type == VR_RANGE
         && integer_zerop (vr->min)
         && integer_zerop (vr->max);
}

/* Return true if max and min of VR are INTEGER_CST.  It's not necessarily
   a singleton.  */

static inline bool
range_int_cst_p (value_range_t *vr)
{
  return (vr->type == VR_RANGE
          && TREE_CODE (vr->max) == INTEGER_CST
          && TREE_CODE (vr->min) == INTEGER_CST);
}

/* Return true if VR is a INTEGER_CST singleton.  */

static inline bool
range_int_cst_singleton_p (value_range_t *vr)
{
  return (range_int_cst_p (vr)
          && !is_overflow_infinity (vr->min)
          && !is_overflow_infinity (vr->max)
          && tree_int_cst_equal (vr->min, vr->max));
}

/* Return true if value range VR involves at least one symbol.  */

static inline bool
symbolic_range_p (value_range_t *vr)
{
  return (!is_gimple_min_invariant (vr->min)
          || !is_gimple_min_invariant (vr->max));
}

/* Return true if value range VR uses an overflow infinity.  */

static inline bool
overflow_infinity_range_p (value_range_t *vr)
{
  return (vr->type == VR_RANGE
          && (is_overflow_infinity (vr->min)
              || is_overflow_infinity (vr->max)));
}

/* Return false if we can not make a valid comparison based on VR;
   this will be the case if it uses an overflow infinity and overflow
   is not undefined (i.e., -fno-strict-overflow is in effect).
   Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
   uses an overflow infinity.
*/

static bool
usable_range_p (value_range_t *vr, bool *strict_overflow_p)
{
  gcc_assert (vr->type == VR_RANGE);
  if (is_overflow_infinity (vr->min))
    {
      *strict_overflow_p = true;
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
        return false;
    }
  if (is_overflow_infinity (vr->max))
    {
      *strict_overflow_p = true;
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
        return false;
    }
  return true;
}


/* Return true if the result of assignment STMT is known to be non-negative.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  /* Dispatch on the RHS class and defer to the fold-const helpers.  */
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
                                             gimple_expr_type (stmt),
                                             gimple_assign_rhs1 (stmt),
                                             strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
                                              gimple_expr_type (stmt),
                                              gimple_assign_rhs1 (stmt),
                                              gimple_assign_rhs2 (stmt),
                                              strict_overflow_p);
    case GIMPLE_TERNARY_RHS:
      return false;
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
                                              strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}

/* Return true if return value of call STMT is known to be non-negative.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  /* Only the first two call arguments matter to the fold-const helper.  */
  tree arg0 = gimple_call_num_args (stmt) > 0 ?
    gimple_call_arg (stmt, 0) : NULL_TREE;
  tree arg1 = gimple_call_num_args (stmt) > 1 ?
    gimple_call_arg (stmt, 1) : NULL_TREE;

  return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
                                        gimple_call_fndecl (stmt),
                                        arg0,
                                        arg1,
                                        strict_overflow_p);
}

/* Return true if STMT is known to compute a non-negative value.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
    case GIMPLE_CALL:
      return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
    default:
      gcc_unreachable ();
    }
}

/* Return true if the result of assignment STMT is known to be non-zero.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
                                         gimple_expr_type (stmt),
                                         gimple_assign_rhs1 (stmt),
                                         strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
                                          gimple_expr_type (stmt),
                                          gimple_assign_rhs1 (stmt),
                                          gimple_assign_rhs2 (stmt),
                                          strict_overflow_p);
    case GIMPLE_TERNARY_RHS:
      return false;
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
                                          strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}

/* Return true if STMT is known to compute a non-zero value.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
    case GIMPLE_CALL:
      {
        tree fndecl = gimple_call_fndecl (stmt);
        if (!fndecl) return false;
        /* A throwing operator new never returns NULL.  */
        if (flag_delete_null_pointer_checks && !flag_check_new
            && DECL_IS_OPERATOR_NEW (fndecl)
            && !TREE_NOTHROW (fndecl))
          return true;
        if (flag_delete_null_pointer_checks &&
            lookup_attribute ("returns_nonnull",
                              TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
          return true;
        return gimple_alloca_call_p (stmt);
      }
    default:
      gcc_unreachable ();
    }
}

/* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
   obtained so far.  */

static bool
vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
{
  if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
    return true;

  /* If we have an expression of the form &X->a, then the expression
     is nonnull if X is nonnull.  */
  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
    {
      tree expr = gimple_assign_rhs1 (stmt);
      tree base = get_base_address (TREE_OPERAND (expr, 0));

      if (base != NULL_TREE
          && TREE_CODE (base) == MEM_REF
          && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
        {
          value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
          if (range_is_nonnull (vr))
            return true;
        }
    }

  return false;
}

/* Returns true if EXPR is a valid value (as expected by compare_values) --
   a gimple invariant, or SSA_NAME +- CST.
*/

static bool
valid_value_p (tree expr)
{
  if (TREE_CODE (expr) == SSA_NAME)
    return true;

  if (TREE_CODE (expr) == PLUS_EXPR
      || TREE_CODE (expr) == MINUS_EXPR)
    return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
            && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);

  return is_gimple_min_invariant (expr);
}

/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */

static inline int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else
    {
      tree tcmp;

      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      if (!tcmp
          /* The comparison may have failed to fold to a constant.  */
          || TREE_CODE (tcmp) != INTEGER_CST)
        return -2;

      if (!integer_zerop (tcmp))
        return 1;
    }

  /* val >= val2, not considering overflow infinity.  */
  if (is_negative_overflow_infinity (val))
    return is_negative_overflow_infinity (val2) ? 0 : 1;
  else if (is_positive_overflow_infinity (val2))
    return is_positive_overflow_infinity (val) ? 0 : 1;

  return 0;
}

/* Compare two values VAL1 and VAL2.  Return

        -2 if VAL1 and VAL2 cannot be compared at compile-time,
        -1 if VAL1 < VAL2,
         0 if VAL1 == VAL2,
        +1 if VAL1 > VAL2, and
        +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

static int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
              == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  if ((TREE_CODE (val1) == SSA_NAME
       || TREE_CODE (val1) == PLUS_EXPR
       || TREE_CODE (val1) == MINUS_EXPR)
      && (TREE_CODE (val2) == SSA_NAME
          || TREE_CODE (val2) == PLUS_EXPR
          || TREE_CODE (val2) == MINUS_EXPR))
    {
      tree n1, c1, n2, c2;
      enum tree_code code1, code2;

      /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
         return -1 or +1 accordingly.  If VAL1 and VAL2 don't use the
         same name, return -2.  */
      if (TREE_CODE (val1) == SSA_NAME)
        {
          code1 = SSA_NAME;
          n1 = val1;
          c1 = NULL_TREE;
        }
      else
        {
          code1 = TREE_CODE (val1);
          n1 = TREE_OPERAND (val1, 0);
          c1 = TREE_OPERAND (val1, 1);
          if (tree_int_cst_sgn (c1) == -1)
            {
              if (is_negative_overflow_infinity (c1))
                return -2;
              /* Canonicalize to a positive constant by flipping the
                 PLUS/MINUS code.  */
              c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
              if (!c1)
                return -2;
              code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
            }
        }

      if (TREE_CODE (val2) == SSA_NAME)
        {
          code2 = SSA_NAME;
          n2 = val2;
          c2 = NULL_TREE;
        }
      else
        {
          code2 = TREE_CODE (val2);
          n2 = TREE_OPERAND (val2, 0);
          c2 = TREE_OPERAND (val2, 1);
          if (tree_int_cst_sgn (c2) == -1)
            {
              if (is_negative_overflow_infinity (c2))
                return -2;
              c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
              if (!c2)
                return -2;
              code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
            }
        }

      /* Both values must use the same name.  */
      if (n1 != n2)
        return -2;

      if (code1 == SSA_NAME && code2 == SSA_NAME)
        /* NAME == NAME  */
        return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
        return -2;

      if (strict_overflow_p != NULL
          && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
          && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      if (code1 == SSA_NAME)
        {
          if (code2 == PLUS_EXPR)
            /* NAME < NAME + CST  */
            return -1;
          else if (code2 == MINUS_EXPR)
            /* NAME > NAME - CST  */
            return 1;
        }
      else if (code1 == PLUS_EXPR)
        {
          if (code2 == SSA_NAME)
            /* NAME + CST > NAME  */
            return 1;
          else if (code2 == PLUS_EXPR)
            /* NAME + CST1 > NAME + CST2, if CST1 > CST2  */
            return compare_values_warnv (c1, c2, strict_overflow_p);
          else if (code2 == MINUS_EXPR)
            /* NAME + CST1 > NAME - CST2  */
            return 1;
        }
      else if (code1 == MINUS_EXPR)
        {
          if (code2 == SSA_NAME)
            /* NAME - CST < NAME  */
            return -1;
          else if (code2 == PLUS_EXPR)
            /* NAME - CST1 < NAME + CST2  */
            return -1;
          else if (code2 == MINUS_EXPR)
            /* NAME - CST1 > NAME - CST2, if CST1 < CST2.  Notice that
               C1 and C2 are swapped in the call to compare_values.  */
            return compare_values_warnv (c2, c1, strict_overflow_p);
        }

      gcc_unreachable ();
    }

  /* We cannot compare non-constants.  */
  if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values, except for overflow
         infinities.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
        {
          if (strict_overflow_p != NULL)
            *strict_overflow_p = true;
          if (is_negative_overflow_infinity (val1))
            return is_negative_overflow_infinity (val2) ? 0 : -1;
          else if (is_negative_overflow_infinity (val2))
            return 1;
          else if (is_positive_overflow_infinity (val1))
            return is_positive_overflow_infinity (val2) ? 0 : 1;
          else if (is_positive_overflow_infinity (val2))
            return -1;
          return -2;
        }

      return tree_int_cst_compare (val1, val2);
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
        return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
        return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
        return 1;

      /* If VAL1 is different than VAL2, return +2.
         For integer constants we either have already returned -1 or 1
         or they are equivalent.  We still might succeed in proving
         something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
          || TREE_CODE (val2) != INTEGER_CST)
        {
          t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
          if (t && integer_onep (t))
            return 2;
        }

      return -2;
    }
}

/* Compare values like compare_values_warnv, but treat comparisons of
   nonconstants which rely on undefined overflow as incomparable.  */

static int
compare_values (tree val1, tree val2)
{
  bool sop;
  int ret;

  sop = false;
  ret = compare_values_warnv (val1, val2, &sop);
  if (sop
      && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
    ret = -2;

  return ret;
}


/* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
          0 if VAL is not inside [MIN, MAX],
         -2 if we cannot tell either way.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

static inline int
value_inside_range (tree val, tree min, tree max)
{
  int cmp1, cmp2;

  cmp1 = operand_less_p (val, min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return 0;

  cmp2 = operand_less_p (max, val);
  if (cmp2 == -2)
    return -2;

  return !cmp2;
}


/* Return true if value ranges VR0 and VR1 have a non-empty
   intersection.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.
   */

static inline bool
value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
{
  /* The value ranges do not intersect if the maximum of the first range is
     less than the minimum of the second range or vice versa.
     When those relations are unknown, we can't do any better.  */
  if (operand_less_p (vr0->max, vr1->min) != 0)
    return false;
  if (operand_less_p (vr1->max, vr0->min) != 0)
    return false;
  return true;
}


/* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
   include the value zero, -2 if we cannot tell.  */

static inline int
range_includes_zero_p (tree min, tree max)
{
  tree zero = build_int_cst (TREE_TYPE (min), 0);
  return value_inside_range (zero, min, max);
}

/* Return true if *VR is known to only contain nonnegative values.  */

static inline bool
value_range_nonnegative_p (value_range_t *vr)
{
  /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
     which would return a useful value should be encoded as a
     VR_RANGE.  */
  if (vr->type == VR_RANGE)
    {
      int result = compare_values (vr->min, integer_zero_node);
      return (result == 0 || result == 1);
    }

  return false;
}

/* If *VR has a value range that is a single constant value return that,
   otherwise return NULL_TREE.  */

static tree
value_range_constant_singleton (value_range_t *vr)
{
  if (vr->type == VR_RANGE
      && operand_equal_p (vr->min, vr->max, 0)
      && is_gimple_min_invariant (vr->min))
    return vr->min;

  return NULL_TREE;
}

/* If OP has a value range with a single constant value return that,
   otherwise return NULL_TREE.  This returns OP itself if OP is a
   constant.  */

static tree
op_with_constant_singleton_value_range (tree op)
{
  if (is_gimple_min_invariant (op))
    return op;

  if (TREE_CODE (op) != SSA_NAME)
    return NULL_TREE;

  return value_range_constant_singleton (get_value_range (op));
}

/* Return true if op is in a boolean [0, 1] value-range.  */

static bool
op_with_boolean_value_range_p (tree op)
{
  value_range_t *vr;

  if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
    return true;

  if (integer_zerop (op)
      || integer_onep (op))
    return true;

  if (TREE_CODE (op) != SSA_NAME)
    return false;

  vr = get_value_range (op);
  return (vr->type == VR_RANGE
          && integer_zerop (vr->min)
          && integer_onep (vr->max));
}

/* Extract value range information from an ASSERT_EXPR EXPR and store
   it in *VR_P.
*/

static void
extract_range_from_assert (value_range_t *vr_p, tree expr)
{
  tree var, cond, limit, min, max, type;
  value_range_t *limit_vr;
  enum tree_code cond_code;

  var = ASSERT_EXPR_VAR (expr);
  cond = ASSERT_EXPR_COND (expr);

  gcc_assert (COMPARISON_CLASS_P (cond));

  /* Find VAR in the ASSERT_EXPR conditional.  */
  if (var == TREE_OPERAND (cond, 0)
      || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
      || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
    {
      /* If the predicate is of the form VAR COMP LIMIT, then we just
         take LIMIT from the RHS and use the same comparison code.  */
      cond_code = TREE_CODE (cond);
      limit = TREE_OPERAND (cond, 1);
      cond = TREE_OPERAND (cond, 0);
    }
  else
    {
      /* If the predicate is of the form LIMIT COMP VAR, then we need
         to flip around the comparison code to create the proper range
         for VAR.  */
      cond_code = swap_tree_comparison (TREE_CODE (cond));
      limit = TREE_OPERAND (cond, 0);
      cond = TREE_OPERAND (cond, 1);
    }

  limit = avoid_overflow_infinity (limit);

  type = TREE_TYPE (var);
  gcc_assert (limit != var);

  /* For pointer arithmetic, we only keep track of pointer equality
     and inequality.  */
  if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
    {
      set_value_range_to_varying (vr_p);
      return;
    }

  /* If LIMIT is another SSA name and LIMIT has a range of its own,
     try to use LIMIT's range to avoid creating symbolic ranges
     unnecessarily. */
  limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;

  /* LIMIT's range is only interesting if it has any useful information.  */
  if (limit_vr
      && (limit_vr->type == VR_UNDEFINED
          || limit_vr->type == VR_VARYING
          || symbolic_range_p (limit_vr)))
    limit_vr = NULL;

  /* Initially, the new range has the same set of equivalences of
     VAR's range.  This will be revised before returning the final
     value.  Since assertions may be chained via mutually exclusive
     predicates, we will need to trim the set of equivalences before
     we are done.  */
  gcc_assert (vr_p->equiv == NULL);
  add_equivalence (&vr_p->equiv, var);

  /* Extract a new range based on the asserted comparison for VAR and
     LIMIT's value range.  Notice that if LIMIT has an anti-range, we
     will only use it for equality comparisons (EQ_EXPR).  For any
     other kind of assertion, we cannot derive a range from LIMIT's
     anti-range that can be used to describe the new range.  For
     instance, ASSERT_EXPR <x_2, x_2 <= b_4>.  If b_4 is ~[2, 10],
     then b_4 takes on the ranges [-INF, 1] and [11, +INF].  There is
     no single range for x_2 that could describe LE_EXPR, so we might
     as well build the range [b_4, +INF] for it.
     One special case we handle is extracting a range from a
     range test encoded as (unsigned)var + CST <= limit.  */
  if (TREE_CODE (cond) == NOP_EXPR
      || TREE_CODE (cond) == PLUS_EXPR)
    {
      if (TREE_CODE (cond) == PLUS_EXPR)
        {
          min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
                             TREE_OPERAND (cond, 1));
          max = int_const_binop (PLUS_EXPR, limit, min);
          cond = TREE_OPERAND (cond, 0);
        }
      else
        {
          min = build_int_cst (TREE_TYPE (var), 0);
          max = limit;
        }

      /* Make sure to not set TREE_OVERFLOW on the final type
         conversion.  We are willingly interpreting large positive
         unsigned values as negative signed values here.  */
      min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
      max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);

      /* We can transform a max, min range to an anti-range or
         vice-versa.  Use set_and_canonicalize_value_range which does
         this for us.  */
      if (cond_code == LE_EXPR)
        set_and_canonicalize_value_range (vr_p, VR_RANGE,
                                          min, max, vr_p->equiv);
      else if (cond_code == GT_EXPR)
        set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
                                          min, max, vr_p->equiv);
      else
        gcc_unreachable ();
    }
  else if (cond_code == EQ_EXPR)
    {
      enum value_range_type range_type;

      if (limit_vr)
        {
          range_type = limit_vr->type;
          min = limit_vr->min;
          max = limit_vr->max;
        }
      else
        {
          range_type = VR_RANGE;
          min = limit;
          max = limit;
        }

      set_value_range (vr_p, range_type, min, max, vr_p->equiv);

      /* When asserting the equality VAR == LIMIT and LIMIT is another
         SSA name, the new range will also inherit the equivalence set
         from LIMIT.  */
      if (TREE_CODE (limit) == SSA_NAME)
        add_equivalence (&vr_p->equiv, limit);
    }
  else if (cond_code == NE_EXPR)
    {
      /* As described above, when LIMIT's range is an anti-range and
         this assertion is an inequality (NE_EXPR), then we cannot
         derive anything from the anti-range.  For instance, if
         LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
         not imply that VAR's range is [0, 0].  So, in the case of
         anti-ranges, we just assert the inequality using LIMIT and
         not its anti-range.

         If LIMIT_VR is a range, we can only use it to build a new
         anti-range if LIMIT_VR is a single-valued range.  For
         instance, if LIMIT_VR is [0, 1], the predicate
         VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
         Rather, it means that for value 0 VAR should be ~[0, 0]
         and for value 1, VAR should be ~[1, 1].  We cannot
         represent these ranges.

         The only situation in which we can build a valid
         anti-range is when LIMIT_VR is a single-valued range
         (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX).  In that case,
         build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX].  */
      if (limit_vr
          && limit_vr->type == VR_RANGE
          && compare_values (limit_vr->min, limit_vr->max) == 0)
        {
          min = limit_vr->min;
          max = limit_vr->max;
        }
      else
        {
          /* In any other case, we cannot use LIMIT's range to build a
             valid anti-range.  */
          min = max = limit;
        }

      /* If MIN and MAX cover the whole range for their type, then
         just use the original LIMIT.  */
      if (INTEGRAL_TYPE_P (type)
          && vrp_val_is_min (min)
          && vrp_val_is_max (max))
        min = max = limit;

      set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
                                        min, max, vr_p->equiv);
    }
  else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      min = TYPE_MIN_VALUE (type);

      if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
        max = limit;
      else
        {
          /* If LIMIT_VR is of the form [N1, N2], we need to build the
             range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
             LT_EXPR.  */
          max = limit_vr->max;
        }

      /* If the maximum value forces us to be out of bounds, simply punt.
         It would be pointless to try and do anything more since this
         all should be optimized away above us.  */
      if ((cond_code == LT_EXPR
           && compare_values (max, min) == 0)
          || is_overflow_infinity (max))
        set_value_range_to_varying (vr_p);
      else
        {
          /* For LT_EXPR, we create the range [MIN, MAX - 1].  */
          if (cond_code == LT_EXPR)
            {
              if (TYPE_PRECISION (TREE_TYPE (max)) == 1
                  && !TYPE_UNSIGNED (TREE_TYPE (max)))
                max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
                                   build_int_cst (TREE_TYPE (max), -1));
              else
                max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
                                   build_int_cst (TREE_TYPE (max), 1));
              /* Signed overflow may occur when negating the above
                 constant; do not warn for it.  */
              if (EXPR_P (max))
                TREE_NO_WARNING (max) = 1;
            }

          set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
        }
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      max = TYPE_MAX_VALUE (type);

      if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
        min = limit;
      else
        {
          /* If LIMIT_VR is of the form [N1, N2], we need to build the
             range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
             GT_EXPR.  */
          min = limit_vr->min;
        }

      /* If the minimum value forces us to be out of bounds, simply punt.
         It would be pointless to try and do anything more since this
         all should be optimized away above us.  */
      if ((cond_code == GT_EXPR
           && compare_values (min, max) == 0)
          || is_overflow_infinity (min))
        set_value_range_to_varying (vr_p);
      else
        {
          /* For GT_EXPR, we create the range [MIN + 1, MAX].  */
          if (cond_code == GT_EXPR)
            {
              if (TYPE_PRECISION (TREE_TYPE (min)) == 1
                  && !TYPE_UNSIGNED (TREE_TYPE (min)))
                min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
                                   build_int_cst (TREE_TYPE (min), -1));
              else
                min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
                                   build_int_cst (TREE_TYPE (min), 1));
              if (EXPR_P (min))
                TREE_NO_WARNING (min) = 1;
            }

          set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
        }
    }
  else
    gcc_unreachable ();

  /* Finally intersect the new range with what we already know about var.  */
  vrp_intersect_ranges (vr_p, get_value_range (var));
}


/* Extract range information from SSA name VAR and store it in VR.  If
   VAR has an interesting range, use it.  Otherwise, create the
   range [VAR, VAR] and return it.  This is useful in situations where
   we may have conditionals testing values of VARYING names.  For
   instance,

        x_3 = y_5;
        if (x_3 > y_5)
          ...

    Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
    always false.  */

static void
extract_range_from_ssa_name (value_range_t *vr, tree var)
{
  value_range_t *var_vr = get_value_range (var);

  if (var_vr->type != VR_VARYING)
    copy_value_range (vr, var_vr);
  else
    set_value_range (vr, VR_RANGE, var, var, NULL);

  add_equivalence (&vr->equiv, var);
}


/* Wrapper around int_const_binop.  If the operation overflows and we
   are not using wrapping arithmetic, then adjust the result to be
   -INF or +INF depending on CODE, VAL1 and VAL2.  This can return
   NULL_TREE if we need to use an overflow infinity representation but
   the type does not support it.  */

static tree
vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
{
  tree res;

  res = int_const_binop (code, val1, val2);

  /* If we are using unsigned arithmetic, operate symbolically
     on -INF and +INF as int_const_binop only handles signed overflow.
*/
  if (TYPE_UNSIGNED (TREE_TYPE (val1)))
    {
      int checkz = compare_values (res, val1);
      bool overflow = false;

      /* Ensure that res = val1 [+*] val2 >= val1
         or that res = val1 - val2 <= val1.  */
      if ((code == PLUS_EXPR
           && !(checkz == 1 || checkz == 0))
          || (code == MINUS_EXPR
              && !(checkz == 0 || checkz == -1)))
        {
          overflow = true;
        }
      /* Checking for multiplication overflow is done by dividing the
         output of the multiplication by the first input of the
         multiplication.  If the result of that division operation is
         not equal to the second input of the multiplication, then the
         multiplication overflowed.  */
      else if (code == MULT_EXPR && !integer_zerop (val1))
        {
          tree tmp = int_const_binop (TRUNC_DIV_EXPR,
                                      res,
                                      val1);
          int check = compare_values (tmp, val2);

          if (check != 0)
            overflow = true;
        }

      if (overflow)
        {
          res = copy_node (res);
          TREE_OVERFLOW (res) = 1;
        }

    }
  else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
    /* If the signed operation wraps then int_const_binop has done
       everything we want.  */
    ;
  /* Signed division of -1/0 overflows and by the time it gets here
     returns NULL_TREE.  */
  else if (!res)
    return NULL_TREE;
  else if ((TREE_OVERFLOW (res)
            && !TREE_OVERFLOW (val1)
            && !TREE_OVERFLOW (val2))
           || is_overflow_infinity (val1)
           || is_overflow_infinity (val2))
    {
      /* If the operation overflowed but neither VAL1 nor VAL2 are
         overflown, return -INF or +INF depending on the operation
         and the combination of signs of the operands.  */
      int sgn1 = tree_int_cst_sgn (val1);
      int sgn2 = tree_int_cst_sgn (val2);

      if (needs_overflow_infinity (TREE_TYPE (res))
          && !supports_overflow_infinity (TREE_TYPE (res)))
        return NULL_TREE;

      /* We have to punt on adding infinities of different signs,
         since we can't tell what the sign of the result should be.
         Likewise for subtracting infinities of the same sign.  */
      if (((code == PLUS_EXPR && sgn1 != sgn2)
           || (code == MINUS_EXPR && sgn1 == sgn2))
          && is_overflow_infinity (val1)
          && is_overflow_infinity (val2))
        return NULL_TREE;

      /* Don't try to handle division or shifting of infinities.  */
      if ((code == TRUNC_DIV_EXPR
           || code == FLOOR_DIV_EXPR
           || code == CEIL_DIV_EXPR
           || code == EXACT_DIV_EXPR
           || code == ROUND_DIV_EXPR
           || code == RSHIFT_EXPR)
          && (is_overflow_infinity (val1)
              || is_overflow_infinity (val2)))
        return NULL_TREE;

      /* Notice that we only need to handle the restricted set of
         operations handled by extract_range_from_binary_expr.
         Among them, only multiplication, addition and subtraction
         can yield overflow without overflown operands because we
         are working with integral types only... except in the
         case VAL1 = -INF and VAL2 = -1 which overflows to +INF
         for division too.  */

      /* For multiplication, the sign of the overflow is given
         by the comparison of the signs of the operands.  */
      if ((code == MULT_EXPR && sgn1 == sgn2)
          /* For addition, the operands must be of the same sign
             to yield an overflow.  Its sign is therefore that
             of one of the operands, for example the first.  For
             infinite operands X + -INF is negative, not positive.  */
          || (code == PLUS_EXPR
              && (sgn1 >= 0
                  ? !is_negative_overflow_infinity (val2)
                  : is_positive_overflow_infinity (val2)))
          /* For subtraction, non-infinite operands must be of
             different signs to yield an overflow.  Its sign is
             therefore that of the first operand or the opposite of
             that of the second operand.  A first operand of 0 counts
             as positive here, for the corner case 0 - (-INF), which
             overflows, but must yield +INF.  For infinite operands 0
             - INF is negative, not positive.  */
          || (code == MINUS_EXPR
              && (sgn1 >= 0
                  ? !is_positive_overflow_infinity (val2)
                  : is_negative_overflow_infinity (val2)))
          /* We only get in here with positive shift count, so the
             overflow direction is the same as the sign of val1.
             Actually rshift does not overflow at all, but we only
             handle the case of shifting overflowed -INF and +INF.  */
          || (code == RSHIFT_EXPR
              && sgn1 >= 0)
          /* For division, the only case is -INF / -1 = +INF.  */
          || code == TRUNC_DIV_EXPR
          || code == FLOOR_DIV_EXPR
          || code == CEIL_DIV_EXPR
          || code == EXACT_DIV_EXPR
          || code == ROUND_DIV_EXPR)
        return (needs_overflow_infinity (TREE_TYPE (res))
                ? positive_overflow_infinity (TREE_TYPE (res))
                : TYPE_MAX_VALUE (TREE_TYPE (res)));
      else
        return (needs_overflow_infinity (TREE_TYPE (res))
                ? negative_overflow_infinity (TREE_TYPE (res))
                : TYPE_MIN_VALUE (TREE_TYPE (res)));
    }

  return res;
}


/* For range VR compute two wide_int bitmasks.  In *MAY_BE_NONZERO
   bitmask if some bit is unset, it means for all numbers in the range
   the bit is 0, otherwise it might be 0 or 1.  In *MUST_BE_NONZERO
   bitmask if some bit is set, it means for all numbers in the range
   the bit is 1, otherwise it might be 0 or 1.  */

static bool
zero_nonzero_bits_from_vr (const tree expr_type,
                           value_range_t *vr,
                           wide_int *may_be_nonzero,
                           wide_int *must_be_nonzero)
{
  /* Start from the most conservative answer: every bit may be nonzero,
     no bit must be nonzero.  */
  *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
  *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
  if (!range_int_cst_p (vr)
      || is_overflow_infinity (vr->min)
      || is_overflow_infinity (vr->max))
    return false;

  if (range_int_cst_singleton_p (vr))
    {
      *may_be_nonzero = vr->min;
      *must_be_nonzero = *may_be_nonzero;
    }
  else if (tree_int_cst_sgn (vr->min) >= 0
           || tree_int_cst_sgn (vr->max) < 0)
    {
      wide_int xor_mask = wi::bit_xor (vr->min, vr->max);
      *may_be_nonzero = wi::bit_or (vr->min, vr->max);
      *must_be_nonzero = wi::bit_and (vr->min, vr->max);
      if (xor_mask != 0)
        {
          /* All bits below the highest differing bit are unknown.  */
          wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
                                    may_be_nonzero->get_precision ());
          *may_be_nonzero = *may_be_nonzero | mask;
          *must_be_nonzero = must_be_nonzero->and_not (mask);
        }
    }

  return true;
}

/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.
   */

static bool
ranges_from_anti_range (value_range_t *ar,
			value_range_t *vr0, value_range_t *vr1)
{
  tree type = TREE_TYPE (ar->min);

  vr0->type = VR_UNDEFINED;
  vr1->type = VR_UNDEFINED;

  /* Only constant, integral anti-ranges of a type with known extreme
     values can be decomposed.  */
  if (ar->type != VR_ANTI_RANGE
      || TREE_CODE (ar->min) != INTEGER_CST
      || TREE_CODE (ar->max) != INTEGER_CST
      || !vrp_val_min (type)
      || !vrp_val_max (type))
    return false;

  if (!vrp_val_is_min (ar->min))
    {
      /* Everything below the excluded interval.  */
      vr0->type = VR_RANGE;
      vr0->min = vrp_val_min (type);
      vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1));
    }
  if (!vrp_val_is_max (ar->max))
    {
      /* Everything above the excluded interval.  */
      vr1->type = VR_RANGE;
      vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1));
      vr1->max = vrp_val_max (type);
    }
  if (vr0->type == VR_UNDEFINED)
    {
      /* Only the upper piece exists; report it as the single range.  */
      *vr0 = *vr1;
      vr1->type = VR_UNDEFINED;
    }

  return vr0->type != VR_UNDEFINED;
}

/* Helper to extract a value-range *VR for a multiplicative operation
   *VR0 CODE *VR1.  */

static void
extract_range_from_multiplicative_op_1 (value_range_t *vr,
					enum tree_code code,
					value_range_t *vr0, value_range_t *vr1)
{
  enum value_range_type type;
  tree val[4];
  size_t i;
  tree min, max;
  bool sop;
  int cmp;

  /* Multiplications, divisions and shifts are a bit tricky to handle,
     depending on the mix of signs we have in the two ranges, we
     need to operate on different values to get the minimum and
     maximum values for the new range.  One approach is to figure
     out all the variations of range combinations and do the
     operations.

     However, this involves several calls to compare_values and it
     is pretty convoluted.  It's simpler to do the 4 operations
     (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and
     MAX0 OP MAX1) and then figure the smallest and largest values
     to form the new range.
   */
  gcc_assert (code == MULT_EXPR
	      || code == TRUNC_DIV_EXPR
	      || code == FLOOR_DIV_EXPR
	      || code == CEIL_DIV_EXPR
	      || code == EXACT_DIV_EXPR
	      || code == ROUND_DIV_EXPR
	      || code == RSHIFT_EXPR
	      || code == LSHIFT_EXPR);
  gcc_assert ((vr0->type == VR_RANGE
	       || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
	      && vr0->type == vr1->type);

  type = vr0->type;

  /* Compute the 4 cross operations.  Skip combinations whose operand
     endpoints coincide, since they cannot produce a new extreme; a
     NULL_TREE result from vrp_int_const_binop signals overflow (SOP).  */
  sop = false;
  val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
  if (val[0] == NULL_TREE)
    sop = true;

  if (vr1->max == vr1->min)
    val[1] = NULL_TREE;
  else
    {
      val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
      if (val[1] == NULL_TREE)
	sop = true;
    }

  if (vr0->max == vr0->min)
    val[2] = NULL_TREE;
  else
    {
      val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
      if (val[2] == NULL_TREE)
	sop = true;
    }

  if (vr0->min == vr0->max || vr1->min == vr1->max)
    val[3] = NULL_TREE;
  else
    {
      val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
      if (val[3] == NULL_TREE)
	sop = true;
    }

  if (sop)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Set MIN to the minimum of VAL[i] and MAX to the maximum
     of VAL[i].  */
  min = val[0];
  max = val[0];
  for (i = 1; i < 4; i++)
    {
      if (!is_gimple_min_invariant (min)
	  || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
	  || !is_gimple_min_invariant (max)
	  || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
	break;

      if (val[i])
	{
	  if (!is_gimple_min_invariant (val[i])
	      || (TREE_OVERFLOW (val[i])
		  && !is_overflow_infinity (val[i])))
	    {
	      /* If we found an overflowed value, set MIN and MAX
		 to it so that we set the resulting range to
		 VARYING.  */
	      min = max = val[i];
	      break;
	    }

	  if (compare_values (val[i], min) == -1)
	    min = val[i];

	  if (compare_values (val[i], max) == 1)
	    max = val[i];
	}
    }

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  But we do accept an overflow infinity
     representation.
   */
  if (min == NULL_TREE
      || !is_gimple_min_invariant (min)
      || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
      || max == NULL_TREE
      || !is_gimple_min_invariant (max)
      || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt if:
     1) [-INF, +INF]
     2) [-INF, +-INF(OVF)]
     3) [+-INF(OVF), +INF]
     4) [+-INF(OVF), +-INF(OVF)]
     We learn nothing when we have INF and INF(OVF) on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF] without
     overflow.  */
  if ((vrp_val_is_min (min) || is_overflow_infinity (min))
      && (vrp_val_is_max (max) || is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
	 then the operation caused one of them to wrap around, mark
	 the new range VARYING.  */
      set_value_range_to_varying (vr);
    }
  else
    set_value_range (vr, type, min, max, NULL);
}

/* Extract range information from a binary operation CODE based on
   the ranges of each of its operands, *VR0 and *VR1 with resulting
   type EXPR_TYPE.  The resulting range is stored in *VR.  */

static void
extract_range_from_binary_expr_1 (value_range_t *vr,
				  enum tree_code code, tree expr_type,
				  value_range_t *vr0_, value_range_t *vr1_)
{
  /* Work on local copies so anti-range canonicalization below can
     rewrite them without disturbing the callers' ranges.  */
  value_range_t vr0 = *vr0_, vr1 = *vr1_;
  value_range_t vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
  enum value_range_type type;
  tree min = NULL_TREE, max = NULL_TREE;
  int cmp;

  if (!INTEGRAL_TYPE_P (expr_type)
      && !POINTER_TYPE_P (expr_type))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.
   */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != LSHIFT_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If both ranges are UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  /* If one of the ranges is UNDEFINED drop it to VARYING for the following
     code.  At some point we may want to special-case operations that
     have UNDEFINED result for all or some value-ranges of the not UNDEFINED
     operand.  */
  else if (vr0.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr0);
  else if (vr1.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr1);

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  Recurse on each
     half and join the results with vrp_meet.  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range_t vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    &vrtem1, vr1_);
	  vrp_meet (vr, &vrres);
	}
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range_t vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    vr0_, &vrtem1);
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  /* The type of the resulting value range defaults to VR0.TYPE.  */
  type = vr0.type;

  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.
     As an exception, we allow BIT_AND_EXPR because we may be able to
     derive a useful range even if one of the operands is VR_VARYING or
     symbolic range.  Similarly for divisions.  TODO, we may be able to
     derive anti-ranges in some cases.  */
  if (code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && (vr0.type == VR_VARYING
	  || vr1.type == VR_VARYING
	  || vr0.type != vr1.type
	  || symbolic_range_p (&vr0)
	  || symbolic_range_p (&vr1)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Now evaluate the expression to determine the new range.
     Pointer-typed results only track null vs. non-null.  */
  if (POINTER_TYPE_P (expr_type))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
	{
	  /* For MIN/MAX expressions with pointers, we only care about
	     nullness, if both are non null, then the result is nonnull.
	     If both are null, then the result is null. Otherwise they
	     are varying.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == POINTER_PLUS_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == BIT_AND_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.
   */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) || range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else
	set_value_range_to_varying (vr);

      return;
    }

  /* For integer ranges, apply the operation to each end of the
     range and see what we end up with.  */
  if (code == PLUS_EXPR || code == MINUS_EXPR)
    {
      /* If we have a PLUS_EXPR with two VR_RANGE integer constant
         ranges compute the precise range for such case if possible.
	 The arithmetic below is done in unbounded wide_int precision,
	 with MIN_OVF/MAX_OVF recording the overflow direction of each
	 bound (-1 underflow, 0 none, 1 overflow).  */
      if (range_int_cst_p (&vr0)
	  && range_int_cst_p (&vr1))
	{
	  signop sgn = TYPE_SIGN (expr_type);
	  unsigned int prec = TYPE_PRECISION (expr_type);
	  wide_int type_min = wi::min_value (TYPE_PRECISION (expr_type), sgn);
	  wide_int type_max = wi::max_value (TYPE_PRECISION (expr_type), sgn);
	  wide_int wmin, wmax;
	  int min_ovf = 0;
	  int max_ovf = 0;

	  if (code == PLUS_EXPR)
	    {
	      wmin = wi::add (vr0.min, vr1.min);
	      wmax = wi::add (vr0.max, vr1.max);

	      /* Check for overflow.  */
	      if (wi::cmp (vr1.min, 0, sgn) != wi::cmp (wmin, vr0.min, sgn))
		min_ovf = wi::cmp (vr0.min, wmin, sgn);
	      if (wi::cmp (vr1.max, 0, sgn) != wi::cmp (wmax, vr0.max, sgn))
		max_ovf = wi::cmp (vr0.max, wmax, sgn);
	    }
	  else /* if (code == MINUS_EXPR) */
	    {
	      wmin = wi::sub (vr0.min, vr1.max);
	      wmax = wi::sub (vr0.max, vr1.min);

	      if (wi::cmp (0, vr1.max, sgn) != wi::cmp (wmin, vr0.min, sgn))
		min_ovf = wi::cmp (vr0.min, vr1.max, sgn);
	      if (wi::cmp (0, vr1.min, sgn) != wi::cmp (wmax, vr0.max, sgn))
		max_ovf = wi::cmp (vr0.max, vr1.min, sgn);
	    }

	  /* For non-wrapping arithmetic look at possibly smaller
	     value-ranges of the type.  */
	  if (!TYPE_OVERFLOW_WRAPS (expr_type))
	    {
	      if (vrp_val_min (expr_type))
		type_min = vrp_val_min (expr_type);
	      if (vrp_val_max (expr_type))
		type_max = vrp_val_max (expr_type);
	    }

	  /* Check for type overflow.
   */
	  if (min_ovf == 0)
	    {
	      if (wi::cmp (wmin, type_min, sgn) == -1)
		min_ovf = -1;
	      else if (wi::cmp (wmin, type_max, sgn) == 1)
		min_ovf = 1;
	    }
	  if (max_ovf == 0)
	    {
	      if (wi::cmp (wmax, type_min, sgn) == -1)
		max_ovf = -1;
	      else if (wi::cmp (wmax, type_max, sgn) == 1)
		max_ovf = 1;
	    }

	  if (TYPE_OVERFLOW_WRAPS (expr_type))
	    {
	      /* If overflow wraps, truncate the values and adjust the
		 range kind and bounds appropriately.  */
	      wide_int tmin = wide_int::from (wmin, prec, sgn);
	      wide_int tmax = wide_int::from (wmax, prec, sgn);
	      if (min_ovf == max_ovf)
		{
		  /* No overflow or both overflow or underflow.  The
		     range kind stays VR_RANGE.  */
		  min = wide_int_to_tree (expr_type, tmin);
		  max = wide_int_to_tree (expr_type, tmax);
		}
	      else if (min_ovf == -1 && max_ovf == 1)
		{
		  /* Underflow and overflow, drop to VR_VARYING.  */
		  set_value_range_to_varying (vr);
		  return;
		}
	      else
		{
		  /* Min underflow or max overflow.  The range kind
		     changes to VR_ANTI_RANGE: the wrapped interval is
		     the complement of [tmax+1, tmin-1].  */
		  bool covers = false;
		  wide_int tem = tmin;
		  gcc_assert ((min_ovf == -1 && max_ovf == 0)
			      || (max_ovf == 1 && min_ovf == 0));
		  type = VR_ANTI_RANGE;
		  tmin = tmax + 1;
		  if (wi::cmp (tmin, tmax, sgn) < 0)
		    covers = true;
		  tmax = tem - 1;
		  if (wi::cmp (tmax, tem, sgn) > 0)
		    covers = true;
		  /* If the anti-range would cover nothing, drop to varying.
		     Likewise if the anti-range bounds are outside of the
		     types values.  */
		  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
		    {
		      set_value_range_to_varying (vr);
		      return;
		    }
		  min = wide_int_to_tree (expr_type, tmin);
		  max = wide_int_to_tree (expr_type, tmax);
		}
	    }
	  else
	    {
	      /* If overflow does not wrap, saturate to the types
		 min/max value.
   */
	      if (min_ovf == -1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    min = negative_overflow_infinity (expr_type);
		  else
		    min = wide_int_to_tree (expr_type, type_min);
		}
	      else if (min_ovf == 1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    min = positive_overflow_infinity (expr_type);
		  else
		    min = wide_int_to_tree (expr_type, type_max);
		}
	      else
		min = wide_int_to_tree (expr_type, wmin);

	      if (max_ovf == -1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    max = negative_overflow_infinity (expr_type);
		  else
		    max = wide_int_to_tree (expr_type, type_min);
		}
	      else if (max_ovf == 1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    max = positive_overflow_infinity (expr_type);
		  else
		    max = wide_int_to_tree (expr_type, type_max);
		}
	      else
		max = wide_int_to_tree (expr_type, wmax);
	    }

	  /* Propagate overflow infinities already present in the
	     operands to the result bounds.  */
	  if (needs_overflow_infinity (expr_type)
	      && supports_overflow_infinity (expr_type))
	    {
	      if (is_negative_overflow_infinity (vr0.min)
		  || (code == PLUS_EXPR
		      ? is_negative_overflow_infinity (vr1.min)
		      : is_positive_overflow_infinity (vr1.max)))
		min = negative_overflow_infinity (expr_type);
	      if (is_positive_overflow_infinity (vr0.max)
		  || (code == PLUS_EXPR
		      ? is_positive_overflow_infinity (vr1.max)
		      : is_negative_overflow_infinity (vr1.min)))
		max = positive_overflow_infinity (expr_type);
	    }
	}
      else
	{
	  /* For other cases, for example if we have a PLUS_EXPR with
	     two VR_ANTI_RANGEs, drop to VR_VARYING.  It would take
	     more effort to compute a precise range for such a case.
	     ???
	     General even mixed range kind operations can be expressed
	     by for example transforming ~[3, 5] + [1, 2] to range-only
	     operations and a union primitive:
	       [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
	           [-INF+1, 4]     U    [6, +INF(OVF)]
	     though usually the union is not exactly representable with
	     a single range or anti-range as the above is
	     [-INF+1, +INF(OVF)] intersected with ~[5, 5]
	     but one could use a scheme similar to equivalences for this. */
	  set_value_range_to_varying (vr);
	  return;
	}
    }
  else if (code == MIN_EXPR
	   || code == MAX_EXPR)
    {
      if (vr0.type == VR_RANGE
	  && !symbolic_range_p (&vr0))
	{
	  type = VR_RANGE;
	  if (vr1.type == VR_RANGE
	      && !symbolic_range_p (&vr1))
	    {
	      /* For operations that make the resulting range directly
		 proportional to the original ranges, apply the operation
		 to the same end of each range.  */
	      min = vrp_int_const_binop (code, vr0.min, vr1.min);
	      max = vrp_int_const_binop (code, vr0.max, vr1.max);
	    }
	  else if (code == MIN_EXPR)
	    {
	      /* Only VR0 is usable: MIN is bounded above by vr0.max.  */
	      min = vrp_val_min (expr_type);
	      max = vr0.max;
	    }
	  else if (code == MAX_EXPR)
	    {
	      /* Only VR0 is usable: MAX is bounded below by vr0.min.  */
	      min = vr0.min;
	      max = vrp_val_max (expr_type);
	    }
	}
      else if (vr1.type == VR_RANGE
	       && !symbolic_range_p (&vr1))
	{
	  type = VR_RANGE;
	  if (code == MIN_EXPR)
	    {
	      min = vrp_val_min (expr_type);
	      max = vr1.max;
	    }
	  else if (code == MAX_EXPR)
	    {
	      min = vr1.min;
	      max = vrp_val_max (expr_type);
	    }
	}
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}
    }
  else if (code == MULT_EXPR)
    {
      /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
	 drop to varying.  This test requires 2*prec bits if both
	 operands are signed and 2*prec + 2 bits if either is not.
   */
      signop sign = TYPE_SIGN (expr_type);
      unsigned int prec = TYPE_PRECISION (expr_type);

      if (range_int_cst_p (&vr0)
	  && range_int_cst_p (&vr1)
	  && TYPE_OVERFLOW_WRAPS (expr_type))
	{
	  /* vrp_int is a double-width integer so the four cross
	     products below cannot overflow.  */
	  typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int;
	  typedef generic_wide_int
	     <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst;
	  vrp_int sizem1 = wi::mask <vrp_int> (prec, false);
	  vrp_int size = sizem1 + 1;

	  /* Extend the values using the sign of the result to PREC2.
	     From here on out, everything is just signed math no matter
	     what the input types were.  */
	  vrp_int min0 = vrp_int_cst (vr0.min);
	  vrp_int max0 = vrp_int_cst (vr0.max);
	  vrp_int min1 = vrp_int_cst (vr1.min);
	  vrp_int max1 = vrp_int_cst (vr1.max);
	  /* Canonicalize the intervals.  */
	  if (sign == UNSIGNED)
	    {
	      if (wi::ltu_p (size, min0 + max0))
		{
		  min0 -= size;
		  max0 -= size;
		}

	      if (wi::ltu_p (size, min1 + max1))
		{
		  min1 -= size;
		  max1 -= size;
		}
	    }

	  vrp_int prod0 = min0 * min1;
	  vrp_int prod1 = min0 * max1;
	  vrp_int prod2 = max0 * min1;
	  vrp_int prod3 = max0 * max1;

	  /* Sort the 4 products so that min is in prod0 and max is in
	     prod3.  */
	  /* min0min1 > max0max1 */
	  if (wi::gts_p (prod0, prod3))
	    {
	      vrp_int tmp = prod3;
	      prod3 = prod0;
	      prod0 = tmp;
	    }

	  /* min0max1 > max0min1 */
	  if (wi::gts_p (prod1, prod2))
	    {
	      vrp_int tmp = prod2;
	      prod2 = prod1;
	      prod1 = tmp;
	    }

	  if (wi::gts_p (prod0, prod1))
	    {
	      vrp_int tmp = prod1;
	      prod1 = prod0;
	      prod0 = tmp;
	    }

	  if (wi::gts_p (prod2, prod3))
	    {
	      vrp_int tmp = prod3;
	      prod3 = prod2;
	      prod2 = tmp;
	    }

	  /* diff = max - min.  */
	  prod2 = prod3 - prod0;
	  if (wi::geu_p (prod2, sizem1))
	    {
	      /* the range covers all values.  */
	      set_value_range_to_varying (vr);
	      return;
	    }

	  /* The following should handle the wrapping and selecting
	     VR_ANTI_RANGE for us.  */
	  min = wide_int_to_tree (expr_type, prod0);
	  max = wide_int_to_tree (expr_type, prod3);
	  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
	  return;
	}

      /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
	 drop to VR_VARYING.
	 It would take more effort to compute a
	 precise range for such a case.  For example, if we have
	 op0 == 65536 and op1 == 65536 with their ranges both being
	 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
	 we cannot claim that the product is in ~[0,0].  Note that we
	 are guaranteed to have vr0.type == vr1.type at this
	 point.  */
      if (vr0.type == VR_ANTI_RANGE
	  && !TYPE_OVERFLOW_UNDEFINED (expr_type))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
      return;
    }
  else if (code == RSHIFT_EXPR
	   || code == LSHIFT_EXPR)
    {
      /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
	 then drop to VR_VARYING.  Outside of this range we get undefined
	 behavior from the shift operation.  We cannot even trust
	 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
	 shifts, and the operation at the tree level may be widened.  */
      if (range_int_cst_p (&vr1)
	  && compare_tree_int (vr1.min, 0) >= 0
	  && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
	{
	  if (code == RSHIFT_EXPR)
	    {
	      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
	      return;
	    }
	  /* We can map lshifts by constants to MULT_EXPR handling:
	     X << C is X * (1 << C).  */
	  else if (code == LSHIFT_EXPR
		   && range_int_cst_singleton_p (&vr1))
	    {
	      bool saved_flag_wrapv;
	      value_range_t vr1p = VR_INITIALIZER;
	      vr1p.type = VR_RANGE;
	      vr1p.min = (wide_int_to_tree
			  (expr_type,
			   wi::set_bit_in_zero (tree_to_shwi (vr1.min),
						TYPE_PRECISION (expr_type))));
	      vr1p.max = vr1p.min;
	      /* We have to use a wrapping multiply though as signed overflow
		 on lshifts is implementation defined in C89.
   */
	      saved_flag_wrapv = flag_wrapv;
	      flag_wrapv = 1;
	      extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
						&vr0, &vr1p);
	      flag_wrapv = saved_flag_wrapv;
	      return;
	    }
	  else if (code == LSHIFT_EXPR
		   && range_int_cst_p (&vr0))
	    {
	      int prec = TYPE_PRECISION (expr_type);
	      int overflow_pos = prec;
	      int bound_shift;
	      wide_int low_bound, high_bound;
	      bool uns = TYPE_UNSIGNED (expr_type);
	      bool in_bounds = false;

	      if (!uns)
		overflow_pos -= 1;

	      bound_shift = overflow_pos - tree_to_shwi (vr1.max);
	      /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
		 overflow.  However, for that to happen, vr1.max needs to be
		 zero, which means vr1 is a singleton range of zero, which
		 means it should be handled by the previous LSHIFT_EXPR
		 if-clause.  */
	      wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
	      wide_int complement = ~(bound - 1);

	      if (uns)
		{
		  low_bound = bound;
		  high_bound = complement;
		  if (wi::ltu_p (vr0.max, low_bound))
		    {
		      /* [5, 6] << [1, 2] == [10, 24].  */
		      /* We're shifting out only zeroes, the value increases
			 monotonically.  */
		      in_bounds = true;
		    }
		  else if (wi::ltu_p (high_bound, vr0.min))
		    {
		      /* [0xffffff00, 0xffffffff] << [1, 2]
		         == [0xfffffc00, 0xfffffffe].  */
		      /* We're shifting out only ones, the value decreases
			 monotonically.  */
		      in_bounds = true;
		    }
		}
	      else
		{
		  /* [-1, 1] << [1, 2] == [-4, 4].  */
		  low_bound = complement;
		  high_bound = bound;
		  if (wi::lts_p (vr0.max, high_bound)
		      && wi::lts_p (low_bound, vr0.min))
		    {
		      /* For non-negative numbers, we're shifting out only
			 zeroes, the value increases monotonically.
			 For negative numbers, we're shifting out only
			 ones, the value decreases monotonically.
   */
		      in_bounds = true;
		    }
		}

	      if (in_bounds)
		{
		  extract_range_from_multiplicative_op_1 (vr, code,
							  &vr0, &vr1);
		  return;
		}
	    }
	}
      set_value_range_to_varying (vr);
      return;
    }
  else if (code == TRUNC_DIV_EXPR
	   || code == FLOOR_DIV_EXPR
	   || code == CEIL_DIV_EXPR
	   || code == EXACT_DIV_EXPR
	   || code == ROUND_DIV_EXPR)
    {
      if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
	{
	  /* For division, if op1 has VR_RANGE but op0 does not, something
	     can be deduced just from that range.  Say [min, max] / [4, max]
	     gives [min / 4, max / 4] range.  */
	  if (vr1.type == VR_RANGE
	      && !symbolic_range_p (&vr1)
	      && range_includes_zero_p (vr1.min, vr1.max) == 0)
	    {
	      /* Widen op0 to the whole type so the generic code below
		 can still derive a bound from the divisor.  */
	      vr0.type = type = VR_RANGE;
	      vr0.min = vrp_val_min (expr_type);
	      vr0.max = vrp_val_max (expr_type);
	    }
	  else
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }
	}

      /* For divisions, if flag_non_call_exceptions is true, we must
	 not eliminate a division by zero.  */
      if (cfun->can_throw_non_call_exceptions
	  && (vr1.type != VR_RANGE
	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* For divisions, if op0 is VR_RANGE, we can deduce a range
	 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
	 include 0.  */
      if (vr0.type == VR_RANGE
	  && (vr1.type != VR_RANGE
	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
	{
	  tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
	  int cmp;

	  min = NULL_TREE;
	  max = NULL_TREE;
	  if (TYPE_UNSIGNED (expr_type)
	      || value_range_nonnegative_p (&vr1))
	    {
	      /* For unsigned division or when divisor is known
		 to be non-negative, the range has to cover
		 all numbers from 0 to max for positive max
		 and all numbers from min to 0 for negative min.  */
	      cmp = compare_values (vr0.max, zero);
	      if (cmp == -1)
		max = zero;
	      else if (cmp == 0 || cmp == 1)
		max = vr0.max;
	      else
		type = VR_VARYING;
	      cmp = compare_values (vr0.min, zero);
	      if (cmp == 1)
		min = zero;
	      else if (cmp == 0 || cmp == -1)
		min = vr0.min;
	      else
		type = VR_VARYING;
	    }
	  else
	    {
	      /* Otherwise the range is -max .. max or min ..
		 -min depending on which bound is bigger in absolute value,
		 as the division can change the sign.  */
	      abs_extent_range (vr, vr0.min, vr0.max);
	      return;
	    }
	  if (type == VR_VARYING)
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }
	}
      else
	{
	  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
	  return;
	}
    }
  else if (code == TRUNC_MOD_EXPR)
    {
      /* Punt on divisors that may be zero or equal to the type minimum
	 (|TYPE_MIN| is not representable).  */
      if (vr1.type != VR_RANGE
	  || range_includes_zero_p (vr1.min, vr1.max) != 0
	  || vrp_val_is_min (vr1.min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      type = VR_RANGE;
      /* Compute MAX <|vr1.min|, |vr1.max|> - 1.  */
      max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min);
      if (tree_int_cst_lt (max, vr1.max))
	max = vr1.max;
      max = int_const_binop (MINUS_EXPR, max,
			     build_int_cst (TREE_TYPE (max), 1));
      /* If the dividend is non-negative the modulus will be
	 non-negative as well.  */
      if (TYPE_UNSIGNED (expr_type)
	  || value_range_nonnegative_p (&vr0))
	min = build_int_cst (TREE_TYPE (max), 0);
      else
	min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max);
    }
  else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
    {
      bool int_cst_range0, int_cst_range1;
      wide_int may_be_nonzero0, may_be_nonzero1;
      wide_int must_be_nonzero0, must_be_nonzero1;

      int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
						  &may_be_nonzero0,
						  &must_be_nonzero0);
      int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
						  &may_be_nonzero1,
						  &must_be_nonzero1);

      type = VR_RANGE;
      if (code == BIT_AND_EXPR)
	{
	  min = wide_int_to_tree (expr_type,
				  must_be_nonzero0 & must_be_nonzero1);
	  wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
	  /* If both input ranges contain only negative values we can
	     truncate the result range maximum to the minimum of the
	     input range maxima.
   */
	  if (int_cst_range0 && int_cst_range1
	      && tree_int_cst_sgn (vr0.max) < 0
	      && tree_int_cst_sgn (vr1.max) < 0)
	    {
	      wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
	      wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
	    }
	  /* If either input range contains only non-negative values
	     we can truncate the result range maximum to the respective
	     maximum of the input range.  */
	  if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
	    wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
	  if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
	    wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
	  max = wide_int_to_tree (expr_type, wmax);
	}
      else if (code == BIT_IOR_EXPR)
	{
	  /* IOR: bits set in either operand may be set; bits set in
	     both must be set.  */
	  max = wide_int_to_tree (expr_type,
				  may_be_nonzero0 | may_be_nonzero1);
	  wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
	  /* If the input ranges contain only positive values we can
	     truncate the minimum of the result range to the maximum
	     of the input range minima.  */
	  if (int_cst_range0 && int_cst_range1
	      && tree_int_cst_sgn (vr0.min) >= 0
	      && tree_int_cst_sgn (vr1.min) >= 0)
	    {
	      wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
	      wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
	    }
	  /* If either input range contains only negative values
	     we can truncate the minimum of the result range to the
	     respective minimum range.
   */
	  if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
	    wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
	  if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
	    wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
	  min = wide_int_to_tree (expr_type, wmin);
	}
      else if (code == BIT_XOR_EXPR)
	{
	  /* A result bit is known zero if it is known equal in both
	     operands, known one if it is known set in exactly one.  */
	  wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
				       | ~(may_be_nonzero0 | may_be_nonzero1));
	  wide_int result_one_bits
	    = (must_be_nonzero0.and_not (may_be_nonzero1)
	       | must_be_nonzero1.and_not (may_be_nonzero0));
	  max = wide_int_to_tree (expr_type, ~result_zero_bits);
	  min = wide_int_to_tree (expr_type, result_one_bits);
	  /* If the range has all positive or all negative values the
	     result is better than VARYING.  */
	  if (tree_int_cst_sgn (min) < 0
	      || tree_int_cst_sgn (max) >= 0)
	    ;
	  else
	    max = min = NULL_TREE;
	}
    }
  else
    gcc_unreachable ();

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  But we do accept an overflow infinity
     representation.  */
  if (min == NULL_TREE
      || !is_gimple_min_invariant (min)
      || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
      || max == NULL_TREE
      || !is_gimple_min_invariant (max)
      || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt if:
     1) [-INF, +INF]
     2) [-INF, +-INF(OVF)]
     3) [+-INF(OVF), +INF]
     4) [+-INF(OVF), +-INF(OVF)]
     We learn nothing when we have INF and INF(OVF) on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF] without
     overflow.  */
  if ((vrp_val_is_min (min) || is_overflow_infinity (min))
      && (vrp_val_is_max (max) || is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
	 then the operation caused one of them to wrap around, mark
	 the new range VARYING.
   */
      set_value_range_to_varying (vr);
    }
  else
    set_value_range (vr, type, min, max, NULL);
}

/* Extract range information from a binary expression OP0 CODE OP1 based on
   the ranges of each of its operands with resulting type EXPR_TYPE.
   The resulting range is stored in *VR.  */

static void
extract_range_from_binary_expr (value_range_t *vr,
				enum tree_code code,
				tree expr_type, tree op0, tree op1)
{
  value_range_t vr0 = VR_INITIALIZER;
  value_range_t vr1 = VR_INITIALIZER;

  /* Get value ranges for each operand.  For constant operands, create
     a new value range with the operand to simplify processing.  */
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *(get_value_range (op0));
  else if (is_gimple_min_invariant (op0))
    set_value_range_to_value (&vr0, op0, NULL);
  else
    set_value_range_to_varying (&vr0);

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *(get_value_range (op1));
  else if (is_gimple_min_invariant (op1))
    set_value_range_to_value (&vr1, op1, NULL);
  else
    set_value_range_to_varying (&vr1);

  extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
}

/* Extract range information from a unary operation CODE based on
   the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
   The resulting range is stored in *VR.  */

static void
extract_range_from_unary_expr_1 (value_range_t *vr,
				 enum tree_code code, tree type,
				 value_range_t *vr0_, tree op0_type)
{
  value_range_t vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;

  /* VRP only operates on integral and pointer types.  */
  if (!(INTEGRAL_TYPE_P (op0_type)
	|| POINTER_TYPE_P (op0_type))
      || !(INTEGRAL_TYPE_P (type)
	   || POINTER_TYPE_P (type)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If VR0 is UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }

  /* Handle operations that we express in terms of others.  */
  if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
    {
      /* PAREN_EXPR and OBJ_TYPE_REF are simple copies.
   */
      copy_value_range (vr, &vr0);
      return;
    }
  else if (code == NEGATE_EXPR)
    {
      /* -X is simply 0 - X, so re-use existing code that also handles
         anti-ranges fine.  */
      value_range_t zero = VR_INITIALIZER;
      set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
      return;
    }
  else if (code == BIT_NOT_EXPR)
    {
      /* ~X is simply -1 - X, so re-use existing code that also handles
         anti-ranges fine.  */
      value_range_t minusone = VR_INITIALIZER;
      set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
					type, &minusone, &vr0);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express op ~[]  as (op []') U (op []'').  Recurse on each half
     and join the results with vrp_meet.  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range_t vrres = VR_INITIALIZER;
	  extract_range_from_unary_expr_1 (&vrres, code, type,
					   &vrtem1, op0_type);
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  if (CONVERT_EXPR_CODE_P (code))
    {
      tree inner_type = op0_type;
      tree outer_type = type;

      /* If the expression evaluates to a pointer, we are only interested in
	 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).  */
      if (POINTER_TYPE_P (type))
	{
	  if (range_is_nonnull (&vr0))
	    set_value_range_to_nonnull (vr, type);
	  else if (range_is_null (&vr0))
	    set_value_range_to_null (vr, type);
	  else
	    set_value_range_to_varying (vr);
	  return;
	}

      /* If VR0 is varying and we increase the type precision, assume
	 a full range for the following transformation.
   */
      if (vr0.type == VR_VARYING
	  && INTEGRAL_TYPE_P (inner_type)
	  && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
	{
	  vr0.type = VR_RANGE;
	  vr0.min = TYPE_MIN_VALUE (inner_type);
	  vr0.max = TYPE_MAX_VALUE (inner_type);
	}

      /* If VR0 is a constant range or anti-range and the conversion is
	 not truncating we can convert the min and max values and
	 canonicalize the resulting range.  Otherwise we can do the
	 conversion if the size of the range is less than what the
	 precision of the target type can represent and the range is
	 not an anti-range.  */
      if ((vr0.type == VR_RANGE
	   || vr0.type == VR_ANTI_RANGE)
	  && TREE_CODE (vr0.min) == INTEGER_CST
	  && TREE_CODE (vr0.max) == INTEGER_CST
	  && (!is_overflow_infinity (vr0.min)
	      || (vr0.type == VR_RANGE
		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
		  && needs_overflow_infinity (outer_type)
		  && supports_overflow_infinity (outer_type)))
	  && (!is_overflow_infinity (vr0.max)
	      || (vr0.type == VR_RANGE
		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
		  && needs_overflow_infinity (outer_type)
		  && supports_overflow_infinity (outer_type)))
	  && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
	      || (vr0.type == VR_RANGE
		  /* Truncating conversion is OK when the range's extent
		     still fits in the narrower type.  */
		  && integer_zerop (int_const_binop (RSHIFT_EXPR,
		       int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
			 size_int (TYPE_PRECISION (outer_type)))))))
	{
	  tree new_min, new_max;
	  if (is_overflow_infinity (vr0.min))
	    new_min = negative_overflow_infinity (outer_type);
	  else
	    new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
				      0, false);
	  if (is_overflow_infinity (vr0.max))
	    new_max = positive_overflow_infinity (outer_type);
	  else
	    new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
				      0, false);
	  set_and_canonicalize_value_range (vr, vr0.type,
					    new_min, new_max, NULL);
	  return;
	}

      set_value_range_to_varying (vr);
      return;
    }
  else if (code == ABS_EXPR)
    {
      tree min, max;
      int cmp;

      /* Pass through vr0 in the easy cases.
*/ if (TYPE_UNSIGNED (type) || value_range_nonnegative_p (&vr0)) { copy_value_range (vr, &vr0); return; } /* For the remaining varying or symbolic ranges we can't do anything useful. */ if (vr0.type == VR_VARYING || symbolic_range_p (&vr0)) { set_value_range_to_varying (vr); return; } /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a useful range. */ if (!TYPE_OVERFLOW_UNDEFINED (type) && ((vr0.type == VR_RANGE && vrp_val_is_min (vr0.min)) || (vr0.type == VR_ANTI_RANGE && !vrp_val_is_min (vr0.min)))) { set_value_range_to_varying (vr); return; } /* ABS_EXPR may flip the range around, if the original range included negative values. */ if (is_overflow_infinity (vr0.min)) min = positive_overflow_infinity (type); else if (!vrp_val_is_min (vr0.min)) min = fold_unary_to_constant (code, type, vr0.min); else if (!needs_overflow_infinity (type)) min = TYPE_MAX_VALUE (type); else if (supports_overflow_infinity (type)) min = positive_overflow_infinity (type); else { set_value_range_to_varying (vr); return; } if (is_overflow_infinity (vr0.max)) max = positive_overflow_infinity (type); else if (!vrp_val_is_min (vr0.max)) max = fold_unary_to_constant (code, type, vr0.max); else if (!needs_overflow_infinity (type)) max = TYPE_MAX_VALUE (type); else if (supports_overflow_infinity (type) /* We shouldn't generate [+INF, +INF] as set_value_range doesn't like this and ICEs. */ && !is_positive_overflow_infinity (min)) max = positive_overflow_infinity (type); else { set_value_range_to_varying (vr); return; } cmp = compare_values (min, max); /* If a VR_ANTI_RANGEs contains zero, then we have ~[-INF, min(MIN, MAX)]. */ if (vr0.type == VR_ANTI_RANGE) { if (range_includes_zero_p (vr0.min, vr0.max) == 1) { /* Take the lower of the two values. 
*/ if (cmp != 1) max = min; /* Create ~[-INF, min (abs(MIN), abs(MAX))] or ~[-INF + 1, min (abs(MIN), abs(MAX))] when flag_wrapv is set and the original anti-range doesn't include TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */ if (TYPE_OVERFLOW_WRAPS (type)) { tree type_min_value = TYPE_MIN_VALUE (type); min = (vr0.min != type_min_value ? int_const_binop (PLUS_EXPR, type_min_value, build_int_cst (TREE_TYPE (type_min_value), 1)) : type_min_value); } else { if (overflow_infinity_range_p (&vr0)) min = negative_overflow_infinity (type); else min = TYPE_MIN_VALUE (type); } } else { /* All else has failed, so create the range [0, INF], even for flag_wrapv since TYPE_MIN_VALUE is in the original anti-range. */ vr0.type = VR_RANGE; min = build_int_cst (type, 0); if (needs_overflow_infinity (type)) { if (supports_overflow_infinity (type)) max = positive_overflow_infinity (type); else { set_value_range_to_varying (vr); return; } } else max = TYPE_MAX_VALUE (type); } } /* If the range contains zero then we know that the minimum value in the range will be zero. */ else if (range_includes_zero_p (vr0.min, vr0.max) == 1) { if (cmp == 1) max = min; min = build_int_cst (type, 0); } else { /* If the range was reversed, swap MIN and MAX. */ if (cmp == 1) { tree t = min; min = max; max = t; } } cmp = compare_values (min, max); if (cmp == -2 || cmp == 1) { /* If the new range has its limits swapped around (MIN > MAX), then the operation caused one of them to wrap around, mark the new range VARYING. */ set_value_range_to_varying (vr); } else set_value_range (vr, vr0.type, min, max, NULL); return; } /* For unhandled operations fall back to varying. */ set_value_range_to_varying (vr); return; } /* Extract range information from a unary expression CODE OP0 based on the range of its operand with resulting type TYPE. The resulting range is stored in *VR. 
*/ static void extract_range_from_unary_expr (value_range_t *vr, enum tree_code code, tree type, tree op0) { value_range_t vr0 = VR_INITIALIZER; /* Get value ranges for the operand. For constant operands, create a new value range with the operand to simplify processing. */ if (TREE_CODE (op0) == SSA_NAME) vr0 = *(get_value_range (op0)); else if (is_gimple_min_invariant (op0)) set_value_range_to_value (&vr0, op0, NULL); else set_value_range_to_varying (&vr0); extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0)); } /* Extract range information from a conditional expression STMT based on the ranges of each of its operands and the expression code. */ static void extract_range_from_cond_expr (value_range_t *vr, gimple stmt) { tree op0, op1; value_range_t vr0 = VR_INITIALIZER; value_range_t vr1 = VR_INITIALIZER; /* Get value ranges for each operand. For constant operands, create a new value range with the operand to simplify processing. */ op0 = gimple_assign_rhs2 (stmt); if (TREE_CODE (op0) == SSA_NAME) vr0 = *(get_value_range (op0)); else if (is_gimple_min_invariant (op0)) set_value_range_to_value (&vr0, op0, NULL); else set_value_range_to_varying (&vr0); op1 = gimple_assign_rhs3 (stmt); if (TREE_CODE (op1) == SSA_NAME) vr1 = *(get_value_range (op1)); else if (is_gimple_min_invariant (op1)) set_value_range_to_value (&vr1, op1, NULL); else set_value_range_to_varying (&vr1); /* The resulting value range is the union of the operand ranges */ copy_value_range (vr, &vr0); vrp_meet (vr, &vr1); } /* Extract range information from a comparison expression EXPR based on the range of its operand and the expression code. 
*/

static void
extract_range_from_comparison (value_range_t *vr, enum tree_code code,
			       tree type, tree op0, tree op1)
{
  bool sop = false;
  tree val;

  /* Let the conditional evaluator try to fold OP0 CODE OP1 to a
     constant using the recorded ranges; SOP is set when the answer
     relies on signed-overflow-is-undefined reasoning.  */
  val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
						 NULL);

  /* A disadvantage of using a special infinity as an overflow
     representation is that we lose the ability to record overflow
     when we don't have an infinity.  So we have to ignore a result
     which relies on overflow.  */

  if (val && !is_overflow_infinity (val) && !sop)
    {
      /* Since this expression was found on the RHS of an assignment,
	 its type may be different from _Bool.  Convert VAL to EXPR's
	 type.  */
      val = fold_convert (type, val);
      if (is_gimple_min_invariant (val))
	set_value_range_to_value (vr, val, vr->equiv);
      else
	set_value_range (vr, VR_RANGE, val, val, vr->equiv);
    }
  else
    /* The result of a comparison is always true or false.  */
    set_value_range_to_truthvalue (vr, type);
}

/* Try to derive a nonnegative or nonzero range out of STMT relying
   primarily on generic routines in fold in conjunction with range data.
   Store the result in *VR.  Handles the bit-counting builtins
   (ffs/popcount/parity/clz/ctz/clrsb), __builtin_constant_p, and the
   UBSAN_CHECK_* internal functions; everything else falls through to
   the generic nonnegative/nonzero queries at the bottom.  */

static void
extract_range_basic (value_range_t *vr, gimple stmt)
{
  bool sop = false;
  tree type = gimple_expr_type (stmt);

  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
    {
      tree fndecl = gimple_call_fndecl (stmt), arg;
      /* MINI/MAXI accumulate the result range [mini, maxi] for the
	 bit-counting builtins; mini == -2 is used below as a "give up"
	 sentinel.  ZEROV receives the target's CLZ/CTZ value-at-zero.  */
      int mini, maxi, zerov = 0, prec;

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	case BUILT_IN_CONSTANT_P:
	  /* If the call is __builtin_constant_p and the argument is a
	     function parameter resolve it to false.  This avoids bogus
	     array bound warnings.
	     ??? We could do this as early as inlining is finished.  */
	  arg = gimple_call_arg (stmt, 0);
	  if (TREE_CODE (arg) == SSA_NAME
	      && SSA_NAME_IS_DEFAULT_DEF (arg)
	      && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL)
	    {
	      set_value_range_to_null (vr, type);
	      return;
	    }
	  break;
	  /* Both __builtin_ffs* and __builtin_popcount return
	     [0, prec].  */
	CASE_INT_FN (BUILT_IN_FFS):
	CASE_INT_FN (BUILT_IN_POPCOUNT):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec;
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range_t *vr0 = get_value_range (arg);
	      /* If arg is non-zero, then ffs or popcount
		 are non-zero.  */
	      if (((vr0->type == VR_RANGE
		    && range_includes_zero_p (vr0->min, vr0->max) == 0)
		   || (vr0->type == VR_ANTI_RANGE
		       && range_includes_zero_p (vr0->min, vr0->max) == 1))
		  && !is_overflow_infinity (vr0->min)
		  && !is_overflow_infinity (vr0->max))
		mini = 1;
	      /* If some high bits are known to be zero,
		 we can decrease the maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !operand_less_p (vr0->min,
				      build_zero_cst (TREE_TYPE (vr0->min)))
		  && !is_overflow_infinity (vr0->max))
		maxi = tree_floor_log2 (vr0->max) + 1;
	    }
	  goto bitop_builtin;
	  /* __builtin_parity* returns [0, 1].  */
	CASE_INT_FN (BUILT_IN_PARITY):
	  mini = 0;
	  maxi = 1;
	  goto bitop_builtin;
	  /* __builtin_c[lt]z* return [0, prec-1], except for
	     when the argument is 0, but that is undefined behavior.
	     On many targets where the CLZ RTL or optab value is defined
	     for 0 the value is prec, so include that in the range
	     by default.  */
	CASE_INT_FN (BUILT_IN_CLZ):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec;
	  if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
	      != CODE_FOR_nothing
	      && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
					    zerov)
	      /* Handle only the single common value.  */
	      && zerov != prec)
	    /* Magic value to give up, unless vr0 proves
	       arg is non-zero.  */
	    mini = -2;
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range_t *vr0 = get_value_range (arg);
	      /* From clz of VR_RANGE minimum we can compute
		 result maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->min) == INTEGER_CST
		  && !is_overflow_infinity (vr0->min))
		{
		  maxi = prec - 1 - tree_floor_log2 (vr0->min);
		  if (maxi != prec)
		    mini = 0;
		}
	      else if (vr0->type == VR_ANTI_RANGE
		       && integer_zerop (vr0->min)
		       && !is_overflow_infinity (vr0->min))
		{
		  maxi = prec - 1;
		  mini = 0;
		}
	      if (mini == -2)
		break;
	      /* From clz of VR_RANGE maximum we can compute
		 result minimum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !is_overflow_infinity (vr0->max))
		{
		  mini = prec - 1 - tree_floor_log2 (vr0->max);
		  if (mini == prec)
		    break;
		}
	    }
	  if (mini == -2)
	    break;
	  goto bitop_builtin;
	  /* __builtin_ctz* return [0, prec-1], except for
	     when the argument is 0, but that is undefined behavior.
	     If there is a ctz optab for this mode and
	     CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
	     otherwise just assume 0 won't be seen.  */
	CASE_INT_FN (BUILT_IN_CTZ):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec - 1;
	  if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
	      != CODE_FOR_nothing
	      && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
					    zerov))
	    {
	      /* Handle only the two common values.  */
	      if (zerov == -1)
		mini = -1;
	      else if (zerov == prec)
		maxi = prec;
	      else
		/* Magic value to give up, unless vr0 proves
		   arg is non-zero.  */
		mini = -2;
	    }
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range_t *vr0 = get_value_range (arg);
	      /* If arg is non-zero, then use [0, prec - 1].  */
	      if (((vr0->type == VR_RANGE
		    && integer_nonzerop (vr0->min))
		   || (vr0->type == VR_ANTI_RANGE
		       && integer_zerop (vr0->min)))
		  && !is_overflow_infinity (vr0->min))
		{
		  mini = 0;
		  maxi = prec - 1;
		}
	      /* If some high bits are known to be zero,
		 we can decrease the result maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !is_overflow_infinity (vr0->max))
		{
		  maxi = tree_floor_log2 (vr0->max);
		  /* For vr0 [0, 0] give up.  */
		  if (maxi == -1)
		    break;
		}
	    }
	  if (mini == -2)
	    break;
	  goto bitop_builtin;
	  /* __builtin_clrsb* returns [0, prec-1].  */
	CASE_INT_FN (BUILT_IN_CLRSB):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec - 1;
	  goto bitop_builtin;
	bitop_builtin:
	  /* Common exit for all the bit-counting cases above: record
	     the accumulated [mini, maxi] range.  */
	  set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
			   build_int_cst (type, maxi), NULL);
	  return;
	default:
	  break;
	}
    }
  else if (is_gimple_call (stmt)
	   && gimple_call_internal_p (stmt))
    {
      enum tree_code subcode = ERROR_MARK;
      switch (gimple_call_internal_fn (stmt))
	{
	case IFN_UBSAN_CHECK_ADD:
	  subcode = PLUS_EXPR;
	  break;
	case IFN_UBSAN_CHECK_SUB:
	  subcode = MINUS_EXPR;
	  break;
	case IFN_UBSAN_CHECK_MUL:
	  subcode = MULT_EXPR;
	  break;
	default:
	  break;
	}
      if (subcode != ERROR_MARK)
	{
	  /* flag_wrapv is temporarily overridden and restored below;
	     note this is a global toggle, not a local property.  */
	  bool saved_flag_wrapv = flag_wrapv;
	  /* Pretend the arithmetics is wrapping.  If there is
	     any overflow, we'll complain, but will actually do
	     wrapping operation.  */
	  flag_wrapv = 1;
	  extract_range_from_binary_expr (vr, subcode, type,
					  gimple_call_arg (stmt, 0),
					  gimple_call_arg (stmt, 1));
	  flag_wrapv = saved_flag_wrapv;

	  /* If for both arguments vrp_valueize returned non-NULL,
	     this should have been already folded and if not, it
	     wasn't folded because of overflow.  Avoid removing the
	     UBSAN_CHECK_* calls in that case.  */
	  if (vr->type == VR_RANGE
	      && (vr->min == vr->max
		  || operand_equal_p (vr->min, vr->max, 0)))
	    set_value_range_to_varying (vr);
	  return;
	}
    }
  /* Fallback: derive a coarse nonnegative or nonnull range from the
     generic fold-based queries.  */
  if (INTEGRAL_TYPE_P (type)
      && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
    set_value_range_to_nonnegative (vr, type,
				    sop || stmt_overflow_infinity (stmt));
  else if (vrp_stmt_computes_nonzero (stmt, &sop)
	   && !sop)
    set_value_range_to_nonnull (vr, type);
  else
    set_value_range_to_varying (vr);
}

/* Try to compute a useful range out of assignment STMT and store it
   in *VR.
*/

static void
extract_range_from_assignment (value_range_t *vr, gimple stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);

  /* Dispatch on the RHS code class to the matching extractor.  */
  if (code == ASSERT_EXPR)
    extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
  else if (code == SSA_NAME)
    extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
  else if (TREE_CODE_CLASS (code) == tcc_binary)
    extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
				    gimple_expr_type (stmt),
				    gimple_assign_rhs1 (stmt),
				    gimple_assign_rhs2 (stmt));
  else if (TREE_CODE_CLASS (code) == tcc_unary)
    extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
				   gimple_expr_type (stmt),
				   gimple_assign_rhs1 (stmt));
  else if (code == COND_EXPR)
    extract_range_from_cond_expr (vr, stmt);
  else if (TREE_CODE_CLASS (code) == tcc_comparison)
    extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
				   gimple_expr_type (stmt),
				   gimple_assign_rhs1 (stmt),
				   gimple_assign_rhs2 (stmt));
  else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
	   && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
    set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
  else
    set_value_range_to_varying (vr);

  /* If the specialized extractor learned nothing, fall back to the
     generic fold-based derivation.  */
  if (vr->type == VR_VARYING)
    extract_range_basic (vr, stmt);
}

/* Given a range VR, a LOOP and a variable VAR, determine whether it
   would be profitable to adjust VR using scalar evolution information
   for VAR.  If so, update VR with the new limits.  */

static void
adjust_range_with_scev (value_range_t *vr, struct loop *loop,
			gimple stmt, tree var)
{
  tree init, step, chrec, tmin, tmax, min, max, type, tem;
  enum ev_direction dir;

  /* TODO.  Don't adjust anti-ranges.  An anti-range may provide
     better opportunities than a regular range, but I'm not sure.  */
  if (vr->type == VR_ANTI_RANGE)
    return;

  chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));

  /* Like in PR19590, scev can return a constant function.  */
  if (is_gimple_min_invariant (chrec))
    {
      set_value_range_to_value (vr, chrec, vr->equiv);
      return;
    }

  if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
    return;

  /* Pull out the initial value and the per-iteration step, replacing
     each by its singleton range value when one is known.  */
  init = initial_condition_in_loop_num (chrec, loop->num);
  tem = op_with_constant_singleton_value_range (init);
  if (tem)
    init = tem;
  step = evolution_part_in_loop_num (chrec, loop->num);
  tem = op_with_constant_singleton_value_range (step);
  if (tem)
    step = tem;

  /* If STEP is symbolic, we can't know whether INIT will be the
     minimum or maximum value in the range.  Also, unless INIT is
     a simple expression, compare_values and possibly other functions
     in tree-vrp won't be able to handle it.  */
  if (step == NULL_TREE
      || !is_gimple_min_invariant (step)
      || !valid_value_p (init))
    return;

  dir = scev_direction (chrec);
  if (/* Do not adjust ranges if we do not know whether the iv increases
	 or decreases,  ... */
      dir == EV_DIR_UNKNOWN
      /* ... or if it may wrap.  */
      || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
				true))
    return;

  /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
     negative_overflow_infinity and positive_overflow_infinity,
     because we have concluded that the loop probably does not
     wrap.  */

  type = TREE_TYPE (var);
  if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
    tmin = lower_bound_in_type (type, type);
  else
    tmin = TYPE_MIN_VALUE (type);
  if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
    tmax = upper_bound_in_type (type, type);
  else
    tmax = TYPE_MAX_VALUE (type);

  /* Try to use estimated number of iterations for the loop to constrain the
     final value in the evolution.  */
  if (TREE_CODE (step) == INTEGER_CST
      && is_gimple_val (init)
      && (TREE_CODE (init) != SSA_NAME
	  || get_value_range (init)->type == VR_RANGE))
    {
      widest_int nit;

      /* We are only entering here for loop header PHI nodes, so using
	 the number of latch executions is the correct thing to use.  */
      if (max_loop_iterations (loop, &nit))
	{
	  value_range_t maxvr = VR_INITIALIZER;
	  signop sgn = TYPE_SIGN (TREE_TYPE (step));
	  bool overflow;

	  /* Final offset = STEP * number-of-iterations.  */
	  widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
				     &overflow);
	  /* If the multiplication overflowed we can't do a meaningful
	     adjustment.  Likewise if the result doesn't fit in the type
	     of the induction variable.  For a signed type we have to
	     check whether the result has the expected signedness which
	     is that of the step as number of iterations is unsigned.  */
	  if (!overflow
	      && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
	      && (sgn == UNSIGNED
		  || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
	    {
	      tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
	      extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
					      TREE_TYPE (init), init, tem);
	      /* Likewise if the addition did.  */
	      if (maxvr.type == VR_RANGE)
		{
		  tmin = maxvr.min;
		  tmax = maxvr.max;
		}
	    }
	}
    }

  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    {
      min = tmin;
      max = tmax;

      /* For VARYING or UNDEFINED ranges, just about anything we get
	 from scalar evolutions should be better.  */

      if (dir == EV_DIR_DECREASES)
	max = init;
      else
	min = init;
    }
  else if (vr->type == VR_RANGE)
    {
      min = vr->min;
      max = vr->max;

      if (dir == EV_DIR_DECREASES)
	{
	  /* INIT is the maximum value.  If INIT is lower than VR->MAX
	     but no smaller than VR->MIN, set VR->MAX to INIT.  */
	  if (compare_values (init, max) == -1)
	    max = init;

	  /* According to the loop information, the variable does not
	     overflow.  If we think it does, probably because of an
	     overflow due to arithmetic on a different INF value,
	     reset now.  */
	  if (is_negative_overflow_infinity (min)
	      || compare_values (min, tmin) == -1)
	    min = tmin;

	}
      else
	{
	  /* If INIT is bigger than VR->MIN, set VR->MIN to INIT.  */
	  if (compare_values (init, min) == 1)
	    min = init;

	  if (is_positive_overflow_infinity (max)
	      || compare_values (tmax, max) == -1)
	    max = tmax;
	}
    }
  else
    return;

  /* If we just created an invalid range with the minimum
     greater than the maximum, we fail conservatively.
     This should happen only in unreachable
     parts of code, or for invalid programs.  */
  if (compare_values (min, max) == 1
      || (is_negative_overflow_infinity (min)
	  && is_positive_overflow_infinity (max)))
    return;

  set_value_range (vr, VR_RANGE, min, max, vr->equiv);
}

/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:

   - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
     all the values in the ranges.

   - Return BOOLEAN_FALSE_NODE if the comparison always returns false.

   - Return NULL_TREE if it is not always possible to determine the
     value of the comparison.

   Also set *STRICT_OVERFLOW_P to indicate whether a range with an
   overflow infinity was used in the test.  */

static tree
compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
		bool *strict_overflow_p)
{
  /* VARYING or UNDEFINED ranges cannot be compared.  */
  if (vr0->type == VR_VARYING
      || vr0->type == VR_UNDEFINED
      || vr1->type == VR_VARYING
      || vr1->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
    {
      /* If both are anti-ranges, then we cannot compute any
	 comparison.  */
      if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
	return NULL_TREE;

      /* These comparisons are never statically computable.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* Equality can be computed only between a range and an
	 anti-range.  ~[VAL1, VAL2] == [VAL1, VAL2] is always false.  */
      if (vr0->type == VR_RANGE)
	{
	  /* To simplify processing, make VR0 the anti-range.  */
	  value_range_t *tmp = vr0;
	  vr0 = vr1;
	  vr1 = tmp;
	}

      gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);

      if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
	  && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
	return (comp == NE_EXPR) ?
	boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  if (!usable_range_p (vr0, strict_overflow_p)
      || !usable_range_p (vr1, strict_overflow_p))
    return NULL_TREE;

  /* Simplify processing.  If COMP is GT_EXPR or GE_EXPR, switch the
     operands around and change the comparison code.  */
  if (comp == GT_EXPR || comp == GE_EXPR)
    {
      value_range_t *tmp;
      comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
      tmp = vr0;
      vr0 = vr1;
      vr1 = tmp;
    }

  if (comp == EQ_EXPR)
    {
      /* Equality may only be computed if both ranges represent
	 exactly one value.  */
      if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
	{
	  /* NOTE(review): the comparison helpers here appear to use -2
	     as "values cannot be compared"; confirm against
	     compare_values' contract.  */
	  int cmp_min = compare_values_warnv (vr0->min, vr1->min,
					      strict_overflow_p);
	  int cmp_max = compare_values_warnv (vr0->max, vr1->max,
					      strict_overflow_p);
	  if (cmp_min == 0 && cmp_max == 0)
	    return boolean_true_node;
	  else if (cmp_min != -2 && cmp_max != -2)
	    return boolean_false_node;
	}
      /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1.  */
      else if (compare_values_warnv (vr0->min, vr1->max,
				     strict_overflow_p) == 1
	       || compare_values_warnv (vr1->min, vr0->max,
					strict_overflow_p) == 1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      int cmp1, cmp2;

      /* If VR0 is completely to the left or completely to the right
	 of VR1, they are always different.  Notice that we need to
	 make sure that both comparisons yield similar results to
	 avoid comparing values that cannot be compared at
	 compile-time.  */
      cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
	return boolean_true_node;

      /* If VR0 and VR1 represent a single value and are identical,
	 return false.  */
      else if (compare_values_warnv (vr0->min, vr0->max,
				     strict_overflow_p) == 0
	       && compare_values_warnv (vr1->min, vr1->max,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->min, vr1->min,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->max, vr1->max,
					strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      else
	return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR0 is to the left of VR1, return true.  */
      tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  if (overflow_infinity_range_p (vr0)
	      || overflow_infinity_range_p (vr1))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR0 is to the right of VR1, return false.  */
      tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr0)
	      || overflow_infinity_range_p (vr1))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* GT/GE were canonicalized to LT/LE above, so no other code can
     reach here.  */
  gcc_unreachable ();
}

/* Given a value range VR, a value VAL and a comparison code COMP, return
   BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
   values in VR.  Return BOOLEAN_FALSE_NODE if the comparison
   always returns false.  Return NULL_TREE if it is not always
   possible to determine the value of the comparison.  Also set
   *STRICT_OVERFLOW_P to indicate whether a range with an overflow
   infinity was used in the test.  */

static tree
compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
			  bool *strict_overflow_p)
{
  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr->type == VR_ANTI_RANGE)
    {
      /* For anti-ranges, the only predicates that we can compute at
	 compile time are equality and inequality.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2.  */
      if (value_inside_range (val, vr->min, vr->max) == 1)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  if (!usable_range_p (vr, strict_overflow_p))
    return NULL_TREE;

  if (comp == EQ_EXPR)
    {
      /* EQ_EXPR may only be computed if VR represents exactly
	 one value.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
	{
	  int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
	  if (cmp == 0)
	    return boolean_true_node;
	  else if (cmp == -1 || cmp == 1 || cmp == 2)
	    return boolean_false_node;
	}
      else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
	       || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      /* If VAL is not inside VR, then they are always different.  */
      if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
	  || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
	return boolean_true_node;

      /* If VR represents exactly one value equal to VAL, then return
	 false.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR is to the left of VAL, return true.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the right of VAL, return false.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }
  else if (comp == GT_EXPR || comp == GE_EXPR)
    {
      int tst;

      /* If VR is to the right of VAL, return true.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == GT_EXPR && tst == 1)
	  || (comp == GE_EXPR && (tst == 0 || tst == 1)))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the left of VAL, return false.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == GT_EXPR && (tst == -1 || tst == 0))
	  || (comp == GE_EXPR && tst == -1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  gcc_unreachable ();
}


/* Debugging dumps.  */

void dump_value_range (FILE *, value_range_t *);
void debug_value_range (value_range_t *);
void dump_all_value_ranges (FILE *);
void debug_all_value_ranges (void);
void dump_vr_equiv (FILE *, bitmap);
void debug_vr_equiv (bitmap);


/* Dump value range VR to FILE.  */

void
dump_value_range (FILE *file, value_range_t *vr)
{
  if (vr == NULL)
    fprintf (file, "[]");
  else if (vr->type == VR_UNDEFINED)
    fprintf (file, "UNDEFINED");
  else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
    {
      tree type = TREE_TYPE (vr->min);

      fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ?
	       "~" : "");

      /* Print the lower bound, using -INF / -INF(OVF) shorthands where
	 the value is the type minimum or an overflow infinity.  */
      if (is_negative_overflow_infinity (vr->min))
	fprintf (file, "-INF(OVF)");
      else if (INTEGRAL_TYPE_P (type)
	       && !TYPE_UNSIGNED (type)
	       && vrp_val_is_min (vr->min))
	fprintf (file, "-INF");
      else
	print_generic_expr (file, vr->min, 0);

      fprintf (file, ", ");

      if (is_positive_overflow_infinity (vr->max))
	fprintf (file, "+INF(OVF)");
      else if (INTEGRAL_TYPE_P (type)
	       && vrp_val_is_max (vr->max))
	fprintf (file, "+INF");
      else
	print_generic_expr (file, vr->max, 0);

      fprintf (file, "]");

      if (vr->equiv)
	{
	  bitmap_iterator bi;
	  unsigned i, c = 0;

	  fprintf (file, " EQUIVALENCES: { ");

	  EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
	    {
	      print_generic_expr (file, ssa_name (i), 0);
	      fprintf (file, " ");
	      c++;
	    }

	  fprintf (file, "} (%u elements)", c);
	}
    }
  else if (vr->type == VR_VARYING)
    fprintf (file, "VARYING");
  else
    fprintf (file, "INVALID RANGE");
}


/* Dump value range VR to stderr.  */

DEBUG_FUNCTION void
debug_value_range (value_range_t *vr)
{
  dump_value_range (stderr, vr);
  fprintf (stderr, "\n");
}


/* Dump value ranges of all SSA_NAMEs to FILE.  */

void
dump_all_value_ranges (FILE *file)
{
  size_t i;

  for (i = 0; i < num_vr_values; i++)
    {
      if (vr_value[i])
	{
	  print_generic_expr (file, ssa_name (i), 0);
	  fprintf (file, ": ");
	  dump_value_range (file, vr_value[i]);
	  fprintf (file, "\n");
	}
    }

  fprintf (file, "\n");
}

/* Dump all value ranges to stderr.  */

DEBUG_FUNCTION void
debug_all_value_ranges (void)
{
  dump_all_value_ranges (stderr);
}


/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
   create a new SSA name N and return the assertion assignment
   'N = ASSERT_EXPR <V, V OP W>'.  */

static gimple
build_assert_expr_for (tree cond, tree v)
{
  tree a;
  gimple assertion;

  gcc_assert (TREE_CODE (v) == SSA_NAME
	      && COMPARISON_CLASS_P (cond));

  a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
  assertion = gimple_build_assign (NULL_TREE, a);

  /* The new ASSERT_EXPR, creates a new SSA name that replaces the
     operand of the ASSERT_EXPR.  Create it so the new name and the old
     one are registered in the replacement table so that we can fix the
     SSA web after adding all the ASSERT_EXPRs.  */
  create_new_def_for (v, assertion, NULL);

  return assertion;
}


/* Return false if EXPR is a predicate expression involving floating
   point values.  */

static inline bool
fp_predicate (gimple stmt)
{
  GIMPLE_CHECK (stmt, GIMPLE_COND);

  /* Only the LHS type is inspected; a GIMPLE_COND compares operands of
     compatible types.  */
  return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
}

/* If the range of values taken by OP can be inferred after STMT executes,
   return the comparison code (COMP_CODE_P) and value (VAL_P) that
   describes the inferred range.  Return true if a range could be
   inferred.  */

static bool
infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p,
		   tree *val_p)
{
  *val_p = NULL_TREE;
  *comp_code_p = ERROR_MARK;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
    return false;

  /* Similarly, don't infer anything from statements that may throw
     exceptions.  ??? Relax this requirement?  */
  if (stmt_could_throw_p (stmt))
    return false;

  /* If STMT is the last statement of a basic block with no normal
     successors, there is no point inferring anything about any of its
     operands.  We would not be able to find a proper insertion point
     for the assertion, anyway.  */
  if (stmt_ends_bb_p (stmt))
    {
      edge_iterator ei;
      edge e;

      /* Look for at least one normal (non-abnormal) successor edge.  */
      FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
	if (!(e->flags & EDGE_ABNORMAL))
	  break;
      if (e == NULL)
	return false;
    }

  /* Currently the only inference made here is non-nullness of OP.  */
  if (infer_nonnull_range (stmt, op, true, true))
    {
      *val_p = build_int_cst (TREE_TYPE (op), 0);
      *comp_code_p = NE_EXPR;
      return true;
    }

  return false;
}


void dump_asserts_for (FILE *, tree);
void debug_asserts_for (tree);
void dump_all_asserts (FILE *);
void debug_all_asserts (void);

/* Dump all the registered assertions for NAME to FILE.
*/

void
dump_asserts_for (FILE *file, tree name)
{
  assert_locus_t loc;

  fprintf (file, "Assertions to be inserted for ");
  print_generic_expr (file, name, 0);
  fprintf (file, "\n");

  /* Walk the singly-linked list of registered assertion locations
     for NAME.  */
  loc = asserts_for[SSA_NAME_VERSION (name)];
  while (loc)
    {
      fprintf (file, "\t");
      print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
      fprintf (file, "\n\tBB #%d", loc->bb->index);
      if (loc->e)
	{
	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
		   loc->e->dest->index);
	  dump_edge_info (file, loc->e, dump_flags, 0);
	}
      fprintf (file, "\n\tPREDICATE: ");
      print_generic_expr (file, name, 0);
      fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
      print_generic_expr (file, loc->val, 0);
      fprintf (file, "\n\n");
      loc = loc->next;
    }

  fprintf (file, "\n");
}


/* Dump all the registered assertions for NAME to stderr.  */

DEBUG_FUNCTION void
debug_asserts_for (tree name)
{
  dump_asserts_for (stderr, name);
}


/* Dump all the registered assertions for all the names to FILE.  */

void
dump_all_asserts (FILE *file)
{
  unsigned i;
  bitmap_iterator bi;

  fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
  EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
    dump_asserts_for (file, ssa_name (i));
  fprintf (file, "\n");
}


/* Dump all the registered assertions for all the names to stderr.  */

DEBUG_FUNCTION void
debug_all_asserts (void)
{
  dump_all_asserts (stderr);
}


/* If NAME doesn't have an ASSERT_EXPR registered for asserting
   'EXPR COMP_CODE VAL' at a location that dominates block BB or
   E->DEST, then register this location as a possible insertion point
   for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.

   BB, E and SI provide the exact insertion point for the new
   ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
   on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
   BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
   must not be NULL.
*/

static void
register_new_assert_for (tree name, tree expr,
			 enum tree_code comp_code,
			 tree val,
			 basic_block bb,
			 edge e,
			 gimple_stmt_iterator si)
{
  assert_locus_t n, loc, last_loc;
  basic_block dest_bb;

  /* Exactly one of BB and E identifies the insertion point.  */
  gcc_checking_assert (bb == NULL || e == NULL);

  if (e == NULL)
    gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);

  /* Never build an assert comparing against an integer constant with
     TREE_OVERFLOW set.  This confuses our undefined overflow warning
     machinery.  */
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);

  /* The new assertion A will be inserted at BB or E.  We need to
     determine if the new location is dominated by a previously
     registered location for A.  If we are doing an edge insertion,
     assume that A will be inserted at E->DEST.  Note that this is not
     necessarily true.

     If E is a critical edge, it will be split.  But even if E is
     split, the new block will dominate the same set of blocks that
     E->DEST dominates.

     The reverse, however, is not true, blocks dominated by E->DEST
     will not be dominated by the new block created to split E.  So,
     if the insertion location is on a critical edge, we will not use
     the new location to move another assertion previously registered
     at a block dominated by E->DEST.  */
  dest_bb = (bb) ? bb : e->dest;

  /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
     VAL at a block dominating DEST_BB, then we don't need to insert a
     new one.  Similarly, if the same assertion already exists at a
     block dominated by DEST_BB and the new location is not on a
     critical edge, then update the existing location for the
     assertion (i.e., move the assertion up in the dominance tree).

     Note, this is implemented as a simple linked list because there
     should not be more than a handful of assertions registered per
     name.  If this becomes a performance problem, a table hashed by
     COMP_CODE and VAL could be implemented.  */
  loc = asserts_for[SSA_NAME_VERSION (name)];
  last_loc = loc;
  while (loc)
    {
      /* Match on the full triple (COMP_CODE, VAL, EXPR); pointer
	 equality is tried first as a cheap shortcut.  */
      if (loc->comp_code == comp_code
	  && (loc->val == val
	      || operand_equal_p (loc->val, val, 0))
	  && (loc->expr == expr
	      || operand_equal_p (loc->expr, expr, 0)))
	{
	  /* If E is not a critical edge and DEST_BB
	     dominates the existing location for the assertion, move
	     the assertion up in the dominance tree by updating its
	     location information.  */
	  if ((e == NULL || !EDGE_CRITICAL_P (e))
	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
	    {
	      loc->bb = dest_bb;
	      loc->e = e;
	      loc->si = si;
	      return;
	    }
	}

      /* Update the last node of the list and move to the next one.  */
      last_loc = loc;
      loc = loc->next;
    }

  /* If we didn't find an assertion already registered for
     NAME COMP_CODE VAL, add a new one at the end of the list of
     assertions associated with NAME.  */
  n = XNEW (struct assert_locus_d);
  n->bb = dest_bb;
  n->e = e;
  n->si = si;
  n->comp_code = comp_code;
  n->val = val;
  n->expr = expr;
  n->next = NULL;

  if (last_loc)
    last_loc->next = n;
  else
    asserts_for[SSA_NAME_VERSION (name)] = n;

  bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
}

/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
   Extract a suitable test code and value and store them into *CODE_P and
   *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.

   If no extraction was possible, return FALSE, otherwise return TRUE.

   If INVERT is true, then we invert the result stored into *CODE_P.  */

static bool
extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
					 tree cond_op0, tree cond_op1,
					 bool invert, enum tree_code *code_p,
					 tree *val_p)
{
  enum tree_code comp_code;
  tree val;

  /* Otherwise, we have a comparison of the form NAME COMP VAL
     or VAL COMP NAME.  */
  if (name == cond_op1)
    {
      /* If the predicate is of the form VAL COMP NAME, flip
	 COMP around because we need to register NAME as the
	 first operand in the predicate.  */
      comp_code = swap_tree_comparison (cond_code);
      val = cond_op0;
    }
  else
    {
      /* The comparison is of the form NAME COMP VAL, so the
	 comparison code remains unchanged.  */
      comp_code = cond_code;
      val = cond_op1;
    }

  /* Invert the comparison code as necessary.  */
  if (invert)
    comp_code = invert_tree_comparison (comp_code, 0);

  /* VRP does not handle float types.  */
  if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
    return false;

  /* Do not register always-false predicates.
     FIXME:  this works around a limitation in fold() when dealing with
     enumerations.  Given 'enum { N1, N2 } x;', fold will not
     fold 'if (x > N2)' to 'if (0)'.  */
  if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (val)))
    {
      tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
      tree max = TYPE_MAX_VALUE (TREE_TYPE (val));

      /* x > TYPE_MAX and x < TYPE_MIN can never hold.  */
      if (comp_code == GT_EXPR
	  && (!max
	      || compare_values (val, max) == 0))
	return false;

      if (comp_code == LT_EXPR
	  && (!min
	      || compare_values (val, min) == 0))
	return false;
    }
  *code_p = comp_code;
  *val_p = val;
  return true;
}

/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
   (otherwise return VAL).  VAL and MASK must be zero-extended for
   precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
   (to transform signed values into unsigned) and at the end xor
   SGNBIT back.  */

static wide_int
masked_increment (const wide_int &val_in, const wide_int &mask,
		  const wide_int &sgnbit, unsigned int prec)
{
  wide_int bit = wi::one (prec), res;
  unsigned int i;

  wide_int val = val_in ^ sgnbit;
  /* Try clearing each set bit of MASK from low to high: candidate RES
     is VAL rounded up to the next multiple of 2*BIT, restricted to
     MASK; the first candidate strictly above VAL wins.  */
  for (i = 0; i < prec; i++, bit += bit)
    {
      res = mask;
      if ((res & bit) == 0)
	continue;
      res = bit - 1;
      res = (val + bit).and_not (res);
      res &= mask;
      if (wi::gtu_p (res, val))
	return res ^ sgnbit;
    }
  /* No RES > VAL exists under MASK; return VAL unchanged.  */
  return val ^ sgnbit;
}

/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by
   BSI.  Invert the condition COND if INVERT is true.  Return true if an
   assertion for NAME could be registered.
*/

static bool
register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
			    enum tree_code cond_code,
			    tree cond_op0, tree cond_op1, bool invert)
{
  tree val;
  enum tree_code comp_code;
  /* RETVAL accumulates whether any assertion was registered.  */
  bool retval = false;

  if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
						cond_op0,
						cond_op1,
						invert, &comp_code, &val))
    return false;

  /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
     reachable from E.  */
  if (live_on_edge (e, name)
      && !has_single_use (name))
    {
      register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
      retval = true;
    }

  /* In the case of NAME <= CST and NAME being defined as
     NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
     and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
     This catches range and anti-range tests.  */
  if ((comp_code == LE_EXPR
       || comp_code == GT_EXPR)
      && TREE_CODE (val) == INTEGER_CST
      && TYPE_UNSIGNED (TREE_TYPE (val)))
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (name);
      tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;

      /* Extract CST2 from the (optional) addition.  */
      if (is_gimple_assign (def_stmt)
	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  cst2 = gimple_assign_rhs2 (def_stmt);
	  if (TREE_CODE (name2) == SSA_NAME
	      && TREE_CODE (cst2) == INTEGER_CST)
	    def_stmt = SSA_NAME_DEF_STMT (name2);
	}

      /* Extract NAME2 from the (optional) sign-changing cast.  */
      if (gimple_assign_cast_p (def_stmt))
	{
	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
	      && (TYPE_PRECISION (gimple_expr_type (def_stmt))
		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
	    name3 = gimple_assign_rhs1 (def_stmt);
	}

      /* If name3 is used later, create an ASSERT_EXPR for it.  */
      if (name3 != NULL_TREE
	  && TREE_CODE (name3) == SSA_NAME
	  && (cst2 == NULL_TREE
	      || TREE_CODE (cst2) == INTEGER_CST)
	  && INTEGRAL_TYPE_P (TREE_TYPE (name3))
	  && live_on_edge (e, name3)
	  && !has_single_use (name3))
	{
	  tree tmp;

	  /* Build an expression for the range test.  */
	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
	  if (cst2 != NULL_TREE)
	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);

	  if (dump_file)
	    {
	      fprintf (dump_file, "Adding assert for ");
	      print_generic_expr (dump_file, name3, 0);
	      fprintf (dump_file, " from ");
	      print_generic_expr (dump_file, tmp, 0);
	      fprintf (dump_file, "\n");
	    }

	  register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);

	  retval = true;
	}

      /* If name2 is used later, create an ASSERT_EXPR for it.  */
      if (name2 != NULL_TREE
	  && TREE_CODE (name2) == SSA_NAME
	  && TREE_CODE (cst2) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	  && live_on_edge (e, name2)
	  && !has_single_use (name2))
	{
	  tree tmp;

	  /* Build an expression for the range test.  */
	  tmp = name2;
	  if (TREE_TYPE (name) != TREE_TYPE (name2))
	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
	  if (cst2 != NULL_TREE)
	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);

	  if (dump_file)
	    {
	      fprintf (dump_file, "Adding assert for ");
	      print_generic_expr (dump_file, name2, 0);
	      fprintf (dump_file, " from ");
	      print_generic_expr (dump_file, tmp, 0);
	      fprintf (dump_file, "\n");
	    }

	  register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);

	  retval = true;
	}
    }

  /* In the case of post-in/decrement tests like if (i++) ... and uses
     of the in/decremented value on the edge the extra name we want to
     assert for is not on the def chain of the name compared.  Instead
     it is in the set of use stmts.  */
  if ((comp_code == NE_EXPR
       || comp_code == EQ_EXPR)
      && TREE_CODE (val) == INTEGER_CST)
    {
      imm_use_iterator ui;
      gimple use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
	{
	  /* Cut off to use-stmts that are in the predecessor.  */
	  if (gimple_bb (use_stmt) != e->src)
	    continue;

	  if (!is_gimple_assign (use_stmt))
	    continue;

	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
	  if (code != PLUS_EXPR
	      && code != MINUS_EXPR)
	    continue;

	  tree cst = gimple_assign_rhs2 (use_stmt);
	  if (TREE_CODE (cst) != INTEGER_CST)
	    continue;

	  tree name2 = gimple_assign_lhs (use_stmt);
	  if (live_on_edge (e, name2))
	    {
	      /* NAME2 = NAME +- CST, so NAME2 compares equal/unequal
		 to VAL +- CST on this edge.  */
	      cst = int_const_binop (code, val, cst);
	      register_new_assert_for (name2, name2, comp_code, cst,
				       NULL, e, bsi);
	      retval = true;
	    }
	}
    }

  if (TREE_CODE_CLASS (comp_code) == tcc_comparison
      && TREE_CODE (val) == INTEGER_CST)
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (name);
      tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
      tree val2 = NULL_TREE;
      unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
      wide_int mask = wi::zero (prec);
      unsigned int nprec = prec;
      enum tree_code rhs_code = ERROR_MARK;

      if (is_gimple_assign (def_stmt))
	rhs_code = gimple_assign_rhs_code (def_stmt);

      /* Add asserts for NAME cmp CST and NAME being defined
	 as NAME = (int) NAME2.  */
      if (!TYPE_UNSIGNED (TREE_TYPE (val))
	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
	  && gimple_assign_cast_p (def_stmt))
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  if (CONVERT_EXPR_CODE_P (rhs_code)
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && TYPE_UNSIGNED (TREE_TYPE (name2))
	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
		  || !tree_int_cst_equal (val,
					  TYPE_MIN_VALUE (TREE_TYPE (val))))
	      && live_on_edge (e, name2)
	      && !has_single_use (name2))
	    {
	      tree tmp, cst;
	      enum tree_code new_comp_code = comp_code;

	      cst = fold_convert (TREE_TYPE (name2),
				  TYPE_MIN_VALUE (TREE_TYPE (val)));
	      /* Build an expression for the range test.  */
	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
				 fold_convert (TREE_TYPE (name2), val));
	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
		{
		  /* Strict compares are turned into non-strict ones
		     against CST - 1 so the biased test stays exact.  */
		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
				     build_int_cst (TREE_TYPE (name2), 1));
		}

	      if (dump_file)
		{
		  fprintf (dump_file, "Adding assert for ");
		  print_generic_expr (dump_file, name2, 0);
		  fprintf (dump_file, " from ");
		  print_generic_expr (dump_file, tmp, 0);
		  fprintf (dump_file, "\n");
		}

	      register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
				       e, bsi);

	      retval = true;
	    }
	}

      /* Add asserts for NAME cmp CST and NAME being defined as
	 NAME = NAME2 >> CST2.

	 Extract CST2 from the right shift.  */
      if (rhs_code == RSHIFT_EXPR)
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  cst2 = gimple_assign_rhs2 (def_stmt);
	  if (TREE_CODE (name2) == SSA_NAME
	      && tree_fits_uhwi_p (cst2)
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
	      && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
	      && live_on_edge (e, name2)
	      && !has_single_use (name2))
	    {
	      mask = wi::mask (tree_to_uhwi (cst2), false, prec);
	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
	    }
	}
      /* Only proceed if shifting VAL back right recovers VAL exactly,
	 i.e. the left shift lost no bits.  */
      if (val2 != NULL_TREE
	  && TREE_CODE (val2) == INTEGER_CST
	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
					    TREE_TYPE (val),
					    val2, cst2),
			       val))
	{
	  enum tree_code new_comp_code = comp_code;
	  tree tmp, new_val;

	  tmp = name2;
	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
	    {
	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
		{
		  tree type = build_nonstandard_integer_type (prec, 1);
		  tmp = build1 (NOP_EXPR, type, name2);
		  val2 = fold_convert (type, val2);
		}
	      /* NAME2 - VAL2 <= MASK (resp. > MASK) encodes
		 (NAME2 >> CST2) == VAL (resp. !=).  */
	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
	      new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
	    }
	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
	    {
	      wide_int minval
		= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
	      new_val = val2;
	      if (minval == new_val)
		new_val = NULL_TREE;
	    }
	  else
	    {
	      wide_int maxval
		= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
	      mask |= val2;
	      if (mask == maxval)
		new_val = NULL_TREE;
	      else
		new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
	    }

	  if (new_val)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Adding assert for ");
		  print_generic_expr (dump_file, name2, 0);
		  fprintf (dump_file, " from ");
		  print_generic_expr (dump_file, tmp, 0);
		  fprintf (dump_file, "\n");
		}

	      register_new_assert_for (name2, tmp, new_comp_code, new_val,
				       NULL, e, bsi);
	      retval = true;
	    }
	}

      /* Add asserts for NAME cmp CST and NAME being defined as
	 NAME = NAME2 & CST2.

	 Extract CST2 from the and.

	 Also handle
	 NAME = (unsigned) NAME2;
	 casts where NAME's type is unsigned and has smaller precision
	 than NAME2's type as if it was NAME = NAME2 & MASK.  */
      names[0] = NULL_TREE;
      names[1] = NULL_TREE;
      cst2 = NULL_TREE;
      if (rhs_code == BIT_AND_EXPR
	  || (CONVERT_EXPR_CODE_P (rhs_code)
	      && TREE_CODE (TREE_TYPE (val)) == INTEGER_TYPE
	      && TYPE_UNSIGNED (TREE_TYPE (val))
	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
		 > prec
	      && !retval))
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  if (rhs_code == BIT_AND_EXPR)
	    cst2 = gimple_assign_rhs2 (def_stmt);
	  else
	    {
	      /* A narrowing unsigned cast behaves like masking with
		 the narrow type's maximum value.  */
	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
	    }
	  if (TREE_CODE (name2) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && TREE_CODE (cst2) == INTEGER_CST
	      && !integer_zerop (cst2)
	      && (nprec > 1
		  || TYPE_UNSIGNED (TREE_TYPE (val))))
	    {
	      gimple def_stmt2 = SSA_NAME_DEF_STMT (name2);
	      if (gimple_assign_cast_p (def_stmt2))
		{
		  names[1] = gimple_assign_rhs1 (def_stmt2);
		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
		      || (TYPE_PRECISION (TREE_TYPE (name2))
			  != TYPE_PRECISION (TREE_TYPE (names[1])))
		      || !live_on_edge (e, names[1])
		      || has_single_use (names[1]))
		    names[1] = NULL_TREE;
		}
	      if (live_on_edge (e, name2)
		  && !has_single_use (name2))
		names[0] = name2;
	    }
	}
      if (names[0] || names[1])
	{
	  wide_int minv, maxv, valv, cst2v;
	  wide_int tem, sgnbit;
	  bool valid_p = false, valn, cst2n;
	  enum tree_code ccode = comp_code;

	  valv = wide_int::from (val, nprec, UNSIGNED);
	  cst2v = wide_int::from (cst2, nprec, UNSIGNED);
	  valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
	  cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
	  /* If CST2 doesn't have most significant bit set,
	     but VAL is negative, we have comparison like
	     if ((x & 0x123) > -4) (always true).  Just give up.  */
	  if (!cst2n && valn)
	    ccode = ERROR_MARK;
	  if (cst2n)
	    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
	  else
	    sgnbit = wi::zero (nprec);
	  minv = valv & cst2v;
	  switch (ccode)
	    {
	    case EQ_EXPR:
	      /* Minimum unsigned value for equality is VAL & CST2
		 (should be equal to VAL, otherwise we probably should
		 have folded the comparison into false) and
		 maximum unsigned value is VAL | ~CST2.  */
	      maxv = valv | ~cst2v;
	      valid_p = true;
	      break;

	    case NE_EXPR:
	      tem = valv | ~cst2v;
	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
	      if (valv == 0)
		{
		  cst2n = false;
		  sgnbit = wi::zero (nprec);
		  goto gt_expr;
		}
	      /* If (VAL | ~CST2) is all ones, handle it as
		 (X & CST2) < VAL.  */
	      if (tem == -1)
		{
		  cst2n = false;
		  valn = false;
		  sgnbit = wi::zero (nprec);
		  goto lt_expr;
		}
	      if (!cst2n && wi::neg_p (cst2v))
		sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
	      if (sgnbit != 0)
		{
		  if (valv == sgnbit)
		    {
		      cst2n = true;
		      valn = true;
		      goto gt_expr;
		    }
		  if (tem == wi::mask (nprec - 1, false, nprec))
		    {
		      cst2n = true;
		      goto lt_expr;
		    }
		  if (!cst2n)
		    sgnbit = wi::zero (nprec);
		}
	      break;

	    case GE_EXPR:
	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
		 is VAL and maximum unsigned value is ~0.  For signed
		 comparison, if CST2 doesn't have most significant bit
		 set, handle it similarly.  If CST2 has MSB set,
		 the minimum is the same, and maximum is ~0U/2.  */
	      if (minv != valv)
		{
		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
		     VAL.  */
		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (minv == valv)
		    break;
		}
	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
	      valid_p = true;
	      break;

	    case GT_EXPR:
	    gt_expr:
	      /* Find out smallest MINV where MINV > VAL
		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
	      if (minv == valv)
		break;
	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
	      valid_p = true;
	      break;

	    case LE_EXPR:
	      /* Minimum unsigned value for <= is 0 and maximum
		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
		 Otherwise, find smallest VAL2 where VAL2 > VAL
		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
		 as maximum.
		 For signed comparison, if CST2 doesn't have most
		 significant bit set, handle it similarly.  If CST2 has
		 MSB set, the maximum is the same and minimum is INT_MIN.  */
	      if (minv == valv)
		maxv = valv;
	      else
		{
		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (maxv == valv)
		    break;
		  maxv -= 1;
		}
	      maxv |= ~cst2v;
	      minv = sgnbit;
	      valid_p = true;
	      break;

	    case LT_EXPR:
	    lt_expr:
	      /* Minimum unsigned value for < is 0 and maximum
		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
		 Otherwise, find smallest VAL2 where VAL2 > VAL
		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
		 as maximum.
		 For signed comparison, if CST2 doesn't have most
		 significant bit set, handle it similarly.  If CST2 has
		 MSB set, the maximum is the same and minimum is INT_MIN.  */
	      if (minv == valv)
		{
		  if (valv == sgnbit)
		    break;
		  maxv = valv;
		}
	      else
		{
		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (maxv == valv)
		    break;
		}
	      maxv -= 1;
	      maxv |= ~cst2v;
	      minv = sgnbit;
	      valid_p = true;
	      break;

	    default:
	      break;
	    }
	  /* MAXV - MINV == ~0 means the range covers everything; no
	     useful assertion then.  */
	  if (valid_p
	      && (maxv - minv) != -1)
	    {
	      tree tmp, new_val, type;
	      int i;

	      for (i = 0; i < 2; i++)
		if (names[i])
		  {
		    wide_int maxv2 = maxv;
		    tmp = names[i];
		    type = TREE_TYPE (names[i]);
		    if (!TYPE_UNSIGNED (type))
		      {
			type = build_nonstandard_integer_type (nprec, 1);
			tmp = build1 (NOP_EXPR, type, names[i]);
		      }
		    if (minv != 0)
		      {
			/* Shift the range down to start at zero so a
			   single unsigned <= test covers it.  */
			tmp = build2 (PLUS_EXPR, type, tmp,
				      wide_int_to_tree (type, -minv));
			maxv2 = maxv - minv;
		      }
		    new_val = wide_int_to_tree (type, maxv2);

		    if (dump_file)
		      {
			fprintf (dump_file, "Adding assert for ");
			print_generic_expr (dump_file, names[i], 0);
			fprintf (dump_file, " from ");
			print_generic_expr (dump_file, tmp, 0);
			fprintf (dump_file, "\n");
		      }

		    register_new_assert_for (names[i], tmp, LE_EXPR,
					     new_val, NULL, e, bsi);
		    retval = true;
		  }
	    }
	}
    }

  return retval;
}

/* OP is an operand of a truth value expression which is known to have
   a particular value.  Register any asserts for OP and for any
   operands in OP's defining statement.

   If CODE is EQ_EXPR, then we want to register OP is zero (false),
   if CODE is NE_EXPR, then we want to register OP is nonzero (true).  */

static bool
register_edge_assert_for_1 (tree op, enum tree_code code,
			    edge e, gimple_stmt_iterator bsi)
{
  bool retval = false;
  gimple op_def;
  tree val;
  enum tree_code rhs_code;

  /* We only care about SSA_NAMEs.  */
  if (TREE_CODE (op) != SSA_NAME)
    return false;

  /* We know that OP will have a zero or nonzero value.  If OP is used
     more than once go ahead and register an assert for OP.  */
  if (live_on_edge (e, op)
      && !has_single_use (op))
    {
      val = build_int_cst (TREE_TYPE (op), 0);
      register_new_assert_for (op, op, code, val, NULL, e, bsi);
      retval = true;
    }

  /* Now look at how OP is set.  If it's set from a comparison,
     a truth operation or some bit operations, then we may be able
     to register information about the operands of that assignment.  */
  op_def = SSA_NAME_DEF_STMT (op);
  if (gimple_code (op_def) != GIMPLE_ASSIGN)
    return retval;

  rhs_code = gimple_assign_rhs_code (op_def);

  if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
    {
      /* OP == 0 means the comparison is known false, so invert it.  */
      bool invert = (code == EQ_EXPR ? true : false);
      tree op0 = gimple_assign_rhs1 (op_def);
      tree op1 = gimple_assign_rhs2 (op_def);

      if (TREE_CODE (op0) == SSA_NAME)
	retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
					      invert);
      if (TREE_CODE (op1) == SSA_NAME)
	retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
					      invert);
    }
  else if ((code == NE_EXPR
	    && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
	   || (code == EQ_EXPR
	       && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
    {
      /* Recurse on each operand.  */
      tree op0 = gimple_assign_rhs1 (op_def);
      tree op1 = gimple_assign_rhs2 (op_def);
      if (TREE_CODE (op0) == SSA_NAME
	  && has_single_use (op0))
	retval |= register_edge_assert_for_1 (op0, code, e, bsi);
      if (TREE_CODE (op1) == SSA_NAME
	  && has_single_use (op1))
	retval |= register_edge_assert_for_1 (op1, code, e, bsi);
    }
  else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
	   && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
    {
      /* Recurse, flipping CODE.  */
      code = invert_tree_comparison (code, false);
      retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
					    code, e, bsi);
    }
  else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
    {
      /* Recurse through the copy.  */
      retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
					    code, e, bsi);
    }
  else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
    {
      /* Recurse through the type conversion, unless it is a narrowing
	 conversion or conversion from non-integral type.  */
      tree rhs = gimple_assign_rhs1 (op_def);
      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
	  && (TYPE_PRECISION (TREE_TYPE (rhs))
	      <= TYPE_PRECISION (TREE_TYPE (op))))
	retval |= register_edge_assert_for_1 (rhs, code, e, bsi);
    }

  return retval;
}

/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by
   SI.  Return true if an assertion for NAME could be registered.  */

static bool
register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
			  enum tree_code cond_code, tree cond_op0,
			  tree cond_op1)
{
  tree val;
  enum tree_code comp_code;
  bool retval = false;
  /* On the false edge the condition holds inverted.  */
  bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
    return false;

  if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
						cond_op0, cond_op1,
						is_else_edge,
						&comp_code, &val))
    return false;

  /* Register ASSERT_EXPRs for name.  */
  retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
					cond_op1, is_else_edge);


  /* If COND is effectively an equality test of an SSA_NAME against
     the value zero or one, then we may be able to assert values
     for SSA_NAMEs which flow into COND.  */

  /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
     statement of NAME we can assert both operands of the BIT_AND_EXPR
     have nonzero value.  */
  if (((comp_code == EQ_EXPR && integer_onep (val))
       || (comp_code == NE_EXPR && integer_zerop (val))))
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (name);

      if (is_gimple_assign (def_stmt)
	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
	{
	  tree op0 = gimple_assign_rhs1 (def_stmt);
	  tree op1 = gimple_assign_rhs2 (def_stmt);
	  retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
	  retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
	}
    }

  /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
     statement of NAME we can assert both operands of the BIT_IOR_EXPR
     have zero value.  */
  if (((comp_code == EQ_EXPR && integer_zerop (val))
       || (comp_code == NE_EXPR && integer_onep (val))))
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (name);

      /* For BIT_IOR_EXPR only if NAME == 0 both operands have
	 necessarily zero value, or if type-precision is one.  */
      if (is_gimple_assign (def_stmt)
	  && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
	      && (TYPE_PRECISION (TREE_TYPE (name)) == 1
	          || comp_code == EQ_EXPR)))
	{
	  tree op0 = gimple_assign_rhs1 (def_stmt);
	  tree op1 = gimple_assign_rhs2 (def_stmt);
	  retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
	  retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
	}
    }

  return retval;
}


/* Determine whether the outgoing edges of BB should receive an
   ASSERT_EXPR for each of the operands of BB's LAST statement.
   The last statement of BB must be a COND_EXPR.

   If any of the sub-graphs rooted at BB have an interesting use of
   the predicate operands, an assert location node is added to the
   list of assertions for the corresponding operands.  */

static bool
find_conditional_asserts (basic_block bb, gimple last)
{
  bool need_assert;
  gimple_stmt_iterator bsi;
  tree op;
  edge_iterator ei;
  edge e;
  ssa_op_iter iter;

  need_assert = false;
  bsi = gsi_for_stmt (last);

  /* Look for uses of the operands in each of the sub-graphs
     rooted at BB.  We need to check each of the outgoing edges
     separately, so that we know what kind of ASSERT_EXPR to
     insert.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      /* Skip self-loop edges.  */
      if (e->dest == bb)
	continue;

      /* Register the necessary assertions for each operand in the
	 conditional predicate.  */
      FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
	{
	  need_assert |= register_edge_assert_for (op, e, bsi,
						   gimple_cond_code (last),
						   gimple_cond_lhs (last),
						   gimple_cond_rhs (last));
	}
    }

  return need_assert;
}

struct case_info
{
  tree expr;
  basic_block bb;
};

/* Compare two case labels sorting first by the destination bb index
   and then by the case value.  */

static int
compare_case_labels (const void *p1, const void *p2)
{
  const struct case_info *ci1 = (const struct case_info *) p1;
  const struct case_info *ci2 = (const struct case_info *) p2;
  int idx1 = ci1->bb->index;
  int idx2 = ci2->bb->index;

  if (idx1 < idx2)
    return -1;
  else if (idx1 == idx2)
    {
      /* Make sure the default label is first in a group.  */
      if (!CASE_LOW (ci1->expr))
	return -1;
      else if (!CASE_LOW (ci2->expr))
	return 1;
      else
	return tree_int_cst_compare (CASE_LOW (ci1->expr),
				     CASE_LOW (ci2->expr));
    }
  else
    return 1;
}

/* Determine whether the outgoing edges of BB should receive an
   ASSERT_EXPR for each of the operands of BB's LAST statement.
   The last statement of BB must be a SWITCH_EXPR.

   If any of the sub-graphs rooted at BB have an interesting use of
   the predicate operands, an assert location node is added to the
   list of assertions for the corresponding operands.  */

static bool
find_switch_asserts (basic_block bb, gimple last)
{
  bool need_assert;
  gimple_stmt_iterator bsi;
  tree op;
  edge e;
  struct case_info *ci;
  size_t n = gimple_switch_num_labels (last);
#if GCC_VERSION >= 4000
  unsigned int idx;
#else
  /* Work around GCC 3.4 bug (PR 37086).  */
  volatile unsigned int idx;
#endif

  need_assert = false;
  bsi = gsi_for_stmt (last);
  op = gimple_switch_index (last);
  if (TREE_CODE (op) != SSA_NAME)
    return false;

  /* Build a vector of case labels sorted by destination label.  */
  ci = XNEWVEC (struct case_info, n);
  for (idx = 0; idx < n; ++idx)
    {
      ci[idx].expr = gimple_switch_label (last, idx);
      ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
    }
  qsort (ci, n, sizeof (struct case_info), compare_case_labels);

  for (idx = 0; idx < n; ++idx)
    {
      tree min, max;
      tree cl = ci[idx].expr;
      basic_block cbb = ci[idx].bb;

      min = CASE_LOW (cl);
      max = CASE_HIGH (cl);

      /* If there are multiple case labels with the same destination
	 we need to combine them to a single value range for the edge.  */
      if (idx + 1 < n && cbb == ci[idx + 1].bb)
	{
	  /* Skip labels until the last of the group.  */
	  do {
	    ++idx;
	  } while (idx < n && cbb == ci[idx].bb);
	  --idx;

	  /* Pick up the maximum of the case label range.  */
	  if (CASE_HIGH (ci[idx].expr))
	    max = CASE_HIGH (ci[idx].expr);
	  else
	    max = CASE_LOW (ci[idx].expr);
	}

      /* Nothing to do if the range includes the default label until we
	 can register anti-ranges.  */
      if (min == NULL_TREE)
	continue;

      /* Find the edge to register the assert expr on.  */
      e = find_edge (bb, cbb);

      /* Register the necessary assertions for the operand in the
	 SWITCH_EXPR.  */
      need_assert |= register_edge_assert_for (op, e, bsi,
					       max ? GE_EXPR : EQ_EXPR,
					       op,
					       fold_convert (TREE_TYPE (op),
							     min));
      if (max)
	{
	  need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
						   op,
						   fold_convert (TREE_TYPE (op),
								 max));
	}
    }

  XDELETEVEC (ci);
  return need_assert;
}


/* Traverse all the statements in block BB looking for statements that
   may generate useful assertions for the SSA names in their operand.
   If a statement produces a useful assertion A for name N_i, then the
   list of assertions already generated for N_i is scanned to
   determine if A is actually needed.

   If N_i already had the assertion A at a location dominating the
   current location, then nothing needs to be done.  Otherwise, the
   new location for A is recorded instead.

   1- For every statement S in BB, all the variables used by S are added
   to bitmap FOUND_IN_SUBGRAPH.
2- If statement S uses an operand N in a way that exposes a known value range for N, then if N was not already generated by an ASSERT_EXPR, create a new assert location for N. For instance, if N is a pointer and the statement dereferences it, we can assume that N is not NULL. 3- COND_EXPRs are a special case of #2. We can derive range information from the predicate but need to insert different ASSERT_EXPRs for each of the sub-graphs rooted at the conditional block. If the last statement of BB is a conditional expression of the form 'X op Y', then a) Remove X and Y from the set FOUND_IN_SUBGRAPH. b) If the conditional is the only entry point to the sub-graph corresponding to the THEN_CLAUSE, recurse into it. On return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then an ASSERT_EXPR is added for the corresponding variable. c) Repeat step (b) on the ELSE_CLAUSE. d) Mark X and Y in FOUND_IN_SUBGRAPH. For instance, if (a == 9) b = a; else b = c + 1; In this case, an assertion on the THEN clause is useful to determine that 'a' is always 9 on that edge. However, an assertion on the ELSE clause would be unnecessary. 4- If BB does not end in a conditional expression, then we recurse into BB's dominator children. At the end of the recursive traversal, every SSA name will have a list of locations where ASSERT_EXPRs should be added. When a new location for name N is found, it is registered by calling register_new_assert_for. That function keeps track of all the registered assertions to prevent adding unnecessary assertions. For instance, if a pointer P_4 is dereferenced more than once in a dominator tree, only the location dominating all the dereference of P_4 will receive an ASSERT_EXPR. If this function returns true, then it means that there are names for which we need to generate ASSERT_EXPRs. Those assertions are inserted by process_assert_insertions. 
*/

static bool
find_assert_locations_1 (basic_block bb, sbitmap live)
{
  gimple_stmt_iterator si;
  gimple last;
  bool need_assert;

  need_assert = false;
  last = last_stmt (bb);

  /* If BB's last statement is a conditional statement involving integer
     operands, determine if we need to add ASSERT_EXPRs.  */
  if (last
      && gimple_code (last) == GIMPLE_COND
      && !fp_predicate (last)
      && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
    need_assert |= find_conditional_asserts (bb, last);

  /* If BB's last statement is a switch statement involving integer
     operands, determine if we need to add ASSERT_EXPRs.  */
  if (last
      && gimple_code (last) == GIMPLE_SWITCH
      && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
    need_assert |= find_switch_asserts (bb, last);

  /* Traverse all the statements in BB marking used names and looking
     for statements that may infer assertions for their used operands.
     The walk is backwards so LIVE tracks names live *after* each
     statement.  */
  for (si = gsi_last_bb (bb); !gsi_end_p (si); gsi_prev (&si))
    {
      gimple stmt;
      tree op;
      ssa_op_iter i;

      stmt = gsi_stmt (si);

      if (is_gimple_debug (stmt))
	continue;

      /* See if we can derive an assertion for any of STMT's operands.  */
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	{
	  tree value;
	  enum tree_code comp_code;

	  /* If op is not live beyond this stmt, do not bother to insert
	     asserts for it.  */
	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
	    continue;

	  /* If OP is used in such a way that we can infer a value
	     range for it, and we don't find a previous assertion for
	     it, create a new assertion location node for OP.  */
	  if (infer_value_range (stmt, op, &comp_code, &value))
	    {
	      /* If we are able to infer a nonzero value range for OP,
		 then walk backwards through the use-def chain to see if OP
		 was set via a typecast.

		 If so, then we can also infer a nonzero value range
		 for the operand of the NOP_EXPR.  */
	      if (comp_code == NE_EXPR && integer_zerop (value))
		{
		  tree t = op;
		  gimple def_stmt = SSA_NAME_DEF_STMT (t);

		  /* Follow chains of pointer-to-pointer conversions;
		     each intermediate name is nonzero too.  */
		  while (is_gimple_assign (def_stmt)
			 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
			 && TREE_CODE (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
			 && POINTER_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
		    {
		      t = gimple_assign_rhs1 (def_stmt);
		      def_stmt = SSA_NAME_DEF_STMT (t);

		      /* Note we want to register the assert for the
			 operand of the NOP_EXPR after SI, not after the
			 conversion.  */
		      if (! has_single_use (t))
			{
			  register_new_assert_for (t, t, comp_code, value,
						   bb, NULL, si);
			  need_assert = true;
			}
		    }
		}

	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
	      need_assert = true;
	    }
	}

      /* Update live.  */
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	bitmap_set_bit (live, SSA_NAME_VERSION (op));
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
    }

  /* Traverse all PHI nodes in BB, updating live.  */
  for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
    {
      use_operand_p arg_p;
      ssa_op_iter i;
      gimple phi = gsi_stmt (si);
      tree res = gimple_phi_result (phi);

      if (virtual_operand_p (res))
	continue;

      FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
	{
	  tree arg = USE_FROM_PTR (arg_p);
	  if (TREE_CODE (arg) == SSA_NAME)
	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
	}

      bitmap_clear_bit (live, SSA_NAME_VERSION (res));
    }

  return need_assert;
}

/* Do an RPO walk over the function computing SSA name liveness
   on-the-fly and deciding on assert expressions to insert.  Returns
   true if there are assert expressions to be inserted.
*/

static bool
find_assert_locations (void)
{
  int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
  int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
  int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
  int rpo_cnt, i;
  bool need_asserts;

  live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
  rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
  /* BB_RPO maps a block index back to its position in the RPO order.  */
  for (i = 0; i < rpo_cnt; ++i)
    bb_rpo[rpo[i]] = i;

  /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
     the order we compute liveness and insert asserts we otherwise fail
     to insert asserts into the loop latch.  */
  loop_p loop;
  FOR_EACH_LOOP (loop, 0)
    {
      i = loop->latch->index;
      unsigned int j = single_succ_edge (loop->latch)->dest_idx;
      for (gimple_stmt_iterator gsi = gsi_start_phis (loop->header);
	   !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple phi = gsi_stmt (gsi);
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    continue;
	  tree arg = gimple_phi_arg_def (phi, j);
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      /* Live bitmaps are allocated lazily, one per block.  */
	      if (live[i] == NULL)
		{
		  live[i] = sbitmap_alloc (num_ssa_names);
		  bitmap_clear (live[i]);
		}
	      bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
	    }
	}
    }

  /* Process blocks in reverse RPO order (i.e. bottom-up), so that a
     block's live set is complete before it is merged into its
     predecessors.  */
  need_asserts = false;
  for (i = rpo_cnt - 1; i >= 0; --i)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
      edge e;
      edge_iterator ei;

      if (!live[rpo[i]])
	{
	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
	  bitmap_clear (live[rpo[i]]);
	}

      /* Process BB and update the live information with uses in
	 this block.  */
      need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);

      /* Merge liveness into the predecessor blocks and free it.  */
      if (!bitmap_empty_p (live[rpo[i]]))
	{
	  int pred_rpo = i;
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      int pred = e->src->index;
	      /* Back edges and the entry block do not receive live
		 information.  */
	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
		continue;

	      if (!live[pred])
		{
		  live[pred] = sbitmap_alloc (num_ssa_names);
		  bitmap_clear (live[pred]);
		}
	      bitmap_ior (live[pred], live[pred], live[rpo[i]]);

	      if (bb_rpo[pred] < pred_rpo)
		pred_rpo = bb_rpo[pred];
	    }

	  /* Record the RPO number of the last visited block that needs
	     live information from this block.  */
	  last_rpo[rpo[i]] = pred_rpo;
	}
      else
	{
	  sbitmap_free (live[rpo[i]]);
	  live[rpo[i]] = NULL;
	}

      /* We can free all successors live bitmaps if all their
	 predecessors have been visited already.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (last_rpo[e->dest->index] == i
	    && live[e->dest->index])
	  {
	    sbitmap_free (live[e->dest->index]);
	    live[e->dest->index] = NULL;
	  }
    }

  XDELETEVEC (rpo);
  XDELETEVEC (bb_rpo);
  XDELETEVEC (last_rpo);
  /* Any bitmaps not freed during the walk (e.g. for unreachable
     blocks) are released here.  */
  for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
    if (live[i])
      sbitmap_free (live[i]);
  XDELETEVEC (live);

  return need_asserts;
}

/* Create an ASSERT_EXPR for NAME and insert it in the location
   indicated by LOC.  Return true if we made any edge insertions
   (the caller must then commit them).  */

static bool
process_assert_insertions_for (tree name, assert_locus_t loc)
{
  /* Build the comparison expression NAME_i COMP_CODE VAL.  */
  gimple stmt;
  tree cond;
  gimple assert_stmt;
  edge_iterator ei;
  edge e;

  /* If we have X <=> X do not insert an assert expr for that.  */
  if (loc->expr == loc->val)
    return false;

  cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
  assert_stmt = build_assert_expr_for (cond, name);
  if (loc->e)
    {
      /* We have been asked to insert the assertion on an edge.  This
	 is used only by COND_EXPR and SWITCH_EXPR assertions.  */
      gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
			   || (gimple_code (gsi_stmt (loc->si))
			       == GIMPLE_SWITCH));

      gsi_insert_on_edge (loc->e, assert_stmt);
      return true;
    }

  /* Otherwise, we can insert right after LOC->SI iff the
     statement must not be the last statement in the block.  */
  stmt = gsi_stmt (loc->si);
  if (!stmt_ends_bb_p (stmt))
    {
      gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
      return false;
    }

  /* If STMT must be the last statement in BB, we can only insert new
     assertions on the non-abnormal edge out of BB.  Note that since
     STMT is not control flow, there may only be one non-abnormal edge
     out of BB.  */
  FOR_EACH_EDGE (e, ei, loc->bb->succs)
    if (!(e->flags & EDGE_ABNORMAL))
      {
	gsi_insert_on_edge (e, assert_stmt);
	return true;
      }

  gcc_unreachable ();
}

/* Process all the insertions registered for every name N_i registered
   in NEED_ASSERT_FOR.  The list of assertions to be inserted are
   found in ASSERTS_FOR[i].  Each assert_locus node is freed after it
   has been processed.  */

static void
process_assert_insertions (void)
{
  unsigned i;
  bitmap_iterator bi;
  bool update_edges_p = false;
  int num_asserts = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_all_asserts (dump_file);

  EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
    {
      assert_locus_t loc = asserts_for[i];
      gcc_assert (loc);

      /* Walk the singly-linked list of assert locations for name I,
	 consuming (freeing) each node as we go.  */
      while (loc)
	{
	  assert_locus_t next = loc->next;
	  update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
	  free (loc);
	  loc = next;
	  num_asserts++;
	}
    }

  /* Edge insertions are batched; commit them all at once.  */
  if (update_edges_p)
    gsi_commit_edge_inserts ();

  statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
			    num_asserts);
}

/* Traverse the flowgraph looking for conditional jumps to insert range
   expressions.  These range expressions are meant to provide information
   to optimizations that need to reason in terms of value ranges.  They
   will not be expanded into RTL.  For instance, given:

   x = ...
   y = ...
   if (x < y)
     y = x - 2;
   else
     x = y + 3;

   this pass will transform the code into:

   x = ...
   y = ...
   if (x < y)
    {
      x = ASSERT_EXPR <x, x < y>
      y = x - 2
    }
  else
    {
      y = ASSERT_EXPR <y, x >= y>
      x = y + 3
    }

  The idea is that once copy and constant propagation have run, other
  optimizations will be able to determine what ranges of values can 'x'
  take in different paths of the code, simply by checking the reaching
  definition of 'x'.  */

static void
insert_range_assertions (void)
{
  need_assert_for = BITMAP_ALLOC (NULL);
  asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);

  calculate_dominance_info (CDI_DOMINATORS);

  if (find_assert_locations ())
    {
      process_assert_insertions ();
      update_ssa (TODO_update_ssa_no_phi);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
      dump_function_to_file (current_function_decl, dump_file, dump_flags);
    }

  free (asserts_for);
  BITMAP_FREE (need_assert_for);
}

/* Checks one ARRAY_REF in REF, located at LOCATION.  Ignores flexible
   arrays and "struct" hacks.  If VRP can determine that the array
   subscript is a constant, check if it is outside valid range.  If
   the array subscript is a RANGE, warn if it is non-overlapping with
   valid range.
   IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR
   (taking the address one past the end of an array is valid).  */

static void
check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
{
  value_range_t* vr = NULL;
  tree low_sub, up_sub;
  tree low_bound, up_bound, up_bound_p1;
  tree base;

  /* Warn at most once per reference.  */
  if (TREE_NO_WARNING (ref))
    return;

  low_sub = up_sub = TREE_OPERAND (ref, 1);
  up_bound = array_ref_up_bound (ref);

  /* Can not check flexible arrays.  */
  if (!up_bound
      || TREE_CODE (up_bound) != INTEGER_CST)
    return;

  /* Accesses to trailing arrays via pointers may access storage
     beyond the types array bounds.  */
  base = get_base_address (ref);
  if (base && TREE_CODE (base) == MEM_REF)
    {
      tree cref, next = NULL_TREE;

      if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
	return;

      /* Scan forward in the field chain for a later FIELD_DECL; if
	 none exists the array is the trailing member ("struct hack")
	 and must not be checked.  */
      cref = TREE_OPERAND (ref, 0);
      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
	for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
	     next && TREE_CODE (next) != FIELD_DECL;
	     next = DECL_CHAIN (next))
	  ;

      /* If this is the last field in a struct type or a field in a
	 union type do not warn.  */
      if (!next)
	return;
    }

  low_bound = array_ref_low_bound (ref);
  up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
				 build_int_cst (TREE_TYPE (up_bound), 1));

  if (TREE_CODE (low_sub) == SSA_NAME)
    {
      vr = get_value_range (low_sub);
      if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
        {
	  /* For an anti-range the roles are deliberately swapped:
	     LOW_SUB gets the range max and UP_SUB the range min, so
	     the checks below test the excluded interval.  */
          low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
          up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
        }
    }

  if (vr && vr->type == VR_ANTI_RANGE)
    {
      if (TREE_CODE (up_sub) == INTEGER_CST
          && tree_int_cst_lt (up_bound, up_sub)
          && TREE_CODE (low_sub) == INTEGER_CST
          && tree_int_cst_lt (low_sub, low_bound))
        {
          warning_at (location, OPT_Warray_bounds,
		      "array subscript is outside array bounds");
          TREE_NO_WARNING (ref) = 1;
        }
    }
  else if (TREE_CODE (up_sub) == INTEGER_CST
	   && (ignore_off_by_one
	       ? (tree_int_cst_lt (up_bound, up_sub)
		  && !tree_int_cst_equal (up_bound_p1, up_sub))
	       : (tree_int_cst_lt (up_bound, up_sub)
		  || tree_int_cst_equal (up_bound_p1, up_sub))))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
	  fprintf (dump_file, "\n");
	}
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is above array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
  else if (TREE_CODE (low_sub) == INTEGER_CST
           && tree_int_cst_lt (low_sub, low_bound))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
	  fprintf (dump_file, "\n");
	}
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is below array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
}

/* Searches if the expr T, located at LOCATION computes
   address of an ARRAY_REF, and call check_array_ref on it.  */

static void
search_for_addr_array (tree t, location_t location)
{
  /* Follow single-rhs SSA copies back to the defining expression.  */
  while (TREE_CODE (t) == SSA_NAME)
    {
      gimple g = SSA_NAME_DEF_STMT (t);

      if (gimple_code (g) != GIMPLE_ASSIGN)
	return;

      if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
	  != GIMPLE_SINGLE_RHS)
	return;

      t = gimple_assign_rhs1 (g);
    }

  /* We are only interested in addresses of ARRAY_REF's.  */
  if (TREE_CODE (t) != ADDR_EXPR)
    return;

  /* Check each ARRAY_REFs in the reference chain.  */
  do
    {
      if (TREE_CODE (t) == ARRAY_REF)
	check_array_ref (location, t, true /*ignore_off_by_one*/);

      t = TREE_OPERAND (t, 0);
    }
  while (handled_component_p (t));

  /* Also diagnose &a + CST folded into MEM_REF [&a, CST] form:
     recover the element index from the byte offset and compare it
     against the array's domain.  */
  if (TREE_CODE (t) == MEM_REF
      && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
      && !TREE_NO_WARNING (t))
    {
      tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
      tree low_bound, up_bound, el_sz;
      offset_int idx;
      if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
	  || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
	  || !TYPE_DOMAIN (TREE_TYPE (tem)))
	return;

      low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
      up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
      el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
      if (!low_bound
	  || TREE_CODE (low_bound) != INTEGER_CST
	  || !up_bound
	  || TREE_CODE (up_bound) != INTEGER_CST
	  || !el_sz
	  || TREE_CODE (el_sz) != INTEGER_CST)
	return;

      idx = mem_ref_offset (t);
      idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
      if (wi::lts_p (idx, 0))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Array bound warning for ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
	      fprintf (dump_file, "\n");
	    }
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is below array bounds");
	  TREE_NO_WARNING (t) = 1;
	}
      else if (wi::gts_p (idx, (wi::to_offset (up_bound)
				- wi::to_offset (low_bound) + 1)))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Array bound warning for ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
	      fprintf (dump_file, "\n");
	    }
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is above array bounds");
	  TREE_NO_WARNING (t) = 1;
	}
    }
}

/* walk_tree() callback that checks if *TP is
   an ARRAY_REF inside an ADDR_EXPR (in which an array
   subscript one outside the valid range is allowed). Call
   check_array_ref for each ARRAY_REF found. The location is
   passed in DATA.
*/

static tree
check_array_bounds (tree *tp, int *walk_subtree, void *data)
{
  tree t = *tp;
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  location_t location;

  /* Prefer the expression's own location; otherwise fall back to the
     statement location passed through WI->INFO.  */
  if (EXPR_HAS_LOCATION (t))
    location = EXPR_LOCATION (t);
  else
    {
      location_t *locp = (location_t *) wi->info;
      location = *locp;
    }

  *walk_subtree = TRUE;

  if (TREE_CODE (t) == ARRAY_REF)
    check_array_ref (location, t, false /*ignore_off_by_one*/);

  if (TREE_CODE (t) == MEM_REF
      || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
    search_for_addr_array (TREE_OPERAND (t, 0), location);

  /* Do not descend into ADDR_EXPR: its ARRAY_REFs were already
     handled by search_for_addr_array with off-by-one allowed.  */
  if (TREE_CODE (t) == ADDR_EXPR)
    *walk_subtree = FALSE;

  return NULL_TREE;
}

/* Walk over all statements of all reachable BBs and call check_array_bounds
   on them.  */

static void
check_all_array_refs (void)
{
  basic_block bb;
  gimple_stmt_iterator si;

  FOR_EACH_BB_FN (bb, cfun)
    {
      edge_iterator ei;
      edge e;
      bool executable = false;

      /* Skip blocks that were found to be unreachable.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
	executable |= !!(e->flags & EDGE_EXECUTABLE);
      if (!executable)
	continue;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  struct walk_stmt_info wi;
	  if (!gimple_has_location (stmt))
	    continue;

	  if (is_gimple_call (stmt))
	    {
	      /* Call arguments are checked directly; walk_gimple_op
		 would not see the address expressions we care about.  */
	      size_t i;
	      size_t n = gimple_call_num_args (stmt);
	      for (i = 0; i < n; i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  search_for_addr_array (arg, gimple_location (stmt));
		}
	    }
	  else
	    {
	      memset (&wi, 0, sizeof (wi));
	      wi.info = CONST_CAST (void *, (const void *)
				    gimple_location_ptr (stmt));

	      walk_gimple_op (gsi_stmt (si), check_array_bounds, &wi);
	    }
	}
    }
}

/* Return true if all imm uses of VAR are either in STMT, or
   feed (optionally through a chain of single imm uses) GIMPLE_COND
   in basic block COND_BB.
*/

static bool
all_imm_uses_in_stmt_or_feed_cond (tree var, gimple stmt, basic_block cond_bb)
{
  use_operand_p use_p, use2_p;
  imm_use_iterator iter;

  FOR_EACH_IMM_USE_FAST (use_p, iter, var)
    if (USE_STMT (use_p) != stmt)
      {
	gimple use_stmt = USE_STMT (use_p), use_stmt2;
	/* Debug uses never count.  */
	if (is_gimple_debug (use_stmt))
	  continue;
	/* Follow a chain of single-use assignments forward to see
	   where the value eventually flows.  */
	while (is_gimple_assign (use_stmt)
	       && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
	       && single_imm_use (gimple_assign_lhs (use_stmt),
				  &use2_p, &use_stmt2))
	  use_stmt = use_stmt2;
	if (gimple_code (use_stmt) != GIMPLE_COND
	    || gimple_bb (use_stmt) != cond_bb)
	  return false;
      }
  return true;
}

/* Handle
   _4 = x_3 & 31;
   if (_4 != 0)
     goto <bb 6>;
   else
     goto <bb 7>;
   <bb 6>:
   __builtin_unreachable ();
   <bb 7>:
   x_5 = ASSERT_EXPR <x_3, ...>;
   If x_3 has no other immediate uses (checked by caller),
   var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
   from the non-zero bitmask.  */

static void
maybe_set_nonzero_bits (basic_block bb, tree var)
{
  edge e = single_pred_edge (bb);
  basic_block cond_bb = e->src;
  gimple stmt = last_stmt (cond_bb);
  tree cst;

  /* The predecessor must end in a COND comparing an SSA name against
     zero, with the branch sense matching the edge we arrived on.  */
  if (stmt == NULL
      || gimple_code (stmt) != GIMPLE_COND
      || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
				     ? EQ_EXPR : NE_EXPR)
      || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (stmt)))
    return;

  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
      || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
    return;
  if (gimple_assign_rhs1 (stmt) != var)
    {
      gimple stmt2;

      /* Also accept the masked value being a same-precision
	 conversion of VAR rather than VAR itself.  */
      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
	return;
      stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      if (!gimple_assign_cast_p (stmt2)
	  || gimple_assign_rhs1 (stmt2) != var
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
			      != TYPE_PRECISION (TREE_TYPE (var))))
	return;
    }
  cst = gimple_assign_rhs2 (stmt);
  /* The masked bits are known zero on this path; clear them in VAR's
     recorded non-zero bit mask.  */
  set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst));
}

/* Convert range assertion expressions into the implied copies and
   copy propagate away the copies.  Doing the trivial copy propagation
   here avoids the need to run the full copy propagation pass after
   VRP.

   FIXME, this will eventually lead to copy propagation removing the
   names that had useful range information attached to them.  For
   instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
   then N_i will have the range [3, +INF].

   However, by converting the assertion into the implied copy
   operation N_i = N_j, we will then copy-propagate N_j into the uses
   of N_i and lose the range information.  We may want to hold on to
   ASSERT_EXPRs a little while longer as the ranges could be used in
   things like jump threading.

   The problem with keeping ASSERT_EXPRs around is that passes after
   VRP need to handle them appropriately.

   Another approach would be to make the range information a first
   class property of the SSA_NAME so that it can be queried from
   any pass.  This is made somewhat more complex by the need for
   multiple ranges to be associated with one SSA_NAME.
*/

static void
remove_range_assertions (void)
{
  basic_block bb;
  gimple_stmt_iterator si;
  /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
     a basic block preceded by GIMPLE_COND branching to it and
     __builtin_trap, -1 if not yet checked, 0 otherwise.  */
  int is_unreachable;

  /* Note that the BSI iterator bump happens at the bottom of the
     loop and no bump is necessary if we're removing the statement
     referenced by the current BSI.  */
  FOR_EACH_BB_FN (bb, cfun)
    for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
      {
	gimple stmt = gsi_stmt (si);
	gimple use_stmt;

	if (is_gimple_assign (stmt)
	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
	  {
	    tree lhs = gimple_assign_lhs (stmt);
	    tree rhs = gimple_assign_rhs1 (stmt);
	    tree var;
	    tree cond = fold (ASSERT_EXPR_COND (rhs));
	    use_operand_p use_p;
	    imm_use_iterator iter;

	    gcc_assert (cond != boolean_false_node);

	    var = ASSERT_EXPR_VAR (rhs);
	    gcc_assert (TREE_CODE (var) == SSA_NAME);

	    if (!POINTER_TYPE_P (TREE_TYPE (lhs))
		&& SSA_NAME_RANGE_INFO (lhs))
	      {
		/* Lazily determine (once per block) whether this block
		   is the fall-through of an unreachable-asserting
		   condition.  */
		if (is_unreachable == -1)
		  {
		    is_unreachable = 0;
		    if (single_pred_p (bb)
			&& assert_unreachable_fallthru_edge_p
						      (single_pred_edge (bb)))
		      is_unreachable = 1;
		  }
		/* Handle
		   if (x_7 >= 10 && x_7 < 20)
		     __builtin_unreachable ();
		   x_8 = ASSERT_EXPR <x_7, ...>;
		   if the only uses of x_7 are in the ASSERT_EXPR and
		   in the condition.  In that case, we can copy the
		   range info from x_8 computed in this pass also
		   for x_7.  */
		if (is_unreachable
		    && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
							  single_pred (bb)))
		  {
		    set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
				    SSA_NAME_RANGE_INFO (lhs)->get_min (),
				    SSA_NAME_RANGE_INFO (lhs)->get_max ());
		    maybe_set_nonzero_bits (bb, var);
		  }
	      }

	    /* Propagate the RHS into every use of the LHS.  */
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, var);

	    /* And finally, remove the copy, it is not needed.  */
	    gsi_remove (&si, true);
	    release_defs (stmt);
	  }
	else
	  {
	    if (!is_gimple_debug (gsi_stmt (si)))
	      is_unreachable = 0;
	    gsi_next (&si);
	  }
      }
}

/* Return true if STMT is interesting for VRP.  Only statements whose
   result is an integral or pointer SSA name can yield ranges.  */

static bool
stmt_interesting_for_vrp (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      tree res = gimple_phi_result (stmt);
      return (!virtual_operand_p (res)
	      && (INTEGRAL_TYPE_P (TREE_TYPE (res))
		  || POINTER_TYPE_P (TREE_TYPE (res))));
    }
  else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
    {
      tree lhs = gimple_get_lhs (stmt);

      /* In general, assignments with virtual operands are not useful
	 for deriving ranges, with the obvious exception of calls to
	 builtin functions.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME
	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && (is_gimple_call (stmt)
	      || !gimple_vuse (stmt)))
	return true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND
	   || gimple_code (stmt) == GIMPLE_SWITCH)
    return true;

  return false;
}

/* Initialize local data structures for VRP.  Allocates the value
   range lattice and marks which statements the propagator should
   simulate.  */

static void
vrp_initialize (void)
{
  basic_block bb;

  values_propagated = false;
  num_vr_values = num_ssa_names;
  vr_value = XCNEWVEC (value_range_t *, num_vr_values);
  vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator si;

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple phi = gsi_stmt (si);
	  if (!stmt_interesting_for_vrp (phi))
	    {
	      /* Uninteresting PHIs go straight to VARYING and are
		 never simulated again.  */
	      tree lhs = PHI_RESULT (phi);
	      set_value_range_to_varying (get_value_range (lhs));
	      prop_set_simulate_again (phi, false);
	    }
	  else
	    prop_set_simulate_again (phi, true);
	}

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
	  gimple stmt = gsi_stmt (si);

	  /* If the statement is a control insn, then we do not
	     want to avoid simulating the statement once.  Failure
	     to do so means that those edges will never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    prop_set_simulate_again (stmt, true);
	  else if (!stmt_interesting_for_vrp (stmt))
	    {
	      ssa_op_iter i;
	      tree def;
	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
		set_value_range_to_varying (get_value_range (def));
	      prop_set_simulate_again (stmt, false);
	    }
	  else
	    prop_set_simulate_again (stmt, true);
	}
    }
}

/* Return the singleton value-range for NAME or NAME.  */

static inline tree
vrp_valueize (tree name)
{
  if (TREE_CODE (name) == SSA_NAME)
    {
      value_range_t *vr = get_value_range (name);
      /* A [x, x] range folds to the constant x.  */
      if (vr->type == VR_RANGE
	  && (vr->min == vr->max
	      || operand_equal_p (vr->min, vr->max, 0)))
	return vr->min;
    }
  return name;
}

/* Visit assignment STMT.  If it produces an interesting range, record
   the SSA name in *OUTPUT_P.  */

static enum ssa_prop_result
vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
{
  tree def, lhs;
  ssa_op_iter iter;
  enum gimple_code code = gimple_code (stmt);
  lhs = gimple_get_lhs (stmt);

  /* We only keep track of ranges in integral and pointer types.  */
  if (TREE_CODE (lhs) == SSA_NAME
      && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	   /* It is valid to have NULL MIN/MAX values on a type.  See
	      build_range_type.  */
	   && TYPE_MIN_VALUE (TREE_TYPE (lhs))
	   && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
	  || POINTER_TYPE_P (TREE_TYPE (lhs))))
    {
      value_range_t new_vr = VR_INITIALIZER;

      /* Try folding the statement to a constant first.  */
      tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
      if (tem)
	set_value_range_to_value (&new_vr, tem, NULL);
      /* Then dispatch to value-range extracting functions.  */
      else if (code == GIMPLE_CALL)
	extract_range_basic (&new_vr, stmt);
      else
	extract_range_from_assignment (&new_vr, stmt);

      if (update_value_range (lhs, &new_vr))
	{
	  *output_p = lhs;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Found new range for ");
	      print_generic_expr (dump_file, lhs, 0);
	      fprintf (dump_file, ": ");
	      dump_value_range (dump_file, &new_vr);
	      fprintf (dump_file, "\n");
	    }

	  if (new_vr.type == VR_VARYING)
	    return SSA_PROP_VARYING;

	  return SSA_PROP_INTERESTING;
	}

      return SSA_PROP_NOT_INTERESTING;
    }

  /* Every other statement produces no useful ranges.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
    set_value_range_to_varying (get_value_range (def));

  return SSA_PROP_VARYING;
}

/* Helper that gets the value range of the SSA_NAME with version I
   or a symbolic range containing the SSA_NAME only if the value range
   is varying or undefined.  Returns the range by value (a copy).  */

static inline value_range_t
get_vr_for_comparison (int i)
{
  value_range_t vr = *get_value_range (ssa_name (i));

  /* If name N_i does not have a valid range, use N_i as its own
     range.  This allows us to compare against names that may
     have N_i in their ranges.  */
  if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
    {
      vr.type = VR_RANGE;
      vr.min = ssa_name (i);
      vr.max = ssa_name (i);
    }

  return vr;
}

/* Compare all the value ranges for names equivalent to VAR with VAL
   using comparison code COMP.  Return the same value returned by
   compare_range_with_value, including the setting of
   *STRICT_OVERFLOW_P.  */

static tree
compare_name_with_value (enum tree_code comp, tree var, tree val,
			 bool *strict_overflow_p)
{
  bitmap_iterator bi;
  unsigned i;
  bitmap e;
  tree retval, t;
  int used_strict_overflow;
  bool sop;
  value_range_t equiv_vr;

  /* Get the set of equivalences for VAR.  */
  e = get_value_range (var)->equiv;

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Compare vars' value range with val.  */
  equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
  sop = false;
  retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
  if (retval)
    used_strict_overflow = sop ? 1 : 0;

  /* If the equiv set is empty we have done all work we need to do.  */
  if (e == NULL)
    {
      if (retval
	  && used_strict_overflow > 0)
	*strict_overflow_p = true;
      return retval;
    }

  EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
    {
      equiv_vr = get_vr_for_comparison (i);
      sop = false;
      t = compare_range_with_value (comp, &equiv_vr, val, &sop);
      if (t)
	{
	  /* If we get different answers from different members
	     of the equivalence set this check must be in a dead
	     code region.  Folding it to a trap representation
	     would be correct here.  For now just return don't-know.  */
	  if (retval != NULL
	      && t != retval)
	    {
	      retval = NULL_TREE;
	      break;
	    }
	  retval = t;

	  if (!sop)
	    used_strict_overflow = 0;
	  else if (used_strict_overflow < 0)
	    used_strict_overflow = 1;
	}
    }

  if (retval
      && used_strict_overflow > 0)
    *strict_overflow_p = true;

  return retval;
}

/* Given a comparison code COMP and names N1 and N2, compare all the
   ranges equivalent to N1 against all the ranges equivalent to N2
   to determine the value of N1 COMP N2.  Return the same value
   returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
   whether we relied on an overflow infinity in the comparison.  */

static tree
compare_names (enum tree_code comp, tree n1, tree n2,
	       bool *strict_overflow_p)
{
  tree t, retval;
  bitmap e1, e2;
  bitmap_iterator bi1, bi2;
  unsigned i1, i2;
  int used_strict_overflow;
  /* Function-lifetime scratch bitmaps, allocated once and reused on
     every call so empty equivalence sets need no allocation.  */
  static bitmap_obstack *s_obstack = NULL;
  static bitmap s_e1 = NULL, s_e2 = NULL;

  /* Compare the ranges of every name equivalent to N1 against the
     ranges of every name equivalent to N2.  */
  e1 = get_value_range (n1)->equiv;
  e2 = get_value_range (n2)->equiv;

  /* Use the fake bitmaps if e1 or e2 are not available.  */
  if (s_obstack == NULL)
    {
      s_obstack = XNEW (bitmap_obstack);
      bitmap_obstack_initialize (s_obstack);
      s_e1 = BITMAP_ALLOC (s_obstack);
      s_e2 = BITMAP_ALLOC (s_obstack);
    }
  if (e1 == NULL)
    e1 = s_e1;
  if (e2 == NULL)
    e2 = s_e2;

  /* Add N1 and N2 to their own set of equivalences to avoid
     duplicating the body of the loop just to check N1 and N2
     ranges.  NOTE: these bits are temporary and must be cleared
     again on every exit path below.  */
  bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_set_bit (e2, SSA_NAME_VERSION (n2));

  /* If the equivalence sets have a common intersection, then the two
     names can be compared without checking their ranges.  */
  if (bitmap_intersect_p (e1, e2))
    {
      bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
      bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));

      return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
	     ? boolean_true_node
	     : boolean_false_node;
    }

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Otherwise, compare all the equivalent ranges.  First, add N1 and
     N2 to their own set of equivalences to avoid duplicating the body
     of the loop just to check N1 and N2 ranges.  */
  EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
    {
      value_range_t vr1 = get_vr_for_comparison (i1);

      t = retval = NULL_TREE;
      EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
	{
	  bool sop = false;

	  value_range_t vr2 = get_vr_for_comparison (i2);

	  t = compare_ranges (comp, &vr1, &vr2, &sop);
	  if (t)
	    {
	      /* If we get different answers from different members
		 of the equivalence set this check must be in a dead
		 code region.  Folding it to a trap representation
		 would be correct here.  For now just return don't-know.  */
	      if (retval != NULL
		  && t != retval)
		{
		  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
		  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
		  return NULL_TREE;
		}
	      retval = t;

	      if (!sop)
		used_strict_overflow = 0;
	      else if (used_strict_overflow < 0)
		used_strict_overflow = 1;
	    }
	}

      if (retval)
	{
	  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
	  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
	  if (used_strict_overflow > 0)
	    *strict_overflow_p = true;
	  return retval;
	}
    }

  /* None of the equivalent ranges are useful in computing this
     comparison.  */
  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
  return NULL_TREE;
}

/* Helper function for vrp_evaluate_conditional_warnv.  Evaluates
   CODE OP0 OP1 using only the recorded ranges of the operands (no
   equivalence sets).  */

static tree
vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
						      tree op0, tree op1,
						      bool * strict_overflow_p)
{
  value_range_t *vr0, *vr1;

  vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
  vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;

  tree res = NULL_TREE;
  if (vr0 && vr1)
    res = compare_ranges (code, vr0, vr1, strict_overflow_p);
  if (!res && vr0)
    res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
  if (!res && vr1)
    res = (compare_range_with_value
	    (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
  return res;
}

/* Helper function for vrp_evaluate_conditional_warnv.  If
   USE_EQUIV_P, fall back to comparing full equivalence sets after a
   range-only comparison fails; *ONLY_RANGES records which strategy
   produced the answer.  */

static tree
vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
					 tree op1, bool use_equiv_p,
					 bool *strict_overflow_p,
					 bool *only_ranges)
{
  tree ret;
  if (only_ranges)
    *only_ranges = true;

  /* We only deal with integral and pointer types.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && !POINTER_TYPE_P (TREE_TYPE (op0)))
    return NULL_TREE;

  if (use_equiv_p)
    {
      if (only_ranges
          && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
	              (code, op0, op1, strict_overflow_p)))
	return ret;
      *only_ranges = false;
      if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
	return compare_names (code, op0, op1, strict_overflow_p);
      else if (TREE_CODE (op0) == SSA_NAME)
	return compare_name_with_value (code, op0, op1, strict_overflow_p);
      else if (TREE_CODE (op1) == SSA_NAME)
	return (compare_name_with_value
		(swap_tree_comparison (code), op1, op0, strict_overflow_p));
    }
  else
    return vrp_evaluate_conditional_warnv_with_ops_using_ranges
	     (code, op0, op1, strict_overflow_p);
  return NULL_TREE;
}

/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
   information.  Return NULL if the conditional can not be evaluated.
   The ranges of all the names equivalent with the operands in COND
   will be used when trying to compute the value.  If the result is
   based on undefined signed overflow, issue a warning if
   appropriate.  */

static tree
vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
{
  bool sop;
  tree ret;
  bool only_ranges;

  /* Some passes and foldings leak constants with overflow flag set
     into the IL.  Avoid doing wrong things with these and bail out.  */
  if ((TREE_CODE (op0) == INTEGER_CST
       && TREE_OVERFLOW (op0))
      || (TREE_CODE (op1) == INTEGER_CST
	  && TREE_OVERFLOW (op1)))
    return NULL_TREE;

  sop = false;
  ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
  						 &only_ranges);

  if (ret && sop)
    {
      enum warn_strict_overflow_code wc;
      const char* warnmsg;

      if (is_gimple_min_invariant (ret))
	{
	  wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional to constant");
	}
      else
	{
	  wc = WARN_STRICT_OVERFLOW_COMPARISON;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional");
	}

      if (issue_strict_overflow_warning (wc))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
	}
    }

  if (warn_type_limits
      && ret && only_ranges
      && TREE_CODE_CLASS (code) == tcc_comparison
      && TREE_CODE (op0) == SSA_NAME)
    {
      /* If the comparison is being folded and the operand on the LHS
	 is being compared against a constant value that is outside of
	 the natural range of OP0's type, then the predicate will
	 always fold regardless of the value of OP0.  If -Wtype-limits
	 was specified, emit a warning.  */
      tree type = TREE_TYPE (op0);
      value_range_t *vr0 = get_value_range (op0);

      if (vr0->type != VR_VARYING
	  && INTEGRAL_TYPE_P (type)
	  && vrp_val_is_min (vr0->min)
	  && vrp_val_is_max (vr0->max)
	  && is_gimple_min_invariant (op1))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);

	  warning_at (location, OPT_Wtype_limits,
		      integer_zerop (ret)
		      ? G_("comparison always false "
                           "due to limited range of data type")
		      : G_("comparison always true "
                           "due to limited range of data type"));
	}
    }

  return ret;
}

/* Visit conditional statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.
   Otherwise, return SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
{
  tree val;
  bool sop;

  *taken_edge_p = NULL;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      tree use;
      ssa_op_iter i;

      fprintf (dump_file, "\nVisiting conditional with predicate: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
      fprintf (dump_file, "\nWith known ranges\n");

      FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
	{
	  fprintf (dump_file, "\t");
	  print_generic_expr (dump_file, use, 0);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
	}

      fprintf (dump_file, "\n");
    }

  /* Compute the value of the predicate COND by checking the known
     ranges of each of its operands.

     Note that we cannot evaluate all the equivalent ranges here
     because those ranges may not yet be final and with the current
     propagation strategy, we cannot determine when the value ranges
     of the names in the equivalence set have changed.

     For instance, given the following code fragment

	i_5 = PHI <8, i_13>
	...
	i_14 = ASSERT_EXPR <i_5, i_5 != 0>
	if (i_14 == 1)
	  ...

     Assume that on the first visit to i_14, i_5 has the temporary
     range [8, 8] because the second argument to the PHI function is
     not yet executable.  We derive the range ~[0, 0] for i_14 and the
     equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
     the first time, since i_14 is equivalent to the range [8, 8], we
     determine that the predicate is always false.

     On the next round of propagation, i_13 is determined to be
     VARYING, which causes i_5 to drop down to VARYING.  So, another
     visit to i_14 is scheduled.  In this second visit, we compute the
     exact same range and equivalence set for i_14, namely ~[0, 0] and
     { i_5 }.  But we did not have the previous range for i_5
     registered, so vrp_visit_assignment thinks that the range for
     i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
     is not visited again, which stops propagation from visiting
     statements in the THEN clause of that if().

     To properly fix this we would need to keep the previous range
     value for the names in the equivalence set.  This way we would've
     discovered that from one visit to the other i_5 changed from
     range [8, 8] to VR_VARYING.

     However, fixing this apparent limitation may not be worth the
     additional checking.  Testing on several code bases (GCC, DLV,
     MICO, TRAMP3D and SPEC2000) showed that doing this results in
     4 more predicates folded in SPEC.  */
  sop = false;
  val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
						 gimple_cond_lhs (stmt),
						 gimple_cond_rhs (stmt),
						 false, &sop, NULL);
  if (val)
    {
      if (!sop)
	*taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
      else
	{
	  /* Discard foldings that rely on undefined signed overflow
	     in this context.  */
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "\nIgnoring predicate evaluation because "
		     "it assumes that signed overflow is undefined");
	  val = NULL_TREE;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nPredicate evaluates to: ");
      if (val == NULL_TREE)
	fprintf (dump_file, "DON'T KNOW\n");
      else
	print_generic_stmt (dump_file, val, 0);
    }

  return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
}

/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
   that includes the value VAL.  The search is restricted to the range
   [START_IDX, n - 1] where n is the size of VEC.

   If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
   returned.

   If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
   it is placed in IDX and false is returned.

   If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
   returned. */

static bool
find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
{
  size_t n = gimple_switch_num_labels (stmt);
  size_t low, high;

  /* Find case label for minimum of the value range or the next one.
     At each iteration we are searching in [low, high - 1].
     This is a standard binary search over the sorted case labels.  */

  for (low = start_idx, high = n; high != low; )
    {
      tree t;
      int cmp;
      /* Note that i != high, so we never ask for n.
*/ size_t i = (high + low) / 2; t = gimple_switch_label (stmt, i); /* Cache the result of comparing CASE_LOW and val. */ cmp = tree_int_cst_compare (CASE_LOW (t), val); if (cmp == 0) { /* Ranges cannot be empty. */ *idx = i; return true; } else if (cmp > 0) high = i; else { low = i + 1; if (CASE_HIGH (t) != NULL && tree_int_cst_compare (CASE_HIGH (t), val) >= 0) { *idx = i; return true; } } } *idx = high; return false; } /* Searches the case label vector VEC for the range of CASE_LABELs that is used for values between MIN and MAX. The first index is placed in MIN_IDX. The last index is placed in MAX_IDX. If the range of CASE_LABELs is empty then MAX_IDX < MIN_IDX. Returns true if the default label is not needed. */ static bool find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx, size_t *max_idx) { size_t i, j; bool min_take_default = !find_case_label_index (stmt, 1, min, &i); bool max_take_default = !find_case_label_index (stmt, i, max, &j); if (i == j && min_take_default && max_take_default) { /* Only the default case label reached. Return an empty range. */ *min_idx = 1; *max_idx = 0; return false; } else { bool take_default = min_take_default || max_take_default; tree low, high; size_t k; if (max_take_default) j--; /* If the case label range is continuous, we do not need the default case label. Verify that. */ high = CASE_LOW (gimple_switch_label (stmt, i)); if (CASE_HIGH (gimple_switch_label (stmt, i))) high = CASE_HIGH (gimple_switch_label (stmt, i)); for (k = i + 1; k <= j; ++k) { low = CASE_LOW (gimple_switch_label (stmt, k)); if (!integer_onep (int_const_binop (MINUS_EXPR, low, high))) { take_default = true; break; } high = low; if (CASE_HIGH (gimple_switch_label (stmt, k))) high = CASE_HIGH (gimple_switch_label (stmt, k)); } *min_idx = i; *max_idx = j; return !take_default; } } /* Searches the case label vector VEC for the ranges of CASE_LABELs that are used in range VR. 
The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1. Returns true if the default label is not needed. */ static bool find_case_label_ranges (gimple stmt, value_range_t *vr, size_t *min_idx1, size_t *max_idx1, size_t *min_idx2, size_t *max_idx2) { size_t i, j, k, l; unsigned int n = gimple_switch_num_labels (stmt); bool take_default; tree case_low, case_high; tree min = vr->min, max = vr->max; gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE); take_default = !find_case_label_range (stmt, min, max, &i, &j); /* Set second range to emtpy. */ *min_idx2 = 1; *max_idx2 = 0; if (vr->type == VR_RANGE) { *min_idx1 = i; *max_idx1 = j; return !take_default; } /* Set first range to all case labels. */ *min_idx1 = 1; *max_idx1 = n - 1; if (i > j) return false; /* Make sure all the values of case labels [i , j] are contained in range [MIN, MAX]. */ case_low = CASE_LOW (gimple_switch_label (stmt, i)); case_high = CASE_HIGH (gimple_switch_label (stmt, j)); if (tree_int_cst_compare (case_low, min) < 0) i += 1; if (case_high != NULL_TREE && tree_int_cst_compare (max, case_high) < 0) j -= 1; if (i > j) return false; /* If the range spans case labels [i, j], the corresponding anti-range spans the labels [1, i - 1] and [j + 1, n - 1]. */ k = j + 1; l = n - 1; if (k > l) { k = 1; l = 0; } j = i - 1; i = 1; if (i > j) { i = k; j = l; k = 1; l = 0; } *min_idx1 = i; *max_idx1 = j; *min_idx2 = k; *max_idx2 = l; return false; } /* Visit switch statement STMT. If we can determine which edge will be taken out of STMT's basic block, record it in *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return SSA_PROP_VARYING. 
   */

static enum ssa_prop_result
vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
{
  tree op, val;
  value_range_t *vr;
  size_t i = 0, j = 0, k, l;
  bool take_default;

  *taken_edge_p = NULL;
  op = gimple_switch_index (stmt);
  if (TREE_CODE (op) != SSA_NAME)
    return SSA_PROP_VARYING;

  vr = get_value_range (op);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting switch expression with operand ");
      print_generic_expr (dump_file, op, 0);
      fprintf (dump_file, " with known range ");
      dump_value_range (dump_file, vr);
      fprintf (dump_file, "\n");
    }

  /* Only constant-bounded (anti-)ranges let us select labels.  */
  if ((vr->type != VR_RANGE
       && vr->type != VR_ANTI_RANGE)
      || symbolic_range_p (vr))
    return SSA_PROP_VARYING;

  /* Find the single edge that is taken from the switch expression.  */
  take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);

  /* Check if the range spans no CASE_LABEL. If so, we only reach the default
     label */
  if (j < i)
    {
      gcc_assert (take_default);
      val = gimple_switch_default_label (stmt);
    }
  else
    {
      /* Check if labels with index i to j and maybe the default label
	 are all reaching the same label.  */
      val = gimple_switch_label (stmt, i);
      if (take_default
	  && CASE_LABEL (gimple_switch_default_label (stmt))
	  != CASE_LABEL (val))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, " not a single destination for this "
		     "range\n");
	  return SSA_PROP_VARYING;
	}
      for (++i; i <= j; ++i)
	{
	  if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, " not a single destination for this "
			 "range\n");
	      return SSA_PROP_VARYING;
	    }
	}
      /* Also verify the second label range (non-empty only for
	 anti-ranges).  */
      for (; k <= l; ++k)
	{
	  if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, " not a single destination for this "
			 "range\n");
	      return SSA_PROP_VARYING;
	    }
	}
    }

  *taken_edge_p = find_edge (gimple_bb (stmt),
			     label_to_block (CASE_LABEL (val)));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " will take edge to ");
      print_generic_stmt (dump_file, CASE_LABEL (val), 0);
    }

  return SSA_PROP_INTERESTING;
}

/* Evaluate statement STMT.  If the statement produces a useful range,
   return SSA_PROP_INTERESTING and record the SSA name with the
   interesting range into *OUTPUT_P.

   If STMT is a conditional branch and we can determine its truth
   value, the taken edge is recorded in *TAKEN_EDGE_P.

   If STMT produces a varying value, return SSA_PROP_VARYING.
*/ static enum ssa_prop_result vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p) { tree def; ssa_op_iter iter; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "\nVisiting statement:\n"); print_gimple_stmt (dump_file, stmt, 0, dump_flags); } if (!stmt_interesting_for_vrp (stmt)) gcc_assert (stmt_ends_bb_p (stmt)); else if (is_gimple_assign (stmt) || is_gimple_call (stmt)) return vrp_visit_assignment_or_call (stmt, output_p); else if (gimple_code (stmt) == GIMPLE_COND) return vrp_visit_cond_stmt (stmt, taken_edge_p); else if (gimple_code (stmt) == GIMPLE_SWITCH) return vrp_visit_switch_stmt (stmt, taken_edge_p); /* All other statements produce nothing of interest for VRP, so mark their outputs varying and prevent further simulation. */ FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF) set_value_range_to_varying (get_value_range (def)); return SSA_PROP_VARYING; } /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and { VR1TYPE, VR0MIN, VR0MAX } and store the result in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest possible such range. The resulting range is not canonicalized. */ static void union_ranges (enum value_range_type *vr0type, tree *vr0min, tree *vr0max, enum value_range_type vr1type, tree vr1min, tree vr1max) { bool mineq = operand_equal_p (*vr0min, vr1min, 0); bool maxeq = operand_equal_p (*vr0max, vr1max, 0); /* [] is vr0, () is vr1 in the following classification comments. */ if (mineq && maxeq) { /* [( )] */ if (*vr0type == vr1type) /* Nothing to do for equal ranges. */ ; else if ((*vr0type == VR_RANGE && vr1type == VR_ANTI_RANGE) || (*vr0type == VR_ANTI_RANGE && vr1type == VR_RANGE)) { /* For anti-range with range union the result is varying. 
	   */
	  goto give_up;
	}
      else
	gcc_unreachable ();
    }
  else if (operand_less_p (*vr0max, vr1min) == 1
	   || operand_less_p (vr1max, *vr0min) == 1)
    {
      /* [ ] ( ) or ( ) [ ]
	 If the ranges have an empty intersection, result of the union
	 operation is the anti-range or if both are anti-ranges
	 it covers all.  */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	goto give_up;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* The result is the convex hull of both ranges.  */
	  if (operand_less_p (*vr0max, vr1min) == 1)
	    {
	      /* If the result can be an anti-range, create one.  */
	      if (TREE_CODE (*vr0max) == INTEGER_CST
		  && TREE_CODE (vr1min) == INTEGER_CST
		  && vrp_val_is_min (*vr0min)
		  && vrp_val_is_max (vr1max))
		{
		  /* The hull is [-INF, +INF] with a gap between the
		     two ranges; represent it as ~[gap].  */
		  tree min = int_const_binop (PLUS_EXPR,
					      *vr0max,
					      build_int_cst (TREE_TYPE (*vr0max), 1));
		  tree max = int_const_binop (MINUS_EXPR,
					      vr1min,
					      build_int_cst (TREE_TYPE (vr1min), 1));
		  if (!operand_less_p (max, min))
		    {
		      *vr0type = VR_ANTI_RANGE;
		      *vr0min = min;
		      *vr0max = max;
		    }
		  else
		    *vr0max = vr1max;
		}
	      else
		*vr0max = vr1max;
	    }
	  else
	    {
	      /* If the result can be an anti-range, create one.  */
	      if (TREE_CODE (vr1max) == INTEGER_CST
		  && TREE_CODE (*vr0min) == INTEGER_CST
		  && vrp_val_is_min (vr1min)
		  && vrp_val_is_max (*vr0max))
		{
		  tree min = int_const_binop (PLUS_EXPR,
					      vr1max,
					      build_int_cst (TREE_TYPE (vr1max), 1));
		  tree max = int_const_binop (MINUS_EXPR,
					      *vr0min,
					      build_int_cst (TREE_TYPE (*vr0min), 1));
		  if (!operand_less_p (max, min))
		    {
		      *vr0type = VR_ANTI_RANGE;
		      *vr0min = min;
		      *vr0max = max;
		    }
		  else
		    *vr0min = vr1min;
		}
	      else
		*vr0min = vr1min;
	    }
	}
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
    {
      /* [ (  ) ] or [(  ) ] or [ (  )] */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Arbitrarily choose the right or left gap.  */
	  if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       build_int_cst (TREE_TYPE (vr1min), 1));
	  else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       build_int_cst (TREE_TYPE (vr1max), 1));
	  else
	    goto give_up;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	/* The result covers everything.  */
	goto give_up;
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
    {
      /* ( [  ] ) or ([  ] ) or ( [  ]) */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  *vr0type = VR_ANTI_RANGE;
	  if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
	    {
	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
					 build_int_cst (TREE_TYPE (*vr0min), 1));
	      *vr0min = vr1min;
	    }
	  else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
	    {
	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
					 build_int_cst (TREE_TYPE (*vr0max), 1));
	      *vr0max = vr1max;
	    }
	  else
	    goto give_up;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	/* The result covers everything.  */
	goto give_up;
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (vr1min, *vr0max) == 1
	    || operand_equal_p (vr1min, *vr0max, 0))
	   && operand_less_p (*vr0min, vr1min) == 1
	   && operand_less_p (*vr0max, vr1max) == 1)
    {
      /* [  (  ]  ) or [   ](   ) */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  if (TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       build_int_cst (TREE_TYPE (vr1min), 1));
	  else
	    goto give_up;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (*vr0max) == INTEGER_CST)
	    {
	      *vr0type = vr1type;
	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
					 build_int_cst (TREE_TYPE (*vr0max), 1));
	      *vr0max = vr1max;
	    }
	  else
	    goto give_up;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (*vr0min, vr1max) == 1
	    || operand_equal_p (*vr0min, vr1max, 0))
	   && operand_less_p (vr1min, *vr0min) == 1
	   && operand_less_p (vr1max, *vr0max) == 1)
    {
      /* ( [  )  ] or (   )[   ] */
      if (*vr0type == VR_RANGE
	  &&
	     vr1type == VR_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  if (TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       build_int_cst (TREE_TYPE (vr1max), 1));
	  else
	    goto give_up;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (*vr0min) == INTEGER_CST)
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
					 build_int_cst (TREE_TYPE (*vr0min), 1));
	    }
	  else
	    goto give_up;
	}
      else
	gcc_unreachable ();
    }
  else
    goto give_up;

  return;

give_up:
  *vr0type = VR_VARYING;
  *vr0min = NULL_TREE;
  *vr0max = NULL_TREE;
}

/* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
   { VR1TYPE, VR0MIN, VR0MAX } and store the result
   in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
   possible such range.  The resulting range is not canonicalized.  */

static void
intersect_ranges (enum value_range_type *vr0type,
		  tree *vr0min, tree *vr0max,
		  enum value_range_type vr1type,
		  tree vr1min, tree vr1max)
{
  bool mineq = operand_equal_p (*vr0min, vr1min, 0);
  bool maxeq = operand_equal_p (*vr0max, vr1max, 0);

  /* [] is vr0, () is vr1 in the following classification comments.  */
  if (mineq && maxeq)
    {
      /* [(  )] */
      if (*vr0type == vr1type)
	/* Nothing to do for equal ranges.  */
	;
      else if ((*vr0type == VR_RANGE
		&& vr1type == VR_ANTI_RANGE)
	       || (*vr0type == VR_ANTI_RANGE
		   && vr1type == VR_RANGE))
	{
	  /* For anti-range with range intersection the result is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if (operand_less_p (*vr0max, vr1min) == 1
	   || operand_less_p (vr1max, *vr0min) == 1)
    {
      /* [ ] ( ) or ( ) [ ]
	 If the ranges have an empty intersection, the result of the
	 intersect operation is the range for intersecting an
	 anti-range with a range or empty when intersecting two ranges.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_ANTI_RANGE)
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If the anti-ranges are adjacent to each other merge them.  */
	  if (TREE_CODE (*vr0max) == INTEGER_CST
	      && TREE_CODE (vr1min) == INTEGER_CST
	      && operand_less_p (*vr0max, vr1min) == 1
	      && integer_onep (int_const_binop (MINUS_EXPR,
						vr1min, *vr0max)))
	    *vr0max = vr1max;
	  else if (TREE_CODE (vr1max) == INTEGER_CST
		   && TREE_CODE (*vr0min) == INTEGER_CST
		   && operand_less_p (vr1max, *vr0min) == 1
		   && integer_onep (int_const_binop (MINUS_EXPR,
						     *vr0min, vr1max)))
	    *vr0min = vr1min;
	  /* Else arbitrarily take VR0.  */
	}
    }
  else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
    {
      /* [ (  ) ] or [(  ) ] or [ (  )] */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	{
	  /* If both are ranges the result is the inner one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* Choose the right gap if the left one is empty.  */
	  if (mineq)
	    {
	      if (TREE_CODE (vr1max) == INTEGER_CST)
		*vr0min = int_const_binop (PLUS_EXPR, vr1max,
					   build_int_cst (TREE_TYPE (vr1max), 1));
	      else
		*vr0min = vr1max;
	    }
	  /* Choose the left gap if the right one is empty.  */
	  else if (maxeq)
	    {
	      if (TREE_CODE (vr1min) == INTEGER_CST)
		*vr0max = int_const_binop (MINUS_EXPR, vr1min,
					   build_int_cst (TREE_TYPE (vr1min), 1));
	      else
		*vr0max = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (*vr0min)
		   && vrp_val_is_max (*vr0max))
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	  /* Else choose the range.  */
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	/* If both are anti-ranges the result is the outer one.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
    {
      /* ( [  ] ) or ([  ] ) or ( [  ]) */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	/* Choose the inner range.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Choose the right gap if the left is empty.  */
	  if (mineq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0max) == INTEGER_CST)
		*vr0min = int_const_binop (PLUS_EXPR, *vr0max,
					   build_int_cst (TREE_TYPE (*vr0max), 1));
	      else
		*vr0min = *vr0max;
	      *vr0max = vr1max;
	    }
	  /* Choose the left gap if the right is empty.  */
	  else if (maxeq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0min) == INTEGER_CST)
		*vr0max = int_const_binop (MINUS_EXPR, *vr0min,
					   build_int_cst (TREE_TYPE (*vr0min), 1));
	      else
		*vr0max = *vr0min;
	      *vr0min = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (vr1min)
		   && vrp_val_is_max (vr1max))
	    ;
	  /* Else choose the range.  */
	  else
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If both are anti-ranges the result is the outer one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (vr1type == VR_ANTI_RANGE
	       && *vr0type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (vr1min, *vr0max) == 1
	    || operand_equal_p (vr1min, *vr0max, 0))
	   && operand_less_p (*vr0min, vr1min) == 1)
    {
      /* [  (  ]  ) or [  ](  ) */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       build_int_cst (TREE_TYPE (vr1min), 1));
	  else
	    *vr0max = vr1min;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
				       build_int_cst (TREE_TYPE (*vr0max), 1));
	  else
	    *vr0min = *vr0max;
	  *vr0max = vr1max;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (*vr0min, vr1max) == 1
	    || operand_equal_p (*vr0min, vr1max, 0))
	   && operand_less_p (vr1min, *vr0min) == 1)
    {
      /* ( [  )  ] or (  )[  ] */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       build_int_cst (TREE_TYPE (vr1max), 1));
	  else
	    *vr0min = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
				       build_int_cst (TREE_TYPE (*vr0min), 1));
	  else
	    *vr0max = *vr0min;
	  *vr0min = vr1min;
	}
      else
	gcc_unreachable ();
    }

  /* As a fallback simply use { *VRTYPE, *VR0MIN, *VR0MAX } as
     result for the intersection.  That's always a conservative
     correct estimate.  */

  return;
}

/* Intersect the two value-ranges *VR0 and *VR1 and store the result
   in *VR0.  This may not be the smallest possible such range.
*/ static void vrp_intersect_ranges_1 (value_range_t *vr0, value_range_t *vr1) { value_range_t saved; /* If either range is VR_VARYING the other one wins. */ if (vr1->type == VR_VARYING) return; if (vr0->type == VR_VARYING) { copy_value_range (vr0, vr1); return; } /* When either range is VR_UNDEFINED the resulting range is VR_UNDEFINED, too. */ if (vr0->type == VR_UNDEFINED) return; if (vr1->type == VR_UNDEFINED) { set_value_range_to_undefined (vr0); return; } /* Save the original vr0 so we can return it as conservative intersection result when our worker turns things to varying. */ saved = *vr0; intersect_ranges (&vr0->type, &vr0->min, &vr0->max, vr1->type, vr1->min, vr1->max); /* Make sure to canonicalize the result though as the inversion of a VR_RANGE can still be a VR_RANGE. */ set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max, vr0->equiv); /* If that failed, use the saved original VR0. */ if (vr0->type == VR_VARYING) { *vr0 = saved; return; } /* If the result is VR_UNDEFINED there is no need to mess with the equivalencies. */ if (vr0->type == VR_UNDEFINED) return; /* The resulting set of equivalences for range intersection is the union of the two sets. */ if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv) bitmap_ior_into (vr0->equiv, vr1->equiv); else if (vr1->equiv && !vr0->equiv) bitmap_copy (vr0->equiv, vr1->equiv); } static void vrp_intersect_ranges (value_range_t *vr0, value_range_t *vr1) { if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Intersecting\n "); dump_value_range (dump_file, vr0); fprintf (dump_file, "\nand\n "); dump_value_range (dump_file, vr1); fprintf (dump_file, "\n"); } vrp_intersect_ranges_1 (vr0, vr1); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "to\n "); dump_value_range (dump_file, vr0); fprintf (dump_file, "\n"); } } /* Meet operation for value ranges. Given two value ranges VR0 and VR1, store in VR0 a range that contains both VR0 and VR1. 
   This may not be the smallest possible such range.  */

static void
vrp_meet_1 (value_range_t *vr0, value_range_t *vr1)
{
  value_range_t saved;

  if (vr0->type == VR_UNDEFINED)
    {
      set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
      return;
    }

  if (vr1->type == VR_UNDEFINED)
    {
      /* VR0 already has the resulting range.  */
      return;
    }

  if (vr0->type == VR_VARYING)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  if (vr1->type == VR_VARYING)
    {
      set_value_range_to_varying (vr0);
      return;
    }

  saved = *vr0;
  union_ranges (&vr0->type, &vr0->min, &vr0->max,
		vr1->type, vr1->min, vr1->max);
  if (vr0->type == VR_VARYING)
    {
      /* Failed to find an efficient meet.  Before giving up and setting
	 the result to VARYING, see if we can at least derive a useful
	 anti-range.  FIXME, all this nonsense about distinguishing
	 anti-ranges from ranges is necessary because of the odd
	 semantics of range_includes_zero_p and friends.  */
      if (((saved.type == VR_RANGE
	    && range_includes_zero_p (saved.min, saved.max) == 0)
	   || (saved.type == VR_ANTI_RANGE
	       && range_includes_zero_p (saved.min, saved.max) == 1))
	  && ((vr1->type == VR_RANGE
	       && range_includes_zero_p (vr1->min, vr1->max) == 0)
	      || (vr1->type == VR_ANTI_RANGE
		  && range_includes_zero_p (vr1->min, vr1->max) == 1)))
	{
	  /* Both inputs exclude zero, so at least ~[0, 0] holds.  */
	  set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));

	  /* Since this meet operation did not result from the meeting of
	     two equivalent names, VR0 cannot have any equivalences.  */
	  if (vr0->equiv)
	    bitmap_clear (vr0->equiv);
	  return;
	}

      set_value_range_to_varying (vr0);
      return;
    }
  set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
				    vr0->equiv);
  if (vr0->type == VR_VARYING)
    return;

  /* The resulting set of equivalences is always the intersection of
     the two sets.  */
  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
    bitmap_and_into (vr0->equiv, vr1->equiv);
  else if (vr0->equiv && !vr1->equiv)
    bitmap_clear (vr0->equiv);
}

/* Dump-wrapped entry point for the meet operation: logs both inputs
   and the result around vrp_meet_1 when detailed dumping is on.  */

static void
vrp_meet (value_range_t *vr0, value_range_t *vr1)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Meeting\n ");
      dump_value_range (dump_file, vr0);
      fprintf (dump_file, "\nand\n ");
      dump_value_range (dump_file, vr1);
      fprintf (dump_file, "\n");
    }
  vrp_meet_1 (vr0, vr1);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "to\n ");
      dump_value_range (dump_file, vr0);
      fprintf (dump_file, "\n");
    }
}

/* Visit all arguments for PHI node PHI that flow through executable
   edges.  If a valid value range can be derived from all the incoming
   value ranges, set a new range for the LHS of PHI.  */

static enum ssa_prop_result
vrp_visit_phi_node (gimple phi)
{
  size_t i;
  tree lhs = PHI_RESULT (phi);
  value_range_t *lhs_vr = get_value_range (lhs);
  value_range_t vr_result = VR_INITIALIZER;
  bool first = true;
  int edges, old_edges;
  struct loop *l;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  edges = 0;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
	      " Argument #%d (%d -> %d %sexecutable)\n",
	      (int) i, e->src->index, e->dest->index,
	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = PHI_ARG_DEF (phi, i);
	  value_range_t vr_arg;

	  ++edges;

	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      vr_arg = *(get_value_range (arg));
	      /* Do not allow equivalences or symbolic ranges to leak in from
		 backedges.  That creates invalid equivalencies.
		 See PR53465 and PR54767.
		 */
	      if (e->flags & EDGE_DFS_BACK)
		{
		  if (vr_arg.type == VR_RANGE
		      || vr_arg.type == VR_ANTI_RANGE)
		    {
		      vr_arg.equiv = NULL;
		      if (symbolic_range_p (&vr_arg))
			{
			  vr_arg.type = VR_VARYING;
			  vr_arg.min = NULL_TREE;
			  vr_arg.max = NULL_TREE;
			}
		    }
		}
	      else
		{
		  /* If the non-backedge arguments range is VR_VARYING then
		     we can still try recording a simple equivalence.  */
		  if (vr_arg.type == VR_VARYING)
		    {
		      vr_arg.type = VR_RANGE;
		      vr_arg.min = arg;
		      vr_arg.max = arg;
		      vr_arg.equiv = NULL;
		    }
		}
	    }
	  else
	    {
	      /* Constant argument: use the singleton range [arg, arg],
		 dropping any stale overflow flag first.  */
	      if (TREE_OVERFLOW_P (arg))
		arg = drop_tree_overflow (arg);

	      vr_arg.type = VR_RANGE;
	      vr_arg.min = arg;
	      vr_arg.max = arg;
	      vr_arg.equiv = NULL;
	    }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      fprintf (dump_file, ": ");
	      dump_value_range (dump_file, &vr_arg);
	      fprintf (dump_file, "\n");
	    }

	  if (first)
	    copy_value_range (&vr_result, &vr_arg);
	  else
	    vrp_meet (&vr_result, &vr_arg);
	  first = false;

	  if (vr_result.type == VR_VARYING)
	    break;
	}
    }

  if (vr_result.type == VR_VARYING)
    goto varying;
  else if (vr_result.type == VR_UNDEFINED)
    goto update_range;

  old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
  vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;

  /* To prevent infinite iterations in the algorithm, derive ranges
     when the new value is slightly bigger or smaller than the
     previous one.  We don't do this if we have seen a new executable
     edge; this helps us avoid an overflow infinity for conditionals
     which are not in a loop.  If the old value-range was VR_UNDEFINED
     use the updated range and iterate one more time.  */
  if (edges > 0
      && gimple_phi_num_args (phi) > 1
      && edges == old_edges
      && lhs_vr->type != VR_UNDEFINED)
    {
      /* Compare old and new ranges, fall back to varying if
	 the values are not comparable.  */
      int cmp_min = compare_values (lhs_vr->min, vr_result.min);
      if (cmp_min == -2)
	goto varying;
      int cmp_max = compare_values (lhs_vr->max, vr_result.max);
      if (cmp_max == -2)
	goto varying;

      /* For non VR_RANGE or for pointers fall back to varying if
	 the range changed.  */
      if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
	   || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && (cmp_min != 0 || cmp_max != 0))
	goto varying;

      /* If the new minimum is larger than the previous one
	 retain the old value.  If the new minimum value is smaller
	 than the previous one and not -INF go all the way to -INF + 1.
	 In the first case, to avoid infinite bouncing between different
	 minimums, and in the other case to avoid iterating millions of
	 times to reach -INF.  Going to -INF + 1 also lets the following
	 iteration compute whether there will be any overflow, at the
	 expense of one additional iteration.  */
      if (cmp_min < 0)
	vr_result.min = lhs_vr->min;
      else if (cmp_min > 0
	       && !vrp_val_is_min (vr_result.min))
	vr_result.min
	  = int_const_binop (PLUS_EXPR,
			     vrp_val_min (TREE_TYPE (vr_result.min)),
			     build_int_cst (TREE_TYPE (vr_result.min), 1));

      /* Similarly for the maximum value.  */
      if (cmp_max > 0)
	vr_result.max = lhs_vr->max;
      else if (cmp_max < 0
	       && !vrp_val_is_max (vr_result.max))
	vr_result.max
	  = int_const_binop (MINUS_EXPR,
			     vrp_val_max (TREE_TYPE (vr_result.min)),
			     build_int_cst (TREE_TYPE (vr_result.min), 1));

      /* If we dropped either bound to +-INF then if this is a loop
	 PHI node SCEV may know more about its value-range.  */
      if ((cmp_min > 0 || cmp_min < 0
	   || cmp_max < 0 || cmp_max > 0)
	  && (l = loop_containing_stmt (phi))
	  && l->header == gimple_bb (phi))
	adjust_range_with_scev (&vr_result, l, phi, lhs);

      /* If we will end up with a (-INF, +INF) range, set it to
	 VARYING.  Same if the previous max value was invalid for
	 the type and we end up with vr_result.min > vr_result.max.
*/ if ((vrp_val_is_max (vr_result.max) && vrp_val_is_min (vr_result.min)) || compare_values (vr_result.min, vr_result.max) > 0) goto varying; } /* If the new range is different than the previous value, keep iterating. */ update_range: if (update_value_range (lhs, &vr_result)) { if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Found new range for "); print_generic_expr (dump_file, lhs, 0); fprintf (dump_file, ": "); dump_value_range (dump_file, &vr_result); fprintf (dump_file, "\n"); } return SSA_PROP_INTERESTING; } /* Nothing changed, don't add outgoing edges. */ return SSA_PROP_NOT_INTERESTING; /* No match found. Set the LHS to VARYING. */ varying: set_value_range_to_varying (lhs_vr); return SSA_PROP_VARYING; } /* Simplify boolean operations if the source is known to be already a boolean. */ static bool simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) { enum tree_code rhs_code = gimple_assign_rhs_code (stmt); tree lhs, op0, op1; bool need_conversion; /* We handle only !=/== case here. */ gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR); op0 = gimple_assign_rhs1 (stmt); if (!op_with_boolean_value_range_p (op0)) return false; op1 = gimple_assign_rhs2 (stmt); if (!op_with_boolean_value_range_p (op1)) return false; /* Reduce number of cases to handle to NE_EXPR. As there is no BIT_XNOR_EXPR we cannot replace A == B with a single statement. */ if (rhs_code == EQ_EXPR) { if (TREE_CODE (op1) == INTEGER_CST) op1 = int_const_binop (BIT_XOR_EXPR, op1, build_int_cst (TREE_TYPE (op1), 1)); else return false; } lhs = gimple_assign_lhs (stmt); need_conversion = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0)); /* Make sure to not sign-extend a 1-bit 1 when converting the result. */ if (need_conversion && !TYPE_UNSIGNED (TREE_TYPE (op0)) && TYPE_PRECISION (TREE_TYPE (op0)) == 1 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1) return false; /* For A != 0 we can substitute A itself. 
*/
  /* For A != 0 we can substitute A itself (comment opened above).  */
  if (integer_zerop (op1))
    gimple_assign_set_rhs_with_ops (gsi,
				    need_conversion
				    ? NOP_EXPR : TREE_CODE (op0),
				    op0, NULL_TREE);
  /* For A != B we substitute A ^ B.  Either with conversion.  */
  else if (need_conversion)
    {
      tree tem = make_ssa_name (TREE_TYPE (op0), NULL);
      gimple newop = gimple_build_assign_with_ops (BIT_XOR_EXPR,
						   tem, op0, op1);
      gsi_insert_before (gsi, newop, GSI_SAME_STMT);
      gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE);
    }
  /* Or without.  */
  else
    gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
  update_stmt (gsi_stmt (*gsi));

  return true;
}

/* Simplify a division or modulo operator to a right shift or
   bitwise and if the first operand is unsigned or is greater
   than zero and the second operand is an exact power of two.
   Returns true if STMT was rewritten in place.  */

static bool
simplify_div_or_mod_using_ranges (gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree val = NULL;
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));

  if (TYPE_UNSIGNED (TREE_TYPE (op0)))
    {
      /* Unsigned dividends are trivially non-negative.  */
      val = integer_one_node;
    }
  else
    {
      bool sop = false;

      /* For signed types ask the range machinery whether op0 >= 0;
	 SOP is set when the answer relies on undefined signed
	 overflow.  */
      val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);

      if (val
	  && sop
	  && integer_onep (val)
	  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning_at (location, OPT_Wstrict_overflow,
		      "assuming signed overflow does not occur when "
		      "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
	}
    }

  if (val && integer_onep (val))
    {
      tree t;

      if (rhs_code == TRUNC_DIV_EXPR)
	{
	  /* x / 2^n  ->  x >> n.  */
	  t = build_int_cst (integer_type_node, tree_log2 (op1));
	  gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
	  gimple_assign_set_rhs1 (stmt, op0);
	  gimple_assign_set_rhs2 (stmt, t);
	}
      else
	{
	  /* x % 2^n  ->  x & (2^n - 1).  */
	  t = build_int_cst (TREE_TYPE (op1), 1);
	  t = int_const_binop (MINUS_EXPR, op1, t);
	  t = fold_convert (TREE_TYPE (op0), t);

	  gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
	  gimple_assign_set_rhs1 (stmt, op0);
	  gimple_assign_set_rhs2 (stmt, t);
	}

      update_stmt (stmt);
      return true;
    }

  return false;
}

/* If the operand to an ABS_EXPR is >= 0, then eliminate the
   ABS_EXPR.  If the operand is <= 0, then simplify the
   ABS_EXPR into a NEGATE_EXPR.  Returns true if STMT was
   changed.  */

static bool
simplify_abs_using_ranges (gimple stmt)
{
  tree val = NULL;
  tree op = gimple_assign_rhs1 (stmt);
  tree type = TREE_TYPE (op);
  value_range_t *vr = get_value_range (op);

  if (TYPE_UNSIGNED (type))
    {
      /* abs of an unsigned value is the value itself; VAL == 0
	 selects the identity (SSA_NAME) rewrite below.  */
      val = integer_zero_node;
    }
  else if (vr)
    {
      bool sop = false;

      /* VAL == 1 means op <= 0 (negate), VAL == 0 means op >= 0
	 (identity); the GE query result is inverted to match.  */
      val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
      if (!val)
	{
	  sop = false;
	  val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
					  &sop);

	  if (val)
	    {
	      if (integer_zerop (val))
		val = integer_one_node;
	      else if (integer_onep (val))
		val = integer_zero_node;
	    }
	}

      if (val
	  && (integer_onep (val) || integer_zerop (val)))
	{
	  if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	    {
	      location_t location;

	      if (!gimple_has_location (stmt))
		location = input_location;
	      else
		location = gimple_location (stmt);
	      warning_at (location, OPT_Wstrict_overflow,
			  "assuming signed overflow does not occur when "
			  "simplifying %<abs (X)%> to %<X%> or %<-X%>");
	    }

	  gimple_assign_set_rhs1 (stmt, op);
	  if (integer_onep (val))
	    gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
	  else
	    gimple_assign_set_rhs_code (stmt, SSA_NAME);
	  update_stmt (stmt);
	  return true;
	}
    }

  return false;
}

/* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
   If all the bits that are being cleared by & are already
   known to be zero from VR, or all the bits that are being
   set by | are already known to be one from VR, the bit
   operation is redundant.
*/

static bool
simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  tree op = NULL_TREE;
  value_range_t vr0 = VR_INITIALIZER;
  value_range_t vr1 = VR_INITIALIZER;
  wide_int may_be_nonzero0, may_be_nonzero1;
  wide_int must_be_nonzero0, must_be_nonzero1;
  wide_int mask;

  /* Fetch (or synthesize, for constants) the ranges of both operands;
     bail out for anything that is neither an SSA name nor an
     invariant.  */
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *(get_value_range (op0));
  else if (is_gimple_min_invariant (op0))
    set_value_range_to_value (&vr0, op0, NULL);
  else
    return false;

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *(get_value_range (op1));
  else if (is_gimple_min_invariant (op1))
    set_value_range_to_value (&vr1, op1, NULL);
  else
    return false;

  if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
				  &must_be_nonzero0))
    return false;
  if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
				  &must_be_nonzero1))
    return false;

  switch (gimple_assign_rhs_code (stmt))
    {
    case BIT_AND_EXPR:
      /* op0 & op1 == op0 when every bit that may be set in op0 is
	 known to be set in op1 (and vice versa for op1).  */
      mask = may_be_nonzero0.and_not (must_be_nonzero1);
      if (mask == 0)
	{
	  op = op0;
	  break;
	}
      mask = may_be_nonzero1.and_not (must_be_nonzero0);
      if (mask == 0)
	{
	  op = op1;
	  break;
	}
      break;
    case BIT_IOR_EXPR:
      /* op0 | op1 == op1 when every bit that may be set in op0 is
	 already known to be set in op1 (and vice versa).  */
      mask = may_be_nonzero0.and_not (must_be_nonzero1);
      if (mask == 0)
	{
	  op = op1;
	  break;
	}
      mask = may_be_nonzero1.and_not (must_be_nonzero0);
      if (mask == 0)
	{
	  op = op0;
	  break;
	}
      break;
    default:
      gcc_unreachable ();
    }

  if (op == NULL_TREE)
    return false;

  /* Replace the bit operation by a plain copy of the surviving
     operand.  */
  gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
  update_stmt (gsi_stmt (*gsi));
  return true;
}

/* We are comparing trees OP0 and OP1 using COND_CODE.  OP0 has
   a known value range VR.

   If there is one and only one value which will satisfy the
   conditional, then return that value.  Else return NULL.  */

static tree
test_for_singularity (enum tree_code cond_code, tree op0,
		      tree op1, value_range_t *vr)
{
  tree min = NULL;
  tree max = NULL;

  /* Extract minimum/maximum values which satisfy the conditional
     as it was written.
*/
  if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      /* This should not be negative infinity; there is no overflow
	 here.  */
      min = TYPE_MIN_VALUE (TREE_TYPE (op0));

      max = op1;
      /* For a strict comparison tighten the bound by one, unless MAX
	 is an overflow infinity where MAX - 1 would be meaningless.  */
      if (cond_code == LT_EXPR && !is_overflow_infinity (max))
	{
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
	  if (EXPR_P (max))
	    TREE_NO_WARNING (max) = 1;
	}
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      /* This should not be positive infinity; there is no overflow
	 here.  */
      max = TYPE_MAX_VALUE (TREE_TYPE (op0));

      min = op1;
      if (cond_code == GT_EXPR && !is_overflow_infinity (min))
	{
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
	  if (EXPR_P (min))
	    TREE_NO_WARNING (min) = 1;
	}
    }

  /* Now refine the minimum and maximum values using any
     value range information we have for op0.  */
  if (min && max)
    {
      if (compare_values (vr->min, min) == 1)
	min = vr->min;
      if (compare_values (vr->max, max) == -1)
	max = vr->max;

      /* If the new min/max values have converged to a single value,
	 then there is only one value which can satisfy the condition,
	 return that value.  */
      if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
	return min;
    }
  return NULL;
}

/* Return whether the value range *VR fits in an integer type specified
   by PRECISION and UNSIGNED_P.  */

static bool
range_fits_type_p (value_range_t *vr, unsigned dest_precision, signop dest_sgn)
{
  tree src_type;
  unsigned src_precision;
  widest_int tem;
  signop src_sgn;

  /* We can only handle integral and pointer types.  */
  src_type = TREE_TYPE (vr->min);
  if (!INTEGRAL_TYPE_P (src_type)
      && !POINTER_TYPE_P (src_type))
    return false;

  /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
     and so is an identity transform.
*/
  src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
  src_sgn = TYPE_SIGN (src_type);
  if ((src_precision < dest_precision
       && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
      || (src_precision == dest_precision && src_sgn == dest_sgn))
    return true;

  /* Now we can only handle ranges with constant bounds.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* For sign changes, the MSB of the wide_int has to be clear.
     An unsigned value with its MSB set cannot be represented by
     a signed wide_int, while a negative value cannot be represented
     by an unsigned wide_int.  */
  if (src_sgn != dest_sgn
      && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
    return false;

  /* Then we can perform the conversion on both ends and compare
     the result for equality.  */
  tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
  if (tem != wi::to_widest (vr->min))
    return false;
  tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
  if (tem != wi::to_widest (vr->max))
    return false;

  return true;
}

/* Simplify a conditional using a relational operator to an equality
   test if the range information indicates only one value can satisfy
   the original conditional.  Returns true if STMT was changed.  */

static bool
simplify_cond_using_ranges (gimple stmt)
{
  tree op0 = gimple_cond_lhs (stmt);
  tree op1 = gimple_cond_rhs (stmt);
  enum tree_code cond_code = gimple_cond_code (stmt);

  if (cond_code != NE_EXPR
      && cond_code != EQ_EXPR
      && TREE_CODE (op0) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && is_gimple_min_invariant (op1))
    {
      value_range_t *vr = get_value_range (op0);

      /* If we have range information for OP0, then we might be
	 able to simplify this conditional.  */
      if (vr->type == VR_RANGE)
	{
	  /* First try to rewrite the relational test as an equality
	     against the single satisfying value, if there is one.  */
	  tree new_tree = test_for_singularity (cond_code, op0, op1, vr);

	  if (new_tree)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, " into ");
		}

	      gimple_cond_set_code (stmt, EQ_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, "\n");
		}

	      return true;
	    }

	  /* Try again after inverting the condition.  We only deal
	     with integral types here, so no need to worry about
	     issues with inverting FP comparisons.  */
	  cond_code = invert_tree_comparison (cond_code, false);
	  new_tree = test_for_singularity (cond_code, op0, op1, vr);

	  if (new_tree)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, " into ");
		}

	      gimple_cond_set_code (stmt, NE_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, "\n");
		}

	      return true;
	    }
	}
    }

  /* If we have a comparison of an SSA_NAME (OP0) against a constant,
     see if OP0 was set by a type conversion where the source of
     the conversion is another SSA_NAME with a range that fits
     into the range of OP0's type.

     If so, the conversion is redundant as the earlier SSA_NAME can be
     used for the comparison directly if we just massage the constant in the
     comparison.  */
  if (TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == INTEGER_CST)
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (op0);
      tree innerop;

      if (!is_gimple_assign (def_stmt)
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
	return false;

      innerop = gimple_assign_rhs1 (def_stmt);
      if (TREE_CODE (innerop) == SSA_NAME
	  && !POINTER_TYPE_P (TREE_TYPE (innerop)))
	{
	  value_range_t *vr = get_value_range (innerop);

	  if (range_int_cst_p (vr)
	      && range_fits_type_p (vr,
				    TYPE_PRECISION (TREE_TYPE (op0)),
				    TYPE_SIGN (TREE_TYPE (op0)))
	      && int_fits_type_p (op1, TREE_TYPE (innerop))
	      /* The range must not have overflowed, or if it did overflow
		 we must not be wrapping/trapping overflow and optimizing
		 with strict overflow semantics.  */
	      && ((!is_negative_overflow_infinity (vr->min)
		   && !is_positive_overflow_infinity (vr->max))
		  || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop))))
	    {
	      /* If the range overflowed and the user has asked for warnings
		 when strict overflow semantics were used to optimize code,
		 issue an appropriate warning.  */
	      if ((is_negative_overflow_infinity (vr->min)
		   || is_positive_overflow_infinity (vr->max))
		  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL))
		{
		  location_t location;

		  if (!gimple_has_location (stmt))
		    location = input_location;
		  else
		    location = gimple_location (stmt);
		  warning_at (location, OPT_Wstrict_overflow,
			      "assuming signed overflow does not occur when "
			      "simplifying conditional");
		}

	      tree newconst = fold_convert (TREE_TYPE (innerop), op1);
	      gimple_cond_set_lhs (stmt, innerop);
	      gimple_cond_set_rhs (stmt, newconst);
	      return true;
	    }
	}
    }

  return false;
}

/* Simplify a switch statement using the value range of the switch
   argument.
*/

static bool
simplify_switch_using_ranges (gimple stmt)
{
  tree op = gimple_switch_index (stmt);
  value_range_t *vr;
  bool take_default;
  edge e;
  edge_iterator ei;
  size_t i = 0, j = 0, n, n2;
  tree vec2;
  switch_update su;
  /* [i, j] and [k, l] are the (up to two) index ranges of case labels
     that remain reachable; an anti-range can leave two pieces.  */
  size_t k = 1, l = 0;

  if (TREE_CODE (op) == SSA_NAME)
    {
      vr = get_value_range (op);

      /* We can only handle integer ranges.  */
      if ((vr->type != VR_RANGE
	   && vr->type != VR_ANTI_RANGE)
	  || symbolic_range_p (vr))
	return false;

      /* Find case label for min/max of the value range.  */
      take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
    }
  else if (TREE_CODE (op) == INTEGER_CST)
    {
      take_default = !find_case_label_index (stmt, 1, op, &i);
      if (take_default)
	{
	  i = 1;
	  j = 0;
	}
      else
	{
	  j = i;
	}
    }
  else
    return false;

  n = gimple_switch_num_labels (stmt);

  /* Bail out if this is just all edges taken.  */
  if (i == 1
      && j == n - 1
      && take_default)
    return false;

  /* Build a new vector of taken case labels.  */
  vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
  n2 = 0;

  /* Add the default edge, if necessary.  */
  if (take_default)
    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);

  for (; i <= j; ++i, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);

  for (; k <= l; ++k, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);

  /* Mark needed edges.  */
  for (i = 0; i < n2; ++i)
    {
      e = find_edge (gimple_bb (stmt),
		     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
      e->aux = (void *)-1;
    }

  /* Queue not needed edges for later removal.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    {
      if (e->aux == (void *)-1)
	{
	  e->aux = NULL;
	  continue;
	}
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "removing unreachable case label\n");
	}
      to_remove_edges.safe_push (e);
      e->flags &= ~EDGE_EXECUTABLE;
    }

  /* And queue an update for the stmt.  Note the CFG/stmt updates are
     deferred, so this function deliberately always returns false.  */
  su.stmt = stmt;
  su.vec = vec2;
  to_update_switch_stmts.safe_push (su);
  return false;
}

/* Simplify an integral conversion from an SSA name in STMT.
*/

static bool
simplify_conversion_using_ranges (gimple stmt)
{
  tree innerop, middleop, finaltype;
  gimple def_stmt;
  value_range_t *innervr;
  signop inner_sgn, middle_sgn, final_sgn;
  unsigned inner_prec, middle_prec, final_prec;
  widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;

  finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
  if (!INTEGRAL_TYPE_P (finaltype))
    return false;
  middleop = gimple_assign_rhs1 (stmt);
  def_stmt = SSA_NAME_DEF_STMT (middleop);
  if (!is_gimple_assign (def_stmt)
      || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
    return false;
  innerop = gimple_assign_rhs1 (def_stmt);
  if (TREE_CODE (innerop) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
    return false;

  /* Get the value-range of the inner operand.  */
  innervr = get_value_range (innerop);
  if (innervr->type != VR_RANGE
      || TREE_CODE (innervr->min) != INTEGER_CST
      || TREE_CODE (innervr->max) != INTEGER_CST)
    return false;

  /* Simulate the conversion chain to check if the result is equal if
     the middle conversion is removed.  */
  innermin = wi::to_widest (innervr->min);
  innermax = wi::to_widest (innervr->max);

  inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
  middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
  final_prec = TYPE_PRECISION (finaltype);

  /* If the first conversion is not injective, the second must not
     be widening.  */
  if (wi::gtu_p (innermax - innermin,
		 wi::mask <widest_int> (middle_prec, false))
      && middle_prec < final_prec)
    return false;
  /* We also want a medium value so that we can track the effect that
     narrowing conversions with sign change have.  */
  inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
  if (inner_sgn == UNSIGNED)
    innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
  else
    innermed = 0;
  if (wi::cmp (innermin, innermed, inner_sgn) >= 0
      || wi::cmp (innermed, innermax, inner_sgn) >= 0)
    innermed = innermin;

  middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
  middlemin = wi::ext (innermin, middle_prec, middle_sgn);
  middlemed = wi::ext (innermed, middle_prec, middle_sgn);
  middlemax = wi::ext (innermax, middle_prec, middle_sgn);

  /* Require that the final conversion applied to both the original
     and the intermediate range produces the same result.  */
  final_sgn = TYPE_SIGN (finaltype);
  if (wi::ext (middlemin, final_prec, final_sgn)
	 != wi::ext (innermin, final_prec, final_sgn)
      || wi::ext (middlemed, final_prec, final_sgn)
	 != wi::ext (innermed, final_prec, final_sgn)
      || wi::ext (middlemax, final_prec, final_sgn)
	 != wi::ext (innermax, final_prec, final_sgn))
    return false;

  /* The middle conversion is redundant; convert straight from the
     inner operand.  */
  gimple_assign_set_rhs1 (stmt, innerop);
  update_stmt (stmt);
  return true;
}

/* Simplify a conversion from integral SSA name to float in STMT.  */

static bool
simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  value_range_t *vr = get_value_range (rhs1);
  enum machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
  enum machine_mode mode;
  tree tem;
  gimple conv;

  /* We can only handle constant ranges.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* First check if we can use a signed type in place of an unsigned.  */
  if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
      && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
	  != CODE_FOR_nothing)
      && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
    mode = TYPE_MODE (TREE_TYPE (rhs1));
  /* If we can do the conversion in the current input mode do nothing.  */
  else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
			TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
    return false;
  /* Otherwise search for a mode we can use, starting from the narrowest
     integer mode available.  */
  else
    {
      mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
      do
	{
	  /* If we cannot do a signed conversion to float from mode
	     or if the value-range does not fit in the signed type
	     try with a wider mode.  */
	  if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
	      && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
	    break;

	  mode = GET_MODE_WIDER_MODE (mode);
	  /* But do not widen the input.  Instead leave that to the
	     optabs expansion code.  */
	  if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
	    return false;
	}
      while (mode != VOIDmode);
      if (mode == VOIDmode)
	return false;
    }

  /* It works, insert a truncation or sign-change before the
     float conversion.  */
  tem = make_ssa_name (build_nonstandard_integer_type
			  (GET_MODE_PRECISION (mode), 0), NULL);
  conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
  gsi_insert_before (gsi, conv, GSI_SAME_STMT);
  gimple_assign_set_rhs1 (stmt, tem);
  update_stmt (stmt);

  return true;
}

/* Simplify an internal fn call using ranges if possible.
*/

static bool
simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  enum tree_code subcode;

  /* Map the UBSAN overflow-checking builtin to the arithmetic it
     guards; anything else is left alone.  */
  switch (gimple_call_internal_fn (stmt))
    {
    case IFN_UBSAN_CHECK_ADD:
      subcode = PLUS_EXPR;
      break;
    case IFN_UBSAN_CHECK_SUB:
      subcode = MINUS_EXPR;
      break;
    case IFN_UBSAN_CHECK_MUL:
      subcode = MULT_EXPR;
      break;
    default:
      return false;
    }

  value_range_t vr0 = VR_INITIALIZER;
  value_range_t vr1 = VR_INITIALIZER;
  tree op0 = gimple_call_arg (stmt, 0);
  tree op1 = gimple_call_arg (stmt, 1);

  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *get_value_range (op0);
  else if (TREE_CODE (op0) == INTEGER_CST)
    set_value_range_to_value (&vr0, op0, NULL);
  else
    set_value_range_to_varying (&vr0);

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *get_value_range (op1);
  else if (TREE_CODE (op1) == INTEGER_CST)
    set_value_range_to_value (&vr1, op1, NULL);
  else
    set_value_range_to_varying (&vr1);

  if (!range_int_cst_p (&vr0))
    {
      /* If one range is VR_ANTI_RANGE, VR_VARYING etc.,
	 optimize at least x = y + 0; x = y - 0; x = y * 0;
	 and x = y * 1; which never overflow.  */
      if (!range_int_cst_p (&vr1))
	return false;
      if (tree_int_cst_sgn (vr1.min) == -1)
	return false;
      /* The constant bound is 0 for +/- and 1 for *; note
	 SUBCODE == MULT_EXPR evaluates to that 0/1 here.  */
      if (compare_tree_int (vr1.max, subcode == MULT_EXPR) == 1)
	return false;
    }
  else if (!range_int_cst_p (&vr1))
    {
      /* If one range is VR_ANTI_RANGE, VR_VARYING etc.,
	 optimize at least x = 0 + y; x = 0 * y; and
	 x = 1 * y; which never overflow.  */
      if (subcode == MINUS_EXPR)
	return false;
      if (!range_int_cst_p (&vr0))
	return false;
      if (tree_int_cst_sgn (vr0.min) == -1)
	return false;
      if (compare_tree_int (vr0.max, subcode == MULT_EXPR) == 1)
	return false;
    }
  else
    {
      /* Both ranges are constant; prove the operation cannot overflow
	 for any combination of the range extremes.  */
      tree r1 = int_const_binop (subcode, vr0.min, vr1.min);
      tree r2 = int_const_binop (subcode, vr0.max, vr1.max);
      if (r1 == NULL_TREE || TREE_OVERFLOW (r1)
	  || r2 == NULL_TREE || TREE_OVERFLOW (r2))
	return false;
      if (subcode == MULT_EXPR)
	{
	  tree r3 = int_const_binop (subcode, vr0.min, vr1.max);
	  tree r4 = int_const_binop (subcode, vr0.max, vr1.min);
	  if (r3 == NULL_TREE || TREE_OVERFLOW (r3)
	      || r4 == NULL_TREE || TREE_OVERFLOW (r4))
	    return false;
	}
    }

  /* The check can never fire; replace the call by the plain
     arithmetic statement.  */
  gimple g = gimple_build_assign_with_ops (subcode, gimple_call_lhs (stmt),
					   op0, op1);
  gsi_replace (gsi, g, false);
  return true;
}

/* Simplify STMT using ranges if possible.  Dispatches to the
   per-statement-kind simplifiers above.  */

static bool
simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  if (is_gimple_assign (stmt))
    {
      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
      tree rhs1 = gimple_assign_rhs1 (stmt);

      switch (rhs_code)
	{
	case EQ_EXPR:
	case NE_EXPR:
	  /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
	     if the RHS is zero or one, and the LHS are known to be boolean
	     values.  */
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_truth_ops_using_ranges (gsi, stmt);
	  break;

      /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
	 and BIT_AND_EXPR respectively if the first operand is greater
	 than zero and the second operand is an exact power of two.  */
	case TRUNC_DIV_EXPR:
	case TRUNC_MOD_EXPR:
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	      && integer_pow2p (gimple_assign_rhs2 (stmt)))
	    return simplify_div_or_mod_using_ranges (stmt);
	  break;

      /* Transform ABS (X) into X or -X as appropriate.  */
	case ABS_EXPR:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_abs_using_ranges (stmt);
	  break;

	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	  /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
	     if all the bits being cleared are already cleared or
	     all the bits being set are already set.  */
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_bit_ops_using_ranges (gsi, stmt);
	  break;

	CASE_CONVERT:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_conversion_using_ranges (stmt);
	  break;

	case FLOAT_EXPR:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_float_conversion_using_ranges (gsi, stmt);
	  break;

	default:
	  break;
	}
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    return simplify_cond_using_ranges (stmt);
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    return simplify_switch_using_ranges (stmt);
  else if (is_gimple_call (stmt)
	   && gimple_call_internal_p (stmt))
    return simplify_internal_call_using_ranges (gsi, stmt);

  return false;
}

/* If the statement pointed by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.
*/

static bool
fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple stmt = gsi_stmt (*si);

  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
				      gimple_assign_rhs1 (stmt),
				      gimple_assign_rhs2 (stmt),
				      stmt);
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    val = vrp_evaluate_conditional (gimple_cond_code (stmt),
				    gimple_cond_lhs (stmt),
				    gimple_cond_rhs (stmt),
				    stmt);
  else
    return false;

  if (val)
    {
      if (assignment_p)
	/* The comparison folded to a constant; coerce it to the
	   type of the assignment's LHS.  */
	val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
	{
	  fprintf (dump_file, "Folding predicate ");
	  print_gimple_expr (dump_file, stmt, 0, 0);
	  fprintf (dump_file, " to ");
	  print_generic_expr (dump_file, val, 0);
	  fprintf (dump_file, "\n");
	}

      if (is_gimple_assign (stmt))
	gimple_assign_set_rhs_from_tree (si, val);
      else
	{
	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
	  if (integer_zerop (val))
	    gimple_cond_make_false (stmt);
	  else if (integer_onep (val))
	    gimple_cond_make_true (stmt);
	  else
	    gcc_unreachable ();
	}

      return true;
    }

  return false;
}

/* Callback for substitute_and_fold folding the stmt at *SI.  */

static bool
vrp_fold_stmt (gimple_stmt_iterator *si)
{
  /* Try folding whole predicates first, then fall back to the
     statement-level range simplifications.  */
  if (fold_predicate_in (si))
    return true;

  return simplify_stmt_using_ranges (si);
}

/* Stack of dest,src equivalency pairs that need to be restored after
   each attempt to thread a block's incoming edge to an outgoing edge.

   A NULL entry is used to mark the end of pairs which need to be
   restored.  */
static vec<tree> equiv_stack;

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.

   STMT is the statement we want to simplify, WITHIN_STMT provides the
   location for any overflow warnings.
*/

static tree
simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
{
  if (gimple_code (stmt) == GIMPLE_COND)
    return vrp_evaluate_conditional (gimple_cond_code (stmt),
				     gimple_cond_lhs (stmt),
				     gimple_cond_rhs (stmt), within_stmt);

  if (gimple_code (stmt) == GIMPLE_ASSIGN)
    {
      value_range_t new_vr = VR_INITIALIZER;
      tree lhs = gimple_assign_lhs (stmt);

      if (TREE_CODE (lhs) == SSA_NAME
	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      || POINTER_TYPE_P (TREE_TYPE (lhs))))
	{
	  extract_range_from_assignment (&new_vr, stmt);
	  /* Only a singleton range gives a usable constant.  */
	  if (range_int_cst_singleton_p (&new_vr))
	    return new_vr.min;
	}
    }

  return NULL_TREE;
}

/* Blocks which have more than one predecessor and more than
   one successor present jump threading opportunities, i.e.,
   when the block is reached from a specific predecessor, we
   may be able to determine which of the outgoing edges will
   be traversed.  When this optimization applies, we are able
   to avoid conditionals at runtime and we may expose secondary
   optimization opportunities.

   This routine is effectively a driver for the generic jump
   threading code.  It basically just presents the generic code
   with edges that may be suitable for jump threading.

   Unlike DOM, we do not iterate VRP if jump threading was successful.
   While iterating may expose new opportunities for VRP, it is expected
   those opportunities would be very limited and the compile time cost
   to expose those opportunities would be significant.

   As jump threading opportunities are discovered, they are registered
   for later realization.  */

static void
identify_jump_threads (void)
{
  basic_block bb;
  gimple dummy;
  int i;
  edge e;

  /* Ugh.  When substituting values earlier in this pass we can
     wipe the dominance information.  So rebuild the dominator
     information as we need it within the jump threading code.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not allow VRP information to be used for jump threading
     across a back edge in the CFG.  Otherwise it becomes too
     difficult to avoid eliminating loop exit tests.  Of course
     EDGE_DFS_BACK is not accurate at this time so we have to
     recompute it.  */
  mark_dfs_back_edges ();

  /* Do not thread across edges we are about to remove.  Just marking
     them as EDGE_DFS_BACK will do.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    e->flags |= EDGE_DFS_BACK;

  /* Allocate our unwinder stack to unwind any temporary equivalences
     that might be recorded.  */
  equiv_stack.create (20);

  /* To avoid lots of silly node creation, we create a single
     conditional and just modify it in-place when attempting to
     thread jumps.  */
  dummy = gimple_build_cond (EQ_EXPR,
			     integer_zero_node, integer_zero_node,
			     NULL, NULL);

  /* Walk through all the blocks finding those which present a
     potential jump threading opportunity.  We could set this up
     as a dominator walker and record data during the walk, but
     I doubt it's worth the effort for the classes of jump
     threading opportunities we are trying to identify at this
     point in compilation.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple last;

      /* If the generic jump threading code does not find this block
	 interesting, then there is nothing to do.  */
      if (! potentially_threadable_block (bb))
	continue;

      /* We only care about blocks ending in a COND_EXPR.  While there
	 may be some value in handling SWITCH_EXPR here, I doubt it's
	 terribly important.  */
      last = gsi_stmt (gsi_last_bb (bb));

      /* We're basically looking for a switch or any kind of conditional with
	 integral or pointer type arguments.  Note the type of the second
	 argument will be the same as the first argument, so no need to
	 check it explicitly.  */
      if (gimple_code (last) == GIMPLE_SWITCH
	  || (gimple_code (last) == GIMPLE_COND
	      && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
	      && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
		  || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
	      && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
		  || is_gimple_min_invariant (gimple_cond_rhs (last)))))
	{
	  edge_iterator ei;

	  /* We've got a block with multiple predecessors and multiple
	     successors which also ends in a suitable conditional or
	     switch statement.  For each predecessor, see if we can thread
	     it to a specific successor.  */
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      /* Do not thread across back edges or abnormal edges
		 in the CFG.  */
	      if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
		continue;

	      thread_across_edge (dummy, e, true, &equiv_stack,
				  simplify_stmt_for_jump_threading);
	    }
	}
    }

  /* We do not actually update the CFG or SSA graphs at this point as
     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
     handle ASSERT_EXPRs gracefully.  */
}

/* We identified all the jump threading opportunities earlier, but could
   not transform the CFG at that time.  This routine transforms the
   CFG and arranges for the dominator tree to be rebuilt if necessary.

   Note the SSA graph update will occur during the normal TODO
   processing by the pass manager.  */

static void
finalize_jump_threads (void)
{
  thread_through_all_blocks (false);
  equiv_stack.release ();
}

/* Traverse all the blocks folding conditionals with known ranges.  */

static void
vrp_finalize (void)
{
  size_t i;

  values_propagated = true;

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  substitute_and_fold (op_with_constant_singleton_value_range,
		       vrp_fold_stmt, false);

  if (warn_array_bounds)
    check_all_array_refs ();

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads ();

  /* Set value range to non pointer SSA_NAMEs.  */
  for (i  = 0; i < num_vr_values; i++)
    if (vr_value[i])
      {
	tree name = ssa_name (i);

	if (!name
	    || POINTER_TYPE_P (TREE_TYPE (name))
	    || (vr_value[i]->type == VR_VARYING)
	    || (vr_value[i]->type == VR_UNDEFINED))
	  continue;

	/* Persist constant-bounded ranges into the SSA_NAME's range
	   info so later passes can use them after VRP data is freed.  */
	if ((TREE_CODE (vr_value[i]->min) == INTEGER_CST)
	    && (TREE_CODE (vr_value[i]->max) == INTEGER_CST)
	    && (vr_value[i]->type == VR_RANGE
		|| vr_value[i]->type == VR_ANTI_RANGE))
	  set_range_info (name, vr_value[i]->type, vr_value[i]->min,
			  vr_value[i]->max);
      }

  /* Free allocated memory.  */
  for (i = 0; i < num_vr_values; i++)
    if (vr_value[i])
      {
	BITMAP_FREE (vr_value[i]->equiv);
	free (vr_value[i]);
      }

  free (vr_value);
  free (vr_phi_edge_counts);

  /* So that we can distinguish between VRP data being available
     and not available.  */
  vr_value = NULL;
  vr_phi_edge_counts = NULL;
}

/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA name
   have equivalent, though distinct ranges.  For instance,

     1	x_9 = p_3->a;
     2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
     3	if (p_4 == q_2)
     4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
     5	endif
     6	if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, then we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.
Together with value ranges, we also propagate these equivalences between names so that we can take advantage of information from multiple ranges when doing final replacement. Note that this equivalency relation is transitive but not symmetric. In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we cannot assert that q_2 is equivalent to p_5 because q_2 may be used in contexts where that assertion does not hold (e.g., in line 6). TODO, the main difference between this pass and Patterson's is that we do not propagate edge probabilities. We only compute whether edges can be taken or not. That is, instead of having a spectrum of jump probabilities between 0 and 1, we only deal with 0, 1 and DON'T KNOW. In the future, it may be worthwhile to propagate probabilities to aid branch prediction. */ static unsigned int execute_vrp (void) { int i; edge e; switch_update *su; loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS); rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa); scev_initialize (); /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation. Inserting assertions may split edges which will invalidate EDGE_DFS_BACK. */ insert_range_assertions (); to_remove_edges.create (10); to_update_switch_stmts.create (5); threadedge_initialize_values (); /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */ mark_dfs_back_edges (); vrp_initialize (); ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node); vrp_finalize (); free_numbers_of_iterations_estimates (); /* ASSERT_EXPRs must be removed before finalizing jump threads as finalizing jump threads calls the CFG cleanup code which does not properly handle ASSERT_EXPRs. */ remove_range_assertions (); /* If we exposed any new variables, go ahead and put them into SSA form now, before we handle jump threading. This simplifies interactions between rewriting of _DECL nodes into SSA form and rewriting SSA_NAME nodes into SSA form after block duplication and CFG manipulation. 
*/ update_ssa (TODO_update_ssa); finalize_jump_threads (); /* Remove dead edges from SWITCH_EXPR optimization. This leaves the CFG in a broken state and requires a cfg_cleanup run. */ FOR_EACH_VEC_ELT (to_remove_edges, i, e) remove_edge (e); /* Update SWITCH_EXPR case label vector. */ FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su) { size_t j; size_t n = TREE_VEC_LENGTH (su->vec); tree label; gimple_switch_set_num_labels (su->stmt, n); for (j = 0; j < n; j++) gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j)); /* As we may have replaced the default label with a regular one make sure to make it a real default label again. This ensures optimal expansion. */ label = gimple_switch_label (su->stmt, 0); CASE_LOW (label) = NULL_TREE; CASE_HIGH (label) = NULL_TREE; } if (to_remove_edges.length () > 0) { free_dominance_info (CDI_DOMINATORS); loops_state_set (LOOPS_NEED_FIXUP); } to_remove_edges.release (); to_update_switch_stmts.release (); threadedge_finalize_values (); scev_finalize (); loop_optimizer_finalize (); return 0; } namespace { const pass_data pass_data_vrp = { GIMPLE_PASS, /* type */ "vrp", /* name */ OPTGROUP_NONE, /* optinfo_flags */ TV_TREE_VRP, /* tv_id */ PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */ }; class pass_vrp : public gimple_opt_pass { public: pass_vrp (gcc::context *ctxt) : gimple_opt_pass (pass_data_vrp, ctxt) {} /* opt_pass methods: */ opt_pass * clone () { return new pass_vrp (m_ctxt); } virtual bool gate (function *) { return flag_tree_vrp != 0; } virtual unsigned int execute (function *) { return execute_vrp (); } }; // class pass_vrp } // anon namespace gimple_opt_pass * make_pass_vrp (gcc::context *ctxt) { return new pass_vrp (ctxt); }
gpl-2.0
fighterCui/L4ReFiascoOC
kernel/fiasco/src/kern/arm/thread-arm.cpp
2
21156
INTERFACE [arm]: class Trap_state; EXTENSION class Thread { public: static void init_per_cpu(Cpu_number cpu, bool resume); private: bool _in_exception; }; // ------------------------------------------------------------------------ IMPLEMENTATION [arm]: #include <cassert> #include <cstdio> #include "globals.h" #include "kmem_space.h" #include "mem_op.h" #include "static_assert.h" #include "thread_state.h" #include "types.h" enum { FSR_STATUS_MASK = 0x0d, FSR_TRANSL = 0x05, FSR_DOMAIN = 0x09, FSR_PERMISSION = 0x0d, }; DEFINE_PER_CPU Per_cpu<Thread::Dbg_stack> Thread::dbg_stack; PRIVATE static void Thread::print_page_fault_error(Mword e) { char const *const excpts[] = { "reset","undef. insn", "swi", "pref. abort", "data abort", "XXX", "XXX", "XXX" }; unsigned ex = (e >> 20) & 0x07; printf("(%lx) %s, %s(%c)",e & 0xff, excpts[ex], (e & 0x00010000)?"user":"kernel", (e & 0x00020000)?'r':'w'); } PUBLIC inline void FIASCO_NORETURN Thread::fast_return_to_user(Mword ip, Mword sp, Vcpu_state *arg) { extern char __iret[]; Entry_frame *r = regs(); assert_kdb(r->check_valid_user_psr()); r->ip(ip); r->sp(sp); // user-sp is in lazy user state and thus handled by // fill_user_state() fill_user_state(); //load_tpidruro(); r->psr &= ~Proc::Status_thumb; // extended vCPU runs the host code in ARM system mode if (Proc::Is_hyp && (state() & Thread_ext_vcpu_enabled)) r->psr_set_mode(Proc::PSR_m_svc); { register Vcpu_state *r0 asm("r0") = arg; asm volatile ("mov sp, %0 \t\n" "mov pc, %1 \t\n" : : "r" (nonull_static_cast<Return_frame*>(r)), "r" (__iret), "r"(r0) ); } panic("__builtin_trap()"); } IMPLEMENT_DEFAULT inline void Thread::init_per_cpu(Cpu_number, bool) {} // // Public services // IMPLEMENT void Thread::user_invoke() { user_invoke_generic(); assert (current()->state() & Thread_ready); Trap_state *ts = nonull_static_cast<Trap_state*> (nonull_static_cast<Return_frame*>(current()->regs())); assert (((Mword)ts & 7) == 4); // Return_frame has 5 words static_assert(sizeof(ts->r[0]) == 
sizeof(Mword), "Size mismatch"); Mem::memset_mwords(&ts->r[0], 0, sizeof(ts->r) / sizeof(ts->r[0])); if (current()->space()->is_sigma0()) ts->r[0] = Kmem_space::kdir()->virt_to_phys((Address)Kip::k()); ts->psr |= Proc::Status_always_mask; extern char __return_from_user_invoke; asm volatile (" mov sp, %[stack_p] \n" // set stack pointer to regs structure " mov pc, %[rfe] \n" : : [stack_p] "r" (ts), [rfe] "r" (&__return_from_user_invoke) ); panic("should never be reached"); while (1) { current()->state_del(Thread_ready); current()->schedule(); }; // never returns here } IMPLEMENT inline NEEDS["space.h", <cstdio>, "types.h" ,"config.h"] bool Thread::handle_sigma0_page_fault( Address pfa ) { return (mem_space()->v_insert( Mem_space::Phys_addr((pfa & Config::SUPERPAGE_MASK)), Virt_addr(pfa & Config::SUPERPAGE_MASK), Virt_order(Config::SUPERPAGE_SHIFT) /*mem_space()->largest_page_size()*/, Mem_space::Attr(L4_fpage::Rights::URWX())) != Mem_space::Insert_err_nomem); } PUBLIC static inline bool Thread::check_for_kernel_mem_access_pf(Trap_state *ts, Thread *t) { if (EXPECT_FALSE(t->is_kernel_mem_op_hit_and_clear())) { Mword pc = t->exception_triggered() ? t->_exc_cont.ip() : ts->pc; pc -= (ts->psr & Proc::Status_thumb) ? 2 : 4; if (t->exception_triggered()) t->_exc_cont.ip(pc); else ts->pc = pc; return true; } return false; } extern "C" { /** * The low-level page fault handler called from entry.S. We're invoked with * interrupts turned off. Apart from turning on interrupts in almost * all cases (except for kernel page faults in TCB area), just forwards * the call to Thread::handle_page_fault(). 
* @param pfa page-fault virtual address * @param error_code CPU error code * @return true if page fault could be resolved, false otherwise */ Mword pagefault_entry(const Mword pfa, Mword error_code, const Mword pc, Return_frame *ret_frame) { if (EXPECT_FALSE(PF::is_alignment_error(error_code))) { printf("KERNEL%d: alignment error at %08lx (PC: %08lx, SP: %08lx, FSR: %lx, PSR: %lx)\n", cxx::int_value<Cpu_number>(current_cpu()), pfa, pc, ret_frame->usp, error_code, ret_frame->psr); return false; } if (EXPECT_FALSE(Thread::is_debug_exception(error_code, true))) return 0; Thread *t = current_thread(); // Pagefault in user mode if (PF::is_usermode_error(error_code)) { // PFs in the kern_lib_page are always write PFs due to rollbacks and // insn decoding if (EXPECT_FALSE((pc & Kmem::Kern_lib_base) == Kmem::Kern_lib_base)) error_code |= (1UL << 6); if (t->vcpu_pagefault(pfa, error_code, pc)) return 1; t->state_del(Thread_cancel); Proc::sti(); return t->handle_page_fault(pfa, error_code, pc, ret_frame); } // or interrupts were enabled else if (!(ret_frame->psr & Proc::Status_preempt_disabled)) Proc::sti(); // Pagefault in kernel mode and interrupts were disabled else { // page fault in kernel memory region, not present, but mapping exists if (Kmem::is_kmem_page_fault(pfa, error_code)) { // We've interrupted a context in the kernel with disabled interrupts, // the page fault address is in the kernel region, the error code is // "not mapped" (as opposed to "access error"), and the region is // actually valid (that is, mapped in Kmem's shared page directory, // just not in the currently active page directory) // Remain cli'd !!! } else if (!Kmem::is_kmem_page_fault(pfa, error_code)) { // No error -- just enable interrupts. 
Proc::sti(); } else { // Error: We interrupted a cli'd kernel context touching kernel space if (!Thread::log_page_fault()) printf("*P[%lx,%lx,%lx] ", pfa, error_code, pc); kdb_ke("page fault in cli mode"); } } // cache operations we carry out for user space might cause PFs, we just // ignore those if (EXPECT_FALSE(t->is_ignore_mem_op_in_progress())) { t->set_kernel_mem_op_hit(); ret_frame->pc += 4; return 1; } return t->handle_page_fault(pfa, error_code, pc, ret_frame); } void slowtrap_entry(Trap_state *ts) { if (0) printf("Trap: pfa=%08lx pc=%08lx err=%08lx psr=%lx\n", ts->pf_address, ts->pc, ts->error_code, ts->psr); Thread *t = current_thread(); LOG_TRAP; if (Config::Support_arm_linux_cache_API) { if ( ts->hsr().ec() == 0x11 && ts->r[7] == 0xf0002) { if (ts->r[2] == 0) Mem_op::arm_mem_cache_maint(Mem_op::Op_cache_coherent, (void *)ts->r[0], (void *)ts->r[1]); ts->r[0] = 0; return; } } if (t->check_and_handle_coproc_faults(ts)) return; if (Thread::is_debug_exception(ts->error_code)) { Thread::handle_debug_exception(ts); return; } // send exception IPC if requested if (t->send_exception(ts)) return; t->halt(); } }; PUBLIC static inline NEEDS[Thread::call_nested_trap_handler] void Thread::handle_debug_exception(Trap_state *ts) { call_nested_trap_handler(ts); } IMPLEMENT inline bool Thread::pagein_tcb_request(Return_frame *regs) { //if ((*(Mword*)regs->pc & 0xfff00fff ) == 0xe5900000) if (*(Mword*)regs->pc == 0xe59ee000) { // printf("TCBR: %08lx\n", *(Mword*)regs->pc); // skip faulting instruction regs->pc += 4; // tell program that a pagefault occured we cannot handle regs->psr |= 0x40000000; // set zero flag in psr regs->km_lr = 0; return true; } return false; } //--------------------------------------------------------------------------- IMPLEMENTATION [arm && !arm_lpae]: PUBLIC static inline Mword Thread::is_debug_exception(Mword error_code, bool just_type = false) { if (just_type) return (error_code & 0x4f) == 2; Mword e = error_code & 0x00f0004f; return e == 
0x00300002 || e == 0x00400002; } //--------------------------------------------------------------------------- IMPLEMENTATION [arm && arm_lpae]: PUBLIC static inline Mword Thread::is_debug_exception(Mword error_code, bool just_type = false) { if (just_type) return (error_code & 0x3f) == 0x22; Mword e = error_code & 0x00f0003f; return e == 0x00300022 || e == 0x00400022; } //--------------------------------------------------------------------------- IMPLEMENTATION [arm]: #include "trap_state.h" /** Constructor. @param space the address space @param id user-visible thread ID of the sender @param init_prio initial priority @param mcp thread's maximum controlled priority @post state() != 0 */ IMPLEMENT Thread::Thread() : Sender(0), // select optimized version of constructor _pager(Thread_ptr::Invalid), _exc_handler(Thread_ptr::Invalid), _del_observer(0) { assert (state(false) == 0); inc_ref(); _space.space(Kernel_task::kernel_task()); if (Config::Stack_depth) std::memset((char*)this + sizeof(Thread), '5', Thread::Size-sizeof(Thread)-64); // set a magic value -- we use it later to verify the stack hasn't // been overrun _magic = magic; _recover_jmpbuf = 0; _timeout = 0; _in_exception = false; *reinterpret_cast<void(**)()> (--_kernel_sp) = user_invoke; // clear out user regs that can be returned from the thread_ex_regs // system call to prevent covert channel Entry_frame *r = regs(); r->sp(0); r->ip(0); r->psr = Proc::Status_mode_user; //r->psr = 0x1f; //Proc::Status_mode_user; state_add_dirty(Thread_dead, false); // ok, we're ready to go! } IMPLEMENT inline Mword Thread::user_sp() const { return regs()->sp(); } IMPLEMENT inline void Thread::user_sp(Mword sp) { return regs()->sp(sp); } IMPLEMENT inline NEEDS[Thread::exception_triggered] Mword Thread::user_ip() const { return exception_triggered() ? 
_exc_cont.ip() : regs()->ip(); } IMPLEMENT inline Mword Thread::user_flags() const { return 0; } IMPLEMENT inline NEEDS[Thread::exception_triggered] void Thread::user_ip(Mword ip) { if (exception_triggered()) _exc_cont.ip(ip); else { Entry_frame *r = regs(); r->ip(ip); static_cast<Trap_state*>(static_cast<Return_frame*>(r))->sanitize_user_state(); } } PUBLIC inline NEEDS ["trap_state.h"] int Thread::send_exception_arch(Trap_state *) { // nothing to tweak on ARM return 1; } PRIVATE inline void Thread::save_fpu_state_to_utcb(Trap_state *ts, Utcb *u) { char *esu = (char *)&u->values[21]; Fpu::save_user_exception_state(state() & Thread_fpu_owner, fpu_state(), ts, (Fpu::Exception_state_user *)esu); } PRIVATE inline bool Thread::invalid_ipc_buffer(void const *a) { if (!_in_exception) return Mem_layout::in_kernel(((Address)a & Config::SUPERPAGE_MASK) + Config::SUPERPAGE_SIZE - 1); return false; } PROTECTED inline int Thread::do_trigger_exception(Entry_frame *r, void *ret_handler) { if (!_exc_cont.valid()) { _exc_cont.activate(r, ret_handler); return 1; } return 0; } PRIVATE static inline NEEDS[Thread::get_ts_tpidruro] bool FIASCO_WARN_RESULT Thread::copy_utcb_to_ts(L4_msg_tag const &tag, Thread *snd, Thread *rcv, L4_fpage::Rights rights) { Trap_state *ts = (Trap_state*)rcv->_utcb_handler; Utcb *snd_utcb = snd->utcb().access(); Mword s = tag.words(); if (EXPECT_FALSE(rcv->exception_triggered())) { // triggered exception pending Mem::memcpy_mwords (ts, snd_utcb->values, s > 16 ? 16 : s); if (EXPECT_TRUE(s > 20)) { Return_frame rf = *reinterpret_cast<Return_frame const *>((char const *)&snd_utcb->values[16]); rcv->sanitize_user_state(static_cast<Trap_state*>(&rf)); rcv->_exc_cont.set(ts, &rf); } } else { Mem::memcpy_mwords (ts, snd_utcb->values, s > 19 ? 
19 : s); if (EXPECT_TRUE(s > 19)) ts->pc = snd_utcb->values[19]; if (EXPECT_TRUE(s > 20)) { // sanitize processor mode ts->psr = snd_utcb->values[20]; rcv->sanitize_user_state(ts); } } if (tag.transfer_fpu() && (rights & L4_fpage::Rights::W())) snd->transfer_fpu(rcv); if ((tag.flags() & 0x8000) && (rights & L4_fpage::Rights::W())) rcv->utcb().access()->user[2] = snd_utcb->values[25]; rcv->get_ts_tpidruro(ts); bool ret = transfer_msg_items(tag, snd, snd_utcb, rcv, rcv->utcb().access(), rights); rcv->state_del(Thread_in_exception); return ret; } PRIVATE static inline NEEDS[Thread::save_fpu_state_to_utcb, Thread::set_ts_tpidruro] bool FIASCO_WARN_RESULT Thread::copy_ts_to_utcb(L4_msg_tag const &, Thread *snd, Thread *rcv, L4_fpage::Rights rights) { Trap_state *ts = (Trap_state*)snd->_utcb_handler; { auto guard = lock_guard(cpu_lock); Utcb *rcv_utcb = rcv->utcb().access(); snd->set_ts_tpidruro(ts); Mem::memcpy_mwords(rcv_utcb->values, ts, 16); Continuation::User_return_frame *d = reinterpret_cast<Continuation::User_return_frame *>((char*)&rcv_utcb->values[16]); snd->_exc_cont.get(d, ts); if (EXPECT_TRUE(!snd->exception_triggered())) { rcv_utcb->values[19] = ts->pc; rcv_utcb->values[20] = ts->psr; } if (rcv_utcb->inherit_fpu() && (rights & L4_fpage::Rights::W())) { snd->save_fpu_state_to_utcb(ts, rcv_utcb); snd->transfer_fpu(rcv); } } return true; } PROTECTED inline NEEDS[Thread::set_tpidruro] L4_msg_tag Thread::invoke_arch(L4_msg_tag tag, Utcb *utcb) { switch (utcb->values[0] & Opcode_mask) { case Op_set_tpidruro_arm: return set_tpidruro(tag, utcb); default: return commit_result(-L4_err::ENosys); } } PROTECTED inline int Thread::sys_control_arch(Utcb *) { return 0; } PUBLIC static inline bool Thread::condition_valid(unsigned char cond, Unsigned32 psr) { // Matrix of instruction conditions and PSR flags, // index into the table is the condition from insn Unsigned16 v[16] = { 0xf0f0, 0x0f0f, 0xcccc, 0x3333, 0xff00, 0x00ff, 0xaaaa, 0x5555, 0x0c0c, 0xf3f3, 0xaa55, 0x55aa, 
0x0a05, 0xf5fa, 0xffff, 0xffff }; return (v[cond] >> (psr >> 28)) & 1; } // ------------------------------------------------------------------------ IMPLEMENTATION [arm && armv6plus]: PROTECTED inline void Thread::vcpu_resume_user_arch() { // just an experiment for now, we cannot really take the // user-writable register because user-land might already use it asm volatile("mcr p15, 0, %0, c13, c0, 2" : : "r" (utcb().access(true)->values[25]) : "memory"); } PRIVATE inline L4_msg_tag Thread::set_tpidruro(L4_msg_tag tag, Utcb *utcb) { if (EXPECT_FALSE(tag.words() < 2)) return commit_result(-L4_err::EInval); _tpidruro = utcb->values[1]; if (EXPECT_FALSE(state() & Thread_vcpu_enabled)) arch_update_vcpu_state(vcpu_state().access()); if (this == current_thread()) load_tpidruro(); return commit_result(0); } PRIVATE inline void Thread::get_ts_tpidruro(Trap_state *ts) { _tpidruro = ts->tpidruro; if (this == current_thread()) load_tpidruro(); } PRIVATE inline void Thread::set_ts_tpidruro(Trap_state *ts) { ts->tpidruro = _tpidruro; } // ------------------------------------------------------------------------ IMPLEMENTATION [arm && !armv6plus]: PROTECTED inline void Thread::vcpu_resume_user_arch() {} PRIVATE inline L4_msg_tag Thread::set_tpidruro(L4_msg_tag, Utcb *) { return commit_result(-L4_err::EInval); } PRIVATE inline void Thread::get_ts_tpidruro(Trap_state *) {} PRIVATE inline void Thread::set_ts_tpidruro(Trap_state *) {} //----------------------------------------------------------------------------- IMPLEMENTATION [mp]: #include "ipi.h" #include "irq_mgr.h" EXTENSION class Thread { public: static void kern_kdebug_ipi_entry() asm("kern_kdebug_ipi_entry"); }; class Thread_remote_rq_irq : public Irq_base { public: // we assume IPIs to be top level, no upstream IRQ chips void handle(Upstream_irq const *) { Thread::handle_remote_requests_irq(); } Thread_remote_rq_irq() { set_hit(&handler_wrapper<Thread_remote_rq_irq>); unmask(); } void switch_mode(bool) {} }; class 
Thread_glbl_remote_rq_irq : public Irq_base { public: // we assume IPIs to be top level, no upstream IRQ chips void handle(Upstream_irq const *) { Thread::handle_global_remote_requests_irq(); } Thread_glbl_remote_rq_irq() { set_hit(&handler_wrapper<Thread_glbl_remote_rq_irq>); unmask(); } void switch_mode(bool) {} }; class Thread_debug_ipi : public Irq_base { public: // we assume IPIs to be top level, no upstream IRQ chips void handle(Upstream_irq const *) { Ipi::eoi(Ipi::Debug, current_cpu()); Thread::kern_kdebug_ipi_entry(); } Thread_debug_ipi() { set_hit(&handler_wrapper<Thread_debug_ipi>); unmask(); } void switch_mode(bool) {} }; class Thread_timer_tick_ipi : public Irq_base { public: void handle(Upstream_irq const *ui) { //Timer_tick *self = nonull_static_cast<Timer_tick *>(_s); //self->ack(); ui->ack(); //self->log_timer(); current_thread()->handle_timer_interrupt(); } Thread_timer_tick_ipi() { set_hit(&handler_wrapper<Thread_timer_tick_ipi>); } void switch_mode(bool) {} }; //----------------------------------------------------------------------------- IMPLEMENTATION [mp && !irregular_gic]: class Arm_ipis { public: Arm_ipis() { check(Irq_mgr::mgr->alloc(&remote_rq_ipi, Ipi::Request)); check(Irq_mgr::mgr->alloc(&glbl_remote_rq_ipi, Ipi::Global_request)); check(Irq_mgr::mgr->alloc(&debug_ipi, Ipi::Debug)); check(Irq_mgr::mgr->alloc(&timer_ipi, Ipi::Timer)); } Thread_remote_rq_irq remote_rq_ipi; Thread_glbl_remote_rq_irq glbl_remote_rq_ipi; Thread_debug_ipi debug_ipi; Thread_timer_tick_ipi timer_ipi; }; static Arm_ipis _arm_ipis; //----------------------------------------------------------------------------- IMPLEMENTATION [arm && !fpu]: PUBLIC inline bool Thread::check_and_handle_coproc_faults(Trap_state *) { return false; } //----------------------------------------------------------------------------- IMPLEMENTATION [arm && fpu]: PUBLIC inline bool Thread::check_and_handle_coproc_faults(Trap_state *ts) { if (!ts->exception_is_undef_insn()) return false; 
Unsigned32 opcode; if (ts->psr & Proc::Status_thumb) { Unsigned16 v = Thread::peek_user((Unsigned16 *)(ts->pc - 2), this); if (EXPECT_FALSE(Thread::check_for_kernel_mem_access_pf(ts, this))) return true; if ((v >> 11) <= 0x1c) return false; opcode = (v << 16) | Thread::peek_user((Unsigned16 *)ts->pc, this); } else opcode = Thread::peek_user((Unsigned32 *)(ts->pc - 4), this); if (EXPECT_FALSE(Thread::check_for_kernel_mem_access_pf(ts, this))) return true; if (ts->psr & Proc::Status_thumb) { if ( (opcode & 0xef000000) == 0xef000000 // A6.3.18 || (opcode & 0xff100000) == 0xf9000000) return Thread::handle_fpu_trap(opcode, ts); } else { if ( (opcode & 0xfe000000) == 0xf2000000 // A5.7.1 || (opcode & 0xff100000) == 0xf4000000) return Thread::handle_fpu_trap(opcode, ts); } if ((opcode & 0x0c000e00) == 0x0c000a00) return Thread::handle_fpu_trap(opcode, ts); return false; } PUBLIC static bool Thread::handle_fpu_trap(Unsigned32 opcode, Trap_state *ts) { if (!condition_valid(opcode >> 28, ts->psr)) { // FPU insns are 32bit, even for thumb if (ts->psr & Proc::Status_thumb) ts->pc += 2; return true; } if (Fpu::is_enabled()) { assert(Fpu::fpu.current().owner() == current()); if (Fpu::is_emu_insn(opcode)) return Fpu::emulate_insns(opcode, ts); ts->hsr().ec() = 0; // tag fpu undef insn } else if (current_thread()->switchin_fpu()) { if (Fpu::is_emu_insn(opcode)) return Fpu::emulate_insns(opcode, ts); ts->pc -= (ts->psr & Proc::Status_thumb) ? 2 : 4; return true; } else { ts->hsr().ec() = 0x07; ts->hsr().cond() = opcode >> 28; ts->hsr().cv() = 1; ts->hsr().cpt_cpnr() = 10; } return false; } //----------------------------------------------------------------------------- IMPLEMENTATION [arm && !hyp]: PUBLIC static inline template<typename T> T Thread::peek_user(T const *adr, Context *c) { T v; c->set_ignore_mem_op_in_progress(true); v = *adr; c->set_ignore_mem_op_in_progress(false); return v; }
gpl-2.0
santod/android_kernel_htc_m8
arch/arm/mach-msm/htc_battery_8960.c
2
81429
/* arch/arm/mach-msm/htc_battery_8960.c * * Copyright (C) 2011 HTC Corporation. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/debugfs.h> #include <linux/wakelock.h> #include <linux/gpio.h> #include <mach/board.h> #include <asm/mach-types.h> #include <mach/devices_cmdline.h> #include <mach/devices_dtb.h> #include <mach/htc_battery_core.h> #include <mach/htc_battery_8960.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/reboot.h> #include <linux/miscdevice.h> #include <linux/pmic8058-xoadc.h> #include <mach/mpp.h> #include <linux/android_alarm.h> #include <linux/suspend.h> #if defined(CONFIG_FB) #include <linux/notifier.h> #include <linux/fb.h> #elif defined(CONFIG_HAS_EARLYSUSPEND) #include <linux/earlysuspend.h> #endif #include <mach/htc_gauge.h> #include <mach/htc_charger.h> #include <mach/htc_battery_cell.h> #define HTC_BATT_CHG_DIS_BIT_EOC (1) #define HTC_BATT_CHG_DIS_BIT_ID (1<<1) #define HTC_BATT_CHG_DIS_BIT_TMP (1<<2) #define HTC_BATT_CHG_DIS_BIT_OVP (1<<3) #define HTC_BATT_CHG_DIS_BIT_TMR (1<<4) #define HTC_BATT_CHG_DIS_BIT_MFG (1<<5) #define HTC_BATT_CHG_DIS_BIT_USR_TMR (1<<6) #define HTC_BATT_CHG_DIS_BIT_STOP_SWOLLEN (1<<7) static int chg_dis_reason; static int chg_dis_active_mask = HTC_BATT_CHG_DIS_BIT_ID | HTC_BATT_CHG_DIS_BIT_MFG | HTC_BATT_CHG_DIS_BIT_STOP_SWOLLEN | HTC_BATT_CHG_DIS_BIT_TMP | HTC_BATT_CHG_DIS_BIT_TMR | HTC_BATT_CHG_DIS_BIT_USR_TMR; static int chg_dis_control_mask = 
HTC_BATT_CHG_DIS_BIT_ID | HTC_BATT_CHG_DIS_BIT_MFG | HTC_BATT_CHG_DIS_BIT_STOP_SWOLLEN | HTC_BATT_CHG_DIS_BIT_USR_TMR; #define HTC_BATT_PWRSRC_DIS_BIT_MFG (1) #define HTC_BATT_PWRSRC_DIS_BIT_API (1<<1) static int pwrsrc_dis_reason; static int need_sw_stimer; static unsigned long sw_stimer_counter; static int sw_stimer_fault; #define HTC_SAFETY_TIME_16_HR_IN_MS (16*60*60*1000) static int chg_dis_user_timer; static int charger_dis_temp_fault; static int charger_under_rating; static int charger_safety_timeout; static int batt_full_eoc_stop; static int chg_limit_reason; static int chg_limit_active_mask; #ifdef CONFIG_DUTY_CYCLE_LIMIT static int chg_limit_timer_sub_mask; #endif #define SUSPEND_HIGHFREQ_CHECK_BIT_TALK (1) #define SUSPEND_HIGHFREQ_CHECK_BIT_SEARCH (1<<1) #define SUSPEND_HIGHFREQ_CHECK_BIT_MUSIC (1<<3) static int suspend_highfreq_check_reason; #define CONTEXT_STATE_BIT_TALK (1) #define CONTEXT_STATE_BIT_SEARCH (1<<1) #define CONTEXT_STATE_BIT_NAVIGATION (1<<2) #define CONTEXT_STATE_BIT_MUSIC (1<<3) static int context_state; #define STATE_WORKQUEUE_PENDING (1) #define STATE_EARLY_SUSPEND (1<<1) #define STATE_PREPARE (1<<2) #define STATE_SUSPEND (1<<3) #define BATT_SUSPEND_CHECK_TIME (3600) #define BATT_SUSPEND_HIGHFREQ_CHECK_TIME (300) #define BATT_TIMER_CHECK_TIME (360) #define BATT_TIMER_UPDATE_TIME (60) #define HTC_EXT_UNKNOWN_USB_CHARGER (1<<0) #define HTC_EXT_CHG_UNDER_RATING (1<<1) #define HTC_EXT_CHG_SAFTY_TIMEOUT (1<<2) #define HTC_EXT_CHG_FULL_EOC_STOP (1<<3) #ifdef CONFIG_ARCH_MSM8X60_LTE #endif static void mbat_in_func(struct work_struct *work); struct delayed_work mbat_in_struct; static struct kset *htc_batt_kset; #define BATT_REMOVED_SHUTDOWN_DELAY_MS (50) #define BATT_CRITICAL_VOL_SHUTDOWN_DELAY_MS (1000) static void shutdown_worker(struct work_struct *work); struct delayed_work shutdown_work; #define BATT_CRITICAL_LOW_VOLTAGE (3000) static int critical_shutdown = 0; static int critical_alarm_level; static int critical_alarm_level_set; struct 
wake_lock voltage_alarm_wake_lock; struct wake_lock batt_shutdown_wake_lock; #ifdef CONFIG_HAS_EARLYSUSPEND static struct early_suspend early_suspend; #endif #ifdef CONFIG_HTC_BATT_ALARM static int screen_state; static int ac_suspend_flag; #endif static int htc_ext_5v_output_now; static int htc_ext_5v_output_old; static int latest_chg_src = CHARGER_BATTERY; struct htc_battery_info { int device_id; struct mutex info_lock; spinlock_t batt_lock; int is_open; int critical_low_voltage_mv; int *critical_alarm_vol_ptr; int critical_alarm_vol_cols; int force_shutdown_batt_vol; int overload_vol_thr_mv; int overload_curr_thr_ma; int smooth_chg_full_delay_min; int decreased_batt_level_check; struct kobject batt_timer_kobj; struct kobject batt_cable_kobj; struct wake_lock vbus_wake_lock; char debug_log[DEBUG_LOG_LENGTH]; struct battery_info_reply rep; struct mpp_config_data *mpp_config; struct battery_adc_reply adc_data; int adc_vref[ADC_REPLY_ARRAY_SIZE]; int guage_driver; int charger; struct htc_gauge *igauge; struct htc_charger *icharger; struct htc_battery_cell *bcell; int state; unsigned int htc_extension; #if defined(CONFIG_FB) struct notifier_block fb_notif; struct workqueue_struct *batt_fb_wq; struct delayed_work work_fb; #endif }; static struct htc_battery_info htc_batt_info; struct htc_battery_timer { struct mutex schedule_lock; unsigned long batt_system_jiffies; unsigned long batt_suspend_ms; unsigned long total_time_ms; unsigned int batt_alarm_status; #ifdef CONFIG_HTC_BATT_ALARM unsigned int batt_critical_alarm_counter; #endif unsigned int batt_alarm_enabled; unsigned int alarm_timer_flag; unsigned int time_out; struct work_struct batt_work; struct delayed_work unknown_usb_detect_work; struct alarm batt_check_wakeup_alarm; struct timer_list batt_timer; struct workqueue_struct *batt_wq; struct wake_lock battery_lock; struct wake_lock unknown_usb_detect_lock; }; static struct htc_battery_timer htc_batt_timer; struct mutex cable_notifier_lock; static void 
cable_status_notifier_func(int online); static struct t_cable_status_notifier cable_status_notifier = { .name = "htc_battery_8960", .func = cable_status_notifier_func, }; static int htc_battery_initial; static int htc_full_level_flag; static int htc_battery_set_charging(int ctl); #ifdef CONFIG_HTC_BATT_ALARM static int battery_vol_alarm_mode; static struct battery_vol_alarm alarm_data; struct mutex batt_set_alarm_lock; #endif #if defined(CONFIG_FB) static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data); #endif struct dec_level_by_current_ua { int threshold_ua; int dec_level; }; static struct dec_level_by_current_ua dec_level_curr_table[] = { {900000, 2}, {600000, 4}, {0, 6}, }; static const int DEC_LEVEL_CURR_TABLE_SIZE = sizeof(dec_level_curr_table) / sizeof (dec_level_curr_table[0]); #ifdef CONFIG_DUTY_CYCLE_LIMIT enum { LIMIT_CHG_TIMER_STATE_NONE = 0, LIMIT_CHG_TIMER_STATE_ON = 1, LIMIT_CHG_TIMER_STATE_OFF = 2, }; static uint limit_chg_timer_state = 0; struct delayed_work limit_chg_timer_work; static int limit_charge_timer_ma = 0; module_param(limit_charge_timer_ma, int, 0644); static int limit_charge_timer_on = 0; module_param(limit_charge_timer_on, int, 0644); static int limit_charge_timer_off = 0; module_param(limit_charge_timer_off, int, 0644); #endif int htc_gauge_get_battery_voltage(int *result) { if (htc_batt_info.igauge && htc_batt_info.igauge->get_battery_voltage) return htc_batt_info.igauge->get_battery_voltage(result); pr_warn("[BATT] interface doesn't exist\n"); return -EINVAL; } EXPORT_SYMBOL(htc_gauge_get_battery_voltage); int htc_gauge_set_chg_ovp(int is_ovp) { if (htc_batt_info.igauge && htc_batt_info.igauge->set_chg_ovp) return htc_batt_info.igauge->set_chg_ovp(is_ovp); pr_warn("[BATT] interface doesn't exist\n"); return -EINVAL; } EXPORT_SYMBOL(htc_gauge_set_chg_ovp); int htc_is_wireless_charger(void) { if (htc_battery_initial) return (htc_batt_info.rep.charging_source == CHARGER_WIRELESS) ? 
1 : 0;
	else
		return -1;
}
/*
 * Queue the main battery-info update work, holding battery_lock so the
 * device stays awake until the work runs. Clears a pending-skip flag set
 * while the driver was in STATE_PREPARE.
 */
int htc_batt_schedule_batt_info_update(void)
{
	if (htc_batt_info.state & STATE_WORKQUEUE_PENDING) {
		htc_batt_info.state &= ~STATE_WORKQUEUE_PENDING;
		pr_debug("[BATT] %s(): Clear flag, htc_batt_info.state=0x%x\n",
				__func__, htc_batt_info.state);
	}
	wake_lock(&htc_batt_timer.battery_lock);
	queue_work(htc_batt_timer.batt_wq, &htc_batt_timer.batt_work);
	return 0;
}
/*
 * Low-voltage alarm callback. status != 0: alarm tripped -- disable the
 * alarm, step down to the next (lower) alarm level, flag shutdown when the
 * lowest level fires, and schedule a status update. status == 0: voltage
 * recovered above the level; just log.
 */
static void batt_lower_voltage_alarm_handler(int status)
{
	wake_lock(&voltage_alarm_wake_lock);
	if (status) {
		if (htc_batt_info.igauge->enable_lower_voltage_alarm)
			htc_batt_info.igauge->enable_lower_voltage_alarm(0);
		BATT_LOG("voltage_alarm level=%d (%d mV) triggered.",
				critical_alarm_level,
				htc_batt_info.critical_alarm_vol_ptr[critical_alarm_level]);
		if (critical_alarm_level == 0)
			critical_shutdown = 1;
		critical_alarm_level--;
		htc_batt_schedule_batt_info_update();
	} else {
		pr_info("[BATT] voltage_alarm level=%d (%d mV) raised back.\n",
				critical_alarm_level,
				htc_batt_info.critical_alarm_vol_ptr[critical_alarm_level]);
	}
	wake_unlock(&voltage_alarm_wake_lock);
}
#define UNKNOWN_USB_DETECT_DELAY_MS (5000)
/*
 * Runs UNKNOWN_USB_DETECT_DELAY_MS after SRC_DETECTING: if the source is
 * still undetermined, report it as unknown USB.
 */
static void unknown_usb_detect_worker(struct work_struct *work)
{
	mutex_lock(&cable_notifier_lock);
	pr_info("[BATT] %s\n", __func__);
	if (latest_chg_src == CHARGER_DETECTING) {
		htc_charger_event_notify(HTC_CHARGER_EVENT_SRC_UNKNOWN_USB);
	}
	mutex_unlock(&cable_notifier_lock);
	wake_unlock(&htc_batt_timer.unknown_usb_detect_lock);
}
/*
 * Dispatch events raised by the fuel-gauge backend. GAUGE_EVENT_READY
 * additionally wires up the lower-voltage alarm callbacks when a critical
 * alarm table is configured.
 */
int htc_gauge_event_notify(enum htc_gauge_event event)
{
	pr_info("[BATT] %s gauge event=%d\n", __func__, event);
	switch (event) {
	case HTC_GAUGE_EVENT_READY:
		if (!htc_batt_info.igauge) {
			pr_err("[BATT]err: htc_gauge is not hooked.\n");
			break;
		}
		mutex_lock(&htc_batt_info.info_lock);
		htc_batt_info.igauge->ready = 1;
		/* batt_state becomes valid once gauge, charger and cable are ready */
		if (htc_batt_info.icharger && htc_batt_info.icharger->ready &&
				htc_batt_info.rep.cable_ready)
			htc_batt_info.rep.batt_state = 1;
		if (htc_batt_info.rep.batt_state)
			htc_batt_schedule_batt_info_update();
		if (htc_batt_info.igauge &&
htc_batt_info.critical_alarm_vol_cols) {
			/* hook and arm the lower-voltage alarm at the current level */
			if (htc_batt_info.igauge->register_lower_voltage_alarm_notifier)
				htc_batt_info.igauge->register_lower_voltage_alarm_notifier(
						batt_lower_voltage_alarm_handler);
			if (htc_batt_info.igauge->set_lower_voltage_alarm_threshold)
				htc_batt_info.igauge->set_lower_voltage_alarm_threshold(
						htc_batt_info.critical_alarm_vol_ptr[critical_alarm_level]);
			if (htc_batt_info.igauge->enable_lower_voltage_alarm)
				htc_batt_info.igauge->enable_lower_voltage_alarm(1);
		}
		mutex_unlock(&htc_batt_info.info_lock);
		break;
	case HTC_GAUGE_EVENT_TEMP_ZONE_CHANGE:
		/* defer the update while the driver is preparing for suspend */
		if (htc_batt_info.state & STATE_PREPARE) {
			htc_batt_info.state |= STATE_WORKQUEUE_PENDING;
			pr_info("[BATT] %s(): Skip due to htc_batt_info.state=0x%x\n",
					__func__, htc_batt_info.state);
		} else {
			pr_debug("[BATT] %s(): Run, htc_batt_info.state=0x%x\n",
					__func__, htc_batt_info.state);
			htc_batt_schedule_batt_info_update();
		}
		break;
	case HTC_GAUGE_EVENT_EOC:
	case HTC_GAUGE_EVENT_OVERLOAD:
		htc_batt_schedule_batt_info_update();
		break;
	case HTC_GAUGE_EVENT_LOW_VOLTAGE_ALARM:
		batt_lower_voltage_alarm_handler(1);
		break;
	case HTC_GAUGE_EVENT_BATT_REMOVED:
		/* schedule a delayed shutdown unless the test flag suppresses it */
		if (!(get_kernel_flag() & KERNEL_FLAG_TEST_PWR_SUPPLY)) {
			wake_lock(&batt_shutdown_wake_lock);
			schedule_delayed_work(&shutdown_work,
					msecs_to_jiffies(BATT_REMOVED_SHUTDOWN_DELAY_MS));
		}
		break;
	case HTC_GAUGE_EVENT_EOC_STOP_CHG:
		sw_stimer_counter = 0;
		htc_batt_schedule_batt_info_update();
		break;
	default:
		pr_info("[BATT] unsupported gauge event(%d)\n", event);
		break;
	}
	return 0;
}
/*
 * Dispatch events raised by the charger backend. Most events record the
 * new source in latest_chg_src and schedule a battery-info update.
 */
int htc_charger_event_notify(enum htc_charger_event event)
{
	pr_info("[BATT] %s charger event=%d\n", __func__, event);
	switch (event) {
	case HTC_CHARGER_EVENT_BATT_UEVENT_CHANGE :
		htc_battery_update_batt_uevent();
		break;
	case HTC_CHARGER_EVENT_VBUS_IN:
		break;
	case HTC_CHARGER_EVENT_SRC_INTERNAL:
		/* battery is now sourcing 5V on VBUS (boost mode) */
		htc_ext_5v_output_now = 1;
		BATT_LOG("%s htc_ext_5v_output_now:%d", __func__,
				htc_ext_5v_output_now);
		htc_batt_schedule_batt_info_update();
		break;
	case HTC_CHARGER_EVENT_SRC_CLEAR:
latest_chg_src = CHARGER_BATTERY;
		htc_ext_5v_output_now = 0;
		BATT_LOG("%s htc_ext_5v_output_now:%d", __func__,
				htc_ext_5v_output_now);
		htc_batt_schedule_batt_info_update();
		break;
	case HTC_CHARGER_EVENT_VBUS_OUT:
	case HTC_CHARGER_EVENT_SRC_NONE:
		latest_chg_src = CHARGER_BATTERY;
		/* cable gone: abort any pending unknown-USB detection */
		if (delayed_work_pending(&htc_batt_timer.unknown_usb_detect_work)) {
			cancel_delayed_work_sync(&htc_batt_timer.unknown_usb_detect_work);
			wake_unlock(&htc_batt_timer.unknown_usb_detect_lock);
		}
		htc_batt_schedule_batt_info_update();
		break;
	case HTC_CHARGER_EVENT_SRC_USB:
		latest_chg_src = CHARGER_USB;
		htc_batt_schedule_batt_info_update();
		break;
	case HTC_CHARGER_EVENT_SRC_AC:
		latest_chg_src = CHARGER_AC;
		htc_batt_schedule_batt_info_update();
		break;
	case HTC_CHARGER_EVENT_SRC_WIRELESS:
		latest_chg_src = CHARGER_WIRELESS;
		htc_batt_schedule_batt_info_update();
		break;
	case HTC_CHARGER_EVENT_SRC_DETECTING:
		/* start the delayed unknown-USB fallback while detection runs */
		latest_chg_src = CHARGER_DETECTING;
		htc_batt_schedule_batt_info_update();
		wake_lock(&htc_batt_timer.unknown_usb_detect_lock);
		queue_delayed_work(htc_batt_timer.batt_wq,
				&htc_batt_timer.unknown_usb_detect_work,
				round_jiffies_relative(msecs_to_jiffies(
						UNKNOWN_USB_DETECT_DELAY_MS)));
		break;
	case HTC_CHARGER_EVENT_SRC_UNKNOWN_USB:
		/* fast-charge kernel flag promotes unknown USB to AC */
		if (get_kernel_flag() & KERNEL_FLAG_ENABLE_FAST_CHARGE)
			latest_chg_src = CHARGER_AC;
		else
			latest_chg_src = CHARGER_UNKNOWN_USB;
		htc_batt_schedule_batt_info_update();
		break;
	case HTC_CHARGER_EVENT_OVP:
	case HTC_CHARGER_EVENT_OVP_RESOLVE:
	case HTC_CHARGER_EVENT_SRC_UNDER_RATING:
	case HTC_CHARGER_EVENT_SAFETY_TIMEOUT:
		htc_batt_schedule_batt_info_update();
		break;
	case HTC_CHARGER_EVENT_SRC_MHL_AC:
		latest_chg_src = CHARGER_MHL_AC;
		htc_batt_schedule_batt_info_update();
		break;
	case HTC_CHARGER_EVENT_READY:
		if (!htc_batt_info.icharger) {
			pr_err("[BATT]err: htc_charger is not hooked.\n");
			break;
		}
		mutex_lock(&htc_batt_info.info_lock);
		htc_batt_info.icharger->ready = 1;
		/* batt_state becomes valid once gauge, charger and cable are ready */
		if (htc_batt_info.igauge && htc_batt_info.igauge->ready &&
				htc_batt_info.rep.cable_ready)
htc_batt_info.rep.batt_state = 1;
		if (htc_batt_info.rep.batt_state)
			htc_batt_schedule_batt_info_update();
		mutex_unlock(&htc_batt_info.info_lock);
		break;
	case HTC_CHARGER_EVENT_SRC_CABLE_INSERT_NOTIFY:
		latest_chg_src = CHARGER_NOTIFY;
		htc_batt_schedule_batt_info_update();
		break;
	default:
		pr_info("[BATT] unsupported charger event(%d)\n", event);
		break;
	}
	return 0;
}
#if 0
/* Legacy PMIC battery-alarm configuration, compiled out. */
#ifdef CONFIG_HTC_BATT_ALARM
static int batt_set_voltage_alarm(unsigned long lower_threshold,
		unsigned long upper_threshold)
#else
static int batt_alarm_config(unsigned long lower_threshold,
		unsigned long upper_threshold)
#endif
{
	int rc = 0;
	BATT_LOG("%s(lw = %lu, up = %lu)", __func__,
			lower_threshold, upper_threshold);
	/* disable the alarm before reprogramming its thresholds */
	rc = pm8058_batt_alarm_state_set(0, 0);
	if (rc) {
		BATT_ERR("state_set disabled failed, rc=%d", rc);
		goto done;
	}
	rc = pm8058_batt_alarm_threshold_set(lower_threshold, upper_threshold);
	if (rc) {
		BATT_ERR("threshold_set failed, rc=%d!", rc);
		goto done;
	}
#ifdef CONFIG_HTC_BATT_ALARM
	rc = pm8058_batt_alarm_state_set(1, 0);
	if (rc) {
		BATT_ERR("state_set enabled failed, rc=%d", rc);
		goto done;
	}
#endif
done:
	return rc;
}
#ifdef CONFIG_HTC_BATT_ALARM
/* Disable the PMIC voltage alarm entirely. */
static int batt_clear_voltage_alarm(void)
{
	int rc = pm8058_batt_alarm_state_set(0, 0);
	BATT_LOG("disable voltage alarm");
	if (rc)
		BATT_ERR("state_set disabled failed, rc=%d", rc);
	return rc;
}
/*
 * Switch between disabled/critical/normal alarm modes; on failure the
 * mode falls back to disabled and the alarm is cleared.
 */
static int batt_set_voltage_alarm_mode(int mode)
{
	int rc = 0;
	BATT_LOG("%s , mode:%d\n", __func__, mode);
	mutex_lock(&batt_set_alarm_lock);
	switch (mode) {
	case BATT_ALARM_DISABLE_MODE:
		rc = batt_clear_voltage_alarm();
		break;
	case BATT_ALARM_CRITICAL_MODE:
		rc = batt_set_voltage_alarm(BATT_CRITICAL_LOW_VOLTAGE,
				alarm_data.upper_threshold);
		break;
	default:
	case BATT_ALARM_NORMAL_MODE:
		rc = batt_set_voltage_alarm(alarm_data.lower_threshold,
				alarm_data.upper_threshold);
		break;
	}
	if (!rc)
		battery_vol_alarm_mode = mode;
	else {
		battery_vol_alarm_mode = BATT_ALARM_DISABLE_MODE;
		batt_clear_voltage_alarm();
	}
	mutex_unlock(&batt_set_alarm_lock);
	return rc;
}
#endif
static int battery_alarm_notifier_func(struct notifier_block *nfb,
		unsigned long value, void *data);
static struct notifier_block battery_alarm_notifier = {
	.notifier_call = battery_alarm_notifier_func,
};
/*
 * PMIC battery-alarm notifier (inside the surrounding #if 0 region).
 * In critical mode, three consecutive trips force the reported level to 1%.
 */
static int battery_alarm_notifier_func(struct notifier_block *nfb,
		unsigned long status, void *data)
{
#ifdef CONFIG_HTC_BATT_ALARM
	BATT_LOG("%s \n", __func__);
	if (battery_vol_alarm_mode == BATT_ALARM_CRITICAL_MODE) {
		BATT_LOG("%s(): CRITICAL_MODE counter = %d", __func__,
				htc_batt_timer.batt_critical_alarm_counter + 1);
		if (++htc_batt_timer.batt_critical_alarm_counter >= 3) {
			BATT_LOG("%s: 3V voltage alarm is triggered.", __func__);
			htc_batt_info.rep.level = 1;
			htc_battery_core_update_changed();
		}
		/* re-arm the critical alarm after each trip */
		batt_set_voltage_alarm_mode(BATT_ALARM_CRITICAL_MODE);
	} else if (battery_vol_alarm_mode == BATT_ALARM_NORMAL_MODE) {
		htc_batt_timer.batt_alarm_status++;
		BATT_LOG("%s: NORMAL_MODE batt alarm status = %u", __func__,
				htc_batt_timer.batt_alarm_status);
	} else {
		BATT_ERR("%s:Warning: batt alarm triggerred in disable mode ",
				__func__);
	}
#else
	htc_batt_timer.batt_alarm_status++;
	BATT_LOG("%s: batt alarm status %u", __func__,
			htc_batt_timer.batt_alarm_status);
#endif
	return 0;
}
#endif
#if 0
/* Legacy wake-lock policy by charger source, compiled out. */
static void update_wake_lock(int status)
{
#ifdef CONFIG_HTC_BATT_ALARM
	if (status != CHARGER_BATTERY && !ac_suspend_flag)
		wake_lock(&htc_batt_info.vbus_wake_lock);
	else if (status == CHARGER_USB && ac_suspend_flag)
		wake_lock(&htc_batt_info.vbus_wake_lock);
	else
		wake_lock_timeout(&htc_batt_info.vbus_wake_lock, HZ * 5);
#else
	if (status == CHARGER_USB)
		wake_lock(&htc_batt_info.vbus_wake_lock);
	else
		wake_lock_timeout(&htc_batt_info.vbus_wake_lock, HZ * 5);
#endif
}
#endif
/*
 * Cable-detect callback from the USB stack: translate the connect type
 * into an HTC_CHARGER_EVENT_SRC_* notification. Duplicate reports (same
 * source, not the first call) are ignored.
 */
static void cable_status_notifier_func(enum usb_connect_type online)
{
	static int first_update = 1;
	mutex_lock(&cable_notifier_lock);
	htc_batt_info.rep.cable_ready = 1;
	if (htc_batt_info.igauge && htc_batt_info.icharger &&
			!(htc_batt_info.rep.batt_state))
		if(htc_batt_info.igauge->ready &&
htc_batt_info.icharger->ready)
			htc_batt_info.rep.batt_state = 1;
	BATT_LOG("%s(%d)", __func__, online);
	if (online == latest_chg_src && !first_update) {
		BATT_LOG("%s: charger type (%u) same return.", __func__, online);
		mutex_unlock(&cable_notifier_lock);
		return;
	}
	first_update = 0;
	/* map USB-stack connect type onto a charger-source event */
	switch (online) {
	case CONNECT_TYPE_USB:
		BATT_LOG("USB charger");
		htc_charger_event_notify(HTC_CHARGER_EVENT_SRC_USB);
		break;
	case CONNECT_TYPE_AC:
		BATT_LOG("5V AC charger");
		htc_charger_event_notify(HTC_CHARGER_EVENT_SRC_AC);
		break;
	case CONNECT_TYPE_WIRELESS:
		BATT_LOG("wireless charger");
		htc_charger_event_notify(HTC_CHARGER_EVENT_SRC_WIRELESS);
		break;
	case CONNECT_TYPE_UNKNOWN:
		BATT_LOG("unknown type");
		htc_charger_event_notify(HTC_CHARGER_EVENT_SRC_DETECTING);
		break;
	case CONNECT_TYPE_INTERNAL:
		BATT_LOG("delivers power to VBUS from battery (not supported)");
		htc_charger_event_notify(HTC_CHARGER_EVENT_SRC_INTERNAL);
		break;
	case CONNECT_TYPE_CLEAR:
		BATT_LOG("stop 5V VBUS from battery (not supported)");
		htc_charger_event_notify(HTC_CHARGER_EVENT_SRC_CLEAR);
		break;
	case CONNECT_TYPE_NONE:
		BATT_LOG("No cable exists");
		htc_charger_event_notify(HTC_CHARGER_EVENT_SRC_NONE);
		break;
	case CONNECT_TYPE_MHL_AC:
		BATT_LOG("mhl_ac");
		htc_charger_event_notify(HTC_CHARGER_EVENT_SRC_MHL_AC);
		break;
	case CONNECT_TYPE_NOTIFY:
		BATT_LOG("cable insert notify");
		htc_charger_event_notify(HTC_CHARGER_EVENT_SRC_CABLE_INSERT_NOTIFY);
		break;
	default:
		BATT_LOG("unsupported connect_type=%d", online);
		htc_charger_event_notify(HTC_CHARGER_EVENT_SRC_NONE);
		break;
	}
#if 0
	htc_batt_timer.alarm_timer_flag =
			(unsigned int)htc_batt_info.rep.charging_source;
	update_wake_lock(htc_batt_info.rep.charging_source);
#endif
	mutex_unlock(&cable_notifier_lock);
}
/* Stub: charging control is handled by the charger backend. Always 0. */
static int htc_battery_set_charging(int ctl)
{
	int rc = 0;
	return rc;
}
/* Protects chg_limit_reason and the duty-cycle timer state machine. */
struct mutex chg_limit_lock;
/*
 * Set or clear one HTC_BATT_CHG_LIMIT_BIT_* reason and, when the overall
 * limited/unlimited state flips, push the new state to the charger.
 */
static void set_limit_charge_with_reason(bool enable, int reason)
{
	int prev_chg_limit_reason;
#ifdef CONFIG_DUTY_CYCLE_LIMIT
	int chg_limit_current;
#endif
mutex_lock(&chg_limit_lock); prev_chg_limit_reason = chg_limit_reason; if (chg_limit_active_mask & reason) { if (enable) chg_limit_reason |= reason; else chg_limit_reason &= ~reason; if (prev_chg_limit_reason ^ chg_limit_reason) { BATT_LOG("chg_limit_reason:0x%x->0x%d", prev_chg_limit_reason, chg_limit_reason); if (!!prev_chg_limit_reason != !!chg_limit_reason && htc_batt_info.icharger && htc_batt_info.icharger->set_limit_charge_enable) { #ifdef CONFIG_DUTY_CYCLE_LIMIT chg_limit_current = limit_charge_timer_on != 0 ? limit_charge_timer_ma : 0; htc_batt_info.icharger->set_limit_charge_enable(chg_limit_reason, chg_limit_timer_sub_mask, chg_limit_current); #else htc_batt_info.icharger->set_limit_charge_enable(!!chg_limit_reason); #endif } } } mutex_unlock(&chg_limit_lock); } #ifdef CONFIG_DUTY_CYCLE_LIMIT static void limit_chg_timer_worker(struct work_struct *work) { mutex_lock(&chg_limit_lock); pr_info("%s: limit_chg_timer_state = %d\n", __func__, limit_chg_timer_state); switch (limit_chg_timer_state) { case LIMIT_CHG_TIMER_STATE_ON: if (limit_charge_timer_off) { limit_chg_timer_state = LIMIT_CHG_TIMER_STATE_OFF; schedule_delayed_work(&limit_chg_timer_work, round_jiffies_relative(msecs_to_jiffies (limit_charge_timer_off * 1000))); htc_batt_info.icharger->set_charger_enable(0); } break; case LIMIT_CHG_TIMER_STATE_OFF: if (limit_charge_timer_on) { limit_chg_timer_state = LIMIT_CHG_TIMER_STATE_ON; schedule_delayed_work(&limit_chg_timer_work, round_jiffies_relative(msecs_to_jiffies (limit_charge_timer_on * 1000))); } case LIMIT_CHG_TIMER_STATE_NONE: default: htc_batt_info.icharger->set_charger_enable(!!htc_batt_info.rep.charging_enabled); } mutex_unlock(&chg_limit_lock); } static void batt_update_limited_charge_timer(int charging_enabled) { bool is_schedule_timer = 0; if (limit_charge_timer_ma == 0 || limit_charge_timer_on == 0) return; mutex_lock(&chg_limit_lock); if ((charging_enabled != HTC_PWR_SOURCE_TYPE_BATT) && !!(chg_limit_reason & chg_limit_timer_sub_mask)) { if 
(limit_chg_timer_state == LIMIT_CHG_TIMER_STATE_NONE) {
			/* kick the state machine off in the OFF phase */
			limit_chg_timer_state = LIMIT_CHG_TIMER_STATE_OFF;
			is_schedule_timer = 1;
		}
	} else if (limit_chg_timer_state != LIMIT_CHG_TIMER_STATE_NONE){
		limit_chg_timer_state = LIMIT_CHG_TIMER_STATE_NONE;
		is_schedule_timer = 1;
	}
	if (is_schedule_timer)
		schedule_delayed_work(&limit_chg_timer_work, 0);
	mutex_unlock(&chg_limit_lock);
}
#endif
/*
 * Apply a (deduplicated) context event: adjust the charge-limit reasons
 * and the suspend high-frequency-check mask, then refresh battery info.
 */
static void __context_event_handler(enum batt_context_event event)
{
	pr_info("[BATT] handle context event(%d)\n", event);
	switch (event) {
	case EVENT_TALK_START:
		set_limit_charge_with_reason(true, HTC_BATT_CHG_LIMIT_BIT_TALK);
		suspend_highfreq_check_reason |= SUSPEND_HIGHFREQ_CHECK_BIT_TALK;
		break;
	case EVENT_TALK_STOP:
		set_limit_charge_with_reason(false, HTC_BATT_CHG_LIMIT_BIT_TALK);
		suspend_highfreq_check_reason &= ~SUSPEND_HIGHFREQ_CHECK_BIT_TALK;
		break;
	case EVENT_NAVIGATION_START:
		set_limit_charge_with_reason(true, HTC_BATT_CHG_LIMIT_BIT_NAVI);
		break;
	case EVENT_NAVIGATION_STOP:
		set_limit_charge_with_reason(false, HTC_BATT_CHG_LIMIT_BIT_NAVI);
		break;
	case EVENT_NETWORK_SEARCH_START:
		suspend_highfreq_check_reason |= SUSPEND_HIGHFREQ_CHECK_BIT_SEARCH;
		break;
	case EVENT_NETWORK_SEARCH_STOP:
		suspend_highfreq_check_reason &= ~SUSPEND_HIGHFREQ_CHECK_BIT_SEARCH;
		break;
	case EVENT_MUSIC_START:
		suspend_highfreq_check_reason |= SUSPEND_HIGHFREQ_CHECK_BIT_MUSIC;
		break;
	case EVENT_MUSIC_STOP:
		suspend_highfreq_check_reason &= ~SUSPEND_HIGHFREQ_CHECK_BIT_MUSIC;
		break;
	default:
		pr_warn("unsupported context event (%d)\n", event);
		return;
	}
	htc_batt_schedule_batt_info_update();
}
struct mutex context_event_handler_lock;
/*
 * Track context bits (talk/search/navigation/music) and forward only
 * genuine state transitions to __context_event_handler().
 */
static int htc_batt_context_event_handler(enum batt_context_event event)
{
	int prev_context_state;
	mutex_lock(&context_event_handler_lock);
	prev_context_state = context_state;
	switch (event) {
	case EVENT_TALK_START:
		if (context_state & CONTEXT_STATE_BIT_TALK)
			goto exit;
		context_state |= CONTEXT_STATE_BIT_TALK;
		break;
	case EVENT_TALK_STOP:
		if (!(context_state &
CONTEXT_STATE_BIT_TALK))
			goto exit;
		context_state &= ~CONTEXT_STATE_BIT_TALK;
		break;
	case EVENT_NETWORK_SEARCH_START:
		if (context_state & CONTEXT_STATE_BIT_SEARCH)
			goto exit;
		context_state |= CONTEXT_STATE_BIT_SEARCH;
		break;
	case EVENT_NETWORK_SEARCH_STOP:
		if (!(context_state & CONTEXT_STATE_BIT_SEARCH))
			goto exit;
		context_state &= ~CONTEXT_STATE_BIT_SEARCH;
		break;
	case EVENT_NAVIGATION_START:
		if (context_state & CONTEXT_STATE_BIT_NAVIGATION)
			goto exit;
		context_state |= CONTEXT_STATE_BIT_NAVIGATION;
		break;
	case EVENT_NAVIGATION_STOP:
		if (!(context_state & CONTEXT_STATE_BIT_NAVIGATION))
			goto exit;
		context_state &= ~CONTEXT_STATE_BIT_NAVIGATION;
		break;
	case EVENT_MUSIC_START:
		if (context_state & CONTEXT_STATE_BIT_MUSIC)
			goto exit;
		context_state |= CONTEXT_STATE_BIT_MUSIC;
		break;
	case EVENT_MUSIC_STOP:
		if (!(context_state & CONTEXT_STATE_BIT_MUSIC))
			goto exit;
		context_state &= ~CONTEXT_STATE_BIT_MUSIC;
		break;
	default:
		pr_warn("unsupported context event (%d)\n", event);
		goto exit;
	}
	BATT_LOG("context_state: 0x%x -> 0x%x", prev_context_state, context_state);
	__context_event_handler(event);
exit:
	mutex_unlock(&context_event_handler_lock);
	return 0;
}
/*
 * Userspace charger control: toggle user charge-disable or the API
 * power-source-disable reason. Limit-charger modes are accepted but
 * deliberately skipped. Returns 0 on success, -1 for unknown modes.
 */
static int htc_batt_charger_control(enum charger_control_flag control)
{
	int ret = 0;
	BATT_LOG("%s: user switch charger to mode: %u", __func__, control);
	switch (control) {
	case STOP_CHARGER:
		chg_dis_user_timer = 1;
		break;
	case ENABLE_CHARGER:
		chg_dis_user_timer = 0;
		break;
	case DISABLE_PWRSRC:
		pwrsrc_dis_reason |= HTC_BATT_PWRSRC_DIS_BIT_API;
		break;
	case ENABLE_PWRSRC:
		pwrsrc_dis_reason &= ~HTC_BATT_PWRSRC_DIS_BIT_API;
		break;
	case ENABLE_LIMIT_CHARGER:
	case DISABLE_LIMIT_CHARGER:
		BATT_LOG("%s: skip charger_contorl(%d)", __func__, control);
		return ret;
		break;
	default:
		BATT_LOG("%s: unsupported charger_contorl(%d)", __func__, control);
		ret = -1;
		break;
	}
	htc_batt_schedule_batt_info_update();
	return ret;
}
/* Clamp and store the user-requested full-charge level (0..100). */
static void htc_batt_set_full_level(int percent)
{
	if (percent < 0)
		htc_batt_info.rep.full_level = 0;
	else if (100 <
percent)
		htc_batt_info.rep.full_level = 100;
	else
		htc_batt_info.rep.full_level = percent;
	BATT_LOG(" set full_level constraint as %d.", percent);
	return;
}
/* Clamp and store the full-level limit used to stop battery charging. */
static void htc_batt_set_full_level_dis_batt_chg(int percent)
{
	if (percent < 0)
		htc_batt_info.rep.full_level_dis_batt_chg = 0;
	else if (100 < percent)
		htc_batt_info.rep.full_level_dis_batt_chg = 100;
	else
		htc_batt_info.rep.full_level_dis_batt_chg = percent;
	BATT_LOG(" set full_level_dis_batt_chg constraint as %d.", percent);
	return;
}
/* Ask the gauge to persist its battery data; only flag value 1 acts. */
static void htc_batt_trigger_store_battery_data(int triggle_flag)
{
	if (triggle_flag == 1) {
		if (htc_batt_info.igauge &&
				htc_batt_info.igauge->store_battery_data) {
			htc_batt_info.igauge->store_battery_data();
		}
	}
	return;
}
/* Persist the UI state-of-charge via the gauge; ignores values outside 1..100. */
static void htc_batt_store_battery_ui_soc(int soc_ui)
{
	if (soc_ui <= 0 || soc_ui > 100)
		return;
	if (htc_batt_info.igauge &&
			htc_batt_info.igauge->store_battery_ui_soc) {
		htc_batt_info.igauge->store_battery_ui_soc(soc_ui);
	}
	return;
}
/* Read back the stored UI SoC; *soc_ui is only written for values 1..100. */
static void htc_batt_get_battery_ui_soc(int *soc_ui)
{
	int temp_soc;
	if (htc_batt_info.igauge && htc_batt_info.igauge->get_battery_ui_soc) {
		temp_soc = htc_batt_info.igauge->get_battery_ui_soc();
		if (temp_soc > 0 && temp_soc <= 100)
			*soc_ui = temp_soc;
	}
	return;
}
/*
 * Real-time attribute read (voltage/current/temperature) through the
 * gauge ops. VOLTAGE_UV scales the mV reading to uV. Returns the gauge
 * op's result, or -EINVAL if the op is missing.
 * NOTE(review): igauge itself is dereferenced without a NULL check here,
 * unlike most other paths in this file -- confirm callers guarantee it.
 */
static int htc_battery_get_rt_attr(enum htc_batt_rt_attr attr, int *val)
{
	int ret = -EINVAL;
	switch (attr) {
	case HTC_BATT_RT_VOLTAGE:
		if (htc_batt_info.igauge->get_battery_voltage)
			ret = htc_batt_info.igauge->get_battery_voltage(val);
		break;
	case HTC_BATT_RT_CURRENT:
		if (htc_batt_info.igauge->get_battery_current)
			ret = htc_batt_info.igauge->get_battery_current(val);
		break;
	case HTC_BATT_RT_TEMPERATURE:
		if (htc_batt_info.igauge->get_battery_temperature)
			ret = htc_batt_info.igauge->get_battery_temperature(val);
		break;
	case HTC_BATT_RT_VOLTAGE_UV:
		if (htc_batt_info.igauge->get_battery_voltage) {
			ret = htc_batt_info.igauge->get_battery_voltage(val);
			*val *= 1000;	/* gauge reports mV; attribute wants uV */
		}
		break;
	default:
		break;
	}
	return ret;
}
static ssize_t htc_battery_show_batt_attr(struct device_attribute
*attr, char *buf)
{
	int len = 0;
	/* summary of the last reported status, then backend-specific text */
	len += scnprintf(buf + len, PAGE_SIZE - len,
			"charging_source: %d;\n"
			"charging_enabled: %d;\n"
			"overload: %d;\n"
			"Percentage(%%): %d;\n"
			"Percentage_raw(%%): %d;\n"
			"htc_extension: 0x%x;\n",
			htc_batt_info.rep.charging_source,
			htc_batt_info.rep.charging_enabled,
			htc_batt_info.rep.overload,
			htc_batt_info.rep.level,
			htc_batt_info.rep.level_raw,
			htc_batt_info.htc_extension
			);
	if (htc_batt_info.igauge) {
#if 0
		if (htc_batt_info.igauge->name)
			len += scnprintf(buf + len, PAGE_SIZE - len,
					"gauge: %s;\n", htc_batt_info.igauge->name);
#endif
		if (htc_batt_info.igauge->get_attr_text)
			len += htc_batt_info.igauge->get_attr_text(buf + len,
					PAGE_SIZE - len);
	}
	if (htc_batt_info.icharger) {
#if 0
		if (htc_batt_info.icharger->name)
			len += scnprintf(buf + len, PAGE_SIZE - len,
					"charger: %s;\n", htc_batt_info.icharger->name);
#endif
		if (htc_batt_info.icharger->get_attr_text)
			len += htc_batt_info.icharger->get_attr_text(buf + len,
					PAGE_SIZE - len);
	}
	return len;
}
/* sysfs show: coulomb counter reading (uAh) from the gauge, if available. */
static ssize_t htc_battery_show_cc_attr(struct device_attribute *attr,
		char *buf)
{
	int len = 0, cc_uah = 0;
	if (htc_batt_info.igauge) {
		if (htc_batt_info.igauge->get_battery_cc) {
			htc_batt_info.igauge->get_battery_cc(&cc_uah);
			len += scnprintf(buf + len, PAGE_SIZE - len,
					"cc:%d\n", cc_uah);
		}
	}
	return len;
}
/* Set the charger's maximum input current (mA); -1 if not supported. */
static int htc_batt_set_max_input_current(int target_ma)
{
	if(htc_batt_info.icharger && htc_batt_info.icharger->max_input_current) {
		htc_batt_info.icharger->max_input_current(target_ma);
		return 0;
	} else
		return -1;
}
/* sysfs show: the htc_extension flag word as a decimal value. */
static ssize_t htc_battery_show_htc_extension_attr(struct device_attribute *attr,
		char *buf)
{
	int len = 0;
	len += scnprintf(buf + len, PAGE_SIZE - len,"%d\n",
			htc_batt_info.htc_extension);
	return len;
}
/*
 * Misc-device open: single-open policy enforced under batt_lock
 * (-EBUSY on a second open).
 */
static int htc_batt_open(struct inode *inode, struct file *filp)
{
	int ret = 0;
	BATT_LOG("%s: open misc device driver.", __func__);
	spin_lock(&htc_batt_info.batt_lock);
	if (!htc_batt_info.is_open)
		htc_batt_info.is_open = 1;
	else
		ret = -EBUSY;
spin_unlock(&htc_batt_info.batt_lock);
#ifdef CONFIG_ARCH_MSM8X60_LTE
	/* in MFG mode 5, take CPU1 offline when the device is opened */
	if (board_mfg_mode() == 5)
		cpu_down(1);
#endif
	return ret;
}
/* Misc-device release: clear the single-open flag. */
static int htc_batt_release(struct inode *inode, struct file *filp)
{
	BATT_LOG("%s: release misc device driver.", __func__);
	spin_lock(&htc_batt_info.batt_lock);
	htc_batt_info.is_open = 0;
	spin_unlock(&htc_batt_info.batt_lock);
	return 0;
}
/* Copy the cached battery status snapshot into the caller's reply struct. */
static int htc_batt_get_battery_info(struct battery_info_reply *htc_batt_update)
{
	htc_batt_update->batt_vol = htc_batt_info.rep.batt_vol;
	htc_batt_update->batt_id = htc_batt_info.rep.batt_id;
	htc_batt_update->batt_temp = htc_batt_info.rep.batt_temp;
	htc_batt_update->batt_current = htc_batt_info.rep.batt_current;
#if 0
	htc_batt_update->batt_current = htc_batt_info.rep.batt_current -
			htc_batt_info.rep.batt_discharg_current;
	htc_batt_update->batt_discharg_current =
			htc_batt_info.rep.batt_discharg_current;
#endif
	htc_batt_update->level = htc_batt_info.rep.level;
	htc_batt_update->level_raw = htc_batt_info.rep.level_raw;
	htc_batt_update->charging_source = htc_batt_info.rep.charging_source;
	htc_batt_update->charging_enabled = htc_batt_info.rep.charging_enabled;
	htc_batt_update->full_bat = htc_batt_info.rep.full_bat;
	htc_batt_update->full_level = htc_batt_info.rep.full_level;
	htc_batt_update->over_vchg = htc_batt_info.rep.over_vchg;
	htc_batt_update->temp_fault = htc_batt_info.rep.temp_fault;
	htc_batt_update->batt_state = htc_batt_info.rep.batt_state;
	htc_batt_update->cable_ready = htc_batt_info.rep.cable_ready;
	htc_batt_update->overload = htc_batt_info.rep.overload;
	return 0;
}
/*
 * Read a charger property through the charger ops; if the op is missing,
 * break out of the switch and report 0 after logging.
 */
static int htc_batt_get_chg_status(enum power_supply_property psp)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_CHARGE_TYPE:
		if (htc_batt_info.icharger &&
				htc_batt_info.icharger->get_charge_type)
			return htc_batt_info.icharger->get_charge_type();
		else
			break;
	case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
		if (htc_batt_info.icharger &&
				htc_batt_info.icharger->get_chg_usb_iusbmax)
			return htc_batt_info.icharger->get_chg_usb_iusbmax();
		else
			break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN: if (htc_batt_info.icharger && htc_batt_info.icharger->get_chg_vinmin) return htc_batt_info.icharger->get_chg_vinmin(); else break; case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION: if (htc_batt_info.icharger && htc_batt_info.icharger->get_input_voltage_regulation) return htc_batt_info.icharger->get_input_voltage_regulation(); else break; default: break; } pr_info("%s: functoin doesn't exist! psp=%d\n", __func__, psp); return 0; } static int htc_batt_set_chg_property(enum power_supply_property psp, int val) { switch (psp) { case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX: if (htc_batt_info.icharger && htc_batt_info.icharger->set_chg_iusbmax) return htc_batt_info.icharger->set_chg_iusbmax(val); else break; case POWER_SUPPLY_PROP_VOLTAGE_MIN: if (htc_batt_info.icharger && htc_batt_info.icharger->set_chg_vin_min) return htc_batt_info.icharger->set_chg_vin_min(val); else break; default: break; } pr_info("%s: functoin doesn't exist! psp=%d\n", __func__, psp); return 0; } static void batt_set_check_timer(u32 seconds) { pr_debug("[BATT] %s(%u sec)\n", __func__, seconds); mod_timer(&htc_batt_timer.batt_timer, jiffies + msecs_to_jiffies(seconds * 1000)); } u32 htc_batt_getmidvalue(int32_t *value) { int i, j, n, len; len = ADC_REPLY_ARRAY_SIZE; for (i = 0; i < len - 1; i++) { for (j = i + 1; j < len; j++) { if (value[i] > value[j]) { n = value[i]; value[i] = value[j]; value[j] = n; } } } return value[len / 2]; } #if 0 static int32_t htc_batt_get_battery_adc(void) { int ret = 0; u32 vref = 0; u32 battid_adc = 0; struct battery_adc_reply adc; ret = pm8058_htc_config_mpp_and_adc_read( adc.adc_voltage, ADC_REPLY_ARRAY_SIZE, CHANNEL_ADC_BATT_AMON, htc_batt_info.mpp_config->vol[XOADC_MPP], htc_batt_info.mpp_config->vol[PM_MPP_AIN_AMUX]); if (ret) goto get_adc_failed; ret = pm8058_htc_config_mpp_and_adc_read( adc.adc_current, ADC_REPLY_ARRAY_SIZE, CHANNEL_ADC_BATT_AMON, htc_batt_info.mpp_config->curr[XOADC_MPP], 
htc_batt_info.mpp_config->curr[PM_MPP_AIN_AMUX]);
	if (ret)
		goto get_adc_failed;
	ret = pm8058_htc_config_mpp_and_adc_read(
			adc.adc_temperature,
			ADC_REPLY_ARRAY_SIZE,
			CHANNEL_ADC_BATT_AMON,
			htc_batt_info.mpp_config->temp[XOADC_MPP],
			htc_batt_info.mpp_config->temp[PM_MPP_AIN_AMUX]);
	if (ret)
		goto get_adc_failed;
	ret = pm8058_htc_config_mpp_and_adc_read(
			adc.adc_battid,
			ADC_REPLY_ARRAY_SIZE,
			CHANNEL_ADC_BATT_AMON,
			htc_batt_info.mpp_config->battid[XOADC_MPP],
			htc_batt_info.mpp_config->battid[PM_MPP_AIN_AMUX]);
	vref = htc_batt_getmidvalue(adc.adc_voltage);
	battid_adc = htc_batt_getmidvalue(adc.adc_battid);
	BATT_LOG("%s , vref:%d, battid_adc:%d, battid:%d\n",
			__func__, vref, battid_adc, battid_adc * 1000 / vref);
	if (ret)
		goto get_adc_failed;
	memcpy(&htc_batt_info.adc_data, &adc,
			sizeof(struct battery_adc_reply));
get_adc_failed:
	return ret;
}
#endif
/*
 * Periodic timer callback: schedule a battery-info update, or flag it as
 * pending when the driver is in STATE_PREPARE (suspend path).
 */
static void batt_regular_timer_handler(unsigned long data)
{
	if (htc_batt_info.state & STATE_PREPARE) {
		htc_batt_info.state |= STATE_WORKQUEUE_PENDING;
		pr_info("[BATT] %s(): Skip due to htc_batt_info.state=0x%x\n",
				__func__, htc_batt_info.state);
	} else {
		htc_batt_info.state &= ~STATE_WORKQUEUE_PENDING;
		pr_debug("[BATT] %s(): Run, htc_batt_info.state=0x%x\n",
				__func__, htc_batt_info.state);
		htc_batt_schedule_batt_info_update();
	}
}
/* Wakeup alarm callback: the wakeup itself is the point; no action taken. */
static void batt_check_alarm_handler(struct alarm *alarm)
{
	BATT_LOG("alarm handler, but do nothing.");
	return;
}
/*
 * Hysteresis ("pingpong") decision on whether input charging should be
 * cut once the level reaches upperbd; charging resumes only after the
 * level falls to upperbd - 5. Returns 1 to turn input charging off.
 */
static int bounding_fullly_charged_level(int upperbd, int current_level)
{
	static int pingpong = 1;
	int lowerbd;
	int is_input_chg_off_by_bounding = 0;
	lowerbd = upperbd - 5;	/* 5% hysteresis band below the target */
	if (lowerbd < 0)
		lowerbd = 0;
	if (pingpong == 1 && upperbd <= current_level) {
		pr_info("MFG: lowerbd=%d, upperbd=%d, current=%d,"
				" pingpong:1->0 turn off\n", lowerbd, upperbd, current_level);
		is_input_chg_off_by_bounding = 1;
		pingpong = 0;
	} else if (pingpong == 0 && lowerbd < current_level) {
		pr_info("MFG: lowerbd=%d, upperbd=%d, current=%d,"
				" toward 0, turn off\n", lowerbd, upperbd, current_level);
is_input_chg_off_by_bounding = 1;
	} else if (pingpong == 0 && current_level <= lowerbd) {
		pr_info("MFG: lowerbd=%d, upperbd=%d, current=%d,"
				" pingpong:0->1 turn on\n", lowerbd, upperbd, current_level);
		pingpong = 1;
	} else {
		pr_info("MFG: lowerbd=%d, upperbd=%d, current=%d,"
				" toward %d, turn on\n", lowerbd, upperbd, current_level, pingpong);
	}
	return is_input_chg_off_by_bounding;
}
/*
 * Same hysteresis logic as bounding_fullly_charged_level(), but for the
 * battery-charging (as opposed to input) disable decision. Keeps its own
 * static pingpong state.
 */
static int bounding_fullly_charged_level_dis_batt_chg(int upperbd,
		int current_level)
{
	static int pingpong = 1;
	int lowerbd;
	int is_batt_chg_off_by_bounding = 0;
	lowerbd = upperbd - 5;	/* 5% hysteresis band below the target */
	if (lowerbd < 0)
		lowerbd = 0;
	if (pingpong == 1 && upperbd <= current_level) {
		pr_info("[BATT] %s: lowerbd=%d, upperbd=%d, current=%d,"
				" pingpong:1->0 turn off\n", __func__, lowerbd, upperbd, current_level);
		is_batt_chg_off_by_bounding = 1;
		pingpong = 0;
	} else if (pingpong == 0 && lowerbd < current_level) {
		pr_info("[BATT] %s: lowerbd=%d, upperbd=%d, current=%d,"
				" toward 0, turn off\n", __func__, lowerbd, upperbd, current_level);
		is_batt_chg_off_by_bounding = 1;
	} else if (pingpong == 0 && current_level <= lowerbd) {
		pr_info("[BATT] %s: lowerbd=%d, upperbd=%d, current=%d,"
				" pingpong:0->1 turn on\n", __func__, lowerbd, upperbd, current_level);
		pingpong = 1;
	} else {
		pr_info("[BATT] %s: lowerbd=%d, upperbd=%d, current=%d,"
				" toward %d, turn on\n", __func__, lowerbd, upperbd, current_level, pingpong);
	}
	return is_batt_chg_off_by_bounding;
}
/* Apply the input-charge bounding only when a 1..99%% full_level is set. */
static inline int is_bounding_fully_charged_level(void)
{
	if (0 < htc_batt_info.rep.full_level &&
			htc_batt_info.rep.full_level < 100)
		return bounding_fullly_charged_level(
				htc_batt_info.rep.full_level, htc_batt_info.rep.level);
	return 0;
}
/* Apply the battery-charge bounding only when a 1..99%% limit is set. */
static inline int is_bounding_fully_charged_level_dis_batt_chg(void)
{
	if (0 < htc_batt_info.rep.full_level_dis_batt_chg &&
			htc_batt_info.rep.full_level_dis_batt_chg < 100)
		return bounding_fullly_charged_level_dis_batt_chg(
				htc_batt_info.rep.full_level_dis_batt_chg,
				htc_batt_info.rep.level);
	return 0;
}
static void
batt_update_info_from_charger(void)
{
	/* Pull fault/limit status flags from the charger backend. */
	if (!htc_batt_info.icharger) {
		BATT_LOG("warn: charger interface is not hooked.");
		return;
	}
	if (htc_batt_info.icharger->is_batt_temp_fault_disable_chg)
		htc_batt_info.icharger->is_batt_temp_fault_disable_chg(
				&charger_dis_temp_fault);
	if (htc_batt_info.icharger->is_under_rating)
		htc_batt_info.icharger->is_under_rating(
				&charger_under_rating);
	if (htc_batt_info.icharger->is_safty_timer_timeout)
		htc_batt_info.icharger->is_safty_timer_timeout(
				&charger_safety_timeout);
	if (htc_batt_info.icharger->is_battery_full_eoc_stop)
		htc_batt_info.icharger->is_battery_full_eoc_stop(
				&batt_full_eoc_stop);
}
/*
 * Refresh the cached rep snapshot (voltage, current, temperature, id,
 * SoC, OVP flag) from the gauge and charger backends.
 */
static void batt_update_info_from_gauge(void)
{
	if (!htc_batt_info.igauge) {
		BATT_LOG("warn: gauge interface is not hooked.");
		return;
	}
	if (htc_batt_info.igauge->get_battery_voltage)
		htc_batt_info.igauge->get_battery_voltage(
				&htc_batt_info.rep.batt_vol);
	if (htc_batt_info.igauge->get_battery_current)
		htc_batt_info.igauge->get_battery_current(
				&htc_batt_info.rep.batt_current);
	if (htc_batt_info.igauge->get_battery_temperature)
		htc_batt_info.igauge->get_battery_temperature(
				&htc_batt_info.rep.batt_temp);
	if (htc_batt_info.igauge->is_battery_temp_fault)
		htc_batt_info.igauge->is_battery_temp_fault(
				&htc_batt_info.rep.temp_fault);
	if (htc_batt_info.igauge->get_battery_id)
		htc_batt_info.igauge->get_battery_id(
				&htc_batt_info.rep.batt_id);
	if (htc_battery_cell_get_cur_cell())
		htc_batt_info.rep.full_bat =
				htc_battery_cell_get_cur_cell()->capacity;
	/* NOTE(review): get_battery_soc and icharger are used without the
	 * NULL checks applied to every other op above -- confirm both are
	 * guaranteed present by the time this runs. */
	htc_batt_info.igauge->get_battery_soc(
			&htc_batt_info.rep.level_raw);
	htc_batt_info.rep.level = htc_batt_info.rep.level_raw;
	if (htc_batt_info.icharger->is_ovp)
		htc_batt_info.icharger->is_ovp(&htc_batt_info.rep.over_vchg);
}
/* 1 when the measured voltage is below the critical-low threshold. */
inline static int is_voltage_critical_low(int voltage_mv)
{
	return (voltage_mv < htc_batt_info.critical_low_voltage_mv) ?
1 : 0;
}
#define CHG_ONE_PERCENT_LIMIT_PERIOD_MS (1000 * 60)
/*
 * Overload detector: while charging (not full) with charge current above
 * overload_curr_thr_ma, accumulate elapsed time; after the condition has
 * persisted through several one-minute windows (3 strikes), set
 * rep.overload. Any break in the condition resets everything.
 */
static void batt_check_overload(unsigned long time_since_last_update_ms)
{
	static unsigned int overload_count;
	static unsigned long time_accumulation;
	int is_full = 0;
	if(htc_batt_info.igauge && htc_batt_info.igauge->is_battery_full)
		htc_batt_info.igauge->is_battery_full(&is_full);
	pr_debug("[BATT] Chk overload by CS=%d V=%d I=%d count=%d overload=%d "
			"is_full=%d\n",
			htc_batt_info.rep.charging_source, htc_batt_info.rep.batt_vol,
			htc_batt_info.rep.batt_current, overload_count,
			htc_batt_info.rep.overload, is_full);
	if ((htc_batt_info.rep.charging_source > 0) && (!is_full) &&
			((htc_batt_info.rep.batt_current / 1000) >
				htc_batt_info.overload_curr_thr_ma)) {
		time_accumulation += time_since_last_update_ms;
		if (time_accumulation >= CHG_ONE_PERCENT_LIMIT_PERIOD_MS) {
			if (overload_count++ < 3) {
				htc_batt_info.rep.overload = 0;
			} else
				htc_batt_info.rep.overload = 1;
			time_accumulation = 0;
		}
	} else {
		overload_count = 0;
		time_accumulation = 0;
		htc_batt_info.rep.overload = 0;
	}
}
/*
 * Pick the critical-low UI decrement from dec_level_curr_table based on
 * the discharge current; the table's {0, 6} entry is the catch-all.
 */
static void batt_check_critical_low_level(int *dec_level, int batt_current)
{
	int i;
	for(i = 0; i < DEC_LEVEL_CURR_TABLE_SIZE; i++) {
		if (batt_current > dec_level_curr_table[i].threshold_ua) {
			*dec_level = dec_level_curr_table[i].dec_level;
			pr_debug("%s: i=%d, dec_level=%d, threshold_ua=%d\n",
					__func__, i, *dec_level,
					dec_level_curr_table[i].threshold_ua);
			break;
		}
	}
}
/* true once level_change_time ms have passed since pre_jiffies. */
static bool is_level_change_time_reached(unsigned long level_change_time,
		unsigned long pre_jiffies)
{
	unsigned long cur_jiffies = jiffies;
	unsigned long level_since_last_update_ms;
	level_since_last_update_ms = (cur_jiffies - pre_jiffies) *
			MSEC_PER_SEC / HZ;
	BATT_LOG("%s: total_time since last batt level update = %lu ms.",
			__func__, level_since_last_update_ms);
	if (level_since_last_update_ms < level_change_time) {
		return false;
	}
	return true;
}
/*
 * Bank the difference between the raw SoC drop and the (rate-limited)
 * UI drop in *store_level; apply the UI drop only while the bank stays
 * non-negative, so the UI can never fall below the raw level's debt.
 */
static void adjust_store_level(int *store_level, int drop_raw, int drop_ui, int prev)
{
	int store = *store_level;
	store += drop_raw -
drop_ui; if (store >= 0) htc_batt_info.rep.level = prev - drop_ui; else { htc_batt_info.rep.level = prev; store += drop_ui; } *store_level = store; } #define DISCHG_UPDATE_PERIOD_MS (1000 * 60) #define ONE_PERCENT_LIMIT_PERIOD_MS (1000 * (60 + 10)) #define FIVE_PERCENT_LIMIT_PERIOD_MS (1000 * (300 + 10)) #define ONE_MINUTES_MS (1000 * (60 + 10)) #define FOURTY_MINUTES_MS (1000 * (2400 + 10)) #define SIXTY_MINUTES_MS (1000 * (3600 + 10)) static void batt_level_adjust(unsigned long time_since_last_update_ms) { static int first = 1; static int critical_low_enter = 0; static int store_level = 0; static int pre_ten_digit, ten_digit; static bool stored_level_flag = false; static bool allow_drop_one_percent_flag = false; int prev_raw_level, drop_raw_level; int prev_level; int is_full = 0, dec_level = 0; int dropping_level; static unsigned long pre_jiffies; static unsigned long time_accumulated = 0; const struct battery_info_reply *prev_batt_info_rep = htc_battery_core_get_batt_info_rep(); if (!first) { prev_level = prev_batt_info_rep->level; prev_raw_level = prev_batt_info_rep->level_raw; } else { prev_level = htc_batt_info.rep.level; prev_raw_level = htc_batt_info.rep.level_raw; pre_jiffies = 0; pre_ten_digit = htc_batt_info.rep.level / 10; } drop_raw_level = prev_raw_level - htc_batt_info.rep.level_raw; if ((prev_batt_info_rep->charging_source > 0) && htc_batt_info.rep.charging_source == 0 && prev_level == 100) { BATT_LOG("%s: Cable plug out when level 100, reset timer.",__func__); pre_jiffies = jiffies; htc_batt_info.rep.level = prev_level; return; } if ((htc_batt_info.rep.charging_source == 0) && (stored_level_flag == false)) { store_level = prev_level - prev_raw_level; BATT_LOG("%s: Cable plug out, to store difference between" " UI & SOC. 
store_level:%d, prev_level:%d, prev_raw_level:%d" ,__func__, store_level, prev_level, prev_raw_level); stored_level_flag = true; } else if (htc_batt_info.rep.charging_source > 0) stored_level_flag = false; if (!prev_batt_info_rep->charging_enabled && !((prev_batt_info_rep->charging_source == 0) && htc_batt_info.rep.charging_source > 0)) { if (drop_raw_level > 0) { if (is_level_change_time_reached(DISCHG_UPDATE_PERIOD_MS, pre_jiffies) == false) { htc_batt_info.rep.level = prev_level; store_level += drop_raw_level; return; } } if (is_voltage_critical_low(htc_batt_info.rep.batt_vol)) { critical_low_enter = 1; if (htc_batt_info.decreased_batt_level_check) batt_check_critical_low_level(&dec_level, htc_batt_info.rep.batt_current); else dec_level = 6; htc_batt_info.rep.level = (prev_level - dec_level > 0) ? (prev_level - dec_level) : 0; pr_info("[BATT] battery level force decreses %d%% from %d%%" " (soc=%d)on critical low (%d mV)(%d mA)\n", dec_level, prev_level, htc_batt_info.rep.level, htc_batt_info.critical_low_voltage_mv, htc_batt_info.rep.batt_current); } else { if ((htc_batt_info.rep.level_raw < 30) || (prev_level - prev_raw_level > 10)) allow_drop_one_percent_flag = true; htc_batt_info.rep.level = prev_level; if (time_since_last_update_ms <= ONE_PERCENT_LIMIT_PERIOD_MS) { if (1 <= drop_raw_level) { adjust_store_level(&store_level, drop_raw_level, 1, prev_level); pr_info("[BATT] remap: normal soc drop = %d%% in %lu ms." " UI only allow -1%%, store_level:%d, ui:%d%%\n", drop_raw_level, time_since_last_update_ms, store_level, htc_batt_info.rep.level); } } else if ((chg_limit_reason & HTC_BATT_CHG_LIMIT_BIT_TALK) && (time_since_last_update_ms <= FIVE_PERCENT_LIMIT_PERIOD_MS)) { if (5 < drop_raw_level) { adjust_store_level(&store_level, drop_raw_level, 5, prev_level); } else if (1 <= drop_raw_level && drop_raw_level <= 5) { adjust_store_level(&store_level, drop_raw_level, 1, prev_level); } pr_info("[BATT] remap: phone soc drop = %d%% in %lu ms." 
" UI only allow -1%% or -5%%, store_level:%d, ui:%d%%\n", drop_raw_level, time_since_last_update_ms, store_level, htc_batt_info.rep.level); } else { if (1 <= drop_raw_level) { if ((ONE_MINUTES_MS < time_since_last_update_ms) && (time_since_last_update_ms <= FOURTY_MINUTES_MS)) { adjust_store_level(&store_level, drop_raw_level, 1, prev_level); } else if ((FOURTY_MINUTES_MS < time_since_last_update_ms) && (time_since_last_update_ms <= SIXTY_MINUTES_MS)) { if (2 <= drop_raw_level) { adjust_store_level(&store_level, drop_raw_level, 2, prev_level); } else { adjust_store_level(&store_level, drop_raw_level, 1, prev_level); } } else if (SIXTY_MINUTES_MS < time_since_last_update_ms) { if (3 <= drop_raw_level) { adjust_store_level(&store_level, drop_raw_level, 3, prev_level); } else if (drop_raw_level == 2) { adjust_store_level(&store_level, drop_raw_level, 2, prev_level); } else { adjust_store_level(&store_level, drop_raw_level, 1, prev_level); } } pr_info("[BATT] remap: suspend soc drop: %d%% in %lu ms." " UI only allow -1%% to -3%%, store_level:%d, ui:%d%%\n", drop_raw_level, time_since_last_update_ms, store_level, htc_batt_info.rep.level); } } if ((allow_drop_one_percent_flag == false) && (drop_raw_level == 0)) { htc_batt_info.rep.level = prev_level; pr_info("[BATT] remap: no soc drop and no additional 1%%," " ui:%d%%\n", htc_batt_info.rep.level); } else if ((allow_drop_one_percent_flag == true) && (drop_raw_level == 0) && (store_level > 0)) { store_level--; htc_batt_info.rep.level = prev_level - 1; allow_drop_one_percent_flag = false; pr_info("[BATT] remap: drop additional 1%%. store_level:%d," " ui:%d%%\n", store_level , htc_batt_info.rep.level); } else if (drop_raw_level < 0) { if (critical_low_enter) { pr_warn("[BATT] remap: level increase because of" " exit critical_low!\n"); } store_level += drop_raw_level; htc_batt_info.rep.level = prev_level; pr_info("[BATT] remap: soc increased. 
store_level:%d," " ui:%d%%\n", store_level, htc_batt_info.rep.level); } ten_digit = htc_batt_info.rep.level / 10; if (htc_batt_info.rep.level != 100) { if ((pre_ten_digit != 10) && (pre_ten_digit > ten_digit)) { allow_drop_one_percent_flag = true; pr_info("[BATT] remap: allow to drop additional 1%% at next" " level:%d%%.\n", htc_batt_info.rep.level - 1); } } pre_ten_digit = ten_digit; if (critical_low_enter) { critical_low_enter = 0; pr_warn("[BATT] exit critical_low without charge!\n"); } if (htc_batt_info.rep.batt_temp < 0 && drop_raw_level == 0 && store_level >= 2) { time_accumulated += time_since_last_update_ms; if (time_accumulated >= DISCHG_UPDATE_PERIOD_MS) { dropping_level = prev_level - htc_batt_info.rep.level; if((dropping_level == 1) || (dropping_level == 0)) { store_level = store_level - (2 - dropping_level); htc_batt_info.rep.level = htc_batt_info.rep.level - (2 - dropping_level); } time_accumulated = 0; pr_info("[BATT] remap: enter low temperature section, " "store_level:%d%%, dropping_level:%d%%, " "prev_level:%d%%, level:%d%%.\n" , store_level, prev_level, dropping_level , htc_batt_info.rep.level); } } else time_accumulated = 0; } if ((htc_batt_info.rep.level == 0) && (prev_level > 1)) { htc_batt_info.rep.level = 1; pr_info("[BATT] battery level forcely report %d%%" " since prev_level=%d%%\n", htc_batt_info.rep.level, prev_level); } } else { if (htc_batt_info.igauge && htc_batt_info.igauge->is_battery_full) { htc_batt_info.igauge->is_battery_full(&is_full); if (is_full != 0) { if (htc_batt_info.smooth_chg_full_delay_min && prev_level < 100) { if (is_level_change_time_reached(htc_batt_info.smooth_chg_full_delay_min * CHG_ONE_PERCENT_LIMIT_PERIOD_MS, pre_jiffies) == false) { htc_batt_info.rep.level = prev_level; } else { htc_batt_info.rep.level = prev_level + 1; } } else { htc_batt_info.rep.level = 100; } } else { if (prev_level > htc_batt_info.rep.level) { if (!htc_batt_info.rep.overload) { pr_info("[BATT] pre_level=%d, new_level=%d, " "level drop 
but overloading doesn't happen!\n", prev_level, htc_batt_info.rep.level); htc_batt_info.rep.level = prev_level; } } else if (99 < htc_batt_info.rep.level && prev_level < 100) htc_batt_info.rep.level = 99; else if (prev_level < htc_batt_info.rep.level) { if(time_since_last_update_ms > CHG_ONE_PERCENT_LIMIT_PERIOD_MS) htc_batt_info.rep.level = prev_level + 1; else htc_batt_info.rep.level = prev_level; if (htc_batt_info.rep.level > 100) htc_batt_info.rep.level = 100; } else { pr_info("[BATT] pre_level=%d, new_level=%d, " "level would use raw level!\n", prev_level, htc_batt_info.rep.level); } } } critical_low_enter = 0; allow_drop_one_percent_flag = false; } htc_batt_store_battery_ui_soc(htc_batt_info.rep.level); if (first) htc_batt_get_battery_ui_soc(&htc_batt_info.rep.level); if (htc_batt_info.rep.level != prev_level) pre_jiffies = jiffies; first = 0; } static void batt_update_limited_charge(void) { if (htc_batt_info.state & STATE_EARLY_SUSPEND) { if ((!(chg_limit_reason & HTC_BATT_CHG_LIMIT_BIT_THRML)) && htc_batt_info.rep.batt_temp > 450) { set_limit_charge_with_reason(true, HTC_BATT_CHG_LIMIT_BIT_THRML); } else if ((chg_limit_reason & HTC_BATT_CHG_LIMIT_BIT_THRML) && htc_batt_info.rep.batt_temp <= 430) { set_limit_charge_with_reason(false, HTC_BATT_CHG_LIMIT_BIT_THRML); } else { } } else { if ((!(chg_limit_reason & HTC_BATT_CHG_LIMIT_BIT_THRML)) && htc_batt_info.rep.batt_temp > 390) { set_limit_charge_with_reason(true, HTC_BATT_CHG_LIMIT_BIT_THRML); } else if ((chg_limit_reason & HTC_BATT_CHG_LIMIT_BIT_THRML) && htc_batt_info.rep.batt_temp <= 370) { set_limit_charge_with_reason(false, HTC_BATT_CHG_LIMIT_BIT_THRML); } else { } } } static void sw_safety_timer_check(unsigned long time_since_last_update_ms) { pr_info("%s: %lu ms", __func__, time_since_last_update_ms); if(latest_chg_src == HTC_PWR_SOURCE_TYPE_BATT) { sw_stimer_fault = 0; sw_stimer_counter = 0; } if(!htc_batt_info.rep.charging_enabled) sw_stimer_counter = 0; if((latest_chg_src == HTC_PWR_SOURCE_TYPE_AC) 
|| (latest_chg_src == HTC_PWR_SOURCE_TYPE_9VAC)) { /* cont. of sw_safety_timer_check() */
		pr_info("%s enter\n", __func__);
		if (sw_stimer_fault) {
			pr_info("%s safety timer expired\n", __func__);
			return;
		}
		sw_stimer_counter += time_since_last_update_ms;
		if (sw_stimer_counter >= HTC_SAFETY_TIME_16_HR_IN_MS) {
			pr_info("%s sw_stimer_counter expired, count:%lu ms",
					__func__, sw_stimer_counter);
			sw_stimer_fault = 1;
			sw_stimer_counter = 0;
		} else {
			pr_debug("%s sw_stimer_counter left: %lu ms", __func__,
					HTC_SAFETY_TIME_16_HR_IN_MS - sw_stimer_counter);
		}
	}
}

/*
 * update_htc_extension_state - recompute the htc_extension bitmask exported
 * to userspace: unknown-USB charger, under-rated AC charger, charger safety
 * timeout (HW or SW), and EOC-stop-at-full.
 */
void update_htc_extension_state(void)
{
	if (HTC_PWR_SOURCE_TYPE_UNKNOWN_USB ==
			htc_batt_info.rep.charging_source)
		htc_batt_info.htc_extension |= HTC_EXT_UNKNOWN_USB_CHARGER;
	else
		htc_batt_info.htc_extension &= ~HTC_EXT_UNKNOWN_USB_CHARGER;
	if (charger_under_rating &&
			HTC_PWR_SOURCE_TYPE_AC == htc_batt_info.rep.charging_source)
		htc_batt_info.htc_extension |= HTC_EXT_CHG_UNDER_RATING;
	else
		htc_batt_info.htc_extension &= ~HTC_EXT_CHG_UNDER_RATING;
	if (charger_safety_timeout || sw_stimer_fault)
		htc_batt_info.htc_extension |= HTC_EXT_CHG_SAFTY_TIMEOUT;
	else
		htc_batt_info.htc_extension &= ~HTC_EXT_CHG_SAFTY_TIMEOUT;
	if (batt_full_eoc_stop != 0)
		htc_batt_info.htc_extension |= HTC_EXT_CHG_FULL_EOC_STOP;
	else
		htc_batt_info.htc_extension &= ~HTC_EXT_CHG_FULL_EOC_STOP;
}

/*
 * batt_worker - the periodic battery state machine.  Pulls fresh data from
 * the gauge and charger, remaps the UI level, handles critical shutdown,
 * recomputes every charge-disable/power-source-disable reason bit, and
 * pushes the resulting enable state to the charger driver only when it
 * changed.  Runs on htc_batt_timer.batt_wq holding battery_lock (taken by
 * the scheduler of this work; released at the end here).
 */
static void batt_worker(struct work_struct *work)
{
	static int first = 1;			/* force a full push on first run */
	static int prev_pwrsrc_enabled = 1;
	static int prev_charging_enabled = 0;
	int charging_enabled = prev_charging_enabled;
	int pwrsrc_enabled = prev_pwrsrc_enabled;
	int prev_chg_src;
	unsigned long time_since_last_update_ms;
	unsigned long cur_jiffies;

	/* elapsed wall time since the previous pass (includes suspend time) */
	cur_jiffies = jiffies;
	time_since_last_update_ms = htc_batt_timer.total_time_ms +
			((cur_jiffies - htc_batt_timer.batt_system_jiffies) *
				MSEC_PER_SEC / HZ);
	BATT_LOG("%s: total_time since last batt update = %lu ms.",
			__func__, time_since_last_update_ms);
	htc_batt_timer.total_time_ms = 0;
	htc_batt_timer.batt_system_jiffies = cur_jiffies;

	/* re-arm the regular check timer */
	del_timer_sync(&htc_batt_timer.batt_timer);
	batt_set_check_timer(htc_batt_timer.time_out);
	htc_batt_timer.batt_alarm_status = 0;
#ifdef CONFIG_HTC_BATT_ALARM
	htc_batt_timer.batt_critical_alarm_counter = 0;
#endif

	/* refresh readings and remap the UI level */
	prev_chg_src = htc_batt_info.rep.charging_source;
	htc_batt_info.rep.charging_source = latest_chg_src;
	batt_update_info_from_gauge();
	batt_update_info_from_charger();
	batt_level_adjust(time_since_last_update_ms);

	/* forced shutdown on critical voltage: report 0% and power off later */
	if (critical_shutdown ||
			(htc_batt_info.force_shutdown_batt_vol &&
			htc_batt_info.rep.batt_vol <
				htc_batt_info.force_shutdown_batt_vol)) {
		BATT_LOG("critical shutdown (set level=0 to force shutdown)");
		htc_batt_info.rep.level = 0;
		critical_shutdown = 0;
		wake_lock(&batt_shutdown_wake_lock);
		schedule_delayed_work(&shutdown_work,
				msecs_to_jiffies(BATT_CRITICAL_VOL_SHUTDOWN_DELAY_MS));
	}

	/* re-arm the voltage alarm ladder when the cable is pulled */
	if (critical_alarm_level < 0 &&
			prev_chg_src > 0 &&
			htc_batt_info.rep.charging_source ==
				HTC_PWR_SOURCE_TYPE_BATT) {
		pr_info("[BATT] critical_alarm_level: %d -> %d\n",
				critical_alarm_level,
				htc_batt_info.critical_alarm_vol_cols - 1);
		critical_alarm_level = htc_batt_info.critical_alarm_vol_cols - 1;
		critical_alarm_level_set = critical_alarm_level + 1;
	}

	batt_update_limited_charge();
	batt_check_overload(time_since_last_update_ms);
	if (need_sw_stimer) {
		sw_safety_timer_check(time_since_last_update_ms);
	}
	pr_debug("[BATT] context_state=0x%x, suspend_highfreq_check_reason=0x%x\n",
			context_state, suspend_highfreq_check_reason);

	/* propagate a pending 5V boost-output change to the charger */
	if (htc_batt_info.icharger && htc_batt_info.icharger->enable_5v_output) {
		if (htc_ext_5v_output_old != htc_ext_5v_output_now) {
			htc_batt_info.icharger->enable_5v_output(htc_ext_5v_output_now);
			htc_ext_5v_output_old = htc_ext_5v_output_now;
		}
		pr_info("[BATT] enable_5v_output: %d\n", htc_ext_5v_output_now);
	}

	update_htc_extension_state();

	if (htc_batt_info.rep.charging_source > 0) {
		/* recompute each charge-disable reason bit independently */
		if (htc_batt_info.rep.batt_id == HTC_BATTERY_CELL_ID_UNKNOWN)
			chg_dis_reason |= HTC_BATT_CHG_DIS_BIT_ID;
		else
			chg_dis_reason &= ~HTC_BATT_CHG_DIS_BIT_ID;
		if (charger_safety_timeout || sw_stimer_fault)
			chg_dis_reason |= HTC_BATT_CHG_DIS_BIT_TMR;
		else
			chg_dis_reason &= ~HTC_BATT_CHG_DIS_BIT_TMR;
		if (charger_dis_temp_fault)
			chg_dis_reason |= HTC_BATT_CHG_DIS_BIT_TMP;
		else
			chg_dis_reason &= ~HTC_BATT_CHG_DIS_BIT_TMP;
		if (chg_dis_user_timer)
			chg_dis_reason |= HTC_BATT_CHG_DIS_BIT_USR_TMR;
		else
			chg_dis_reason &= ~HTC_BATT_CHG_DIS_BIT_USR_TMR;
		if (is_bounding_fully_charged_level()) {
			/* MFG bound reached: also cut the power source */
			chg_dis_reason |= HTC_BATT_CHG_DIS_BIT_MFG;
			pwrsrc_dis_reason |= HTC_BATT_PWRSRC_DIS_BIT_MFG;
		} else {
			chg_dis_reason &= ~HTC_BATT_CHG_DIS_BIT_MFG;
			pwrsrc_dis_reason &= ~HTC_BATT_PWRSRC_DIS_BIT_MFG;
		}
		if (is_bounding_fully_charged_level_dis_batt_chg())
			chg_dis_reason |= HTC_BATT_CHG_DIS_BIT_STOP_SWOLLEN;
		else
			chg_dis_reason &= ~HTC_BATT_CHG_DIS_BIT_STOP_SWOLLEN;
		if (htc_batt_info.rep.over_vchg)
			chg_dis_reason |= HTC_BATT_CHG_DIS_BIT_OVP;
		else
			chg_dis_reason &= ~HTC_BATT_CHG_DIS_BIT_OVP;

		/* fold the reason masks into the two enable decisions */
		if (pwrsrc_dis_reason)
			pwrsrc_enabled = 0;
		else
			pwrsrc_enabled = 1;
		if (chg_dis_reason & chg_dis_control_mask)
			charging_enabled = HTC_PWR_SOURCE_TYPE_BATT;
		else
			charging_enabled = htc_batt_info.rep.charging_source;
		if (chg_dis_reason & chg_dis_active_mask)
			htc_batt_info.rep.charging_enabled =
					HTC_PWR_SOURCE_TYPE_BATT;
		else
			htc_batt_info.rep.charging_enabled =
					htc_batt_info.rep.charging_source;

		pr_info("[BATT] prev_chg_src=%d, prev_chg_en=%d,"
				" chg_dis_reason/control/active=0x%x/0x%x/0x%x,"
				" chg_limit_reason=0x%x,"
				" pwrsrc_dis_reason=0x%x, prev_pwrsrc_enabled=%d,"
				" context_state=0x%x,"
				" htc_extension=0x%x\n",
				prev_chg_src, prev_charging_enabled,
				chg_dis_reason,
				chg_dis_reason & chg_dis_control_mask,
				chg_dis_reason & chg_dis_active_mask,
				chg_limit_reason,
				pwrsrc_dis_reason, prev_pwrsrc_enabled,
				context_state,
				htc_batt_info.htc_extension);

		/* only touch the charger when something actually changed */
		if (charging_enabled != prev_charging_enabled ||
				prev_chg_src != htc_batt_info.rep.charging_source ||
				first ||
				pwrsrc_enabled != prev_pwrsrc_enabled) {
			if (prev_chg_src != htc_batt_info.rep.charging_source ||
					first) {
				BATT_LOG("set_pwrsrc_and_charger_enable(%d, %d, %d)",
						htc_batt_info.rep.charging_source,
						charging_enabled, pwrsrc_enabled);
				if (htc_batt_info.icharger &&
						htc_batt_info.icharger->set_pwrsrc_and_charger_enable)
					htc_batt_info.icharger->set_pwrsrc_and_charger_enable(
							htc_batt_info.rep.charging_source,
							charging_enabled, pwrsrc_enabled);
			} else {
				if (pwrsrc_enabled != prev_pwrsrc_enabled) {
					BATT_LOG("set_pwrsrc_enable(%d)", pwrsrc_enabled);
					if (htc_batt_info.icharger &&
							htc_batt_info.icharger->set_pwrsrc_enable)
						htc_batt_info.icharger->set_pwrsrc_enable(
								pwrsrc_enabled);
				}
				if (charging_enabled != prev_charging_enabled) {
					BATT_LOG("set_charger_enable(%d)", charging_enabled);
					if (htc_batt_info.icharger &&
							htc_batt_info.icharger->set_charger_enable)
						htc_batt_info.icharger->set_charger_enable(
								charging_enabled);
				}
			}
		}
	} else {
		/* no cable: push an everything-off state once per transition */
		if (prev_chg_src != htc_batt_info.rep.charging_source || first) {
			chg_dis_reason = 0;
			charging_enabled = 0;
			pwrsrc_enabled = 0;
			BATT_LOG("set_pwrsrc_and_charger_enable(%d, %d, %d)",
					HTC_PWR_SOURCE_TYPE_BATT,
					charging_enabled, pwrsrc_enabled);
			if (htc_batt_info.icharger &&
					htc_batt_info.icharger->set_pwrsrc_and_charger_enable)
				htc_batt_info.icharger->set_pwrsrc_and_charger_enable(
						HTC_PWR_SOURCE_TYPE_BATT,
						charging_enabled, pwrsrc_enabled);
			htc_batt_info.rep.charging_enabled =
					htc_batt_info.rep.charging_source;
		}
	}
#ifdef CONFIG_DUTY_CYCLE_LIMIT
	batt_update_limited_charge_timer(charging_enabled);
#endif
	if (htc_batt_info.icharger) {
		htc_batt_info.icharger->dump_all();
	}

	/* notify userspace (power_supply uevent etc.) */
	htc_battery_core_update_changed();

	/* step the low-voltage alarm down the configured ladder */
	if (0 <= critical_alarm_level &&
			critical_alarm_level < critical_alarm_level_set) {
		critical_alarm_level_set = critical_alarm_level;
		pr_info("[BATT] set voltage alarm level=%d\n",
				critical_alarm_level);
		/* NOTE(review): set_lower_voltage_alarm_threshold is called
		 * without a NULL check while enable_lower_voltage_alarm is
		 * checked — presumably the gauge always provides the former;
		 * verify against the igauge implementations. */
		htc_batt_info.igauge->set_lower_voltage_alarm_threshold(
				htc_batt_info.critical_alarm_vol_ptr[critical_alarm_level]);
		if (htc_batt_info.igauge->enable_lower_voltage_alarm)
			htc_batt_info.igauge->enable_lower_voltage_alarm(1);
	}

	first = 0;
	prev_charging_enabled = charging_enabled;
	prev_pwrsrc_enabled = pwrsrc_enabled;

	wake_unlock(&htc_batt_timer.battery_lock);
	pr_info("[BATT] %s: done\n", __func__);
	return;
}

/*
 * htc_batt_ioctl - userspace (batt daemon) control interface on /dev/htc_batt.
 * Holds battery_lock for the duration of each command.
 * (Body continues on the next source line.)
 */
static long htc_batt_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = 0;

	wake_lock(&htc_batt_timer.battery_lock);

	switch (cmd) {
	case HTC_BATT_IOCTL_READ_SOURCE: {
		/* return the current charging source to userspace */
		if (copy_to_user((void __user *)arg,
				&htc_batt_info.rep.charging_source, sizeof(u32)))
			ret = -EFAULT;
		break;
	}
	case HTC_BATT_IOCTL_SET_BATT_ALARM: {
		u32 time_out = 0;
		if (copy_from_user(&time_out, (void *)arg, sizeof(u32))) {
			ret = -EFAULT;
			break;
		}
		htc_batt_timer.time_out = time_out;
		/* first alarm programming also kicks off the check timer */
		if (!htc_battery_initial) {
			htc_battery_initial = 1;
			batt_set_check_timer(htc_batt_timer.time_out);
		}
		break;
	}
	case HTC_BATT_IOCTL_GET_ADC_VREF: {
		if (copy_to_user((void __user *)arg, &htc_batt_info.adc_vref,
				sizeof(htc_batt_info.adc_vref))) {
			BATT_ERR("copy_to_user failed!");
			ret = -EFAULT;
		}
		break;
	}
	case HTC_BATT_IOCTL_GET_ADC_ALL: {
		if (copy_to_user((void __user *)arg,
				&htc_batt_info.adc_data,
				sizeof(struct battery_adc_reply))) {
			BATT_ERR("copy_to_user failed!");
			ret = -EFAULT;
		}
		break;
	}
	case HTC_BATT_IOCTL_CHARGER_CONTROL: {
		u32 charger_mode = 0;
		if (copy_from_user(&charger_mode, (void *)arg, sizeof(u32))) {
			ret = -EFAULT;
			break;
		}
		BATT_LOG("do charger control = %u", charger_mode);
		htc_battery_set_charging(charger_mode);
		break;
	}
	case HTC_BATT_IOCTL_UPDATE_BATT_INFO: {
		/* daemon pushes a whole battery_info_reply snapshot */
		mutex_lock(&htc_batt_info.info_lock);
		if (copy_from_user(&htc_batt_info.rep, (void *)arg,
				sizeof(struct battery_info_reply))) {
			BATT_ERR("copy_from_user failed!");
			ret = -EFAULT;
			mutex_unlock(&htc_batt_info.info_lock);
			break;
		}
		mutex_unlock(&htc_batt_info.info_lock);
		BATT_LOG("ioctl: battery level update: %u",
				htc_batt_info.rep.level);
#ifdef CONFIG_HTC_BATT_ALARM
		if (screen_state == 1) {
			if (battery_vol_alarm_mode != BATT_ALARM_CRITICAL_MODE)
				batt_set_voltage_alarm_mode(
						BATT_ALARM_CRITICAL_MODE);
		}
#endif
		htc_battery_core_update_changed();
		break;
	}
	case HTC_BATT_IOCTL_BATT_DEBUG_LOG:
		if
(copy_from_user(htc_batt_info.debug_log, (void *)arg, /* cont. of htc_batt_ioctl() */
				DEBUG_LOG_LENGTH)) {
			BATT_ERR("copy debug log from user failed!");
			ret = -EFAULT;
		}
		break;
	case HTC_BATT_IOCTL_SET_VOLTAGE_ALARM: {
#ifdef CONFIG_HTC_BATT_ALARM
		/* alarm_data is the file-scope instance in this config */
#else
		struct battery_vol_alarm alarm_data;
#endif
		if (copy_from_user(&alarm_data, (void *)arg,
				sizeof(struct battery_vol_alarm))) {
			BATT_ERR("user set batt alarm failed!");
			ret = -EFAULT;
			break;
		}
		htc_batt_timer.batt_alarm_status = 0;
		htc_batt_timer.batt_alarm_enabled = alarm_data.enable;
		BATT_LOG("Set lower threshold: %d, upper threshold: %d, "
				"Enabled:%u.", alarm_data.lower_threshold,
				alarm_data.upper_threshold, alarm_data.enable);
		break;
	}
	case HTC_BATT_IOCTL_SET_ALARM_TIMER_FLAG: {
		unsigned int flag;
		if (copy_from_user(&flag, (void *)arg, sizeof(unsigned int))) {
			BATT_ERR("Set timer type into alarm failed!");
			ret = -EFAULT;
			break;
		}
		htc_batt_timer.alarm_timer_flag = flag;
		BATT_LOG("Set alarm timer flag:%u", flag);
		break;
	}
	default:
		BATT_ERR("%s: no matched ioctl cmd", __func__);
		break;
	}

	wake_unlock(&htc_batt_timer.battery_lock);
	return ret;
}

/* Delayed work: power the device off after the critical-shutdown delay. */
static void shutdown_worker(struct work_struct *work)
{
	BATT_LOG("shutdown device");
	kernel_power_off();
	wake_unlock(&batt_shutdown_wake_lock);
}

/*
 * mbat_in_func - battery-removal (MBAT_IN) handling: power off immediately,
 * except on the listed LTE boards where the GPIO is re-sampled first to
 * filter false alarms.
 */
static void mbat_in_func(struct work_struct *work)
{
#if defined(CONFIG_MACH_RUBY) || defined(CONFIG_MACH_HOLIDAY) || defined(CONFIG_MACH_VIGOR)
#define LTE_GPIO_MBAT_IN (61)
	if (gpio_get_value(LTE_GPIO_MBAT_IN) == 0) {
		pr_info("re-enable MBAT_IN irq!! due to false alarm\n");
		enable_irq(MSM_GPIO_TO_INT(LTE_GPIO_MBAT_IN));
		return;
	}
#endif
	BATT_LOG("shut down device due to MBAT_IN interrupt");
	htc_battery_set_charging(0);
	machine_power_off();
}

#if 0
/* IRQ entry for MBAT_IN; currently compiled out with its request_irq. */
static irqreturn_t mbat_int_handler(int irq, void *data)
{
	struct htc_battery_platform_data *pdata = data;
	disable_irq_nosync(pdata->gpio_mbat_in);
	schedule_delayed_work(&mbat_in_struct, msecs_to_jiffies(50));
	return IRQ_HANDLED;
}
#endif

const struct file_operations htc_batt_fops = {
	.owner = THIS_MODULE,
	.open = htc_batt_open,
	.release = htc_batt_release,
	.unlocked_ioctl = htc_batt_ioctl,
};

static struct miscdevice htc_batt_device_node = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "htc_batt",
	.fops = &htc_batt_fops,
};

static void htc_batt_kobject_release(struct kobject *kobj)
{
	printk(KERN_ERR "htc_batt_kobject_release.\n");
	return;
}

static struct kobj_type htc_batt_ktype = {
	.release = htc_batt_kobject_release,
};

#if defined(CONFIG_FB)
/*
 * fb_notifier_callback - track screen blank/unblank via the framebuffer
 * notifier and mirror it into STATE_EARLY_SUSPEND; each transition schedules
 * a battery info update.
 */
static int fb_notifier_callback(struct notifier_block *self,
		unsigned long event, void *data)
{
	struct fb_event *evdata = data;
	int *blank;

	if (evdata && evdata->data && event == FB_EVENT_BLANK) {
		blank = evdata->data;
		switch (*blank) {
		case FB_BLANK_UNBLANK:
			htc_batt_info.state &= ~STATE_EARLY_SUSPEND;
			BATT_LOG("%s-> display is On", __func__);
			htc_batt_schedule_batt_info_update();
			break;
		case FB_BLANK_POWERDOWN:
		case FB_BLANK_HSYNC_SUSPEND:
		case FB_BLANK_VSYNC_SUSPEND:
		case FB_BLANK_NORMAL:
			htc_batt_info.state |= STATE_EARLY_SUSPEND;
			BATT_LOG("%s-> display is Off", __func__);
			htc_batt_schedule_batt_info_update();
			break;
		}
	}
	return 0;
}
#elif defined(CONFIG_HAS_EARLYSUSPEND)
/* Legacy early-suspend equivalents of the FB notifier above. */
static void htc_battery_early_suspend(struct early_suspend *h)
{
	htc_batt_info.state |= STATE_EARLY_SUSPEND;
#ifdef CONFIG_HTC_BATT_ALARM
	screen_state = 0;
	batt_set_voltage_alarm_mode(BATT_ALARM_DISABLE_MODE);
#endif
	htc_batt_schedule_batt_info_update();
	return;
}

static void htc_battery_late_resume(struct early_suspend *h)
{
	htc_batt_info.state &= ~STATE_EARLY_SUSPEND;
#ifdef CONFIG_HTC_BATT_ALARM
	screen_state = 1;
	batt_set_voltage_alarm_mode(BATT_ALARM_CRITICAL_MODE);
#endif
	htc_batt_schedule_batt_info_update();
}
#endif

#define CHECH_TIME_TOLERANCE_MS	(1000)

/*
 * htc_battery_prepare - PM prepare hook.  Accumulates elapsed time, records
 * the suspend timestamp, and programs a wakeup alarm so the battery is still
 * checked while the system sleeps.  Returns -EBUSY (vetoing suspend) when a
 * check is already due.
 */
static int htc_battery_prepare(struct device *dev)
{
	ktime_t interval;
	ktime_t slack = ktime_set(0, 0);
	ktime_t next_alarm;
	struct timespec xtime;
	unsigned long cur_jiffies;
	s64 next_alarm_sec = 0;
	int check_time = 0;

	htc_batt_info.state |= STATE_PREPARE;
	xtime = CURRENT_TIME;
	cur_jiffies = jiffies;
	htc_batt_timer.total_time_ms += (cur_jiffies -
			htc_batt_timer.batt_system_jiffies) * MSEC_PER_SEC / HZ;
	htc_batt_timer.batt_system_jiffies = cur_jiffies;
	htc_batt_timer.batt_suspend_ms = xtime.tv_sec * MSEC_PER_SEC +
					xtime.tv_nsec / NSEC_PER_MSEC;

	/* shorter interval while something needs high-frequency checking */
	if (suspend_highfreq_check_reason)
		check_time = BATT_SUSPEND_HIGHFREQ_CHECK_TIME;
	else
		check_time = BATT_SUSPEND_CHECK_TIME;

	interval = ktime_set(check_time - htc_batt_timer.total_time_ms / 1000, 0);
	next_alarm_sec = div_s64(interval.tv64, NSEC_PER_SEC);

	/* already (nearly) due: run batt_work now and veto this suspend */
	if (next_alarm_sec <= 1) {
		BATT_LOG("%s: passing time:%lu ms, trigger batt_work immediately."
				"(suspend_highfreq_check_reason=0x%x)", __func__,
				htc_batt_timer.total_time_ms,
				suspend_highfreq_check_reason);
		htc_batt_schedule_batt_info_update();
		return -EBUSY;
	}

	BATT_LOG("%s: passing time:%lu ms, alarm will be triggered after %lld sec."
			"(suspend_highfreq_check_reason=0x%x, htc_batt_info.state=0x%x)",
			__func__, htc_batt_timer.total_time_ms, next_alarm_sec,
			suspend_highfreq_check_reason, htc_batt_info.state);

	next_alarm = ktime_add(alarm_get_elapsed_realtime(), interval);
	alarm_start_range(&htc_batt_timer.batt_check_wakeup_alarm,
			next_alarm, ktime_add(next_alarm, slack));
	return 0;
}

/*
 * htc_battery_complete - PM complete hook.  Adds the time spent suspended to
 * the accumulated total and triggers batt_work if a check is overdue or was
 * pending when suspend started.
 */
static void htc_battery_complete(struct device *dev)
{
	unsigned long resume_ms;
	unsigned long sr_time_period_ms;
	unsigned long check_time;
	struct timespec xtime;

	htc_batt_info.state &= ~STATE_PREPARE;
	xtime = CURRENT_TIME;
	htc_batt_timer.batt_system_jiffies = jiffies;
	resume_ms = xtime.tv_sec * MSEC_PER_SEC + xtime.tv_nsec / NSEC_PER_MSEC;
	sr_time_period_ms = resume_ms - htc_batt_timer.batt_suspend_ms;
	htc_batt_timer.total_time_ms += sr_time_period_ms;

	BATT_LOG("%s: sr_time_period=%lu ms; total passing time=%lu ms."
			"htc_batt_info.state=0x%x",
			__func__, sr_time_period_ms, htc_batt_timer.total_time_ms,
			htc_batt_info.state);

	if (suspend_highfreq_check_reason)
		check_time = BATT_SUSPEND_HIGHFREQ_CHECK_TIME * MSEC_PER_SEC;
	else
		check_time = BATT_SUSPEND_CHECK_TIME * MSEC_PER_SEC;
	check_time -= CHECH_TIME_TOLERANCE_MS;

	if (htc_batt_timer.total_time_ms >= check_time ||
			(htc_batt_info.state & STATE_WORKQUEUE_PENDING)) {
		htc_batt_info.state &= ~STATE_WORKQUEUE_PENDING;
		BATT_LOG("trigger batt_work while resume."
				"(suspend_highfreq_check_reason=0x%x, "
				"htc_batt_info.state=0x%x)",
				suspend_highfreq_check_reason, htc_batt_info.state);
		htc_batt_schedule_batt_info_update();
	}
	return;
}

static struct dev_pm_ops htc_battery_8960_pm_ops = {
	.prepare = htc_battery_prepare,
	.complete = htc_battery_complete,
};

#if defined(CONFIG_FB)
/* Deferred FB-notifier registration (FB core may not be up at probe time). */
static void htc_battery_fb_register(struct work_struct *work)
{
	int ret = 0;
	BATT_LOG("%s in", __func__);
	htc_batt_info.fb_notif.notifier_call = fb_notifier_callback;
	ret = fb_register_client(&htc_batt_info.fb_notif);
	if (ret)
		BATT_ERR("[warning]:Unable to register fb_notifier: %d\n", ret);
}
#endif

/*
 * htc_battery_probe - platform probe: wires the htc_battery_core callback
 * table, copies platform data into htc_batt_info, and sets up work items,
 * timers, alarms, the misc device and sysfs kobjects.
 * (Body continues on the next source line.)
 */
static int htc_battery_probe(struct platform_device *pdev)
{
	int i, rc = 0;
	struct htc_battery_platform_data *pdata = pdev->dev.platform_data;
	struct htc_battery_core *htc_battery_core_ptr;

	pr_info("[BATT] %s() in\n", __func__);

	htc_battery_core_ptr = kmalloc(sizeof(struct htc_battery_core),
					GFP_KERNEL);
	if (!htc_battery_core_ptr) {
		BATT_ERR("%s: kmalloc failed for htc_battery_core_ptr.",
				__func__);
		return -ENOMEM;
	}
	memset(htc_battery_core_ptr, 0, sizeof(struct htc_battery_core));

	INIT_DELAYED_WORK(&mbat_in_struct, mbat_in_func);
	INIT_DELAYED_WORK(&shutdown_work, shutdown_worker);
#if 0
	if (pdata->gpio_mbat_in_trigger_level == MBAT_IN_HIGH_TRIGGER)
		rc = request_irq(pdata->gpio_mbat_in,
				mbat_int_handler, IRQF_TRIGGER_HIGH,
				"mbat_in", pdata);
	else if (pdata->gpio_mbat_in_trigger_level == MBAT_IN_LOW_TRIGGER)
		rc = request_irq(pdata->gpio_mbat_in,
				mbat_int_handler, IRQF_TRIGGER_LOW,
				"mbat_in", pdata);
	if (rc)
		BATT_ERR("request mbat_in irq failed!");
	else
		set_irq_wake(pdata->gpio_mbat_in, 1);
#endif

	/* hand our callbacks to the shared htc_battery_core layer */
	htc_battery_core_ptr->func_get_batt_rt_attr = htc_battery_get_rt_attr;
	htc_battery_core_ptr->func_show_batt_attr = htc_battery_show_batt_attr;
	htc_battery_core_ptr->func_show_cc_attr = htc_battery_show_cc_attr;
	htc_battery_core_ptr->func_show_htc_extension_attr =
					htc_battery_show_htc_extension_attr;
	htc_battery_core_ptr->func_get_battery_info =
htc_batt_get_battery_info; htc_battery_core_ptr->func_charger_control = htc_batt_charger_control; htc_battery_core_ptr->func_set_full_level = htc_batt_set_full_level; htc_battery_core_ptr->func_set_max_input_current = htc_batt_set_max_input_current; htc_battery_core_ptr->func_set_full_level_dis_batt_chg = htc_batt_set_full_level_dis_batt_chg; htc_battery_core_ptr->func_context_event_handler = htc_batt_context_event_handler; htc_battery_core_ptr->func_notify_pnpmgr_charging_enabled = pdata->notify_pnpmgr_charging_enabled; htc_battery_core_ptr->func_get_chg_status = htc_batt_get_chg_status; htc_battery_core_ptr->func_set_chg_property = htc_batt_set_chg_property; htc_battery_core_ptr->func_trigger_store_battery_data = htc_batt_trigger_store_battery_data; htc_battery_core_register(&pdev->dev, htc_battery_core_ptr); htc_batt_info.device_id = pdev->id; #if 0 htc_batt_info.guage_driver = pdata->guage_driver; htc_batt_info.charger = pdata->charger; #endif htc_batt_info.is_open = 0; for (i = 0; i < ADC_REPLY_ARRAY_SIZE; i++) htc_batt_info.adc_vref[i] = 66; htc_batt_info.critical_low_voltage_mv = pdata->critical_low_voltage_mv; if (pdata->critical_alarm_vol_ptr) { htc_batt_info.critical_alarm_vol_ptr = pdata->critical_alarm_vol_ptr; htc_batt_info.critical_alarm_vol_cols = pdata->critical_alarm_vol_cols; critical_alarm_level_set = htc_batt_info.critical_alarm_vol_cols - 1; critical_alarm_level = critical_alarm_level_set; } if (pdata->force_shutdown_batt_vol) htc_batt_info.force_shutdown_batt_vol = pdata->force_shutdown_batt_vol; htc_batt_info.overload_vol_thr_mv = pdata->overload_vol_thr_mv; htc_batt_info.overload_curr_thr_ma = pdata->overload_curr_thr_ma; htc_batt_info.smooth_chg_full_delay_min = pdata->smooth_chg_full_delay_min; htc_batt_info.decreased_batt_level_check = pdata->decreased_batt_level_check; chg_limit_active_mask = pdata->chg_limit_active_mask; #ifdef CONFIG_DUTY_CYCLE_LIMIT chg_limit_timer_sub_mask = pdata->chg_limit_timer_sub_mask; #endif if 
(pdata->igauge.name) htc_batt_info.igauge = &pdata->igauge; if (pdata->icharger.name) htc_batt_info.icharger = &pdata->icharger; #if 0 htc_batt_info.mpp_config = &pdata->mpp_data; #endif INIT_WORK(&htc_batt_timer.batt_work, batt_worker); INIT_DELAYED_WORK(&htc_batt_timer.unknown_usb_detect_work, unknown_usb_detect_worker); init_timer(&htc_batt_timer.batt_timer); htc_batt_timer.batt_timer.function = batt_regular_timer_handler; alarm_init(&htc_batt_timer.batt_check_wakeup_alarm, ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, batt_check_alarm_handler); htc_batt_timer.batt_wq = create_singlethread_workqueue("batt_timer"); #ifdef CONFIG_DUTY_CYCLE_LIMIT INIT_DELAYED_WORK(&limit_chg_timer_work, limit_chg_timer_worker); #endif rc = misc_register(&htc_batt_device_node); if (rc) { BATT_ERR("Unable to register misc device %d", MISC_DYNAMIC_MINOR); goto fail; } htc_batt_kset = kset_create_and_add("event_to_daemon", NULL, kobject_get(&htc_batt_device_node.this_device->kobj)); if (!htc_batt_kset) { rc = -ENOMEM; goto fail; } htc_batt_info.batt_timer_kobj.kset = htc_batt_kset; rc = kobject_init_and_add(&htc_batt_info.batt_timer_kobj, &htc_batt_ktype, NULL, "htc_batt_timer"); if (rc) { BATT_ERR("init kobject htc_batt_timer failed."); kobject_put(&htc_batt_info.batt_timer_kobj); goto fail; } htc_batt_info.batt_cable_kobj.kset = htc_batt_kset; rc = kobject_init_and_add(&htc_batt_info.batt_cable_kobj, &htc_batt_ktype, NULL, "htc_cable_detect"); if (rc) { BATT_ERR("init kobject htc_cable_timer failed."); kobject_put(&htc_batt_info.batt_timer_kobj); goto fail; } if (htc_batt_info.icharger && htc_batt_info.icharger->charger_change_notifier_register) htc_batt_info.icharger->charger_change_notifier_register( &cable_status_notifier); if (htc_batt_info.icharger && (htc_batt_info.icharger->sw_safetytimer) && !(get_kernel_flag() & KERNEL_FLAG_KEEP_CHARG_ON) && !(get_kernel_flag() & KERNEL_FLAG_PA_RECHARG_TEST)) { need_sw_stimer = 1; chg_dis_active_mask |= HTC_BATT_CHG_DIS_BIT_TMR; 
		/* Mirror the timer-disable bit into the control mask so the
		 * same bit can be driven by control paths as well. */
		chg_dis_control_mask |= HTC_BATT_CHG_DIS_BIT_TMR;
	}

	/* Factory/test boot flags disable all charge-current limiting. */
	if((get_kernel_flag() & KERNEL_FLAG_KEEP_CHARG_ON) ||
		(get_kernel_flag() & KERNEL_FLAG_PA_RECHARG_TEST)) {
		chg_limit_active_mask = 0;
	}
#ifdef CONFIG_DUTY_CYCLE_LIMIT
	/* Only keep sub-mask bits that are still active overall. */
	chg_limit_timer_sub_mask &= chg_limit_active_mask;
#endif

#if defined(CONFIG_FB)
	/* Register the framebuffer notifier from a private workqueue,
	 * delayed by 30s (presumably to wait for the display driver to
	 * come up -- TODO confirm). */
	htc_batt_info.batt_fb_wq = create_singlethread_workqueue("HTC_BATTERY_FB");
	if (!htc_batt_info.batt_fb_wq) {
		BATT_ERR("allocate batt_fb_wq failed\n");
		rc = -ENOMEM;
		goto fail;
	}
	INIT_DELAYED_WORK(&htc_batt_info.work_fb, htc_battery_fb_register);
	queue_delayed_work(htc_batt_info.batt_fb_wq, &htc_batt_info.work_fb,
		msecs_to_jiffies(30000));
#elif defined(CONFIG_HAS_EARLYSUSPEND)
	/* Fall back to earlysuspend callbacks when CONFIG_FB is absent. */
	early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN - 1;
	early_suspend.suspend = htc_battery_early_suspend;
	early_suspend.resume = htc_battery_late_resume;
	register_early_suspend(&early_suspend);
#endif

	/* Arm the periodic battery-status poll. */
	htc_batt_timer.time_out = BATT_TIMER_UPDATE_TIME;
	batt_set_check_timer(htc_batt_timer.time_out);
	BATT_LOG("htc_battery_probe(): finish");
fail:
	/* NOTE(review): the success path above falls through into this
	 * label, so htc_battery_core_ptr is freed on success as well as
	 * on error.  Presumably htc_battery_core_register() copies what
	 * it needs from the struct -- verify against that function. */
	kfree(htc_battery_core_ptr);
	return rc;
}

static struct platform_driver htc_battery_driver = {
	.probe	= htc_battery_probe,
	.driver	= {
		.name	= "htc_battery",
		.owner	= THIS_MODULE,
		.pm = &htc_battery_8960_pm_ops,
	},
};

/*
 * Module init: reset module-global state, create the locks used by the
 * driver, register the platform driver, then seed htc_batt_info.rep with
 * placeholder readings used until the first real battery update arrives.
 */
static int __init htc_battery_init(void)
{
	/* Reset module-global bookkeeping flags. */
	htc_battery_initial = 0;
	htc_ext_5v_output_old = 0;
	htc_ext_5v_output_now = 0;
	htc_full_level_flag = 0;
	htc_batt_info.force_shutdown_batt_vol = 0;

	/* Locks and wakelocks must exist before the driver registers,
	 * since probe/IRQ paths may take them immediately. */
	spin_lock_init(&htc_batt_info.batt_lock);
	wake_lock_init(&htc_batt_info.vbus_wake_lock, WAKE_LOCK_SUSPEND,
			"vbus_present");
	wake_lock_init(&htc_batt_timer.battery_lock, WAKE_LOCK_SUSPEND,
			"htc_battery_8960");
	wake_lock_init(&htc_batt_timer.unknown_usb_detect_lock,
			WAKE_LOCK_SUSPEND, "unknown_usb_detect");
	wake_lock_init(&voltage_alarm_wake_lock, WAKE_LOCK_SUSPEND,
			"htc_voltage_alarm");
	wake_lock_init(&batt_shutdown_wake_lock, WAKE_LOCK_SUSPEND,
			"batt_shutdown");
	mutex_init(&htc_batt_info.info_lock);
	mutex_init(&htc_batt_timer.schedule_lock);
	mutex_init(&cable_notifier_lock);
	mutex_init(&chg_limit_lock);
	mutex_init(&context_event_handler_lock);
#ifdef CONFIG_HTC_BATT_ALARM
	mutex_init(&batt_set_alarm_lock);
#endif
	platform_driver_register(&htc_battery_driver);

	/* Placeholder readings reported before the first real update. */
	htc_batt_info.rep.batt_vol = 3700;
	htc_batt_info.rep.batt_id = 1;
	htc_batt_info.rep.batt_temp = 250;
	htc_batt_info.rep.level = 33;
	htc_batt_info.rep.level_raw = 33;
	htc_batt_info.rep.full_bat = 1579999;
	htc_batt_info.rep.full_level = 100;
	htc_batt_info.rep.batt_state = 0;
	htc_batt_info.rep.cable_ready = 0;
	htc_batt_info.rep.temp_fault = -1;
	htc_batt_info.rep.overload = 0;
	htc_batt_timer.total_time_ms = 0;
	htc_batt_timer.batt_system_jiffies = jiffies;
	htc_batt_timer.batt_alarm_status = 0;
	htc_batt_timer.alarm_timer_flag = 0;
#ifdef CONFIG_HTC_BATT_ALARM
	/* Default voltage-alarm window (mV): normal mode, screen on. */
	battery_vol_alarm_mode = BATT_ALARM_NORMAL_MODE;
	screen_state = 1;
	alarm_data.lower_threshold = 2800;
	alarm_data.upper_threshold = 4400;
#endif
	return 0;
}

module_init(htc_battery_init);
MODULE_DESCRIPTION("HTC Battery Driver");
MODULE_LICENSE("GPL");
gpl-2.0
ysleu/RTL8685
uClinux-dist/user/squid/src/HttpMsg.c
2
3859
/*
 * $Id: HttpMsg.c,v 1.1.1.1 2003/08/18 05:40:23 kaohj Exp $
 *
 * DEBUG: section 74    HTTP Message
 * AUTHOR: Alex Rousskov
 *
 * SQUID Internet Object Cache  http://squid.nlanr.net/Squid/
 * ----------------------------------------------------------
 *
 *  Squid is the result of efforts by numerous individuals from the
 *  Internet community.  Development is led by Duane Wessels of the
 *  National Laboratory for Applied Network Research and funded by the
 *  National Science Foundation.  Squid is Copyrighted (C) 1998 by
 *  the Regents of the University of California.  Please see the
 *  COPYRIGHT file for full details.  Squid incorporates software
 *  developed and/or copyrighted by other sources.  Please see the
 *  CREDITS file for full details.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid.h"

/*
 * Locate the header section of an HTTP message.
 *
 * On success, sets *blk_start/*blk_end to bracket the headers (blk_end
 * points just past the newline terminating the headers), advances
 * *parse_start past the header block, and returns 1.  Returns 0 when
 * the end of headers has not been seen yet and parse_start does not
 * begin with a bare CR/LF (i.e. headers are incomplete).
 */
/* find end of headers */
int
httpMsgIsolateHeaders(const char **parse_start, const char **blk_start, const char **blk_end)
{
    /*
     * parse_start points to the first line of HTTP message *headers*,
     * not including the request or status lines
     */
    size_t l = strlen(*parse_start);
    size_t end = headersEnd(*parse_start, l);
    int nnl;
    if (end) {
	*blk_start = *parse_start;
	*blk_end = *parse_start + end - 1;
	/*
	 * leave blk_end pointing to the first character after the
	 * first newline which terminates the headers
	 */
	assert(**blk_end == '\n');
	/* step back over any CRs immediately preceding that newline */
	while (*(*blk_end - 1) == '\r')
	    (*blk_end)--;
	assert(*(*blk_end - 1) == '\n');
	*parse_start += end;
	return 1;
    }
    /*
     * If we didn't find the end of headers, and parse_start does
     * NOT point to a CR or NL character, then return failure
     */
    if (**parse_start != '\r' && **parse_start != '\n')
	return 0;		/* failure */
    /*
     * If we didn't find the end of headers, and parse_start does point
     * to an empty line, then we have empty headers.  Skip all CR and
     * NL characters up to the first NL.  Leave parse_start pointing at
     * the first character after the first NL.
     */
    *blk_start = *parse_start;
    *blk_end = *blk_start;
    for (nnl = 0; nnl == 0; (*parse_start)++) {
	if (**parse_start == '\r')
	    (void) 0;		/* swallow CRs before the terminating NL */
	else if (**parse_start == '\n')
	    nnl++;
	else
	    break;
    }
    return 1;
}

/*
 * returns true if connection should be "persistent"
 * after processing this message
 *
 * Note: http_ver is compared as a float ("http_ver >= 1.1"), so any
 * HTTP/1.1-or-later version defaults to persistent unless the message
 * carries a "Connection: close" directive.
 */
int
httpMsgIsPersistent(float http_ver, const HttpHeader * hdr)
{
    if (http_ver >= 1.1) {
	/*
	 * for modern versions of HTTP: persistent unless there is
	 * a "Connection: close" header.
	 */
	return !httpHeaderHasConnDir(hdr, "close");
    } else {
	/*
	 * Persistent connections in Netscape 3.x are allegedly broken,
	 * return false if it is a browser connection.  If there is a
	 * VIA header, then we assume this is NOT a browser connection.
	 */
	const char *agent = httpHeaderGetStr(hdr, HDR_USER_AGENT);
	if (agent && !httpHeaderHas(hdr, HDR_VIA)) {
	    if (!strncasecmp(agent, "Mozilla/3.", 10))
		return 0;
	    if (!strncasecmp(agent, "Netscape/3.", 11))
		return 0;
	}
	/* for old versions of HTTP: persistent if has "keep-alive" */
	return httpHeaderHasConnDir(hdr, "keep-alive");
    }
}
gpl-2.0
jmztaylor/android_kernel_amazon_ariel
drivers/gpu/mt8135/rgx_1.3_2876724/services/server/common/devicemem_heapcfg.c
2
4699
/*************************************************************************/ /*! @File devicemem_heapcfg.c @Title Temporary Device Memory 2 stuff @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved @Description Device memory management @License Dual MIT/GPLv2 The contents of this file are subject to the MIT license as set out below. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. Alternatively, the contents of this file may be used under the terms of the GNU General Public License Version 2 ("GPL") in which case the provisions of GPL are applicable instead of those above. If you wish to allow use of your version of this file only under the terms of GPL, and not to allow others to use your version of this file under the terms of the MIT license, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by GPL as set out in the file called "GPL-COPYING" included in this distribution. If you do not delete the provisions above, a recipient may use your version of this file under the terms of either the MIT license or GPL. This License is also included in this distribution in the file called "MIT-COPYING". 
EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /***************************************************************************/ /* our exported API */ #include "devicemem_heapcfg.h" #include "device.h" #include "img_types.h" #include "pvr_debug.h" #include "pvrsrv_error.h" #include "osfunc.h" PVRSRV_ERROR HeapCfgHeapConfigCount( const PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 *puiNumHeapConfigsOut ) { *puiNumHeapConfigsOut = psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs; return PVRSRV_OK; } PVRSRV_ERROR HeapCfgHeapCount( const PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 uiHeapConfigIndex, IMG_UINT32 *puiNumHeapsOut ) { if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) { return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; } *puiNumHeapsOut = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps; return PVRSRV_OK; } PVRSRV_ERROR HeapCfgHeapConfigName( const PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 uiHeapConfigIndex, IMG_UINT32 uiHeapConfigNameBufSz, IMG_CHAR *pszHeapConfigNameOut ) { if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) { return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; } OSSNPrintf(pszHeapConfigNameOut, uiHeapConfigNameBufSz, "%s", psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].pszName); return PVRSRV_OK; } PVRSRV_ERROR HeapCfgHeapDetails( const PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 uiHeapConfigIndex, IMG_UINT32 uiHeapIndex, IMG_UINT32 
uiHeapNameBufSz, IMG_CHAR *pszHeapNameOut, IMG_DEV_VIRTADDR *psDevVAddrBaseOut, IMG_DEVMEM_SIZE_T *puiHeapLengthOut, IMG_UINT32 *puiLog2DataPageSizeOut ) { DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint; if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) { return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; } if (uiHeapIndex >= psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps) { return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX; } psHeapBlueprint = &psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].psHeapBlueprintArray[uiHeapIndex]; OSSNPrintf(pszHeapNameOut, uiHeapNameBufSz, "%s", psHeapBlueprint->pszName); *psDevVAddrBaseOut = psHeapBlueprint->sHeapBaseAddr; *puiHeapLengthOut = psHeapBlueprint->uiHeapLength; *puiLog2DataPageSizeOut = psHeapBlueprint->uiLog2DataPageSize; return PVRSRV_OK; }
gpl-2.0
Ljinod/SIPD
src/client/client.c
2
1104
/**
 * @file client.c
 * @brief The client daemon.
 * @author Loudet Julien <loudet.julien@gmail.com>
 * @author Maire Stéphane <stephane.maire@telecom-paristech.org>
 * @author Sabbagh Cyril <cyril.sabbagh@telecom-paristech.org>
 * @version 1.1
 * @date 2015-09
 *
 * @details (last edited by Loudet Julien - 2015/09)
 * The client daemon can be called through the command line through
 * "sipdcd" - SIPD Client Daemon - and accepts the following
 * arguments:
 * TODO create the sipdcd
 * TODO parse the arguments...!
 */

#include "../core/network/connection.h"
#include "../core/configuration/configuration.h"
#include "../core/beans/my_info.h"
#include "../core/api/api.h"

/* Client identity, filled in once at startup and shared with the API. */
MyInfo_t *my_info;

/**
 * Entry point: load the user configuration, then exercise the file API.
 * Command-line arguments are accepted but not parsed yet (see TODOs in
 * the file header).
 */
int main(int argc, char **argv)
{
    /* Establish our identity from the user's configuration file. */
    my_info = ask_user_configuration_file();

    /* Exercise the tcell file API; the store/list calls below are kept
     * as a reference for the other available operations. */
    // store_file(my_info, "/home/julien/dev/OpenSSL/music.flac");
    //list_files(my_info);
    read_file(my_info, "julien-music.flac-201592722561");

    return 0;
}
gpl-2.0
montonero/OpenJK
codeJK2/icarus/Sequencer.cpp
2
49995
/*
This file is part of Jedi Knight 2.

    Jedi Knight 2 is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 2 of the License, or
    (at your option) any later version.

    Jedi Knight 2 is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Jedi Knight 2. If not, see <http://www.gnu.org/licenses/>.
*/
// Copyright 2001-2013 Raven Software

// Script Command Sequencer
//
// -- jweier

// this include must remain at the top of every Icarus CPP file
#include "icarus.h"

#include "g_headers.h"

#include "g_shared.h"

#include "assert.h"

// Sequencer

// Constructor: zero out command count and all current-stream/sequence/
// group/else-tracking state.
CSequencer::CSequencer( void )
{
	m_numCommands = 0;

	m_curStream = NULL;
	m_curSequence = NULL;

	m_elseValid = 0;
	m_elseOwner = NULL;

	m_curGroup = NULL;
}

// Destructor: release all owned sequences and streams.
CSequencer::~CSequencer( void )
{
	Free();	//Safe even if already freed
}

/*
========================
Create

Static creation function
========================
*/
CSequencer *CSequencer::Create ( void )
{
	CSequencer *sequencer = new CSequencer;
	return sequencer;
}

/*
========================
Init

Initializes the sequencer

Stores the owning entity ID, the interface-export table, the task
manager and the owning ICARUS instance; always returns SEQ_OK.
========================
*/
int CSequencer::Init( int ownerID, interface_export_t *ie, CTaskManager *taskManager, ICARUS_Instance *iICARUS )
{
	m_ownerID = ownerID;
	m_owner = iICARUS;
	m_taskManager = taskManager;
	m_ie = ie;

	return SEQ_OK;
}

/*
========================
Free

Releases all resources and re-inits the sequencer

Deletes every owned sequence via the owning ICARUS instance, clears the
sequence list/map and task-sequence map, then tears down any parsing
streams still alive in m_streamsCreated.
========================
*/
int CSequencer::Free( void )
{
	sequence_l::iterator sli;

	//Flush the sequences
	for ( sli = m_sequences.begin(); sli != m_sequences.end(); sli++ )
	{
		m_owner->DeleteSequence( (*sli) );
	}

	m_sequences.clear();
	m_sequenceMap.clear();
	m_taskSequences.clear();

	//Clean up any other info
	m_numCommands = 0;
	m_curSequence = NULL;

	// DeleteStream() removes each stream from m_streamsCreated, so this
	// loop drains the container from the back until it is empty.
	bstream_t *streamToDel;
	while(!m_streamsCreated.empty())
	{
		streamToDel = m_streamsCreated.back();
		DeleteStream(streamToDel);
	}

	return SEQ_OK;
}

/*
-------------------------
Flush

Deletes every sequence except `owner`, its children, and any sequence
still flagged SQ_PENDING or SQ_TASK, then makes `owner` the root
(no parent, no return sequence).
-------------------------
*/
int CSequencer::Flush( CSequence *owner )
{
	if ( owner == NULL )
		return SEQ_FAILED;

	Recall();

	sequence_l::iterator sli;

	//Flush the sequences
	for ( sli = m_sequences.begin(); sli != m_sequences.end(); )
	{
		// Keep the owner, its descendants, and pending/task sequences
		if ( ( (*sli) == owner ) || ( owner->HasChild( (*sli) ) ) || ( (*sli)->HasFlag( SQ_PENDING ) ) || ( (*sli)->HasFlag( SQ_TASK ) ) )
		{
			sli++;
			continue;
		}

		//Remove it from the map
		m_sequenceMap.erase( (*sli)->GetID() );

		//Delete it, and remove all references
		RemoveSequence( (*sli) );
		m_owner->DeleteSequence( (*sli) );

		//Delete from the sequence list and move on
		sli = m_sequences.erase( sli );
	}

	//Make sure this owner knows it's now the root sequence
	owner->SetParent( NULL );
	owner->SetReturn( NULL );

	return SEQ_OK;
}

/*
========================
AddStream

Creates a stream for parsing

The new stream links back to the previously current stream via `last`,
and is tracked in m_streamsCreated so Free() can reclaim it.
========================
*/
bstream_t *CSequencer::AddStream( void )
{
	bstream_t *stream;

	stream = new bstream_t;	//deleted in Route()
	stream->stream = new CBlockStream;	//deleted in Route()
	stream->last = m_curStream;

	m_streamsCreated.push_back(stream);

	return stream;
}

/*
========================
DeleteStream

Deletes parsing stream

Unregisters the stream from m_streamsCreated (if present), frees its
block stream, and deletes both objects.
========================
*/
void CSequencer::DeleteStream( bstream_t *bstream )
{
	vector<bstream_t*>::iterator finder = find(m_streamsCreated.begin(), m_streamsCreated.end(), bstream);
	if(finder != m_streamsCreated.end())
	{
		m_streamsCreated.erase(finder);
	}

	bstream->stream->Free();
	delete bstream->stream;
	delete bstream;

	// NOTE(review): `bstream` is passed by value, so this assignment
	// only clears the local copy -- it does not null the caller's
	// pointer.
	bstream = NULL;
}

/*
-------------------------
AddTaskSequence

Associates `sequence` as the body of task group `group` for later
retrieval by GetTaskSequence().
-------------------------
*/
void CSequencer::AddTaskSequence( CSequence *sequence, CTaskGroup *group )
{
	m_taskSequences[ group ] = sequence;
}

/*
-------------------------
GetTaskSequence
-------------------------
*/
CSequence
*CSequencer::GetTaskSequence( CTaskGroup *group ) { taskSequence_m::iterator tsi; tsi = m_taskSequences.find( group ); if ( tsi == m_taskSequences.end() ) return NULL; return (*tsi).second; } /* ======================== AddSequence Creates and adds a sequence to the sequencer ======================== */ CSequence *CSequencer::AddSequence( void ) { CSequence *sequence = m_owner->GetSequence(); assert( sequence ); if ( sequence == NULL ) return NULL; //The rest is handled internally to the class m_sequenceMap[ sequence->GetID() ] = sequence; //Add it to the list m_sequences.insert( m_sequences.end(), sequence ); //FIXME: Temp fix sequence->SetFlag( SQ_PENDING ); return sequence; } CSequence *CSequencer::AddSequence( CSequence *parent, CSequence *returnSeq, int flags ) { CSequence *sequence = m_owner->GetSequence(); assert( sequence ); if ( sequence == NULL ) return NULL; //The rest is handled internally to the class m_sequenceMap[ sequence->GetID() ] = sequence; //Add it to the list m_sequences.insert( m_sequences.end(), sequence ); sequence->SetFlags( flags ); sequence->SetParent( parent ); sequence->SetReturn( returnSeq ); return sequence; } /* ======================== GetSequence Retrieves a sequence by its ID ======================== */ CSequence *CSequencer::GetSequence( int id ) { sequenceID_m::iterator mi; mi = m_sequenceMap.find( id ); if ( mi == m_sequenceMap.end() ) return NULL; return (*mi).second; } /* ------------------------- Interrupt ------------------------- */ void CSequencer::Interrupt( void ) { CBlock *command = m_taskManager->GetCurrentTask(); if ( command == NULL ) return; //Save it PushCommand( command, PUSH_BACK ); } /* ======================== Run Runs a script ======================== */ int CSequencer::Run( char *buffer, long size ) { bstream_t *blockStream; Recall(); //Create a new stream blockStream = AddStream(); //Open the stream as an IBI stream if (!blockStream->stream->Open( buffer, size )) { m_ie->I_DPrintf( WL_ERROR, "invalid 
stream" ); return SEQ_FAILED; } CSequence *sequence = AddSequence( NULL, m_curSequence, SQ_COMMON ); // Interpret the command blocks and route them properly if ( S_FAILED( Route( sequence, blockStream )) ) { //Error code is set inside of Route() return SEQ_FAILED; } return SEQ_OK; } /* ======================== ParseRun Parses a user triggered run command ======================== */ int CSequencer::ParseRun( CBlock *block ) { CSequence *new_sequence; bstream_t *new_stream; char *buffer; char newname[ MAX_STRING_SIZE ]; int buffer_size; //Get the name and format it StripExtension( (char*) block->GetMemberData( 0 ), (char *) newname ); //Get the file from the game engine buffer_size = m_ie->I_LoadFile( newname, (void **) &buffer ); if ( buffer_size <= 0 ) { m_ie->I_DPrintf( WL_ERROR, "'%s' : could not open file\n", (char*) block->GetMemberData( 0 )); delete block; block = NULL; return SEQ_FAILED; } //Create a new stream for this file new_stream = AddStream(); //Begin streaming the file if (!new_stream->stream->Open( buffer, buffer_size )) { m_ie->I_DPrintf( WL_ERROR, "invalid stream" ); delete block; block = NULL; return SEQ_FAILED; } //Create a new sequence new_sequence = AddSequence( m_curSequence, m_curSequence, ( SQ_RUN | SQ_PENDING ) ); m_curSequence->AddChild( new_sequence ); // Interpret the command blocks and route them properly if ( S_FAILED( Route( new_sequence, new_stream )) ) { //Error code is set inside of Route() delete block; block = NULL; return SEQ_FAILED; } m_curSequence = m_curSequence->GetReturn(); assert( m_curSequence ); block->Write( TK_FLOAT, (float) new_sequence->GetID() ); PushCommand( block, PUSH_FRONT ); return SEQ_OK; } /* ======================== ParseIf Parses an if statement ======================== */ int CSequencer::ParseIf( CBlock *block, bstream_t *bstream ) { CSequence *sequence; //Create the container sequence sequence = AddSequence( m_curSequence, m_curSequence, SQ_CONDITIONAL ); assert( sequence ); if ( sequence == NULL ) { 
m_ie->I_DPrintf( WL_ERROR, "ParseIf: failed to allocate container sequence" ); delete block; block = NULL; return SEQ_FAILED; } m_curSequence->AddChild( sequence ); //Add a unique conditional identifier to the block for reference later block->Write( TK_FLOAT, (float) sequence->GetID() ); //Push this onto the stack to mark the conditional entrance PushCommand( block, PUSH_FRONT ); //Recursively obtain the conditional body Route( sequence, bstream ); m_elseValid = 2; m_elseOwner = block; return SEQ_OK; } /* ======================== ParseElse Parses an else statement ======================== */ int CSequencer::ParseElse( CBlock *block, bstream_t *bstream ) { //The else is not retained delete block; block = NULL; CSequence *sequence; //Create the container sequence sequence = AddSequence( m_curSequence, m_curSequence, SQ_CONDITIONAL ); assert( sequence ); if ( sequence == NULL ) { m_ie->I_DPrintf( WL_ERROR, "ParseIf: failed to allocate container sequence" ); return SEQ_FAILED; } m_curSequence->AddChild( sequence ); //Add a unique conditional identifier to the block for reference later //TODO: Emit warning if ( m_elseOwner == NULL ) { m_ie->I_DPrintf( WL_ERROR, "Invalid 'else' found!\n" ); return SEQ_FAILED; } m_elseOwner->Write( TK_FLOAT, (float) sequence->GetID() ); m_elseOwner->SetFlag( BF_ELSE ); //Recursively obtain the conditional body Route( sequence, bstream ); m_elseValid = 0; m_elseOwner = NULL; return SEQ_OK; } /* ======================== ParseLoop Parses a loop command ======================== */ int CSequencer::ParseLoop( CBlock *block, bstream_t *bstream ) { CSequence *sequence; CBlockMember *bm; float min, max; int rIter; int memberNum = 0; //Set the parent sequence = AddSequence( m_curSequence, m_curSequence, ( SQ_LOOP | SQ_RETAIN ) ); assert( sequence ); if ( sequence == NULL ) { m_ie->I_DPrintf( WL_ERROR, "ParseLoop : failed to allocate container sequence" ); delete block; block = NULL; return SEQ_FAILED; } m_curSequence->AddChild( sequence ); //Set 
the number of iterations of this sequence bm = block->GetMember( memberNum++ ); if ( bm->GetID() == ID_RANDOM ) { //Parse out the random number min = *(float *) block->GetMemberData( memberNum++ ); max = *(float *) block->GetMemberData( memberNum++ ); rIter = (int) m_ie->I_Random( min, max ); sequence->SetIterations( rIter ); } else { sequence->SetIterations ( (int) (*(float *) bm->GetData()) ); } //Add a unique loop identifier to the block for reference later block->Write( TK_FLOAT, (float) sequence->GetID() ); //Push this onto the stack to mark the loop entrance PushCommand( block, PUSH_FRONT ); //Recursively obtain the loop Route( sequence, bstream ); return SEQ_OK; } /* ======================== AddAffect Adds a sequence that is saved until the affect is called by the parent ======================== */ int CSequencer::AddAffect( bstream_t *bstream, int retain, int *id ) { CSequence *sequence = AddSequence(); bstream_t new_stream; sequence->SetFlag( SQ_AFFECT | SQ_PENDING ); if ( retain ) sequence->SetFlag( SQ_RETAIN ); //This will be replaced once it's actually used, but this will restore the route state properly sequence->SetReturn( m_curSequence ); //We need this as a temp holder new_stream.last = m_curStream; new_stream.stream = bstream->stream; if S_FAILED( Route( sequence, &new_stream ) ) { return SEQ_FAILED; } *id = sequence->GetID(); sequence->SetReturn( NULL ); return SEQ_OK; } /* ======================== ParseAffect Parses an affect command ======================== */ int CSequencer::ParseAffect( CBlock *block, bstream_t *bstream ) { CSequencer *stream_sequencer = NULL; char *entname = NULL; int ret; gentity_t *ent = 0; entname = (char*) block->GetMemberData( 0 ); ent = m_ie->I_GetEntityByName( entname ); if( !ent ) // if there wasn't a valid entname in the affect, we need to check if it's a get command { //try to parse a 'get' command that is embeded in this 'affect' int id; char *p1 = NULL; char *name = 0; CBlockMember *bm = NULL; // // Get the first 
parameter (this should be the get) // bm = block->GetMember( 0 ); id = bm->GetID(); switch ( id ) { // these 3 cases probably aren't necessary case TK_STRING: case TK_IDENTIFIER: case TK_CHAR: p1 = (char *) bm->GetData(); break; case ID_GET: { int type; //get( TYPE, NAME ) type = (int) (*(float *) block->GetMemberData( 1 )); name = (char *) block->GetMemberData( 2 ); switch ( type ) // what type are they attempting to get { case TK_STRING: //only string is acceptable for affect, store result in p1 if ( m_ie->I_GetString( m_ownerID, type, name, &p1 ) == false) { delete block; block = NULL; return false; } break; default: //FIXME: Make an enum id for the error... m_ie->I_DPrintf( WL_ERROR, "Invalid parameter type on affect _1" ); delete block; block = NULL; return false; break; } break; } default: //FIXME: Make an enum id for the error... m_ie->I_DPrintf( WL_ERROR, "Invalid parameter type on affect _2" ); delete block; block = NULL; return false; break; }//end id switch if(p1) { ent = m_ie->I_GetEntityByName( p1 ); } if(!ent) { // a valid entity name was not returned from the get command m_ie->I_DPrintf( WL_WARNING, "'%s' : invalid affect() target\n"); } } // end if(!ent) if( ent ) { stream_sequencer = ent->sequencer; } if (stream_sequencer == NULL) { m_ie->I_DPrintf( WL_WARNING, "'%s' : invalid affect() target\n", entname ); //Fast-forward out of this affect block onto the next valid code CSequence *backSeq = m_curSequence; CSequence *trashSeq = m_owner->GetSequence(); Route( trashSeq, bstream ); Recall(); DestroySequence( trashSeq ); m_curSequence = backSeq; delete block; block = NULL; return SEQ_OK; } if S_FAILED ( stream_sequencer->AddAffect( bstream, (int) m_curSequence->HasFlag( SQ_RETAIN ), &ret ) ) { delete block; block = NULL; return SEQ_FAILED; } //Hold onto the id for later use //FIXME: If the target sequence is freed, what then? (!suspect!) 
block->Write( TK_FLOAT, (float) ret ); PushCommand( block, PUSH_FRONT ); /* //Don't actually do these right now, we're just pre-processing (parsing) the affect if( ent ) { // ents need to update upon being affected ent->taskManager->Update(); } */ return SEQ_OK; } /* ------------------------- ParseTask ------------------------- */ int CSequencer::ParseTask( CBlock *block, bstream_t *bstream ) { CSequence *sequence; CTaskGroup *group; const char *taskName; //Setup the container sequence sequence = AddSequence( m_curSequence, m_curSequence, SQ_TASK | SQ_RETAIN ); m_curSequence->AddChild( sequence ); //Get the name of this task for reference later taskName = (const char *) block->GetMemberData( 0 ); //Get a new task group from the task manager group = m_taskManager->AddTaskGroup( taskName ); if ( group == NULL ) { m_ie->I_DPrintf( WL_ERROR, "error : unable to allocate a new task group" ); delete block; block = NULL; return SEQ_FAILED; } //The current group is set to this group, all subsequent commands (until a block end) will fall into this task group group->SetParent( m_curGroup ); m_curGroup = group; //Keep an association between this task and the container sequence AddTaskSequence( sequence, group ); //PushCommand( block, PUSH_FRONT ); delete block; block = NULL; //Recursively obtain the loop Route( sequence, bstream ); return SEQ_OK; } /* ======================== Route Properly handles and routes commands to the sequencer ======================== */ //FIXME: Re-entering this code will produce unpredictable results if a script has already been routed and is running currently //FIXME: A sequencer cannot properly affect itself int CSequencer::Route( CSequence *sequence, bstream_t *bstream ) { CBlockStream *stream; CBlock *block; //Take the stream as the current stream m_curStream = bstream; stream = bstream->stream; m_curSequence = sequence; //Obtain all blocks while ( stream->BlockAvailable() ) { block = new CBlock; //deleted in Free() stream->ReadBlock( block ); 
//TEMP: HACK! if ( m_elseValid ) m_elseValid--; switch( block->GetBlockID() ) { //Marks the end of a blocked section case ID_BLOCK_END: //Save this as a pre-process marker PushCommand( block, PUSH_FRONT ); if ( m_curSequence->HasFlag( SQ_RUN ) || m_curSequence->HasFlag( SQ_AFFECT ) ) { //Go back to the last stream m_curStream = bstream->last; } if ( m_curSequence->HasFlag( SQ_TASK ) ) { //Go back to the last stream m_curStream = bstream->last; m_curGroup = m_curGroup->GetParent(); } m_curSequence = m_curSequence->GetReturn(); return SEQ_OK; break; //Affect pre-processor case ID_AFFECT: if S_FAILED( ParseAffect( block, bstream ) ) return SEQ_FAILED; break; //Run pre-processor case ID_RUN: if S_FAILED( ParseRun( block ) ) return SEQ_FAILED; break; //Loop pre-processor case ID_LOOP: if S_FAILED( ParseLoop( block, bstream ) ) return SEQ_FAILED; break; //Conditional pre-processor case ID_IF: if S_FAILED( ParseIf( block, bstream ) ) return SEQ_FAILED; break; case ID_ELSE: //TODO: Emit warning if ( m_elseValid == 0 ) { m_ie->I_DPrintf( WL_ERROR, "Invalid 'else' found!\n" ); return SEQ_FAILED; } if S_FAILED( ParseElse( block, bstream ) ) return SEQ_FAILED; break; case ID_TASK: if S_FAILED( ParseTask( block, bstream ) ) return SEQ_FAILED; break; //FIXME: For now this is to catch problems, but can ultimately be removed case ID_WAIT: case ID_PRINT: case ID_SOUND: case ID_MOVE: case ID_ROTATE: case ID_SET: case ID_USE: case ID_REMOVE: case ID_KILL: case ID_FLUSH: case ID_CAMERA: case ID_DO: case ID_DECLARE: case ID_FREE: case ID_SIGNAL: case ID_WAITSIGNAL: case ID_PLAY: //Commands go directly into the sequence without pre-process PushCommand( block, PUSH_FRONT ); break; //Error default: m_ie->I_DPrintf( WL_ERROR, "'%d' : invalid block ID", block->GetBlockID() ); return SEQ_FAILED; break; } } //Check for a run sequence, it must be marked if ( m_curSequence->HasFlag( SQ_RUN ) ) { block = new CBlock; block->Create( ID_BLOCK_END ); PushCommand( block, PUSH_FRONT ); //mark the end 
of the run /* //Free the stream m_curStream = bstream->last; DeleteStream( bstream ); */ return SEQ_OK; } //Check to start the communication if ( ( bstream->last == NULL ) && ( m_numCommands > 0 ) ) { //Everything is routed, so get it all rolling Prime( m_taskManager, PopCommand( POP_BACK ) ); } m_curStream = bstream->last; //Free the stream DeleteStream( bstream ); return SEQ_OK; } /* ======================== CheckRun Checks for run command pre-processing ======================== */ //Directly changes the parameter to avoid excess push/pop void CSequencer::CheckRun( CBlock **command ) { CBlock *block = *command; if ( block == NULL ) return; //Check for a run command if ( block->GetBlockID() == ID_RUN ) { int id = (int) (*(float *) block->GetMemberData( 1 )); m_ie->I_DPrintf( WL_DEBUG, "%4d run( \"%s\" ); [%d]", m_ownerID, (char *) block->GetMemberData(0), m_ie->I_GetTime() ); if ( m_curSequence->HasFlag( SQ_RETAIN ) ) { PushCommand( block, PUSH_FRONT ); } else { delete block; block = NULL; *command = NULL; } m_curSequence = GetSequence( id ); //TODO: Emit warning assert( m_curSequence ); if ( m_curSequence == NULL ) { m_ie->I_DPrintf( WL_ERROR, "Unable to find 'run' sequence!\n" ); *command = NULL; return; } if ( m_curSequence->GetNumCommands() > 0 ) { *command = PopCommand( POP_BACK ); Prep( command ); //Account for any other pre-processes return; } return; } //Check for the end of a run if ( ( block->GetBlockID() == ID_BLOCK_END ) && ( m_curSequence->HasFlag( SQ_RUN ) ) ) { if ( m_curSequence->HasFlag( SQ_RETAIN ) ) { PushCommand( block, PUSH_FRONT ); } else { delete block; block = NULL; *command = NULL; } m_curSequence = ReturnSequence( m_curSequence ); if ( m_curSequence && m_curSequence->GetNumCommands() > 0 ) { *command = PopCommand( POP_BACK ); Prep( command ); //Account for any other pre-processes return; } //FIXME: Check this... 
} } /* ------------------------- EvaluateConditional ------------------------- */ //FIXME: This function will be written better later once the functionality of the ideas here are tested int CSequencer::EvaluateConditional( CBlock *block ) { CBlockMember *bm; char tempString1[128], tempString2[128]; vector_t vec; int id, i, oper, memberNum = 0; char *p1 = NULL, *p2 = NULL; int t1, t2; // // Get the first parameter // bm = block->GetMember( memberNum++ ); id = bm->GetID(); t1 = id; switch ( id ) { case TK_FLOAT: Com_sprintf( tempString1, sizeof( tempString1 ), "%.3f", *(float *) bm->GetData() ); p1 = (char *) tempString1; break; case TK_VECTOR: tempString1[0] = '\0'; for ( i = 0; i < 3; i++ ) { bm = block->GetMember( memberNum++ ); vec[i] = *(float *) bm->GetData(); } Com_sprintf( tempString1, sizeof( tempString1 ), "%.3f %.3f %.3f", vec[0], vec[1], vec[2] ); p1 = (char *) tempString1; break; case TK_STRING: case TK_IDENTIFIER: case TK_CHAR: p1 = (char *) bm->GetData(); break; case ID_GET: { int type; char *name; //get( TYPE, NAME ) type = (int) (*(float *) block->GetMemberData( memberNum++ )); name = (char *) block->GetMemberData( memberNum++ ); //Get the type returned and hold onto it t1 = type; switch ( type ) { case TK_FLOAT: { float fVal; if ( m_ie->I_GetFloat( m_ownerID, type, name, &fVal ) == false) return false; Com_sprintf( tempString1, sizeof( tempString1 ), "%.3f", fVal ); p1 = (char *) tempString1; } break; case TK_INT: { float fVal; if ( m_ie->I_GetFloat( m_ownerID, type, name, &fVal ) == false) return false; Com_sprintf( tempString1, sizeof( tempString1 ), "%d", (int) fVal ); p1 = (char *) tempString1; } break; case TK_STRING: if ( m_ie->I_GetString( m_ownerID, type, name, &p1 ) == false) return false; break; case TK_VECTOR: { vector_t vVal; if ( m_ie->I_GetVector( m_ownerID, type, name, vVal ) == false) return false; Com_sprintf( tempString1, sizeof( tempString1 ), "%.3f %.3f %.3f", vVal[0], vVal[1], vVal[2] ); p1 = (char *) tempString1; } break; } 
break; } case ID_RANDOM: { float min, max; //FIXME: This will not account for nested random() statements min = *(float *) block->GetMemberData( memberNum++ ); max = *(float *) block->GetMemberData( memberNum++ ); //A float value is returned from the function t1 = TK_FLOAT; Com_sprintf( tempString1, sizeof( tempString1 ), "%.3f", m_ie->I_Random( min, max ) ); p1 = (char *) tempString1; } break; case ID_TAG: { char *name; float type; name = (char *) block->GetMemberData( memberNum++ ); type = *(float *) block->GetMemberData( memberNum++ ); t1 = TK_VECTOR; //TODO: Emit warning if ( m_ie->I_GetTag( m_ownerID, name, (int) type, vec ) == false) { m_ie->I_DPrintf( WL_ERROR, "Unable to find tag \"%s\"!\n", name ); return false; } Com_sprintf( tempString1, sizeof( tempString1 ), "%.3f %.3f %.3f", vec[0], vec[1], vec[2] ); p1 = (char *) tempString1; break; } default: //FIXME: Make an enum id for the error... m_ie->I_DPrintf( WL_ERROR, "Invalid parameter type on conditional" ); return false; break; } // // Get the comparison operator // bm = block->GetMember( memberNum++ ); id = bm->GetID(); switch ( id ) { case TK_EQUALS: case TK_GREATER_THAN: case TK_LESS_THAN: case TK_NOT: oper = id; break; default: m_ie->I_DPrintf( WL_ERROR, "Invalid operator type found on conditional!\n" ); return false; //FIXME: Emit warning break; } // // Get the second parameter // bm = block->GetMember( memberNum++ ); id = bm->GetID(); t2 = id; switch ( id ) { case TK_FLOAT: Com_sprintf( tempString2, sizeof( tempString2 ), "%.3f", *(float *) bm->GetData() ); p2 = (char *) tempString2; break; case TK_VECTOR: tempString2[0] = '\0'; for ( i = 0; i < 3; i++ ) { bm = block->GetMember( memberNum++ ); vec[i] = *(float *) bm->GetData(); } Com_sprintf( tempString2, sizeof( tempString2 ), "%.3f %.3f %.3f", vec[0], vec[1], vec[2] ); p2 = (char *) tempString2; break; case TK_STRING: case TK_IDENTIFIER: case TK_CHAR: p2 = (char *) bm->GetData(); break; case ID_GET: { int type; char *name; //get( TYPE, NAME ) type 
= (int) (*(float *) block->GetMemberData( memberNum++ )); name = (char *) block->GetMemberData( memberNum++ ); //Get the type returned and hold onto it t2 = type; switch ( type ) { case TK_FLOAT: { float fVal; if ( m_ie->I_GetFloat( m_ownerID, type, name, &fVal ) == false) return false; Com_sprintf( tempString2, sizeof( tempString2 ), "%.3f", fVal ); p2 = (char *) tempString2; } break; case TK_INT: { float fVal; if ( m_ie->I_GetFloat( m_ownerID, type, name, &fVal ) == false) return false; Com_sprintf( tempString2, sizeof( tempString2 ), "%d", (int) fVal ); p2 = (char *) tempString2; } break; case TK_STRING: if ( m_ie->I_GetString( m_ownerID, type, name, &p2 ) == false) return false; break; case TK_VECTOR: { vector_t vVal; if ( m_ie->I_GetVector( m_ownerID, type, name, vVal ) == false) return false; Com_sprintf( tempString2, sizeof( tempString2 ), "%.3f %.3f %.3f", vVal[0], vVal[1], vVal[2] ); p2 = (char *) tempString2; } break; } break; } case ID_RANDOM: { float min, max; //FIXME: This will not account for nested random() statements min = *(float *) block->GetMemberData( memberNum++ ); max = *(float *) block->GetMemberData( memberNum++ ); //A float value is returned from the function t2 = TK_FLOAT; Com_sprintf( tempString2, sizeof( tempString2 ), "%.3f", m_ie->I_Random( min, max ) ); p2 = (char *) tempString2; } break; case ID_TAG: { char *name; float type; name = (char *) block->GetMemberData( memberNum++ ); type = *(float *) block->GetMemberData( memberNum++ ); t2 = TK_VECTOR; //TODO: Emit warning if ( m_ie->I_GetTag( m_ownerID, name, (int) type, vec ) == false) { m_ie->I_DPrintf( WL_ERROR, "Unable to find tag \"%s\"!\n", name ); return false; } Com_sprintf( tempString2, sizeof( tempString2 ), "%.3f %.3f %.3f", vec[0], vec[1], vec[2] ); p2 = (char *) tempString2; break; } default: //FIXME: Make an enum id for the error... 
m_ie->I_DPrintf( WL_ERROR, "Invalid parameter type on conditional" ); return false; break; } return m_ie->I_Evaluate( t1, p1, t2, p2, oper ); } /* ======================== CheckIf Checks for if statement pre-processing ======================== */ void CSequencer::CheckIf( CBlock **command ) { CBlock *block = *command; int successID, failureID; CSequence *successSeq, *failureSeq; if ( block == NULL ) return; if ( block->GetBlockID() == ID_IF ) { int ret = EvaluateConditional( block ); if ( ret /*TRUE*/ ) { if ( block->HasFlag( BF_ELSE ) ) { successID = (int) (*(float *) block->GetMemberData( block->GetNumMembers() - 2 )); } else { successID = (int) (*(float *) block->GetMemberData( block->GetNumMembers() - 1 )); } successSeq = GetSequence( successID ); //TODO: Emit warning assert( successSeq ); if ( successSeq == NULL ) { m_ie->I_DPrintf( WL_ERROR, "Unable to find conditional success sequence!\n" ); *command = NULL; return; } //Only save the conditional statement if the calling sequence is retained if ( m_curSequence->HasFlag( SQ_RETAIN ) ) { PushCommand( block, PUSH_FRONT ); } else { delete block; block = NULL; *command = NULL; } m_curSequence = successSeq; //Recursively work out any other pre-processors *command = PopCommand( POP_BACK ); Prep( command ); return; } if ( ( ret == false ) && ( block->HasFlag( BF_ELSE ) ) ) { failureID = (int) (*(float *) block->GetMemberData( block->GetNumMembers() - 1 )); failureSeq = GetSequence( failureID ); //TODO: Emit warning assert( failureSeq ); if ( failureSeq == NULL ) { m_ie->I_DPrintf( WL_ERROR, "Unable to find conditional failure sequence!\n" ); *command = NULL; return; } //Only save the conditional statement if the calling sequence is retained if ( m_curSequence->HasFlag( SQ_RETAIN ) ) { PushCommand( block, PUSH_FRONT ); } else { delete block; block = NULL; *command = NULL; } m_curSequence = failureSeq; //Recursively work out any other pre-processors *command = PopCommand( POP_BACK ); Prep( command ); return; } //Only 
save the conditional statement if the calling sequence is retained if ( m_curSequence->HasFlag( SQ_RETAIN ) ) { PushCommand( block, PUSH_FRONT ); } else { delete block; block = NULL; *command = NULL; } //Conditional failed, just move on to the next command *command = PopCommand( POP_BACK ); Prep( command ); return; } if ( ( block->GetBlockID() == ID_BLOCK_END ) && ( m_curSequence->HasFlag( SQ_CONDITIONAL ) ) ) { assert( m_curSequence->GetReturn() ); if ( m_curSequence->GetReturn() == NULL ) { *command = NULL; return; } //Check to retain it if ( m_curSequence->GetParent()->HasFlag( SQ_RETAIN ) ) { PushCommand( block, PUSH_FRONT ); } else { delete block; block = NULL; *command = NULL; } //Back out of the conditional and resume the previous sequence m_curSequence = ReturnSequence( m_curSequence ); //This can safely happen if ( m_curSequence == NULL ) { *command = NULL; return; } *command = PopCommand( POP_BACK ); Prep( command ); } } /* ======================== CheckLoop Checks for loop command pre-processing ======================== */ void CSequencer::CheckLoop( CBlock **command ) { CBlockMember *bm; CBlock *block = *command; float min, max; int iterations; int loopID; int memberNum = 0; if ( block == NULL ) return; //Check for a loop if ( block->GetBlockID() == ID_LOOP ) { //Get the loop ID bm = block->GetMember( memberNum++ ); if ( bm->GetID() == ID_RANDOM ) { //Parse out the random number min = *(float *) block->GetMemberData( memberNum++ ); max = *(float *) block->GetMemberData( memberNum++ ); iterations = (int) m_ie->I_Random( min, max ); } else { iterations = (int) (*(float *) bm->GetData()); } loopID = (int) (*(float *) block->GetMemberData( memberNum++ )); CSequence *loop = GetSequence( loopID ); //TODO: Emit warning assert( loop ); if ( loop == NULL ) { m_ie->I_DPrintf( WL_ERROR, "Unable to find 'loop' sequence!\n" ); *command = NULL; return; } assert( loop->GetParent() ); if ( loop->GetParent() == NULL ) { *command = NULL; return; } //Restore the count if 
it has been lost loop->SetIterations( iterations ); //Only save the loop command if the calling sequence is retained if ( m_curSequence->HasFlag( SQ_RETAIN ) ) { PushCommand( block, PUSH_FRONT ); } else { delete block; block = NULL; *command = NULL; } m_curSequence = loop; //Recursively work out any other pre-processors *command = PopCommand( POP_BACK ); Prep( command ); return; } //Check for the end of the loop if ( ( block->GetBlockID() == ID_BLOCK_END ) && ( m_curSequence->HasFlag( SQ_LOOP ) ) ) { //We don't want to decrement -1 if ( m_curSequence->GetIterations() > 0 ) m_curSequence->SetIterations( m_curSequence->GetIterations()-1 ); //Nice, eh? //Either there's another iteration, or it's infinite if ( m_curSequence->GetIterations() != 0 ) { //Another iteration is going to happen, so this will need to be considered again PushCommand( block, PUSH_FRONT ); *command = PopCommand( POP_BACK ); Prep( command ); return; } else { assert( m_curSequence->GetReturn() ); if ( m_curSequence->GetReturn() == NULL ) { *command = NULL; return; } //Check to retain it if ( m_curSequence->GetParent()->HasFlag( SQ_RETAIN ) ) { PushCommand( block, PUSH_FRONT ); } else { delete block; block = NULL; *command = NULL; } //Back out of the loop and resume the previous sequence m_curSequence = ReturnSequence( m_curSequence ); //This can safely happen if ( m_curSequence == NULL ) { *command = NULL; return; } *command = PopCommand( POP_BACK ); Prep( command ); } } } /* ======================== CheckFlush Checks for flush command pre-processing ======================== */ void CSequencer::CheckFlush( CBlock **command ) { sequence_l::iterator sli; CBlock *block = *command; if ( block == NULL ) return; if ( block->GetBlockID() == ID_FLUSH ) { //Flush the sequence Flush( m_curSequence ); //Check to retain it if ( m_curSequence->HasFlag( SQ_RETAIN ) ) { PushCommand( block, PUSH_FRONT ); } else { delete block; block = NULL; *command = NULL; } *command = PopCommand( POP_BACK ); Prep( command ); 
return; } } /* ======================== CheckAffect Checks for affect command pre-processing ======================== */ void CSequencer::CheckAffect( CBlock **command ) { CBlock *block = *command; gentity_t *ent = NULL; char *entname = NULL; int memberNum = 0; if ( block == NULL ) { return; } if ( block->GetBlockID() == ID_AFFECT ) { CSequencer *sequencer = NULL; entname = (char*) block->GetMemberData( memberNum++ ); ent = m_ie->I_GetEntityByName( entname ); if( !ent ) // if there wasn't a valid entname in the affect, we need to check if it's a get command { //try to parse a 'get' command that is embeded in this 'affect' int id; char *p1 = NULL; char *name = 0; CBlockMember *bm = NULL; // // Get the first parameter (this should be the get) // bm = block->GetMember( 0 ); id = bm->GetID(); switch ( id ) { // these 3 cases probably aren't necessary case TK_STRING: case TK_IDENTIFIER: case TK_CHAR: p1 = (char *) bm->GetData(); break; case ID_GET: { int type; //get( TYPE, NAME ) type = (int) (*(float *) block->GetMemberData( memberNum++ )); name = (char *) block->GetMemberData( memberNum++ ); switch ( type ) // what type are they attempting to get { case TK_STRING: //only string is acceptable for affect, store result in p1 if ( m_ie->I_GetString( m_ownerID, type, name, &p1 ) == false) { return; } break; default: //FIXME: Make an enum id for the error... m_ie->I_DPrintf( WL_ERROR, "Invalid parameter type on affect _1" ); return; break; } break; } default: //FIXME: Make an enum id for the error... 
m_ie->I_DPrintf( WL_ERROR, "Invalid parameter type on affect _2" ); return; break; }//end id switch if(p1) { ent = m_ie->I_GetEntityByName( p1 ); } if(!ent) { // a valid entity name was not returned from the get command m_ie->I_DPrintf( WL_WARNING, "'%s' : invalid affect() target\n"); } } // end if(!ent) if( ent ) { sequencer = ent->sequencer; } if(memberNum == 0) { //there was no get, increment manually before next step memberNum++; } int type = (int) (*(float *) block->GetMemberData( memberNum )); int id = (int) (*(float *) block->GetMemberData( memberNum+1 )); if ( m_curSequence->HasFlag( SQ_RETAIN ) ) { PushCommand( block, PUSH_FRONT ); } else { delete block; block = NULL; *command = NULL; } //NOTENOTE: If this isn't found, continue on to the next command if ( sequencer == NULL ) { *command = PopCommand( POP_BACK ); Prep( command ); return; } sequencer->Affect( id, type ); *command = PopCommand( POP_BACK ); Prep( command ); if( ent ) { // ents need to update upon being affected ent->taskManager->Update(); } return; } if ( ( block->GetBlockID() == ID_BLOCK_END ) && ( m_curSequence->HasFlag( SQ_AFFECT ) ) ) { if ( m_curSequence->HasFlag( SQ_RETAIN ) ) { PushCommand( block, PUSH_FRONT ); } else { delete block; block = NULL; *command = NULL; } m_curSequence = ReturnSequence( m_curSequence ); if ( m_curSequence == NULL ) { *command = NULL; return; } *command = PopCommand( POP_BACK ); Prep( command ); if( ent ) { // ents need to update upon being affected ent->taskManager->Update(); } } } /* ------------------------- CheckDo ------------------------- */ void CSequencer::CheckDo( CBlock **command ) { CBlock *block = *command; if ( block == NULL ) return; if ( block->GetBlockID() == ID_DO ) { //Get the sequence const char *groupName = (const char *) block->GetMemberData( 0 ); CTaskGroup *group = m_taskManager->GetTaskGroup( groupName ); CSequence *sequence = GetTaskSequence( group ); //TODO: Emit warning assert( group ); if ( group == NULL ) { //TODO: Give name/number 
of entity trying to execute, too m_ie->I_DPrintf( WL_ERROR, "ICARUS Unable to find task group \"%s\"!\n", groupName ); *command = NULL; return; } //TODO: Emit warning assert( sequence ); if ( sequence == NULL ) { //TODO: Give name/number of entity trying to execute, too m_ie->I_DPrintf( WL_ERROR, "ICARUS Unable to find task 'group' sequence!\n", groupName ); *command = NULL; return; } //Only save the loop command if the calling sequence is retained if ( m_curSequence->HasFlag( SQ_RETAIN ) ) { PushCommand( block, PUSH_FRONT ); } else { delete block; block = NULL; *command = NULL; } //Set this to our current sequence sequence->SetReturn( m_curSequence ); m_curSequence = sequence; group->SetParent( m_curGroup ); m_curGroup = group; //Mark all the following commands as being in the task m_taskManager->MarkTask( group->GetGUID(), TASK_START ); //Recursively work out any other pre-processors *command = PopCommand( POP_BACK ); Prep( command ); return; } if ( ( block->GetBlockID() == ID_BLOCK_END ) && ( m_curSequence->HasFlag( SQ_TASK ) ) ) { if ( m_curSequence->HasFlag( SQ_RETAIN ) ) { PushCommand( block, PUSH_FRONT ); } else { delete block; block = NULL; *command = NULL; } m_taskManager->MarkTask( m_curGroup->GetGUID(), TASK_END ); m_curGroup = m_curGroup->GetParent(); CSequence *returnSeq = ReturnSequence( m_curSequence ); m_curSequence->SetReturn( NULL ); m_curSequence = returnSeq; if ( m_curSequence == NULL ) { *command = NULL; return; } *command = PopCommand( POP_BACK ); Prep( command ); } } /* ======================== Prep Handles internal sequencer maintenance ======================== */ void CSequencer::Prep( CBlock **command ) { //Check all pre-processes CheckAffect( command ); CheckFlush( command ); CheckLoop( command ); CheckRun( command ); CheckIf( command ); CheckDo( command ); } /* ======================== Prime Starts communication between the task manager and this sequencer ======================== */ int CSequencer::Prime( CTaskManager *taskManager, 
CBlock *command ) { Prep( &command ); if ( command ) { taskManager->SetCommand( command, PUSH_BACK ); } return SEQ_OK; } /* ======================== Callback Handles a completed task and returns a new task to be completed ======================== */ int CSequencer::Callback( CTaskManager *taskManager, CBlock *block, int returnCode ) { CBlock *command; if (returnCode == TASK_RETURN_COMPLETE) { //There are no more pending commands if ( m_curSequence == NULL ) { delete block; block = NULL; return SEQ_OK; } //Check to retain the command if ( m_curSequence->HasFlag( SQ_RETAIN ) ) //This isn't true for affect sequences...? { PushCommand( block, PUSH_FRONT ); } else { delete block; block = NULL; } //Check for pending commands if ( m_curSequence->GetNumCommands() <= 0 ) { if ( m_curSequence->GetReturn() == NULL) return SEQ_OK; m_curSequence = m_curSequence->GetReturn(); } command = PopCommand( POP_BACK ); Prep( &command ); if ( command ) taskManager->SetCommand( command, PUSH_FRONT ); return SEQ_OK; } //FIXME: This could be more descriptive m_ie->I_DPrintf( WL_ERROR, "command could not be called back\n" ); assert(0); return SEQ_FAILED; } /* ------------------------- Recall ------------------------- */ int CSequencer::Recall( void ) { CBlock *block = NULL; while ( ( block = m_taskManager->RecallTask() ) != NULL ) { if (m_curSequence) { PushCommand( block, PUSH_BACK ); } else { delete block; block = NULL; } } return true; } /* ------------------------- Affect ------------------------- */ int CSequencer::Affect( int id, int type ) { CSequence *sequence = GetSequence( id ); if ( sequence == NULL ) { return SEQ_FAILED; } switch ( type ) { case TYPE_FLUSH: //Get rid of all old code Flush( sequence ); sequence->RemoveFlag( SQ_PENDING, true ); m_curSequence = sequence; Prime( m_taskManager, PopCommand( POP_BACK ) ); break; case TYPE_INSERT: Recall(); sequence->SetReturn( m_curSequence ); sequence->RemoveFlag( SQ_PENDING, true ); m_curSequence = sequence; Prime( m_taskManager, 
PopCommand( POP_BACK ) ); break; default: m_ie->I_DPrintf( WL_ERROR, "unknown affect type found" ); break; } return SEQ_OK; } /* ======================== PushCommand Pushes a commands onto the current sequence ======================== */ int CSequencer::PushCommand( CBlock *command, int flag ) { //Make sure everything is ok assert( m_curSequence ); if ( m_curSequence == NULL ) return SEQ_FAILED; m_curSequence->PushCommand( command, flag ); m_numCommands++; //Invalid flag return SEQ_OK; } /* ======================== PopCommand Pops a command off the current sequence ======================== */ CBlock *CSequencer::PopCommand( int flag ) { //Make sure everything is ok assert( m_curSequence ); if ( m_curSequence == NULL ) return NULL; CBlock *block = m_curSequence->PopCommand( flag ); if ( block != NULL ) m_numCommands--; return block; } /* ======================== StripExtension Filename ultility. Probably get rid of this if I decided to use CStrings... ======================== */ void CSequencer::StripExtension( const char *in, char *out ) { int i = strlen(in) + 1; while ( (in[i] != '.') && (i >= 0) ) i--; if ( i < 0 ) { strcpy(out, in); return; } strncpy(out, in, i); } /* ------------------------- RemoveSequence ------------------------- */ //NOTENOTE: This only removes references to the sequence, IT DOES NOT FREE THE ALLOCATED MEMORY! You've be warned! 
=) int CSequencer::RemoveSequence( CSequence *sequence ) { CSequence *temp; int numChildren = sequence->GetNumChildren(); //Add all the children for ( int i = 0; i < numChildren; i++ ) { temp = sequence->GetChild( i ); //TODO: Emit warning assert( temp ); if ( temp == NULL ) { m_ie->I_DPrintf( WL_WARNING, "Unable to find child sequence on RemoveSequence call!\n" ); continue; } //Remove the references to this sequence temp->SetParent( NULL ); temp->SetReturn( NULL ); } return SEQ_OK; } int CSequencer::DestroySequence( CSequence *sequence ) { m_sequenceMap.erase( sequence->GetID() ); m_sequences.remove( sequence ); taskSequence_m::iterator tsi; for ( tsi = m_taskSequences.begin(); tsi != m_taskSequences.end(); ) { if((*tsi).second == sequence) { #ifdef _WIN32 tsi = m_taskSequences.erase(tsi); #else taskSequence_m::iterator itTemp = tsi; tsi++; m_taskSequences.erase(itTemp); #endif } else { ++tsi; } } CSequence* parent = sequence->GetParent(); if ( parent ) { parent->RemoveChild( sequence ); parent = NULL; } int curChild = sequence->GetNumChildren(); while(curChild) { curChild--; DestroySequence(sequence->GetChild( curChild )); } m_owner->DeleteSequence( sequence ); return SEQ_OK; } /* ------------------------- ReturnSequence ------------------------- */ inline CSequence *CSequencer::ReturnSequence( CSequence *sequence ) { while ( sequence->GetReturn() ) { assert(sequence != sequence->GetReturn() ); if ( sequence == sequence->GetReturn() ) return NULL; sequence = sequence->GetReturn(); if ( sequence->GetNumCommands() > 0 ) return sequence; } return NULL; } //Save / Load /* ------------------------- Save ------------------------- */ int CSequencer::Save( void ) { sequence_l::iterator si; taskSequence_m::iterator ti; int numSequences = 0, id, numTasks; //Get the number of sequences to save out numSequences = m_sequences.size(); //Save out the owner sequence m_ie->I_WriteSaveData( INT_ID('S','Q','R','E'), &m_ownerID, sizeof( m_ownerID ) ); //Write out the number of 
sequences we need to read m_ie->I_WriteSaveData( INT_ID('S','Q','R','#'), &numSequences, sizeof( numSequences ) ); //Second pass, save out all sequences, in order STL_ITERATE( si, m_sequences ) { id = (*si)->GetID(); m_ie->I_WriteSaveData( INT_ID('S','Q','R','I'), &id, sizeof( id ) ); } //Save out the taskManager m_taskManager->Save(); //Save out the task sequences mapping the name to the GUIDs numTasks = m_taskSequences.size(); m_ie->I_WriteSaveData( INT_ID('S','Q','T','#'), &numTasks, sizeof ( numTasks ) ); STL_ITERATE( ti, m_taskSequences ) { //Save the task group's ID id = ((*ti).first)->GetGUID(); m_ie->I_WriteSaveData( INT_ID('S','T','I','D'), &id, sizeof( id ) ); //Save the sequence's ID id = ((*ti).second)->GetID(); m_ie->I_WriteSaveData( INT_ID('S','S','I','D'), &id, sizeof( id ) ); } int curGroupID = ( m_curGroup == NULL ) ? -1 : m_curGroup->GetGUID(); m_ie->I_WriteSaveData( INT_ID('S','Q','C','T'), &curGroupID, sizeof ( m_numCommands ) ); //Output the number of commands m_ie->I_WriteSaveData( INT_ID('S','Q','#','C'), &m_numCommands, sizeof ( m_numCommands ) ); //FIXME: This can be reconstructed //Output the ID of the current sequence id = ( m_curSequence != NULL ) ? 
m_curSequence->GetID() : -1; m_ie->I_WriteSaveData( INT_ID('S','Q','C','S'), &id, sizeof ( id ) ); return true; } /* ------------------------- Load ------------------------- */ int CSequencer::Load( void ) { int i; //Get the owner of this sequencer m_ie->I_ReadSaveData( INT_ID('S','Q','R','E'), &m_ownerID, sizeof( m_ownerID ), NULL ); //Link the entity back to the sequencer m_ie->I_LinkEntity( m_ownerID, this, m_taskManager ); CTaskGroup *taskGroup; CSequence *seq; int numSequences, seqID, taskID, numTasks; //Get the number of sequences to read m_ie->I_ReadSaveData( INT_ID('S','Q','R','#'), &numSequences, sizeof( numSequences ), NULL ); //Read in all the sequences for ( i = 0; i < numSequences; i++ ) { m_ie->I_ReadSaveData( INT_ID('S','Q','R','I'), &seqID, sizeof( seqID ), NULL ); seq = m_owner->GetSequence( seqID ); assert( seq ); STL_INSERT( m_sequences, seq ); m_sequenceMap[ seqID ] = seq; } //Setup the task manager m_taskManager->Init( this ); //Load the task manager m_taskManager->Load(); //Get the number of tasks in the map m_ie->I_ReadSaveData( INT_ID('S','Q','T','#'), &numTasks, sizeof( numTasks ), NULL ); //Read in, and reassociate the tasks to the sequences for ( i = 0; i < numTasks; i++ ) { //Read in the task's ID m_ie->I_ReadSaveData( INT_ID('S','T','I','D'), &taskID, sizeof( taskID ), NULL ); //Read in the sequence's ID m_ie->I_ReadSaveData( INT_ID('S','S','I','D'), &seqID, sizeof( seqID ), NULL ); taskGroup = m_taskManager->GetTaskGroup( taskID ); assert( taskGroup ); seq = m_owner->GetSequence( seqID ); assert( seq ); //Associate the values m_taskSequences[ taskGroup ] = seq; } int curGroupID; //Get the current task group m_ie->I_ReadSaveData( INT_ID('S','Q','C','T'), &curGroupID, sizeof( curGroupID ), NULL ); m_curGroup = ( curGroupID == -1 ) ? 
NULL : m_taskManager->GetTaskGroup( curGroupID ); //Get the number of commands m_ie->I_ReadSaveData( INT_ID('S','Q','#','C'), &m_numCommands, sizeof( m_numCommands ), NULL ); //Get the current sequence m_ie->I_ReadSaveData( INT_ID('S','Q','C','S'), &seqID, sizeof( seqID ), NULL ); m_curSequence = ( seqID != -1 ) ? m_owner->GetSequence( seqID ) : NULL; return true; }
gpl-2.0
dduval/kernel-rhel5
arch/sparc64/mm/init.c
2
50100
/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $ * arch/sparc64/mm/init.c * * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/slab.h> #include <linux/initrd.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/poison.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/kprobes.h> #include <linux/cache.h> #include <linux/sort.h> #include <asm/head.h> #include <asm/system.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/oplib.h> #include <asm/iommu.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include <asm/dma.h> #include <asm/starfire.h> #include <asm/tlb.h> #include <asm/spitfire.h> #include <asm/sections.h> #include <asm/tsb.h> #include <asm/hypervisor.h> #include <asm/prom.h> extern void device_scan(void); #define MAX_PHYS_ADDRESS (1UL << 42UL) #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) #define KPTE_BITMAP_BYTES \ ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8) unsigned long kern_linear_pte_xor[2] __read_mostly; /* A bitmap, one bit for every 256MB of physical memory. If the bit * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else * if set we should use a 256MB page (via kern_linear_pte_xor[1]). */ unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; /* A special kernel TSB for 4MB and 256MB linear mappings. 
*/ struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; #define MAX_BANKS 32 static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata; static int pavail_ents __initdata; static int pavail_rescan_ents __initdata; static int cmp_p64(const void *a, const void *b) { const struct linux_prom64_registers *x = a, *y = b; if (x->phys_addr > y->phys_addr) return 1; if (x->phys_addr < y->phys_addr) return -1; return 0; } static void __init read_obp_memory(const char *property, struct linux_prom64_registers *regs, int *num_ents) { int node = prom_finddevice("/memory"); int prop_size = prom_getproplen(node, property); int ents, ret, i; ents = prop_size / sizeof(struct linux_prom64_registers); if (ents > MAX_BANKS) { prom_printf("The machine has more %s property entries than " "this kernel can support (%d).\n", property, MAX_BANKS); prom_halt(); } ret = prom_getproperty(node, property, (char *) regs, prop_size); if (ret == -1) { prom_printf("Couldn't get %s property from /memory.\n"); prom_halt(); } /* Sanitize what we got from the firmware, by page aligning * everything. */ for (i = 0; i < ents; i++) { unsigned long base, size; base = regs[i].phys_addr; size = regs[i].reg_size; size &= PAGE_MASK; if (base & ~PAGE_MASK) { unsigned long new_base = PAGE_ALIGN(base); size -= new_base - base; if ((long) size < 0L) size = 0UL; base = new_base; } regs[i].phys_addr = base; regs[i].reg_size = size; } for (i = 0; i < ents; i++) { if (regs[i].reg_size == 0UL) { int j; for (j = i; j < ents - 1; j++) { regs[j].phys_addr = regs[j+1].phys_addr; regs[j].reg_size = regs[j+1].reg_size; } ents--; i--; } } *num_ents = ents; sort(regs, ents, sizeof(struct linux_prom64_registers), cmp_p64, NULL); } unsigned long *sparc64_valid_addr_bitmap __read_mostly; /* Kernel physical address base and size in bytes. 
*/ unsigned long kern_base __read_mostly; unsigned long kern_size __read_mostly; /* get_new_mmu_context() uses "cache + 1". */ DEFINE_SPINLOCK(ctx_alloc_lock); unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; #define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6)) unsigned long mmu_context_bmap[CTX_BMAP_SLOTS]; /* References to special section boundaries */ extern char _start[], _end[]; /* Initial ramdisk setup */ extern unsigned long sparc_ramdisk_image64; extern unsigned int sparc_ramdisk_image; extern unsigned int sparc_ramdisk_size; struct page *mem_map_zero __read_mostly; unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly; unsigned long sparc64_kern_pri_context __read_mostly; unsigned long sparc64_kern_pri_nuc_bits __read_mostly; unsigned long sparc64_kern_sec_context __read_mostly; int bigkernel = 0; kmem_cache_t *pgtable_cache __read_mostly; static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) { clear_page(addr); } extern void tsb_cache_init(void); void pgtable_cache_init(void) { pgtable_cache = kmem_cache_create("pgtable_cache", PAGE_SIZE, PAGE_SIZE, SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, zero_ctor, NULL); if (!pgtable_cache) { prom_printf("Could not create pgtable_cache\n"); prom_halt(); } tsb_cache_init(); } #ifdef CONFIG_DEBUG_DCFLUSH atomic_t dcpage_flushes = ATOMIC_INIT(0); #ifdef CONFIG_SMP atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); #endif #endif inline void flush_dcache_page_impl(struct page *page) { BUG_ON(tlb_type == hypervisor); #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes); #endif #ifdef DCACHE_ALIASING_POSSIBLE __flush_dcache_page(page_address(page), ((tlb_type == spitfire) && page_mapping(page) != NULL)); #else if (page_mapping(page) != NULL && tlb_type == spitfire) __flush_icache_page(__pa(page_address(page))); #endif } #define PG_dcache_dirty PG_arch_1 #define PG_dcache_cpu_shift 24UL #define PG_dcache_cpu_mask (256UL - 1UL) #if NR_CPUS > 256 #error D-cache dirty tracking and 
thread_info->cpu need fixing for > 256 cpus #endif #define dcache_dirty_cpu(page) \ (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) static __inline__ void set_dcache_dirty(struct page *page, int this_cpu) { unsigned long mask = this_cpu; unsigned long non_cpu_bits; non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift); mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty); __asm__ __volatile__("1:\n\t" "ldx [%2], %%g7\n\t" "and %%g7, %1, %%g1\n\t" "or %%g1, %0, %%g1\n\t" "casx [%2], %%g7, %%g1\n\t" "cmp %%g7, %%g1\n\t" "membar #StoreLoad | #StoreStore\n\t" "bne,pn %%xcc, 1b\n\t" " nop" : /* no outputs */ : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags) : "g1", "g7"); } static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu) { unsigned long mask = (1UL << PG_dcache_dirty); __asm__ __volatile__("! test_and_clear_dcache_dirty\n" "1:\n\t" "ldx [%2], %%g7\n\t" "srlx %%g7, %4, %%g1\n\t" "and %%g1, %3, %%g1\n\t" "cmp %%g1, %0\n\t" "bne,pn %%icc, 2f\n\t" " andn %%g7, %1, %%g1\n\t" "casx [%2], %%g7, %%g1\n\t" "cmp %%g7, %%g1\n\t" "membar #StoreLoad | #StoreStore\n\t" "bne,pn %%xcc, 1b\n\t" " nop\n" "2:" : /* no outputs */ : "r" (cpu), "r" (mask), "r" (&page->flags), "i" (PG_dcache_cpu_mask), "i" (PG_dcache_cpu_shift) : "g1", "g7"); } static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte) { unsigned long tsb_addr = (unsigned long) ent; if (tlb_type == cheetah_plus || tlb_type == hypervisor) tsb_addr = __pa(tsb_addr); __tsb_insert(tsb_addr, tag, pte); } unsigned long _PAGE_ALL_SZ_BITS __read_mostly; unsigned long _PAGE_SZBITS __read_mostly; void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct mm_struct *mm; struct tsb *tsb; unsigned long tag, flags; unsigned long tsb_index, tsb_hash_shift; if (tlb_type != hypervisor) { unsigned long pfn = pte_pfn(pte); unsigned long pg_flags; struct page *page; if (pfn_valid(pfn) && (page = pfn_to_page(pfn), 
page_mapping(page)) && ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { int cpu = ((pg_flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask); int this_cpu = get_cpu(); /* This is just to optimize away some function calls * in the SMP case. */ if (cpu == this_cpu) flush_dcache_page_impl(page); else smp_flush_dcache_page_impl(page, cpu); clear_dcache_dirty_cpu(page, cpu); put_cpu(); } } mm = vma->vm_mm; tsb_index = MM_TSB_BASE; tsb_hash_shift = PAGE_SHIFT; spin_lock_irqsave(&mm->context.lock, flags); #ifdef CONFIG_HUGETLB_PAGE if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) { if ((tlb_type == hypervisor && (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) || (tlb_type != hypervisor && (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) { tsb_index = MM_TSB_HUGE; tsb_hash_shift = HPAGE_SHIFT; } } #endif tsb = mm->context.tsb_block[tsb_index].tsb; tsb += ((address >> tsb_hash_shift) & (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); tag = (address >> 22UL); tsb_insert(tsb, tag, pte_val(pte)); spin_unlock_irqrestore(&mm->context.lock, flags); } void flush_dcache_page(struct page *page) { struct address_space *mapping; int this_cpu; if (tlb_type == hypervisor) return; /* Do not bother with the expensive D-cache flush if it * is merely the zero page. The 'bigcore' testcase in GDB * causes this case to run millions of times. */ if (page == ZERO_PAGE(0)) return; this_cpu = get_cpu(); mapping = page_mapping(page); if (mapping && !mapping_mapped(mapping)) { int dirty = test_bit(PG_dcache_dirty, &page->flags); if (dirty) { int dirty_cpu = dcache_dirty_cpu(page); if (dirty_cpu == this_cpu) goto out; smp_flush_dcache_page_impl(page, dirty_cpu); } set_dcache_dirty(page, this_cpu); } else { /* We could delay the flush for the !page_mapping * case too. But that case is for exec env/arg * pages and those are %99 certainly going to get * faulted into the tlb (and thus flushed) anyways. 
 */
		flush_dcache_page_impl(page);
	}
out:
	put_cpu();
}

/* Flush the I-cache for a kernel virtual address range.  Only the
 * Spitfire family needs an explicit flush; the comment below notes
 * that Cheetah and Hypervisor (sun4v) cpus have a coherent I-cache.
 */
void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
			__flush_icache_page(__get_phys(kaddr));
	}
}

/* Print a summary of free memory, swap, and page counts to the console. */
void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", num_physpages);
	printk("%d free pages\n", nr_free_pages());
}

/* Report the detected MMU type (and, when enabled, D-cache flush
 * statistics) through the given seq_file.
 */
void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

/* One OBP virtual-to-physical translation entry. */
struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
*/ static inline int in_obp_range(unsigned long vaddr) { return (vaddr >= LOW_OBP_ADDRESS && vaddr < HI_OBP_ADDRESS); } static int cmp_ptrans(const void *a, const void *b) { const struct linux_prom_translation *x = a, *y = b; if (x->virt > y->virt) return 1; if (x->virt < y->virt) return -1; return 0; } /* Read OBP translations property into 'prom_trans[]'. */ static void __init read_obp_translations(void) { int n, node, ents, first, last, i; node = prom_finddevice("/virtual-memory"); n = prom_getproplen(node, "translations"); if (unlikely(n == 0 || n == -1)) { prom_printf("prom_mappings: Couldn't get size.\n"); prom_halt(); } if (unlikely(n > sizeof(prom_trans))) { prom_printf("prom_mappings: Size %Zd is too big.\n", n); prom_halt(); } if ((n = prom_getproperty(node, "translations", (char *)&prom_trans[0], sizeof(prom_trans))) == -1) { prom_printf("prom_mappings: Couldn't get property.\n"); prom_halt(); } n = n / sizeof(struct linux_prom_translation); ents = n; sort(prom_trans, ents, sizeof(struct linux_prom_translation), cmp_ptrans, NULL); /* Now kick out all the non-OBP entries. */ for (i = 0; i < ents; i++) { if (in_obp_range(prom_trans[i].virt)) break; } first = i; for (; i < ents; i++) { if (!in_obp_range(prom_trans[i].virt)) break; } last = i; for (i = 0; i < (last - first); i++) { struct linux_prom_translation *src = &prom_trans[i + first]; struct linux_prom_translation *dest = &prom_trans[i]; *dest = *src; } for (; i < ents; i++) { struct linux_prom_translation *dest = &prom_trans[i]; dest->virt = dest->size = dest->data = 0x0UL; } prom_trans_ents = last - first; if (tlb_type == spitfire) { /* Clear diag TTE bits. 
*/ for (i = 0; i < prom_trans_ents; i++) prom_trans[i].data &= ~0x0003fe0000000000UL; } } static void __init hypervisor_tlb_lock(unsigned long vaddr, unsigned long pte, unsigned long mmu) { register unsigned long func asm("%o5"); register unsigned long arg0 asm("%o0"); register unsigned long arg1 asm("%o1"); register unsigned long arg2 asm("%o2"); register unsigned long arg3 asm("%o3"); func = HV_FAST_MMU_MAP_PERM_ADDR; arg0 = vaddr; arg1 = 0; arg2 = pte; arg3 = mmu; __asm__ __volatile__("ta 0x80" : "=&r" (func), "=&r" (arg0), "=&r" (arg1), "=&r" (arg2), "=&r" (arg3) : "0" (func), "1" (arg0), "2" (arg1), "3" (arg2), "4" (arg3)); if (arg0 != 0) { prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: " "errors with %lx\n", vaddr, 0, pte, mmu, arg0); prom_halt(); } } static unsigned long kern_large_tte(unsigned long paddr); static void __init remap_kernel(void) { unsigned long phys_page, tte_vaddr, tte_data; int tlb_ent = sparc64_highest_locked_tlbent(); tte_vaddr = (unsigned long) KERNBASE; phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; tte_data = kern_large_tte(phys_page); kern_locked_tte_data = tte_data; /* Now lock us into the TLBs via Hypervisor or OBP. 
*/ if (tlb_type == hypervisor) { hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); if (bigkernel) { tte_vaddr += 0x400000; tte_data += 0x400000; hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); } } else { prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); prom_itlb_load(tlb_ent, tte_data, tte_vaddr); if (bigkernel) { tlb_ent -= 1; prom_dtlb_load(tlb_ent, tte_data + 0x400000, tte_vaddr + 0x400000); prom_itlb_load(tlb_ent, tte_data + 0x400000, tte_vaddr + 0x400000); } sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; } if (tlb_type == cheetah_plus) { sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | CTX_CHEETAH_PLUS_NUC); sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC; sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0; } } static void __init inherit_prom_mappings(void) { read_obp_translations(); /* Now fixup OBP's idea about where we really are mapped. */ prom_printf("Remapping the kernel... "); remap_kernel(); prom_printf("done.\n"); } void prom_world(int enter) { if (!enter) set_fs((mm_segment_t) { get_thread_current_ds() }); __asm__ __volatile__("flushw"); } #ifdef DCACHE_ALIASING_POSSIBLE void __flush_dcache_range(unsigned long start, unsigned long end) { unsigned long va; if (tlb_type == spitfire) { int n = 0; for (va = start; va < end; va += 32) { spitfire_put_dcache_tag(va & 0x3fe0, 0x0); if (++n >= 512) break; } } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { start = __pa(start); end = __pa(end); for (va = start; va < end; va += 32) __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (va), "i" (ASI_DCACHE_INVALIDATE)); } } #endif /* DCACHE_ALIASING_POSSIBLE */ /* Caller does TLB context flushing on local CPU if necessary. * The caller also ensures that CTX_VALID(mm->context) is false. 
* * We must be careful about boundary cases so that we never * let the user have CTX 0 (nucleus) or we ever use a CTX * version of zero (and thus NO_CONTEXT would not be caught * by version mis-match tests in mmu_context.h). * * Always invoked with interrupts disabled. */ void get_new_mmu_context(struct mm_struct *mm) { unsigned long ctx, new_ctx; unsigned long orig_pgsz_bits; unsigned long flags; int new_version; spin_lock_irqsave(&ctx_alloc_lock, flags); orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); ctx = (tlb_context_cache + 1) & CTX_NR_MASK; new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); new_version = 0; if (new_ctx >= (1 << CTX_NR_BITS)) { new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); if (new_ctx >= ctx) { int i; new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION; if (new_ctx == 1) new_ctx = CTX_FIRST_VERSION; /* Don't call memset, for 16 entries that's just * plain silly... */ mmu_context_bmap[0] = 3; mmu_context_bmap[1] = 0; mmu_context_bmap[2] = 0; mmu_context_bmap[3] = 0; for (i = 4; i < CTX_BMAP_SLOTS; i += 4) { mmu_context_bmap[i + 0] = 0; mmu_context_bmap[i + 1] = 0; mmu_context_bmap[i + 2] = 0; mmu_context_bmap[i + 3] = 0; } new_version = 1; goto out; } } mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); out: tlb_context_cache = new_ctx; mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; spin_unlock_irqrestore(&ctx_alloc_lock, flags); if (unlikely(new_version)) smp_new_mmu_context_version(); } void sparc_ultra_dump_itlb(void) { int slot; if (tlb_type == spitfire) { printk ("Contents of itlb: "); for (slot = 0; slot < 14; slot++) printk (" "); printk ("%2x:%016lx,%016lx\n", 0, spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0)); for (slot = 1; slot < 64; slot+=3) { printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", slot, spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot), slot+1, 
spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1), slot+2, spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2)); } } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { printk ("Contents of itlb0:\n"); for (slot = 0; slot < 16; slot+=2) { printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", slot, cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot), slot+1, cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1)); } printk ("Contents of itlb2:\n"); for (slot = 0; slot < 128; slot+=2) { printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", slot, cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot), slot+1, cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1)); } } } void sparc_ultra_dump_dtlb(void) { int slot; if (tlb_type == spitfire) { printk ("Contents of dtlb: "); for (slot = 0; slot < 14; slot++) printk (" "); printk ("%2x:%016lx,%016lx\n", 0, spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0)); for (slot = 1; slot < 64; slot+=3) { printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", slot, spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot), slot+1, spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1), slot+2, spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2)); } } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { printk ("Contents of dtlb0:\n"); for (slot = 0; slot < 16; slot+=2) { printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", slot, cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot), slot+1, cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1)); } printk ("Contents of dtlb2:\n"); for (slot = 0; slot < 512; slot+=2) { printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", slot, cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2), slot+1, cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2)); } if (tlb_type == cheetah_plus) { printk ("Contents of dtlb3:\n"); for (slot = 0; slot < 512; slot+=2) { printk ("%2x:%016lx,%016lx 
%2x:%016lx,%016lx\n", slot, cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3), slot+1, cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3)); } } } } extern unsigned long cmdline_memory_size; /* Find a free area for the bootmem map, avoiding the kernel image * and the initial ramdisk. */ static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn, unsigned long end_pfn) { unsigned long avoid_start, avoid_end, bootmap_size; int i; bootmap_size = ((end_pfn - start_pfn) + 7) / 8; bootmap_size = ALIGN(bootmap_size, sizeof(long)); avoid_start = avoid_end = 0; #ifdef CONFIG_BLK_DEV_INITRD avoid_start = initrd_start; avoid_end = PAGE_ALIGN(initrd_end); #endif #ifdef CONFIG_DEBUG_BOOTMEM prom_printf("choose_bootmap_pfn: kern[%lx:%lx] avoid[%lx:%lx]\n", kern_base, PAGE_ALIGN(kern_base + kern_size), avoid_start, avoid_end); #endif for (i = 0; i < pavail_ents; i++) { unsigned long start, end; start = pavail[i].phys_addr; end = start + pavail[i].reg_size; while (start < end) { if (start >= kern_base && start < PAGE_ALIGN(kern_base + kern_size)) { start = PAGE_ALIGN(kern_base + kern_size); continue; } if (start >= avoid_start && start < avoid_end) { start = avoid_end; continue; } if ((end - start) < bootmap_size) break; if (start < kern_base && (start + bootmap_size) > kern_base) { start = PAGE_ALIGN(kern_base + kern_size); continue; } if (start < avoid_start && (start + bootmap_size) > avoid_start) { start = avoid_end; continue; } /* OK, it doesn't overlap anything, use it. 
*/ #ifdef CONFIG_DEBUG_BOOTMEM prom_printf("choose_bootmap_pfn: Using %lx [%lx]\n", start >> PAGE_SHIFT, start); #endif return start >> PAGE_SHIFT; } } prom_printf("Cannot find free area for bootmap, aborting.\n"); prom_halt(); } static unsigned long __init bootmem_init(unsigned long *pages_avail, unsigned long phys_base) { unsigned long bootmap_size, end_pfn; unsigned long end_of_phys_memory = 0UL; unsigned long bootmap_pfn, bytes_avail, size; int i; #ifdef CONFIG_DEBUG_BOOTMEM prom_printf("bootmem_init: Scan pavail, "); #endif bytes_avail = 0UL; for (i = 0; i < pavail_ents; i++) { end_of_phys_memory = pavail[i].phys_addr + pavail[i].reg_size; bytes_avail += pavail[i].reg_size; if (cmdline_memory_size) { if (bytes_avail > cmdline_memory_size) { unsigned long slack = bytes_avail - cmdline_memory_size; bytes_avail -= slack; end_of_phys_memory -= slack; pavail[i].reg_size -= slack; if ((long)pavail[i].reg_size <= 0L) { pavail[i].phys_addr = 0xdeadbeefUL; pavail[i].reg_size = 0UL; pavail_ents = i; } else { pavail[i+1].reg_size = 0Ul; pavail[i+1].phys_addr = 0xdeadbeefUL; pavail_ents = i + 1; } break; } } } *pages_avail = bytes_avail >> PAGE_SHIFT; end_pfn = end_of_phys_memory >> PAGE_SHIFT; #ifdef CONFIG_BLK_DEV_INITRD /* Now have to check initial ramdisk, so that bootmap does not overwrite it */ if (sparc_ramdisk_image || sparc_ramdisk_image64) { unsigned long ramdisk_image = sparc_ramdisk_image ? sparc_ramdisk_image : sparc_ramdisk_image64; ramdisk_image -= KERNBASE; initrd_start = ramdisk_image + phys_base; initrd_end = initrd_start + sparc_ramdisk_size; if (initrd_end > end_of_phys_memory) { printk(KERN_CRIT "initrd extends beyond end of memory " "(0x%016lx > 0x%016lx)\ndisabling initrd\n", initrd_end, end_of_phys_memory); initrd_start = 0; initrd_end = 0; } } #endif /* Initialize the boot-time allocator. 
 */
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
		    min_low_pfn, bootmap_pfn, max_low_pfn);
#endif
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn,
					 min_low_pfn, end_pfn);

	/* Now register the available physical memory with the
	 * allocator.
	 */
	for (i = 0; i < pavail_ents; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
			    i, pavail[i].phys_addr, pavail[i].reg_size);
#endif
		free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		size = initrd_end - initrd_start;

		/* Reserve the initrd image area. */
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
			    initrd_start, initrd_end);
#endif
		reserve_bootmem(initrd_start, size, BOOTMEM_DEFAULT);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		/* Switch initrd pointers to their kernel virtual aliases. */
		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n",
		    kern_base, kern_size);
#endif
	reserve_bootmem(kern_base, kern_size, BOOTMEM_DEFAULT);
	*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;

	/* Reserve the bootmem map.  We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
*/ size = bootmap_size; #ifdef CONFIG_DEBUG_BOOTMEM prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n", (bootmap_pfn << PAGE_SHIFT), size); #endif reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT); *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; for (i = 0; i < pavail_ents; i++) { unsigned long start_pfn, end_pfn; start_pfn = pavail[i].phys_addr >> PAGE_SHIFT; end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT)); #ifdef CONFIG_DEBUG_BOOTMEM prom_printf("memory_present(0, %lx, %lx)\n", start_pfn, end_pfn); #endif memory_present(0, start_pfn, end_pfn); } sparse_init(); return end_pfn; } static struct linux_prom64_registers pall[MAX_BANKS] __initdata; static int pall_ents __initdata; #ifdef CONFIG_DEBUG_PAGEALLOC static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) { unsigned long vstart = PAGE_OFFSET + pstart; unsigned long vend = PAGE_OFFSET + pend; unsigned long alloc_bytes = 0UL; if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n", vstart, vend); prom_halt(); } while (vstart < vend) { unsigned long this_end, paddr = __pa(vstart); pgd_t *pgd = pgd_offset_k(vstart); pud_t *pud; pmd_t *pmd; pte_t *pte; pud = pud_offset(pgd, vstart); if (pud_none(*pud)) { pmd_t *new; new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); alloc_bytes += PAGE_SIZE; pud_populate(&init_mm, pud, new); } pmd = pmd_offset(pud, vstart); if (!pmd_present(*pmd)) { pte_t *new; new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); alloc_bytes += PAGE_SIZE; pmd_populate_kernel(&init_mm, pmd, new); } pte = pte_offset_kernel(pmd, vstart); this_end = (vstart + PMD_SIZE) & PMD_MASK; if (this_end > vend) this_end = vend; while (vstart < this_end) { pte_val(*pte) = (paddr | pgprot_val(prot)); vstart += PAGE_SIZE; paddr += PAGE_SIZE; pte++; } } return alloc_bytes; } extern unsigned int kvmap_linear_patch[1]; #endif /* CONFIG_DEBUG_PAGEALLOC */ static void __init 
mark_kpte_bitmap(unsigned long start, unsigned long end) { const unsigned long shift_256MB = 28; const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL); const unsigned long size_256MB = (1UL << shift_256MB); while (start < end) { long remains; remains = end - start; if (remains < size_256MB) break; if (start & mask_256MB) { start = (start + size_256MB) & ~mask_256MB; continue; } while (remains >= size_256MB) { unsigned long index = start >> shift_256MB; __set_bit(index, kpte_linear_bitmap); start += size_256MB; remains -= size_256MB; } } } static void __init kernel_physical_mapping_init(void) { unsigned long i; #ifdef CONFIG_DEBUG_PAGEALLOC unsigned long mem_alloced = 0UL; #endif read_obp_memory("reg", &pall[0], &pall_ents); for (i = 0; i < pall_ents; i++) { unsigned long phys_start, phys_end; phys_start = pall[i].phys_addr; phys_end = phys_start + pall[i].reg_size; mark_kpte_bitmap(phys_start, phys_end); #ifdef CONFIG_DEBUG_PAGEALLOC mem_alloced += kernel_map_range(phys_start, phys_end, PAGE_KERNEL); #endif } #ifdef CONFIG_DEBUG_PAGEALLOC printk("Allocated %ld bytes for kernel page tables.\n", mem_alloced); kvmap_linear_patch[0] = 0x01000000; /* nop */ flushi(&kvmap_linear_patch[0]); __flush_tlb_all(); #endif } #ifdef CONFIG_DEBUG_PAGEALLOC void kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); kernel_map_range(phys_start, phys_end, (enable ? PAGE_KERNEL : __pgprot(0))); flush_tsb_kernel_range(PAGE_OFFSET + phys_start, PAGE_OFFSET + phys_end); /* we should perform an IPI and flush all tlbs, * but that can deadlock->flush only current cpu. 
*/ __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, PAGE_OFFSET + phys_end); } #endif unsigned long __init find_ecache_flush_span(unsigned long size) { int i; for (i = 0; i < pavail_ents; i++) { if (pavail[i].reg_size >= size) return pavail[i].phys_addr; } return ~0UL; } static void __init tsb_phys_patch(void) { struct tsb_ldquad_phys_patch_entry *pquad; struct tsb_phys_patch_entry *p; pquad = &__tsb_ldquad_phys_patch; while (pquad < &__tsb_ldquad_phys_patch_end) { unsigned long addr = pquad->addr; if (tlb_type == hypervisor) *(unsigned int *) addr = pquad->sun4v_insn; else *(unsigned int *) addr = pquad->sun4u_insn; wmb(); __asm__ __volatile__("flush %0" : /* no outputs */ : "r" (addr)); pquad++; } p = &__tsb_phys_patch; while (p < &__tsb_phys_patch_end) { unsigned long addr = p->addr; *(unsigned int *) addr = p->insn; wmb(); __asm__ __volatile__("flush %0" : /* no outputs */ : "r" (addr)); p++; } } /* Don't mark as init, we give this to the Hypervisor. */ static struct hv_tsb_descr ktsb_descr[2]; extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; static void __init sun4v_ktsb_init(void) { unsigned long ktsb_pa; /* First KTSB for PAGE_SIZE mappings. */ ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); switch (PAGE_SIZE) { case 8 * 1024: default: ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K; ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K; break; case 64 * 1024: ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K; ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K; break; case 512 * 1024: ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K; ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K; break; case 4 * 1024 * 1024: ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; break; }; ktsb_descr[0].assoc = 1; ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; ktsb_descr[0].ctx_idx = 0; ktsb_descr[0].tsb_base = ktsb_pa; ktsb_descr[0].resv = 0; /* Second KTSB for 4MB/256MB mappings. 
*/ ktsb_pa = (kern_base + ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB | HV_PGSZ_MASK_256MB); ktsb_descr[1].assoc = 1; ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; ktsb_descr[1].ctx_idx = 0; ktsb_descr[1].tsb_base = ktsb_pa; ktsb_descr[1].resv = 0; } void __cpuinit sun4v_ktsb_register(void) { register unsigned long func asm("%o5"); register unsigned long arg0 asm("%o0"); register unsigned long arg1 asm("%o1"); unsigned long pa; pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); func = HV_FAST_MMU_TSB_CTX0; arg0 = 2; arg1 = pa; __asm__ __volatile__("ta %6" : "=&r" (func), "=&r" (arg0), "=&r" (arg1) : "0" (func), "1" (arg0), "2" (arg1), "i" (HV_FAST_TRAP)); } /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); extern void sun4v_patch_tlb_handlers(void); static unsigned long last_valid_pfn; pgd_t swapper_pg_dir[2048]; static void sun4u_pgprot_init(void); static void sun4v_pgprot_init(void); void __init paging_init(void) { unsigned long end_pfn, pages_avail, shift, phys_base; unsigned long real_end, i; kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; /* Invalidate both kernel TSBs. */ memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); if (tlb_type == hypervisor) sun4v_pgprot_init(); else sun4u_pgprot_init(); if (tlb_type == cheetah_plus || tlb_type == hypervisor) tsb_phys_patch(); if (tlb_type == hypervisor) { sun4v_patch_tlb_handlers(); sun4v_ktsb_init(); } /* Find available physical memory... 
*/ read_obp_memory("available", &pavail[0], &pavail_ents); phys_base = 0xffffffffffffffffUL; for (i = 0; i < pavail_ents; i++) phys_base = min(phys_base, pavail[i].phys_addr); set_bit(0, mmu_context_bmap); shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); real_end = (unsigned long)_end; if ((real_end > ((unsigned long)KERNBASE + 0x400000))) bigkernel = 1; if ((real_end > ((unsigned long)KERNBASE + 0x800000))) { prom_printf("paging_init: Kernel > 8MB, too large.\n"); prom_halt(); } /* Set kernel pgd to upper alias so physical page computations * work. */ init_mm.pgd += ((shift) / (sizeof(pgd_t))); memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); /* Now can init the kernel/bad page tables. */ pud_set(pud_offset(&swapper_pg_dir[0], 0), swapper_low_pmd_dir + (shift / sizeof(pgd_t))); inherit_prom_mappings(); /* Ok, we can use our TLB miss and window trap handlers safely. */ setup_tba(); __flush_tlb_all(); if (tlb_type == hypervisor) sun4v_ktsb_register(); /* Setup bootmem... */ pages_avail = 0; last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base); max_mapnr = last_valid_pfn; kernel_physical_mapping_init(); prom_build_devicetree(); { unsigned long zones_size[MAX_NR_ZONES]; unsigned long zholes_size[MAX_NR_ZONES]; int znum; for (znum = 0; znum < MAX_NR_ZONES; znum++) zones_size[znum] = zholes_size[znum] = 0; zones_size[ZONE_DMA] = end_pfn; zholes_size[ZONE_DMA] = end_pfn - pages_avail; free_area_init_node(0, &contig_page_data, zones_size, __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size); } device_scan(); } static void __init taint_real_pages(void) { int i; read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents); /* Find changes discovered in the physmem available rescan and * reserve the lost portions in the bootmem maps. 
*/ for (i = 0; i < pavail_ents; i++) { unsigned long old_start, old_end; old_start = pavail[i].phys_addr; old_end = old_start + pavail[i].reg_size; while (old_start < old_end) { int n; for (n = 0; n < pavail_rescan_ents; n++) { unsigned long new_start, new_end; new_start = pavail_rescan[n].phys_addr; new_end = new_start + pavail_rescan[n].reg_size; if (new_start <= old_start && new_end >= (old_start + PAGE_SIZE)) { set_bit(old_start >> 22, sparc64_valid_addr_bitmap); goto do_next_page; } } reserve_bootmem(old_start, PAGE_SIZE, BOOTMEM_DEFAULT); do_next_page: old_start += PAGE_SIZE; } } } int __init page_in_phys_avail(unsigned long paddr) { int i; paddr &= PAGE_MASK; for (i = 0; i < pavail_rescan_ents; i++) { unsigned long start, end; start = pavail_rescan[i].phys_addr; end = start + pavail_rescan[i].reg_size; if (paddr >= start && paddr < end) return 1; } if (paddr >= kern_base && paddr < (kern_base + kern_size)) return 1; #ifdef CONFIG_BLK_DEV_INITRD if (paddr >= __pa(initrd_start) && paddr < __pa(PAGE_ALIGN(initrd_end))) return 1; #endif return 0; } void __init mem_init(void) { unsigned long codepages, datapages, initpages; unsigned long addr, last; int i; i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6); i += 1; sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3); if (sparc64_valid_addr_bitmap == NULL) { prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); prom_halt(); } memset(sparc64_valid_addr_bitmap, 0, i << 3); addr = PAGE_OFFSET + kern_base; last = PAGE_ALIGN(kern_size) + addr; while (addr < last) { set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap); addr += PAGE_SIZE; } taint_real_pages(); high_memory = __va(last_valid_pfn << PAGE_SHIFT); #ifdef CONFIG_DEBUG_BOOTMEM prom_printf("mem_init: Calling free_all_bootmem().\n"); #endif totalram_pages = num_physpages = free_all_bootmem() - 1; /* * Set up the zero page, mark it reserved, so that page count * is not manipulated when freeing the page from user ptes. 
*/ mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0); if (mem_map_zero == NULL) { prom_printf("paging_init: Cannot alloc zero page.\n"); prom_halt(); } SetPageReserved(mem_map_zero); codepages = (((unsigned long) _etext) - ((unsigned long) _start)); codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; datapages = (((unsigned long) _edata) - ((unsigned long) _etext)); datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin)); initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n", nr_free_pages() << (PAGE_SHIFT-10), codepages << (PAGE_SHIFT-10), datapages << (PAGE_SHIFT-10), initpages << (PAGE_SHIFT-10), PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT)); if (tlb_type == cheetah || tlb_type == cheetah_plus) cheetah_ecache_flush_init(); } void free_initmem(void) { unsigned long addr, initend; /* * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes. 
*/ addr = PAGE_ALIGN((unsigned long)(__init_begin)); initend = (unsigned long)(__init_end) & PAGE_MASK; for (; addr < initend; addr += PAGE_SIZE) { unsigned long page; struct page *p; page = (addr + ((unsigned long) __va(kern_base)) - ((unsigned long) KERNBASE)); memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); p = virt_to_page(page); ClearPageReserved(p); init_page_count(p); __free_page(p); num_physpages++; totalram_pages++; } } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { if (start < end) printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); for (; start < end; start += PAGE_SIZE) { struct page *p = virt_to_page(start); ClearPageReserved(p); init_page_count(p); __free_page(p); num_physpages++; totalram_pages++; } } #endif #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) pgprot_t PAGE_KERNEL __read_mostly; EXPORT_SYMBOL(PAGE_KERNEL); pgprot_t PAGE_KERNEL_LOCKED __read_mostly; pgprot_t PAGE_COPY __read_mostly; pgprot_t PAGE_SHARED __read_mostly; EXPORT_SYMBOL(PAGE_SHARED); pgprot_t PAGE_EXEC __read_mostly; unsigned long pg_iobits __read_mostly; unsigned long _PAGE_IE __read_mostly; EXPORT_SYMBOL(_PAGE_IE); unsigned long _PAGE_E __read_mostly; EXPORT_SYMBOL(_PAGE_E); unsigned long _PAGE_CACHE __read_mostly; EXPORT_SYMBOL(_PAGE_CACHE); static void prot_init_common(unsigned long page_none, unsigned long page_shared, unsigned long page_copy, unsigned long page_readonly, unsigned long page_exec_bit) { PAGE_COPY = __pgprot(page_copy); PAGE_SHARED = __pgprot(page_shared); protection_map[0x0] = __pgprot(page_none); protection_map[0x1] = __pgprot(page_readonly & 
~page_exec_bit); protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); protection_map[0x4] = __pgprot(page_readonly); protection_map[0x5] = __pgprot(page_readonly); protection_map[0x6] = __pgprot(page_copy); protection_map[0x7] = __pgprot(page_copy); protection_map[0x8] = __pgprot(page_none); protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); protection_map[0xc] = __pgprot(page_readonly); protection_map[0xd] = __pgprot(page_readonly); protection_map[0xe] = __pgprot(page_shared); protection_map[0xf] = __pgprot(page_shared); } static void __init sun4u_pgprot_init(void) { unsigned long page_none, page_shared, page_copy, page_readonly; unsigned long page_exec_bit; PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | _PAGE_CACHE_4U | _PAGE_P_4U | __ACCESS_BITS_4U | __DIRTY_BITS_4U | _PAGE_EXEC_4U); PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | _PAGE_CACHE_4U | _PAGE_P_4U | __ACCESS_BITS_4U | __DIRTY_BITS_4U | _PAGE_EXEC_4U | _PAGE_L_4U); PAGE_EXEC = __pgprot(_PAGE_EXEC_4U); _PAGE_IE = _PAGE_IE_4U; _PAGE_E = _PAGE_E_4U; _PAGE_CACHE = _PAGE_CACHE_4U; pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | __ACCESS_BITS_4U | _PAGE_E_4U); kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ 0xfffff80000000000; kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | _PAGE_W_4U); /* XXX Should use 256MB on Panther. 
XXX */ kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; _PAGE_SZBITS = _PAGE_SZBITS_4U; _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | __ACCESS_BITS_4U | _PAGE_EXEC_4U); page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | __ACCESS_BITS_4U | _PAGE_EXEC_4U); page_exec_bit = _PAGE_EXEC_4U; prot_init_common(page_none, page_shared, page_copy, page_readonly, page_exec_bit); } static void __init sun4v_pgprot_init(void) { unsigned long page_none, page_shared, page_copy, page_readonly; unsigned long page_exec_bit; PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | _PAGE_CACHE_4V | _PAGE_P_4V | __ACCESS_BITS_4V | __DIRTY_BITS_4V | _PAGE_EXEC_4V); PAGE_KERNEL_LOCKED = PAGE_KERNEL; PAGE_EXEC = __pgprot(_PAGE_EXEC_4V); _PAGE_IE = _PAGE_IE_4V; _PAGE_E = _PAGE_E_4V; _PAGE_CACHE = _PAGE_CACHE_4V; kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ 0xfffff80000000000; kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | _PAGE_W_4V); kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ 0xfffff80000000000; kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | _PAGE_W_4V); pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | __ACCESS_BITS_4V | _PAGE_E_4V); _PAGE_SZBITS = _PAGE_SZBITS_4V; _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | 
__ACCESS_BITS_4V | _PAGE_EXEC_4V); page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | __ACCESS_BITS_4V | _PAGE_EXEC_4V); page_exec_bit = _PAGE_EXEC_4V; prot_init_common(page_none, page_shared, page_copy, page_readonly, page_exec_bit); } unsigned long pte_sz_bits(unsigned long sz) { if (tlb_type == hypervisor) { switch (sz) { case 8 * 1024: default: return _PAGE_SZ8K_4V; case 64 * 1024: return _PAGE_SZ64K_4V; case 512 * 1024: return _PAGE_SZ512K_4V; case 4 * 1024 * 1024: return _PAGE_SZ4MB_4V; }; } else { switch (sz) { case 8 * 1024: default: return _PAGE_SZ8K_4U; case 64 * 1024: return _PAGE_SZ64K_4U; case 512 * 1024: return _PAGE_SZ512K_4U; case 4 * 1024 * 1024: return _PAGE_SZ4MB_4U; }; } } pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) { pte_t pte; pte_val(pte) = page | pgprot_val(pgprot_noncached(prot)); pte_val(pte) |= (((unsigned long)space) << 32); pte_val(pte) |= pte_sz_bits(page_size); return pte; } static unsigned long kern_large_tte(unsigned long paddr) { unsigned long val; val = (_PAGE_VALID | _PAGE_SZ4MB_4U | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); if (tlb_type == hypervisor) val = (_PAGE_VALID | _PAGE_SZ4MB_4V | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | _PAGE_EXEC_4V | _PAGE_W_4V); return val | paddr; } /* * Translate PROM's mapping we capture at boot time into physical address. * The second parameter is only set from prom_callback() invocations. */ unsigned long prom_virt_to_phys(unsigned long promva, int *error) { unsigned long mask; int i; mask = _PAGE_PADDR_4U; if (tlb_type == hypervisor) mask = _PAGE_PADDR_4V; for (i = 0; i < prom_trans_ents; i++) { struct linux_prom_translation *p = &prom_trans[i]; if (promva >= p->virt && promva < (p->virt + p->size)) { unsigned long base = p->data & mask; if (error) *error = 0; return base + (promva & (8192 - 1)); } } if (error) *error = 1; return 0UL; } /* XXX We should kill off this ugly thing at so me point. 
XXX */ unsigned long sun4u_get_pte(unsigned long addr) { pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; unsigned long mask = _PAGE_PADDR_4U; if (tlb_type == hypervisor) mask = _PAGE_PADDR_4V; if (addr >= PAGE_OFFSET) return addr & mask; if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS)) return prom_virt_to_phys(addr, NULL); pgdp = pgd_offset_k(addr); pudp = pud_offset(pgdp, addr); pmdp = pmd_offset(pudp, addr); ptep = pte_offset_kernel(pmdp, addr); return pte_val(*ptep) & mask; } /* If not locked, zap it. */ void __flush_tlb_all(void) { unsigned long pstate; int i; __asm__ __volatile__("flushw\n\t" "rdpr %%pstate, %0\n\t" "wrpr %0, %1, %%pstate" : "=r" (pstate) : "i" (PSTATE_IE)); if (tlb_type == spitfire) { for (i = 0; i < 64; i++) { /* Spitfire Errata #32 workaround */ /* NOTE: Always runs on spitfire, so no * cheetah+ page size encodings. */ __asm__ __volatile__("stxa %0, [%1] %2\n\t" "flush %%g6" : /* No outputs */ : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); spitfire_put_dtlb_data(i, 0x0UL); } /* Spitfire Errata #32 workaround */ /* NOTE: Always runs on spitfire, so no * cheetah+ page size encodings. 
*/ __asm__ __volatile__("stxa %0, [%1] %2\n\t" "flush %%g6" : /* No outputs */ : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); spitfire_put_itlb_data(i, 0x0UL); } } } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { cheetah_flush_dtlb_all(); cheetah_flush_itlb_all(); } __asm__ __volatile__("wrpr %0, 0, %%pstate" : : "r" (pstate)); } #ifdef CONFIG_MEMORY_HOTPLUG void online_page(struct page *page) { ClearPageReserved(page); init_page_count(page); __free_page(page); totalram_pages++; num_physpages++; } int remove_memory(u64 start, u64 size) { return -EINVAL; } #endif /* CONFIG_MEMORY_HOTPLUG */
gpl-2.0
ysleu/RTL8685
uClinux-dist/freeswan/pluto/kernel_comm.c
2
10140
/* whack communicating routines * Copyright (C) 1997 Angelos D. Keromytis. * Copyright (C) 1998-2001 D. Hugh Redelmeier. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * RCSID $Id: kernel_comm.c,v 1.1.1.1 2003/08/18 05:39:35 kaohj Exp $ */ #include <stdio.h> #include <stddef.h> #include <string.h> #include <unistd.h> #include <errno.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/un.h> #include <netinet/in.h> #include <arpa/inet.h> #include <resolv.h> #include <arpa/nameser.h> /* missing from <resolv.h> on old systems */ #include <freeswan.h> #include "constants.h" #include "defs.h" #include "id.h" #include "x509.h" #include "connections.h" /* needs id.h */ #include "whack.h" /* needs connections.h */ #include "packet.h" #include "demux.h" /* needs packet.h */ #include "state.h" #include "ipsec_doi.h" /* needs demux.h and state.h */ #include "kernel.h" #include "kernel_comm.h" #include "log.h" #include "x509.h" #include "preshared.h" #include "adns.h" /* needs <resolv.h> */ #include "dnskey.h" /* needs preshared.h and adns.h */ #include "server.h" #include "kernel_alg.h" #include "ike_alg.h" #ifndef NO_DB_OPS_STATS #define NO_DB_CONTEXT #include "db_ops.h" #endif /* helper variables and function to decode strings from whack message */ static char *next_str , *str_roof; static bool unpack_str(char **p) { char *end = memchr(next_str, '\0', str_roof - next_str); if (end == NULL) { return FALSE; /* fishy: no end found */ } else { *p = next_str == end? 
NULL : next_str; next_str = end + 1; return TRUE; } } /* bits loading keys from asynchronous DNS */ struct key_add_continuation { struct adns_continuation ac; int whack_fd; }; static void key_add_ugh(const struct id *keyid, err_t ugh) { char name[IDTOA_BUF]; /* longer IDs will be truncated in message */ (void)idtoa(keyid, name, sizeof(name)); loglog(RC_NOKEY , "failure to fetch key for %s from DNS: %s", name, ugh); } static void key_add_continue(struct adns_continuation *ac, err_t ugh) { struct key_add_continuation *kc = (void *) ac; whack_log_fd = kc->whack_fd; if (ugh != NULL) { key_add_ugh(&ac->id, ugh); } else { remember_public_keys(&keys_from_dns); } close_any(whack_log_fd); } /* Handle a kernel request. Supposedly, there's a message in * the kernelsock socket. */ void whack_handle(int whackctlfd) { struct whack_message msg; struct sockaddr_un whackaddr; int whackaddrlen = sizeof(whackaddr); int whackfd = accept(whackctlfd, (struct sockaddr *)&whackaddr, &whackaddrlen); ssize_t n; if (whackfd < 0) { log_errno((e, "accept() failed in whack_handle()")); return; } n = read(whackfd, &msg, sizeof(msg)); if (n == -1) { log_errno((e, "read() failed in whack_handle()")); close(whackfd); return; } whack_log_fd = whackfd; /* sanity check message */ { err_t ugh = NULL; next_str = msg.string; str_roof = (char *)&msg + n; if (next_str > str_roof) { ugh = builddiag("truncated message from whack: got %d bytes; expected %d. Message ignored." , n, (int) sizeof(msg)); } else if (msg.magic != WHACK_MAGIC) { ugh = builddiag("message from whack has bad magic %d; should be %d; probably wrong version. 
Message ignored" , msg.magic, WHACK_MAGIC); } else if (!unpack_str(&msg.name) /* string 1 */ || !unpack_str(&msg.left.id) /* string 2 */ || !unpack_str(&msg.left.cert) /* string 3 */ || !unpack_str(&msg.left.updown) /* string 4 */ #ifdef VIRTUAL_IP || !unpack_str(&msg.left.virt) #endif || !unpack_str(&msg.right.id) /* string 5 */ || !unpack_str(&msg.right.cert) /* string 6 */ || !unpack_str(&msg.right.updown) /* string 7 */ #ifdef VIRTUAL_IP || !unpack_str(&msg.right.virt) #endif || !unpack_str(&msg.keyid) /* string 8 */ || !unpack_str(&msg.ike) /* string 9 */ || !unpack_str(&msg.esp) /* string 10 */ || !unpack_str(&msg.dnshostname) /* string 11 */ || str_roof - next_str != (ptrdiff_t)msg.keyval.len) /* check chunk */ { ugh = "message from whack contains bad string"; } else { msg.keyval.ptr = next_str; /* grab chunk */ } if (ugh != NULL) { loglog(RC_BADWHACKMESSAGE, "%s", ugh); whack_log_fd = NULL_FD; close(whackfd); return; } } if (msg.whack_options) { #ifdef DEBUG if (msg.name == NULL) { /* we do a two-step so that if either old or new would * cause the message to print, it will be printed. */ cur_debugging |= msg.debugging; DBG(DBG_CONTROL , DBG_log("base debugging = %s" , bitnamesof(debug_bit_names, msg.debugging))); cur_debugging = base_debugging = msg.debugging; } else if (!msg.whack_connection) { struct connection *c = con_by_name(msg.name, TRUE); if (c != NULL) { c->extra_debugging = msg.debugging; DBG(DBG_CONTROL , DBG_log("\"%s\" extra_debugging = %s" , c->name , bitnamesof(debug_bit_names, c->extra_debugging))); } } #endif } /* Deleting combined with adding a connection works as replace. * To make this more useful, in only this combination, * delete will silently ignore the lack of the connection. */ if (msg.whack_delete) { struct connection *c = con_by_name(msg.name, !msg.whack_connection); /* note: this is a "while" because road warrior * leads to multiple connections with the same name. 
*/ for (; c != NULL; c = con_by_name(msg.name, FALSE)) delete_connection(c); } if (msg.whack_deletestate) { struct state *st = state_with_serialno(msg.whack_deletestateno); if (st == NULL) { loglog(RC_UNKNOWN_NAME, "no state #%lu to delete" , msg.whack_deletestateno); } else { delete_state(st); } } if (msg.whack_connection) add_connection(&msg); /* process "listen" before any operation that could require it */ if (msg.whack_listen) { log("listening for IKE messages"); listening = TRUE; find_ifaces(); load_preshared_secrets(); } if (msg.whack_unlisten) { log("no longer listening for IKE messages"); listening = FALSE; } if (msg.whack_reread & REREAD_SECRETS) { load_preshared_secrets(); } if (msg.whack_reread & REREAD_MYCERT) { load_mycert(); } if (msg.whack_reread & REREAD_CACERTS) { load_cacerts(); } if (msg.whack_reread & REREAD_CRLS) { load_crls(); } if (msg.whack_list & LIST_PUBKEYS) { list_public_keys(msg.whack_utc); } if (msg.whack_list & LIST_CERTS) { list_certs(msg.whack_utc); } if (msg.whack_list & LIST_CACERTS) { list_cacerts(msg.whack_utc); } if (msg.whack_list & LIST_CRLS) { list_crls(msg.whack_utc); } if (msg.whack_key) { /* add a public key */ struct id keyid; err_t ugh = atoid(msg.keyid, &keyid); if (ugh != NULL) { loglog(RC_BADID, "bad --keyid \"%s\": %s", msg.keyid, ugh); } else { if (!msg.whack_addkey) delete_public_keys(&keyid, msg.pubkey_alg); if (msg.keyval.len == 0) { struct key_add_continuation *kc = alloc_thing(struct key_add_continuation , "key add continuation"); int wfd = dup_any(whackfd); kc->whack_fd = wfd; ugh = start_adns_query(&keyid , NULL , T_KEY , key_add_continue , &kc->ac); if (ugh != NULL) { key_add_ugh(&keyid, ugh); close_any(wfd); } } else { ugh = add_public_key(&keyid, DAL_LOCAL, msg.pubkey_alg , &msg.keyval, &pubkeys); if (ugh != NULL) loglog(RC_LOG_SERIOUS, "%s", ugh); } } } if (msg.whack_route) { if (!listening) whack_log(RC_DEAF, "need --listen before --route"); else { struct connection *c = con_by_name(msg.name, TRUE); if 
(c != NULL) { set_cur_connection(c); if (!oriented(*c)) whack_log(RC_ORIENT , "we have no ipsecN interface for either end of this connection"); else if (!trap_connection(c)) whack_log(RC_ROUTE, "could not route"); reset_cur_connection(); } } } if (msg.whack_unroute) { struct connection *c = con_by_name(msg.name, TRUE); if (c != NULL) { set_cur_connection(c); if (c->routing >= RT_ROUTED_TUNNEL) whack_log(RC_RTBUSY, "cannot unroute: route busy"); else unroute_connection(c); reset_cur_connection(); } } if (msg.whack_initiate) { if (!listening) whack_log(RC_DEAF, "need --listen before --initiate"); else initiate_connection(msg.name , msg.whack_async? NULL_FD : dup_any(whackfd)); } if (msg.whack_oppo_initiate) { if (!listening) whack_log(RC_DEAF, "need --listen before opportunistic initiation"); else initiate_opportunistic(&msg.oppo_my_client, &msg.oppo_peer_client , FALSE , msg.whack_async? NULL_FD : dup_any(whackfd)); } if (msg.whack_terminate) terminate_connection(msg.name); if (msg.whack_status) { show_ifaces_status(); whack_log(RC_COMMENT, BLANK_FORMAT); /* spacer */ #ifndef NO_KERNEL_ALG kernel_alg_show_status(); whack_log(RC_COMMENT, BLANK_FORMAT); /* spacer */ #endif #ifndef NO_IKE_ALG ike_alg_show_status(); whack_log(RC_COMMENT, BLANK_FORMAT); /* spacer */ #endif #ifndef NO_DB_OPS_STATS db_ops_show_status(); whack_log(RC_COMMENT, BLANK_FORMAT); /* spacer */ #endif show_connections_status(); whack_log(RC_COMMENT, BLANK_FORMAT); /* spacer */ show_states_status(); } if (msg.whack_shutdown) { log("shutting down"); exit_pluto(0); /* delete lock and leave, with 0 status */ } whack_log_fd = NULL_FD; close(whackfd); }
gpl-2.0
DirtyUnicorns/android_kernel_samsung_ks01lte
mm/mempolicy.c
258
65944
/* * Simple NUMA memory policy for the Linux kernel. * * Copyright 2003,2004 Andi Kleen, SuSE Labs. * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc. * Subject to the GNU Public License, version 2. * * NUMA policy allows the user to give hints in which node(s) memory should * be allocated. * * Support four policies per VMA and per process: * * The VMA policy has priority over the process policy for a page fault. * * interleave Allocate memory interleaved over a set of nodes, * with normal fallback if it fails. * For VMA based allocations this interleaves based on the * offset into the backing object or offset into the mapping * for anonymous memory. For process policy an process counter * is used. * * bind Only allocate memory on a specific set of nodes, * no fallback. * FIXME: memory is allocated starting with the first node * to the last. It would be better if bind would truly restrict * the allocation to memory nodes instead * * preferred Try a specific node first before normal fallback. * As a special case node -1 here means do the allocation * on the local CPU. This is normally identical to default, * but useful to set in a VMA when you have a non default * process policy. * * default Allocate on the local node first, or when on a VMA * use the process policy. This is what Linux always did * in a NUMA aware kernel and still does by, ahem, default. * * The process policy is applied for most non interrupt memory allocations * in that process' context. Interrupts ignore the policies and always * try to allocate on the local CPU. The VMA policy is only applied for memory * allocations for a VMA in the VM. * * Currently there are a few corner cases in swapping where the policy * is not applied, but the majority should be handled. When process policy * is used it is not remembered over swap outs/swap ins. * * Only the highest zone in the zone hierarchy gets policied. Allocations * requesting a lower zone just use default policy. 
This implies that * on systems with highmem kernel lowmem allocation don't get policied. * Same with GFP_DMA allocations. * * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between * all users and remembered even when nobody has memory mapped. */ /* Notebook: fix mmap readahead to honour policy and enable policy for any page cache object statistics for bigpages global policy for page cache? currently it uses process policy. Requires first item above. handle mremap for shared memory (currently ignored for the policy) grows down? make bind policy root only? It can trigger oom much faster and the kernel is not always grateful with that. */ #include <linux/mempolicy.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/hugetlb.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/nodemask.h> #include <linux/cpuset.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/export.h> #include <linux/nsproxy.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/compat.h> #include <linux/swap.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/migrate.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/ctype.h> #include <linux/mm_inline.h> #include <asm/tlbflush.h> #include <asm/uaccess.h> #include <linux/random.h> #include "internal.h" /* Internal flags */ #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ static struct kmem_cache *policy_cache; static struct kmem_cache *sn_cache; /* Highest zone. An specific allocation for a zone below that is not policied. 
*/ enum zone_type policy_zone = 0; /* * run-time system-wide default policy => local allocation */ static struct mempolicy default_policy = { .refcnt = ATOMIC_INIT(1), /* never free it */ .mode = MPOL_PREFERRED, .flags = MPOL_F_LOCAL, }; static const struct mempolicy_operations { int (*create)(struct mempolicy *pol, const nodemask_t *nodes); /* * If read-side task has no lock to protect task->mempolicy, write-side * task will rebind the task->mempolicy by two step. The first step is * setting all the newly nodes, and the second step is cleaning all the * disallowed nodes. In this way, we can avoid finding no node to alloc * page. * If we have a lock to protect task->mempolicy in read-side, we do * rebind directly. * * step: * MPOL_REBIND_ONCE - do rebind work at once * MPOL_REBIND_STEP1 - set all the newly nodes * MPOL_REBIND_STEP2 - clean all the disallowed nodes */ void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes, enum mpol_rebind_step step); } mpol_ops[MPOL_MAX]; /* Check that the nodemask contains at least one populated zone */ static int is_valid_nodemask(const nodemask_t *nodemask) { int nd, k; for_each_node_mask(nd, *nodemask) { struct zone *z; for (k = 0; k <= policy_zone; k++) { z = &NODE_DATA(nd)->node_zones[k]; if (z->present_pages > 0) return 1; } } return 0; } static inline int mpol_store_user_nodemask(const struct mempolicy *pol) { return pol->flags & MPOL_MODE_FLAGS; } static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig, const nodemask_t *rel) { nodemask_t tmp; nodes_fold(tmp, *orig, nodes_weight(*rel)); nodes_onto(*ret, tmp, *rel); } static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes) { if (nodes_empty(*nodes)) return -EINVAL; pol->v.nodes = *nodes; return 0; } static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) { if (!nodes) pol->flags |= MPOL_F_LOCAL; /* local allocation */ else if (nodes_empty(*nodes)) return -EINVAL; /* no allowed nodes */ else 
pol->v.preferred_node = first_node(*nodes); return 0; } static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes) { if (!is_valid_nodemask(nodes)) return -EINVAL; pol->v.nodes = *nodes; return 0; } /* * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if * any, for the new policy. mpol_new() has already validated the nodes * parameter with respect to the policy mode and flags. But, we need to * handle an empty nodemask with MPOL_PREFERRED here. * * Must be called holding task's alloc_lock to protect task's mems_allowed * and mempolicy. May also be called holding the mmap_semaphore for write. */ static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes, struct nodemask_scratch *nsc) { int ret; /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */ if (pol == NULL) return 0; /* Check N_HIGH_MEMORY */ nodes_and(nsc->mask1, cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]); VM_BUG_ON(!nodes); if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes)) nodes = NULL; /* explicit local allocation */ else { if (pol->flags & MPOL_F_RELATIVE_NODES) mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1); else nodes_and(nsc->mask2, *nodes, nsc->mask1); if (mpol_store_user_nodemask(pol)) pol->w.user_nodemask = *nodes; else pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed; } if (nodes) ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); else ret = mpol_ops[pol->mode].create(pol, NULL); return ret; } /* * This function just creates a new policy, does some check and simple * initialization. You must invoke mpol_set_nodemask() to set nodes. */ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, nodemask_t *nodes) { struct mempolicy *policy; pr_debug("setting mode %d flags %d nodes[0] %lx\n", mode, flags, nodes ? 
nodes_addr(*nodes)[0] : -1); if (mode == MPOL_DEFAULT) { if (nodes && !nodes_empty(*nodes)) return ERR_PTR(-EINVAL); return NULL; /* simply delete any existing policy */ } VM_BUG_ON(!nodes); /* * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). * All other modes require a valid pointer to a non-empty nodemask. */ if (mode == MPOL_PREFERRED) { if (nodes_empty(*nodes)) { if (((flags & MPOL_F_STATIC_NODES) || (flags & MPOL_F_RELATIVE_NODES))) return ERR_PTR(-EINVAL); } } else if (nodes_empty(*nodes)) return ERR_PTR(-EINVAL); policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!policy) return ERR_PTR(-ENOMEM); atomic_set(&policy->refcnt, 1); policy->mode = mode; policy->flags = flags; return policy; } /* Slow path of a mpol destructor. */ void __mpol_put(struct mempolicy *p) { if (!atomic_dec_and_test(&p->refcnt)) return; kmem_cache_free(policy_cache, p); } static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes, enum mpol_rebind_step step) { } /* * step: * MPOL_REBIND_ONCE - do rebind work at once * MPOL_REBIND_STEP1 - set all the newly nodes * MPOL_REBIND_STEP2 - clean all the disallowed nodes */ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes, enum mpol_rebind_step step) { nodemask_t tmp; if (pol->flags & MPOL_F_STATIC_NODES) nodes_and(tmp, pol->w.user_nodemask, *nodes); else if (pol->flags & MPOL_F_RELATIVE_NODES) mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); else { /* * if step == 1, we use ->w.cpuset_mems_allowed to cache the * result */ if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) { nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed, *nodes); pol->w.cpuset_mems_allowed = step ? 
tmp : *nodes; } else if (step == MPOL_REBIND_STEP2) { tmp = pol->w.cpuset_mems_allowed; pol->w.cpuset_mems_allowed = *nodes; } else BUG(); } if (nodes_empty(tmp)) tmp = *nodes; if (step == MPOL_REBIND_STEP1) nodes_or(pol->v.nodes, pol->v.nodes, tmp); else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2) pol->v.nodes = tmp; else BUG(); if (!node_isset(current->il_next, tmp)) { current->il_next = next_node(current->il_next, tmp); if (current->il_next >= MAX_NUMNODES) current->il_next = first_node(tmp); if (current->il_next >= MAX_NUMNODES) current->il_next = numa_node_id(); } } static void mpol_rebind_preferred(struct mempolicy *pol, const nodemask_t *nodes, enum mpol_rebind_step step) { nodemask_t tmp; if (pol->flags & MPOL_F_STATIC_NODES) { int node = first_node(pol->w.user_nodemask); if (node_isset(node, *nodes)) { pol->v.preferred_node = node; pol->flags &= ~MPOL_F_LOCAL; } else pol->flags |= MPOL_F_LOCAL; } else if (pol->flags & MPOL_F_RELATIVE_NODES) { mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); pol->v.preferred_node = first_node(tmp); } else if (!(pol->flags & MPOL_F_LOCAL)) { pol->v.preferred_node = node_remap(pol->v.preferred_node, pol->w.cpuset_mems_allowed, *nodes); pol->w.cpuset_mems_allowed = *nodes; } } /* * mpol_rebind_policy - Migrate a policy to a different set of nodes * * If read-side task has no lock to protect task->mempolicy, write-side * task will rebind the task->mempolicy by two step. The first step is * setting all the newly nodes, and the second step is cleaning all the * disallowed nodes. In this way, we can avoid finding no node to alloc * page. * If we have a lock to protect task->mempolicy in read-side, we do * rebind directly. 
* * step: * MPOL_REBIND_ONCE - do rebind work at once * MPOL_REBIND_STEP1 - set all the newly nodes * MPOL_REBIND_STEP2 - clean all the disallowed nodes */ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask, enum mpol_rebind_step step) { if (!pol) return; if (!mpol_store_user_nodemask(pol) && step == 0 && nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) return; if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING)) return; if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING)) BUG(); if (step == MPOL_REBIND_STEP1) pol->flags |= MPOL_F_REBINDING; else if (step == MPOL_REBIND_STEP2) pol->flags &= ~MPOL_F_REBINDING; else if (step >= MPOL_REBIND_NSTEP) BUG(); mpol_ops[pol->mode].rebind(pol, newmask, step); } /* * Wrapper for mpol_rebind_policy() that just requires task * pointer, and updates task mempolicy. * * Called with task's alloc_lock held. */ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new, enum mpol_rebind_step step) { mpol_rebind_policy(tsk->mempolicy, new, step); } /* * Rebind each vma in mm to new nodemask. * * Call holding a reference to mm. Takes mm->mmap_sem during call. */ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) { struct vm_area_struct *vma; down_write(&mm->mmap_sem); for (vma = mm->mmap; vma; vma = vma->vm_next) mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE); up_write(&mm->mmap_sem); } static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { [MPOL_DEFAULT] = { .rebind = mpol_rebind_default, }, [MPOL_INTERLEAVE] = { .create = mpol_new_interleave, .rebind = mpol_rebind_nodemask, }, [MPOL_PREFERRED] = { .create = mpol_new_preferred, .rebind = mpol_rebind_preferred, }, [MPOL_BIND] = { .create = mpol_new_bind, .rebind = mpol_rebind_nodemask, }, }; static void migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags); /* Scan through pages checking if pages follow certain conditions. 
*/ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { pte_t *orig_pte; pte_t *pte; spinlock_t *ptl; orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); do { struct page *page; int nid; if (!pte_present(*pte)) continue; page = vm_normal_page(vma, addr, *pte); if (!page) continue; /* * vm_normal_page() filters out zero pages, but there might * still be PageReserved pages to skip, perhaps in a VDSO. * And we cannot move PageKsm pages sensibly or safely yet. */ if (PageReserved(page) || PageKsm(page)) continue; nid = page_to_nid(page); if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) continue; if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) migrate_page_add(page, private, flags); else break; } while (pte++, addr += PAGE_SIZE, addr != end); pte_unmap_unlock(orig_pte, ptl); return addr != end; } static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { pmd_t *pmd; unsigned long next; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); split_huge_page_pmd(vma->vm_mm, pmd); if (pmd_none_or_trans_huge_or_clear_bad(pmd)) continue; if (check_pte_range(vma, pmd, addr, next, nodes, flags, private)) return -EIO; } while (pmd++, addr = next, addr != end); return 0; } static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { pud_t *pud; unsigned long next; pud = pud_offset(pgd, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; if (check_pmd_range(vma, pud, addr, next, nodes, flags, private)) return -EIO; } while (pud++, addr = next, addr != end); return 0; } static inline int check_pgd_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, 
const nodemask_t *nodes, unsigned long flags, void *private) { pgd_t *pgd; unsigned long next; pgd = pgd_offset(vma->vm_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; if (check_pud_range(vma, pgd, addr, next, nodes, flags, private)) return -EIO; } while (pgd++, addr = next, addr != end); return 0; } /* * Check if all pages in a range are on a set of nodes. * If pagelist != NULL then isolate pages from the LRU and * put them on the pagelist. */ static struct vm_area_struct * check_range(struct mm_struct *mm, unsigned long start, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { int err; struct vm_area_struct *first, *vma, *prev; first = find_vma(mm, start); if (!first) return ERR_PTR(-EFAULT); prev = NULL; for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) { if (!(flags & MPOL_MF_DISCONTIG_OK)) { if (!vma->vm_next && vma->vm_end < end) return ERR_PTR(-EFAULT); if (prev && prev->vm_end < vma->vm_start) return ERR_PTR(-EFAULT); } if (!is_vm_hugetlb_page(vma) && ((flags & MPOL_MF_STRICT) || ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) && vma_migratable(vma)))) { unsigned long endvma = vma->vm_end; if (endvma > end) endvma = end; if (vma->vm_start > start) start = vma->vm_start; err = check_pgd_range(vma, start, endvma, nodes, flags, private); if (err) { first = ERR_PTR(err); break; } } prev = vma; } return first; } /* * Apply policy to a single VMA * This must be called with the mmap_sem held for writing. */ static int vma_replace_policy(struct vm_area_struct *vma, struct mempolicy *pol) { int err; struct mempolicy *old; struct mempolicy *new; pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_ops, vma->vm_file, vma->vm_ops ? 
vma->vm_ops->set_policy : NULL); new = mpol_dup(pol); if (IS_ERR(new)) return PTR_ERR(new); if (vma->vm_ops && vma->vm_ops->set_policy) { err = vma->vm_ops->set_policy(vma, new); if (err) goto err_out; } old = vma->vm_policy; vma->vm_policy = new; /* protected by mmap_sem */ mpol_put(old); return 0; err_out: mpol_put(new); return err; } /* Step 2: apply policy to a range and do splits. */ static int mbind_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct mempolicy *new_pol) { struct vm_area_struct *next; struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; pgoff_t pgoff; unsigned long vmstart; unsigned long vmend; vma = find_vma(mm, start); if (!vma || vma->vm_start > start) return -EFAULT; prev = vma->vm_prev; if (start > vma->vm_start) prev = vma; for (; vma && vma->vm_start < end; prev = vma, vma = next) { next = vma->vm_next; vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); if (mpol_equal(vma_policy(vma), new_pol)) continue; pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT); prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, new_pol, vma_get_anon_name(name)); if (prev) { vma = prev; next = vma->vm_next; continue; } if (vma->vm_start != vmstart) { err = split_vma(vma->vm_mm, vma, vmstart, 1); if (err) goto out; } if (vma->vm_end != vmend) { err = split_vma(vma->vm_mm, vma, vmend, 0); if (err) goto out; } err = vma_replace_policy(vma, new_pol); if (err) goto out; } out: return err; } /* * Update task->flags PF_MEMPOLICY bit: set iff non-default * mempolicy. Allows more rapid checking of this (combined perhaps * with other PF_* flag bits) on memory allocation hot code paths. * * If called from outside this file, the task 'p' should -only- be * a newly forked child not yet visible on the task list, because * manipulating the task flags of a visible task is not safe. 
* * The above limitation is why this routine has the funny name * mpol_fix_fork_child_flag(). * * It is also safe to call this with a task pointer of current, * which the static wrapper mpol_set_task_struct_flag() does, * for use within this file. */ void mpol_fix_fork_child_flag(struct task_struct *p) { if (p->mempolicy) p->flags |= PF_MEMPOLICY; else p->flags &= ~PF_MEMPOLICY; } static void mpol_set_task_struct_flag(void) { mpol_fix_fork_child_flag(current); } /* Set the process memory policy */ static long do_set_mempolicy(unsigned short mode, unsigned short flags, nodemask_t *nodes) { struct mempolicy *new, *old; struct mm_struct *mm = current->mm; NODEMASK_SCRATCH(scratch); int ret; if (!scratch) return -ENOMEM; new = mpol_new(mode, flags, nodes); if (IS_ERR(new)) { ret = PTR_ERR(new); goto out; } /* * prevent changing our mempolicy while show_numa_maps() * is using it. * Note: do_set_mempolicy() can be called at init time * with no 'mm'. */ if (mm) down_write(&mm->mmap_sem); task_lock(current); ret = mpol_set_nodemask(new, nodes, scratch); if (ret) { task_unlock(current); if (mm) up_write(&mm->mmap_sem); mpol_put(new); goto out; } old = current->mempolicy; current->mempolicy = new; mpol_set_task_struct_flag(); if (new && new->mode == MPOL_INTERLEAVE && nodes_weight(new->v.nodes)) current->il_next = first_node(new->v.nodes); task_unlock(current); if (mm) up_write(&mm->mmap_sem); mpol_put(old); ret = 0; out: NODEMASK_SCRATCH_FREE(scratch); return ret; } /* * Return nodemask for policy for get_mempolicy() query * * Called with task's alloc_lock held */ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) { nodes_clear(*nodes); if (p == &default_policy) return; switch (p->mode) { case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: *nodes = p->v.nodes; break; case MPOL_PREFERRED: if (!(p->flags & MPOL_F_LOCAL)) node_set(p->v.preferred_node, *nodes); /* else return empty node mask for local allocation */ break; default: BUG(); } } static int 
lookup_node(struct mm_struct *mm, unsigned long addr)
{
	/* Fault in the page at @addr and report which node it lives on. */
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
/*
 * Core of the get_mempolicy() syscall: report the policy mode
 * (and optionally its nodemask) for the task or for @addr's vma.
 */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/* Report the node the page at @addr resides on. */
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

/* migrate_pages() callback: allocate the destination page on @node. */
static struct page *new_node_page(struct page *page, unsigned long node,
					int **x)
{
	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;
	struct vm_area_struct *vma;

	nodes_clear(nmask);
	node_set(source, nmask);

	/* Collect every migratable page currently on @source. */
	vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			  flags | MPOL_MF_DISCONTIG_OK, &pagelist);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, dest,
				    false, MIGRATE_SYNC);
		if (err)
			putback_lru_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of page that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning from_tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s,d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;	/* count of pages left behind */
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private,
				 int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else
/* !CONFIG_MIGRATION stubs: migration requests become no-ops/errors. */

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private,
				 int **x)
{
	return NULL;
}
#endif

/*
 * Core of the mbind() syscall: apply @mode/@nmask to [start, start+len)
 * and optionally migrate non-conforming pages (MPOL_MF_MOVE*).
 */
static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return -EINVAL;
	if
((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) return -EPERM; if (start & ~PAGE_MASK) return -EINVAL; if (mode == MPOL_DEFAULT) flags &= ~MPOL_MF_STRICT; len = (len + PAGE_SIZE - 1) & PAGE_MASK; end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; new = mpol_new(mode, mode_flags, nmask); if (IS_ERR(new)) return PTR_ERR(new); /* * If we are using the default policy then operation * on discontinuous address spaces is okay after all */ if (!new) flags |= MPOL_MF_DISCONTIG_OK; pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", start, start + len, mode, mode_flags, nmask ? nodes_addr(*nmask)[0] : -1); if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { err = migrate_prep(); if (err) goto mpol_out; } { NODEMASK_SCRATCH(scratch); if (scratch) { down_write(&mm->mmap_sem); task_lock(current); err = mpol_set_nodemask(new, nmask, scratch); task_unlock(current); if (err) up_write(&mm->mmap_sem); } else err = -ENOMEM; NODEMASK_SCRATCH_FREE(scratch); } if (err) goto mpol_out; vma = check_range(mm, start, end, nmask, flags | MPOL_MF_INVERT, &pagelist); err = PTR_ERR(vma); if (!IS_ERR(vma)) { int nr_failed = 0; err = mbind_range(mm, start, end, new); if (!list_empty(&pagelist)) { nr_failed = migrate_pages(&pagelist, new_vma_page, (unsigned long)vma, false, true); if (nr_failed) putback_lru_pages(&pagelist); } if (!err && nr_failed && (flags & MPOL_MF_STRICT)) err = -EIO; } else putback_lru_pages(&pagelist); up_write(&mm->mmap_sem); mpol_out: mpol_put(new); return err; } /* * User space interface with variable sized bitmaps for nodelists. */ /* Copy a node mask from user space. 
 */
/*
 * Decode a user nodemask of @maxnode bits into *@nodes.  Bits beyond
 * MAX_NUMNODES must be zero or the call fails with -EINVAL.
 */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask,
			   nlongs*sizeof(unsigned long)))
		return -EFAULT;
	/* Mask off trailing bits the user did not ask for. */
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		/* Zero the part of the user buffer we have no bits for. */
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned, flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	/* Mode flags ride in the high bits of @mode. */
	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

/* Set the process memory policy */
SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	nodemask_t task_nodes;
	int err;
	nodemask_t *old;
	nodemask_t *new;
	NODEMASK_SCRATCH(scratch);

	if (!scratch)
		return -ENOMEM;

	old = &scratch->mask1;
	new = &scratch->mask2;

	err = get_nodes(old, old_nodes, maxnode);
	if (err)
		goto out;

	err = get_nodes(new, new_nodes, maxnode);
	if (err)
		goto out;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		err = -ESRCH;
		goto out;
	}
	get_task_struct(task);

	err = -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out_put;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out_put;
	}

	if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out_put;
	}

	err = security_task_movememory(task);
	if (err)
		goto out_put;

	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm) {
		err = -EINVAL;
		goto out;
	}

	err = do_migrate_pages(mm, old, new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

	mmput(mm);
out:
	NODEMASK_SCRATCH_FREE(scratch);

	return err;

out_put:
	put_task_struct(task);
	goto out;
}

/* Retrieve NUMA policy */
SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		unsigned long __user *, nmask, unsigned long, maxnode,
		unsigned long, addr, unsigned long, flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT
/* 32-bit compat wrappers: translate compat_ulong_t bitmaps and forward. */

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		unsigned long copy_size;
		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
		err = copy_from_user(bm, nm, copy_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma   - virtual memory area whose policy is sought
 * @addr  - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Current or other task's task mempolicy and non-shared vma policies
 * are protected by the task's mmap_sem, which must be held for read by
 * the caller.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.
 It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
									addr);
			if (vpol)
				pol = vpol;
		} else if (vma->vm_policy) {
			pol = vma->vm_policy;

			/*
			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
			 * count on these policies which will be dropped by
			 * mpol_cond_put() later
			 */
			if (mpol_needs_cond_ref(pol))
				mpol_get(pol);
		}
	}
	if (!pol)
		pol = &default_policy;
	return pol;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(policy->mode == MPOL_BIND) &&
			gfp_zone(gfp) >= policy_zone &&
			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;

	return NULL;
}

/* Return a zonelist indicated by gfp for node representing a mempolicy */
static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
	int nd)
{
	switch (policy->mode) {
	case MPOL_PREFERRED:
		if (!(policy->flags & MPOL_F_LOCAL))
			nd = policy->v.preferred_node;
		break;
	case MPOL_BIND:
		/*
		 * Normally, MPOL_BIND allocations are node-local within the
		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
		 * current node isn't part of the mask, we use the zonelist for
		 * the first node in the mask instead.
		 */
		if (unlikely(gfp & __GFP_THISNODE) &&
				unlikely(!node_isset(nd, policy->v.nodes)))
			nd = first_node(policy->v.nodes);
		break;
	default:
		BUG();
	}
	return node_zonelist(nd, gfp);
}

/* Do dynamic interleaving for a process */
/* Advance current->il_next round-robin and return the node to use now. */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	if (next < MAX_NUMNODES)
		me->il_next = next;
	return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 * @policy must be protected by freeing by the caller.  If @policy is
 * the current task's mempolicy, this protection is implicit, as only the
 * task can change it's policy.  The system default policy requires no
 * such protection.
 */
unsigned slab_node(struct mempolicy *policy)
{
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return numa_node_id();

	switch (policy->mode) {
	case MPOL_PREFERRED:
		/*
		 * handled MPOL_F_LOCAL above
		 */
		return policy->v.preferred_node;

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND: {
		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		struct zone *zone;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
		(void)first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->v.nodes,
							&zone);
		return zone ? zone->node : numa_node_id();
	}

	default:
		BUG();
	}
}

/* Do static interleaving for a VMA with known offset.
 */
/* Map a page offset deterministically onto the target'th set node. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target;
	int c;
	int nid = -1;

	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)off % nnodes;
	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}

/*
 * Return the bit number of a random bit set in the nodemask.
 * (returns -1 if nodemask is empty)
 */
int node_random(const nodemask_t *maskp)
{
	int w, bit = -1;

	w = nodes_weight(*maskp);
	if (w)
		bit = bitmap_ord_to_pos(maskp->bits,
			get_random_int() % w, MAX_NUMNODES);
	return bit;
}

#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
 * @nodemask for filtering the zonelist.
 *
 * Must be protected by get_mems_allowed()
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol,
				nodemask_t **nodemask)
{
	struct zonelist *zl;

	*mpol = get_vma_policy(current, vma, addr);
	*nodemask = NULL;	/* assume !MPOL_BIND */

	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
		/* Interleave at huge-page granularity across the mask. */
		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
				huge_page_shift(hstate_vma(vma))), gfp_flags);
	} else {
		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
		if ((*mpol)->mode == MPOL_BIND)
			*nodemask = &(*mpol)->v.nodes;
	}
	return zl;
}

/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy.  Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining it's own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	int nid;

	if (!(mask && current->mempolicy))
		return false;

	task_lock(current);
	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		if (mempolicy->flags & MPOL_F_LOCAL)
			nid = numa_node_id();
		else
			nid = mempolicy->v.preferred_node;
		init_nodemask_of_node(mask, nid);
		break;

	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*mask =  mempolicy->v.nodes;
		break;

	default:
		BUG();
	}
	task_unlock(current);

	return true;
}
#endif

/*
 * mempolicy_nodemask_intersects
 *
 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
 * policy.
 Otherwise, check for intersection between mask and the policy
 * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
 * policy, always return true since it may allocate elsewhere on fallback.
 *
 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
 */
bool mempolicy_nodemask_intersects(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	bool ret = true;

	if (!mask)
		return ret;
	task_lock(tsk);
	mempolicy = tsk->mempolicy;
	if (!mempolicy)
		goto out;

	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		/*
		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
		 * allocate from, they may fallback to other nodes when oom.
		 * Thus, it's possible for tsk to have allocated memory from
		 * nodes in mask.
		 */
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		ret = nodes_intersects(mempolicy->v.nodes, *mask);
		break;
	default:
		BUG();
	}
out:
	task_unlock(tsk);
	return ret;
}

/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = node_zonelist(nid, gfp);
	page = __alloc_pages(gfp, order, zl);
	/* Count the hit only when the page landed on the requested node. */
	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
	return page;
}

/**
 * 	alloc_pages_vma	- Allocate a page for a VMA.
 *
 * 	@gfp:
 *      %GFP_USER    user allocation.
 *      %GFP_KERNEL  kernel allocations,
 *      %GFP_HIGHMEM highmem/user allocations,
 *      %GFP_FS      allocation should not call back into a file system.
 *      %GFP_ATOMIC  don't sleep.
 *
 *	@order:Order of the GFP allocation.
 * 	@vma:  Pointer to VMA or NULL if not available.
 *	@addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * 	This function allocates a page from the kernel page pool and applies
 *	a NUMA policy associated with the VMA or the current process.
 *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
 *	mm_struct of the VMA to prevent it from going away.  Should be used for
 *	all allocations for pages that will be mapped into
 * 	user space. Returns NULL when no page can be allocated.
 *
 *	Should be called with the mm_sem of the vma hold.
 */
struct page *
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr, int node)
{
	struct mempolicy *pol;
	struct zonelist *zl;
	struct page *page;
	unsigned int cpuset_mems_cookie;

retry_cpuset:
	pol = get_vma_policy(current, vma, addr);
	cpuset_mems_cookie = get_mems_allowed();

	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
		mpol_cond_put(pol);
		page = alloc_page_interleave(gfp, order, nid);
		/* Retry if the cpuset mems changed underneath us. */
		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
			goto retry_cpuset;

		return page;
	}
	zl = policy_zonelist(gfp, pol, node);
	if (unlikely(mpol_needs_cond_ref(pol))) {
		/*
		 * slow path: ref counted shared policy
		 */
		struct page *page =  __alloc_pages_nodemask(gfp, order,
						zl, policy_nodemask(gfp, pol));
		__mpol_put(pol);
		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
			goto retry_cpuset;
		return page;
	}
	/*
	 * fast path:  default or task policy
	 */
	page = __alloc_pages_nodemask(gfp, order, zl,
				      policy_nodemask(gfp, pol));
	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
		goto retry_cpuset;
	return page;
}

/**
 * 	alloc_pages_current - Allocate pages.
 *
 *	@gfp:
 *		%GFP_USER   user allocation,
 *      	%GFP_KERNEL kernel allocation,
 *      	%GFP_HIGHMEM highmem allocation,
 *      	%GFP_FS     don't call back into a file system.
 *      	%GFP_ATOMIC don't sleep.
 *	@order: Power of two of allocation size in pages. 0 is a single page.
 *
 *	Allocate a page from the kernel page pool.  When not in
 *	interrupt context and apply the current process NUMA policy.
 *	Returns NULL when no page can be allocated.
 *
 *	Don't call cpuset_update_task_memory_state() unless
 *	1) it's ok to take cpuset_sem (can WAIT), and
 *	2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = current->mempolicy;
	struct page *page;
	unsigned int cpuset_mems_cookie;

	/* fall back to the system default policy in atomic/THISNODE cases */
	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
		pol = &default_policy;

retry_cpuset:
	cpuset_mems_cookie = get_mems_allowed();

	/*
	 * No reference counting needed for current->mempolicy
	 * nor system default_policy
	 */
	if (pol->mode == MPOL_INTERLEAVE)
		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
	else
		page = __alloc_pages_nodemask(gfp, order,
				policy_zonelist(gfp, pol, numa_node_id()),
				policy_nodemask(gfp, pol));

	/* retry if the cpuset's allowed mems changed while we allocated */
	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
		goto retry_cpuset;

	return page;
}
EXPORT_SYMBOL(alloc_pages_current);

/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy its copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebinded by the other task(the task that changes
 * cpuset's mems), so we needn't do rebind work for current task.
 */

/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);

	/* task's mempolicy is protected by alloc_lock */
	if (old == current->mempolicy) {
		task_lock(current);
		*new = *old;
		task_unlock(current);
	} else
		*new = *old;

	rcu_read_lock();
	if (current_cpuset_is_being_rebound()) {
		/* re-contextualize the copy against the new cpuset mems */
		nodemask_t mems = cpuset_mems_allowed(current);
		if (new->flags & MPOL_F_REBINDING)
			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
		else
			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
	}
	rcu_read_unlock();
	atomic_set(&new->refcnt, 1);
	return new;
}

/* Slow path of a mempolicy comparison */
bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return false;
	if (a->mode != b->mode)
		return false;
	if (a->flags != b->flags)
		return false;
	if (mpol_store_user_nodemask(a))
		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
			return false;

	switch (a->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		return !!nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node;
	default:
		BUG();
		return false;
	}
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by sp->mutex, which must be held
 * for any accesses to the tree.
 */

/* lookup first element intersecting start-end */
/* Caller holds sp->mutex */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	/* find any node intersecting [start, end) */
	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	/* walk back to the first (lowest) intersecting node */
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}

/* Insert a new shared policy into the list. */
/* Caller holds sp->mutex */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();	/* overlapping ranges must never be inserted */
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ?
new->policy->mode : 0); } /* Find shared policy intersecting idx */ struct mempolicy * mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) { struct mempolicy *pol = NULL; struct sp_node *sn; if (!sp->root.rb_node) return NULL; mutex_lock(&sp->mutex); sn = sp_lookup(sp, idx, idx+1); if (sn) { mpol_get(sn->policy); pol = sn->policy; } mutex_unlock(&sp->mutex); return pol; } static void sp_free(struct sp_node *n) { mpol_put(n->policy); kmem_cache_free(sn_cache, n); } static void sp_delete(struct shared_policy *sp, struct sp_node *n) { pr_debug("deleting %lx-l%lx\n", n->start, n->end); rb_erase(&n->nd, &sp->root); sp_free(n); } static struct sp_node *sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol) { struct sp_node *n; struct mempolicy *newpol; n = kmem_cache_alloc(sn_cache, GFP_KERNEL); if (!n) return NULL; newpol = mpol_dup(pol); if (IS_ERR(newpol)) { kmem_cache_free(sn_cache, n); return NULL; } newpol->flags |= MPOL_F_SHARED; n->start = start; n->end = end; n->policy = newpol; return n; } /* Replace a policy range. */ static int shared_policy_replace(struct shared_policy *sp, unsigned long start, unsigned long end, struct sp_node *new) { struct sp_node *n; int ret = 0; mutex_lock(&sp->mutex); n = sp_lookup(sp, start, end); /* Take care of old policies in the same range. */ while (n && n->start < end) { struct rb_node *next = rb_next(&n->nd); if (n->start >= start) { if (n->end <= end) sp_delete(sp, n); else n->start = end; } else { /* Old policy spanning whole new range. 
 */
			if (n->end > end) {
				/* old policy sticks out on both sides:
				 * split it around the new range */
				struct sp_node *new2;
				new2 = sp_alloc(end, n->end, n->policy);
				if (!new2) {
					ret = -ENOMEM;
					goto out;
				}
				n->end = start;
				sp_insert(sp, new2);
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
out:
	mutex_unlock(&sp->mutex);
	return ret;
}

/**
 * mpol_shared_policy_init - initialize shared policy for inode
 * @sp: pointer to inode shared policy
 * @mpol: struct mempolicy to install
 *
 * Install non-NULL @mpol in inode's shared policy rb-tree.
 * On entry, the current task has a reference on a non-NULL @mpol.
 * This must be released on exit.
 * This is called at get_inode() calls and we can use GFP_KERNEL.
 */
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
	int ret;

	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
	mutex_init(&sp->mutex);

	if (mpol) {
		struct vm_area_struct pvma;
		struct mempolicy *new;
		NODEMASK_SCRATCH(scratch);

		if (!scratch)
			goto put_mpol;
		/* contextualize the tmpfs mount point mempolicy */
		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
		if (IS_ERR(new))
			goto free_scratch; /* no valid nodemask intersection */

		task_lock(current);
		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
		task_unlock(current);
		if (ret)
			goto put_new;

		/* Create pseudo-vma that contains just the policy */
		memset(&pvma, 0, sizeof(struct vm_area_struct));
		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */

put_new:
		mpol_put(new);			/* drop initial ref */
free_scratch:
		NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
	}
}

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->mode : -1,
		 npol ? npol->flags : -1,
		 npol ?
		 nodes_addr(npol->v.nodes)[0] : -1);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		sp_free(new);
	return err;
}

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	mutex_lock(&p->mutex);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		sp_delete(p, n);
	}
	mutex_unlock(&p->mutex);
}

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	/*
	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), or
	 * fall back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		printk("numa_policy_init: interleaving failed\n");
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}

/*
 * Parse and format mempolicy from/to strings
 */

/*
 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
 */
#define MPOL_LOCAL MPOL_MAX
static const char * const policy_modes[] =
{
	[MPOL_DEFAULT]    = "default",
	[MPOL_PREFERRED]  = "prefer",
	[MPOL_BIND]       = "bind",
	[MPOL_INTERLEAVE] = "interleave",
	[MPOL_LOCAL]      = "local"
};


#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
 * @str:  string containing mempolicy to parse
 * @mpol:  pointer to struct mempolicy pointer, returned on success.
 * @unused:  redundant argument, to be removed later.
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
 * On success, returns 0, else 1
 */
int mpol_parse_str(char *str, struct mempolicy **mpol, int unused)
{
	struct mempolicy *new = NULL;
	unsigned short mode;
	unsigned short mode_flags;
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate mode or flags string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	if (flags)
		*flags++ = '\0';	/* terminate mode string */

	for (mode = 0; mode <= MPOL_LOCAL; mode++) {
		if (!strcmp(str, policy_modes[mode])) {
			break;
		}
	}
	if (mode > MPOL_LOCAL)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (*rest)
				goto out;
		}
		break;
	case MPOL_INTERLEAVE:
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			nodes = node_states[N_HIGH_MEMORY];
		break;
	case MPOL_LOCAL:
		/*
		 * Don't allow a nodelist;  mpol_new() checks flags
		 */
		if (nodelist)
			goto out;
		mode = MPOL_PREFERRED;
		break;
	case MPOL_DEFAULT:
		/*
		 * Insist on an empty nodelist
		 */
		if (!nodelist)
			err = 0;
		goto out;
	case MPOL_BIND:
		/*
		 * Insist on a nodelist
		 */
		if (!nodelist)
			goto out;
	}

	mode_flags = 0;
	if (flags) {
		/*
		 * Currently, we only support two mutually exclusive
		 * mode flags.
 */
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			goto out;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		goto out;

	/*
	 * Save nodes for mpol_to_str() to show the tmpfs mount options
	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
	 */
	if (mode != MPOL_PREFERRED)
		new->v.nodes = nodes;
	else if (nodelist)
		new->v.preferred_node = first_node(nodes);
	else
		new->flags |= MPOL_F_LOCAL;

	/*
	 * Save nodes for contextualization: this will be used to "clone"
	 * the mempolicy in a specific context [cpuset] at a later time.
	 */
	new->w.user_nodemask = nodes;

	err = 0;

out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	if (flags)
		*--flags = '=';
	if (!err)
		*mpol = new;
	return err;
}
#endif /* CONFIG_TMPFS */

/**
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer:  to contain formatted mempolicy string
 * @maxlen:  length of @buffer
 * @pol:  pointer to mempolicy to be formatted
 * @unused:  redundant argument, to be removed later.
 *
 * Convert a mempolicy into a string.
 * Returns the number of characters in buffer (if positive)
 * or an error (negative)
 */
int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int unused)
{
	char *p = buffer;
	int l;
	nodemask_t nodes;
	unsigned short mode;
	unsigned short flags = pol ?
pol->flags : 0;

	/*
	 * Sanity check:  room for longest mode, flag and some nodes
	 */
	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);

	if (!pol || pol == &default_policy)
		mode = MPOL_DEFAULT;
	else
		mode = pol->mode;

	switch (mode) {
	case MPOL_DEFAULT:
		nodes_clear(nodes);
		break;

	case MPOL_PREFERRED:
		nodes_clear(nodes);
		if (flags & MPOL_F_LOCAL)
			mode = MPOL_LOCAL;	/* pseudo-policy */
		else
			node_set(pol->v.preferred_node, nodes);
		break;

	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;

	default:
		return -EINVAL;
	}

	l = strlen(policy_modes[mode]);
	if (buffer + maxlen < p + l + 1)
		return -ENOSPC;

	strcpy(p, policy_modes[mode]);
	p += l;

	if (flags & MPOL_MODE_FLAGS) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = '=';

		/*
		 * Currently, the only defined flags are mutually exclusive
		 */
		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");
	}

	if (!nodes_empty(nodes)) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = ':';
		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
	}
	return p - buffer;
}
gpl-2.0
toastcfh/mecha-htc-2.6.32
net/rds/tcp_send.c
514
7650
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

/* Toggle TCP_CORK on the underlying socket (kernel-space setsockopt). */
static void rds_tcp_cork(struct socket *sock, int val)
{
	mm_segment_t oldfs;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
			      sizeof(val));
	set_fs(oldfs);
}

/* Cork the socket so the pieces of one message coalesce into few segments. */
void rds_tcp_xmit_prepare(struct rds_connection *conn)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;

	rds_tcp_cork(tc->t_sock, 1);
}

/* Uncork once the message has been handed to TCP. */
void rds_tcp_xmit_complete(struct rds_connection *conn)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;

	rds_tcp_cork(tc->t_sock, 0);
}

/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
{
	struct kvec vec = {
		.iov_base = data,
		.iov_len = len,
	};
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
	};

	return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
}

/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_xmit_cong_map(struct rds_connection *conn,
			  struct rds_cong_map *map, unsigned long offset)
{
	static struct rds_header rds_tcp_map_header = {
		.h_flags = RDS_FLAG_CONG_BITMAP,
	};
	struct rds_tcp_connection *tc = conn->c_transport_data;
	unsigned long i;
	int ret;
	int copied = 0;

	/* Some problem claims cpu_to_be32(constant) isn't a constant.
 */
	rds_tcp_map_header.h_len = cpu_to_be32(RDS_CONG_MAP_BYTES);

	/* send (the rest of) the header first, if not fully sent yet */
	if (offset < sizeof(struct rds_header)) {
		ret = rds_tcp_sendmsg(tc->t_sock,
				      (void *)&rds_tcp_map_header + offset,
				      sizeof(struct rds_header) - offset);
		if (ret <= 0)
			return ret;
		offset += ret;
		copied = ret;
		if (offset < sizeof(struct rds_header))
			return ret;
	}

	/* translate the remaining offset into a page index + page offset */
	offset -= sizeof(struct rds_header);
	i = offset / PAGE_SIZE;
	offset = offset % PAGE_SIZE;
	BUG_ON(i >= RDS_CONG_MAP_PAGES);

	do {
		ret = tc->t_sock->ops->sendpage(tc->t_sock,
					virt_to_page(map->m_page_addrs[i]),
					offset, PAGE_SIZE - offset,
					MSG_DONTWAIT);
		if (ret <= 0)
			break;
		copied += ret;
		offset += ret;
		if (offset == PAGE_SIZE) {
			offset = 0;
			i++;
		}
	} while (i < RDS_CONG_MAP_PAGES);

	return copied ? copied : ret;
}

/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
		 unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;
	int done = 0;
	int ret = 0;

	if (hdr_off == 0) {
		/*
		 * m_ack_seq is set to the sequence number of the last byte of
		 * header and data.  see rds_tcp_is_acked().
 */
		tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc);
		rm->m_ack_seq = tc->t_last_sent_nxt +
				sizeof(struct rds_header) +
				be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
		smp_mb__before_clear_bit();
		set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags);
		tc->t_last_expected_una = rm->m_ack_seq + 1;

		rdsdebug("rm %p tcp nxt %u ack_seq %llu\n",
			 rm, rds_tcp_snd_nxt(tc),
			 (unsigned long long)rm->m_ack_seq);
	}

	if (hdr_off < sizeof(struct rds_header)) {
		/* see rds_tcp_write_space() */
		set_bit(SOCK_NOSPACE, &tc->t_sock->sk->sk_socket->flags);

		ret = rds_tcp_sendmsg(tc->t_sock,
				      (void *)&rm->m_inc.i_hdr + hdr_off,
				      sizeof(rm->m_inc.i_hdr) - hdr_off);
		if (ret < 0)
			goto out;
		done += ret;
		if (hdr_off + done != sizeof(struct rds_header))
			goto out;
	}

	/* push the data payload, one scatterlist entry at a time */
	while (sg < rm->m_nents) {
		ret = tc->t_sock->ops->sendpage(tc->t_sock,
						sg_page(&rm->m_sg[sg]),
						rm->m_sg[sg].offset + off,
						rm->m_sg[sg].length - off,
						MSG_DONTWAIT|MSG_NOSIGNAL);
		rdsdebug("tcp sendpage %p:%u:%u ret %d\n",
			 (void *)sg_page(&rm->m_sg[sg]),
			 rm->m_sg[sg].offset + off,
			 rm->m_sg[sg].length - off, ret);
		if (ret <= 0)
			break;

		off += ret;
		done += ret;
		if (off == rm->m_sg[sg].length) {
			off = 0;
			sg++;
		}
	}

out:
	if (ret <= 0) {
		/* write_space will hit after EAGAIN, all else fatal */
		if (ret == -EAGAIN) {
			rds_tcp_stats_inc(s_tcp_sndbuf_full);
			ret = 0;
		} else {
			printk(KERN_WARNING "RDS/tcp: send to %u.%u.%u.%u "
			       "returned %d, disconnecting and reconnecting\n",
			       NIPQUAD(conn->c_faddr), ret);
			rds_conn_drop(conn);
		}
	}
	if (done == 0)
		done = ret;
	return done;
}

/*
 * rm->m_ack_seq is set to the tcp sequence number that corresponds to the
 * last byte of the message, including the header.  This means that the
 * entire message has been received if rm->m_ack_seq is "before" the next
 * unacked byte of the TCP sequence space.  We have to do very careful
 * wrapping 32bit comparisons here.
 */
static int rds_tcp_is_acked(struct rds_message *rm, uint64_t ack)
{
	if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
		return 0;
	/* signed 32-bit difference handles TCP sequence-number wraparound */
	return (__s32)((u32)rm->m_ack_seq - (u32)ack) < 0;
}

void rds_tcp_write_space(struct sock *sk)
{
	void (*write_space)(struct sock *sk);
	struct rds_connection *conn;
	struct rds_tcp_connection *tc;

	read_lock(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (conn == NULL) {
		/* socket no longer owned by RDS; call its original handler */
		write_space = sk->sk_write_space;
		goto out;
	}

	tc = conn->c_transport_data;
	rdsdebug("write_space for tc %p\n", tc);
	write_space = tc->t_orig_write_space;
	rds_tcp_stats_inc(s_tcp_write_space_calls);
	rdsdebug("tcp una %u\n", rds_tcp_snd_una(tc));
	tc->t_last_seen_una = rds_tcp_snd_una(tc);
	rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);

	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
out:
	read_unlock(&sk->sk_callback_lock);

	/*
	 * write_space is only called when data leaves tcp's send queue if
	 * SOCK_NOSPACE is set.  We set SOCK_NOSPACE every time we put
	 * data in tcp's send queue because we use write_space to parse the
	 * sequence numbers and notice that rds messages have been fully
	 * received.
	 *
	 * tcp's write_space clears SOCK_NOSPACE if the send queue has more
	 * than a certain amount of space. So we need to set it again *after*
	 * we call tcp's write_space or else we might only get called on the
	 * first of a series of incoming tcp acks.
	 */
	write_space(sk);

	if (sk->sk_socket)
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}
gpl-2.0
sfjro/aufs3-linux
arch/sparc/kernel/unaligned_64.c
1282
17819
/*
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <asm/fpumacro.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>

#include "entry.h"
#include "kernel.h"

enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, cas, ... */
	fpld,
	fpst,
	invalid,
};

/* Classify the faulting instruction as a load, store or read-modify-write. */
static inline enum direction decode_direction(unsigned int insn)
{
	unsigned long tmp = (insn >> 21) & 1;

	if (!tmp)
		return load;
	else {
		switch ((insn>>19)&0xf) {
		case 15: /* swap* */
			return both;
		default:
			return store;
		}
	}
}

/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
	unsigned int tmp;

	tmp = ((insn >> 19) & 0xf);
	if (tmp == 11 || tmp == 14) /* ldx/stx */
		return 8;
	tmp &= 3;
	if (!tmp)
		return 4;
	else if (tmp == 3)
		return 16;	/* ldd/std - Although it is actually 8 */
	else if (tmp == 2)
		return 2;
	else {
		printk("Impossible unaligned trap. insn=%08x\n", insn);
		die_if_kernel("Byte sized unaligned access?!?!", regs);

		/* GCC should never warn that control reaches the end
		 * of this function without returning a value because
		 * die_if_kernel() is marked with attribute 'noreturn'.
		 * Alas, some versions do...
		 */
		return 0;
	}
}

/* Extract the address-space identifier the instruction names. */
static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
	if (insn & 0x800000) {
		if (insn & 0x2000)
			return (unsigned char)(regs->tstate >> 24);	/* %asi */
		else
			return (unsigned char)(insn >> 5);	/* imm_asi */
	} else
		return ASI_P;
}

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
	return (insn & 0x400000);
}

/* Spill register windows to the stack when a windowed reg is referenced. */
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
				       unsigned int rd, int from_kernel)
{
	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		if (from_kernel != 0)
			__asm__ __volatile__("flushw");
		else
			flushw_user();
	}
}

/* Sign-extend the 13-bit immediate field of a SPARC instruction. */
static inline long sign_extend_imm13(long imm)
{
	return imm << 51 >> 51;
}

/* Read register %rd either from pt_regs or from the saved window on stack. */
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value, fp;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		struct reg_window __user *win;
		win = (struct reg_window __user *)(fp + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}

/* Like fetch_reg() but return the address of the register slot. */
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	unsigned long fp;

	if (reg < 16)
		return &regs->u_regs[reg];

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}

unsigned long compute_effective_address(struct pt_regs
*regs, unsigned int insn, unsigned int rd)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	unsigned long addr;

	if (insn & 0x2000) {
		/* register + 13-bit immediate addressing */
		maybe_flush_windows(rs1, 0, rd, from_kernel);
		addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
	} else {
		/* register + register addressing */
		maybe_flush_windows(rs1, rs2, rd, from_kernel);
		addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
	}

	if (!from_kernel && test_thread_flag(TIF_32BIT))
		addr &= 0xffffffff;

	return addr;
}

/* This is just to make gcc think die_if_kernel does return... */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}

extern int do_int_load(unsigned long *dest_reg, int size,
		       unsigned long *saddr, int is_signed, int asi);

extern int __do_int_store(unsigned long *dst_addr, int size,
			  unsigned long src_val, int asi);

static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
			       struct pt_regs *regs, int asi, int orig_asi)
{
	unsigned long zero = 0;
	unsigned long *src_val_p = &zero;
	unsigned long src_val;

	if (size == 16) {
		/* std: combine the even/odd register pair into one 64-bit value */
		size = 8;
		zero = (((long)(reg_num ?
			(unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
			(unsigned)fetch_reg(reg_num + 1, regs);
	} else if (reg_num) {
		src_val_p = fetch_reg_addr(reg_num, regs);
	}
	src_val = *src_val_p;
	if (unlikely(asi != orig_asi)) {
		/* little-endian ASI was requested: byte-swap before storing */
		switch (size) {
		case 2:
			src_val = swab16(src_val);
			break;
		case 4:
			src_val = swab32(src_val);
			break;
		case 8:
			src_val = swab64(src_val);
			break;
		case 16:
		default:
			BUG();
			break;
		}
	}
	return __do_int_store(dst_addr, size, src_val, asi);
}

/* Step the trap PC past the emulated instruction. */
static inline void advance(struct pt_regs *regs)
{
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}

static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
	return !floating_point_load_or_store_p(insn);
}

static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
	struct pt_regs *regs = current_thread_info()->kern_una_regs;
	unsigned int insn = current_thread_info()->kern_una_insn;
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (!entry) {
		/* no fixup entry: this is a genuine kernel bug, oops */
		unsigned long address;

		address = compute_effective_address(regs, insn,
						    ((insn >> 25) & 0x1f));
		if (address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request in mna handler");
		printk(KERN_ALERT " at virtual address %016lx\n",address);
		printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
			(current->mm ? CTX_HWBITS(current->mm->context) :
			CTX_HWBITS(current->active_mm->context)));
		printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
			(current->mm ?
		(unsigned long) current->mm->pgd :
		(unsigned long) current->active_mm->pgd));
		die_if_kernel("Oops", regs);
		/* Not reached */
	}
	/* branch to the exception-table fixup handler */
	regs->tpc = entry->fixup;
	regs->tnpc = regs->tpc + 4;

	if (fixup_tstate_asi) {
		regs->tstate &= ~TSTATE_ASI;
		regs->tstate |= (ASI_AIUS << 24UL);
	}
}

/* Rate-limited logging so a storm of unaligned accesses can't flood dmesg. */
static void log_unaligned(struct pt_regs *regs)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	if (__ratelimit(&ratelimit)) {
		printk("Kernel unaligned access at TPC[%lx] %pS\n",
		       regs->tpc, (void *) regs->tpc);
	}
}

asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(regs, insn);
	int orig_asi, asi;

	current_thread_info()->kern_una_regs = regs;
	current_thread_info()->kern_una_insn = insn;

	orig_asi = asi = decode_asi(insn, regs);

	/* If this is a {get,put}_user() on an unaligned userspace pointer,
	 * just signal a fault and do not log the event.
	 */
	if (asi == ASI_AIUS) {
		kernel_mna_trap_fault(0);
		return;
	}

	log_unaligned(regs);

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel "
		       "at <%016lx>.\n", regs->tpc);
		unaligned_panic("Kernel does fpu/atomic "
				"unaligned load/store.", regs);

		kernel_mna_trap_fault(0);
	} else {
		unsigned long addr, *reg_addr;
		int err;

		addr = compute_effective_address(regs, insn,
						 ((insn >> 25) & 0x1f));
		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
		switch (asi) {
		case ASI_NL:
		case ASI_AIUPL:
		case ASI_AIUSL:
		case ASI_PL:
		case ASI_SL:
		case ASI_PNFL:
		case ASI_SNFL:
			/* emulate via the big-endian ASI, swap separately */
			asi &= ~0x08;
			break;
		}
		switch (dir) {
		case load:
			reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
			err = do_int_load(reg_addr, size,
					  (unsigned long *) addr,
					  decode_signedness(insn), asi);
			if (likely(!err) && unlikely(asi != orig_asi)) {
				unsigned long val_in = *reg_addr;
				switch (size) {
				case 2:
					val_in = swab16(val_in);
					break;
				case 4:
					val_in = swab32(val_in);
					break;
				case 8:
					val_in = swab64(val_in);
					break;
				case 16:
				default:
					BUG();
					break;
				}
				*reg_addr = val_in;
			}
			break;

		case store:
			err = do_int_store(((insn>>25)&0x1f), size,
					   (unsigned long *) addr, regs,
					   asi, orig_asi);
			break;

		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		if (unlikely(err))
			kernel_mna_trap_fault(1);
		else
			advance(regs);
	}
}

/* Emulate the popc (population count) instruction. */
int handle_popc(u32 insn, struct pt_regs *regs)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	int ret, rd = ((insn >> 25) & 0x1f);
	u64 value;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
	if (insn & 0x2000) {
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	ret = hweight64(value);
	if (rd < 16) {
		if (rd)
			regs->u_regs[rd] = ret;
	} else {
		unsigned long fp = regs->u_regs[UREG_FP];

		if (!test_thread_64bit_stack(fp)) {
			struct reg_window32 __user *win32;
			win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window __user *win;
			win = (struct reg_window __user *)(fp + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}

extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void sun4v_data_access_exception(struct pt_regs *regs,
					unsigned long addr,
					unsigned long type_ctx);

int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	int flag = (freg < 32) ?
FPRS_DL : FPRS_DU;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	save_and_clear_fpu();
	current_thread_info()->xfsr[0] &= ~0x1c000;
	if (freg & 3) {
		/* quad fp register number must be a multiple of 4 */
		current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
		do_fpother(regs);
		return 0;
	}
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;

		if (current_thread_info()->fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S:
			break;
		case ASI_PL:
		case ASI_SL:
			{
				/* Need to convert endians */
				u64 tmp = __swab64p(&first);

				first = __swab64p(&second);
				second = tmp;
			}
			break;
		default:
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (put_user (first >> 32, (u32 __user *)addr) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}

		for (i = 0; i < size; i++)
			data[i] = 0;

		err = get_user (data[0], (u32 __user *) addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user (data[i],
						   (u32 __user *)(addr + 4*i));
		}
		if (err && !(asi & 0x2 /* NF */)) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			switch (size) {
			case 1:
				data[0] = le32_to_cpup(data + 0);
				break;
			default:
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			case 4:
				tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			}
		}
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current_thread_info()->fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}

/* Emulate a non-faulting load that did fault: destination gets zero. */
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)
			reg[1] = 0;
	} else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
		put_user(0, (int __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int __user *) reg) + 1);
	} else {
		put_user(0, (unsigned long __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, (unsigned long __user *) reg + 1);
	}
	advance(regs);
}

void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		u32 first, second;
		int err;

		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		first = second = 0;
		err =
get_user(first, (u32 __user *)sfar); if (!err) err = get_user(second, (u32 __user *)(sfar + 4)); if (err) { if (!(asi & 0x2)) goto daex; first = second = 0; } save_and_clear_fpu(); freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); value = (((u64)first) << 32) | second; if (asi & 0x8) /* Little */ value = __swab64p(&value); flag = (freg < 32) ? FPRS_DL : FPRS_DU; if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) { current_thread_info()->fpsaved[0] = FPRS_FEF; current_thread_info()->gsr[0] = 0; } if (!(current_thread_info()->fpsaved[0] & flag)) { if (freg < 32) memset(f->regs, 0, 32*sizeof(u32)); else memset(f->regs+32, 0, 32*sizeof(u32)); } *(u64 *)(f->regs + freg) = value; current_thread_info()->fpsaved[0] |= flag; } else { daex: if (tlb_type == hypervisor) sun4v_data_access_exception(regs, sfar, sfsr); else spitfire_data_access_exception(regs, sfsr, sfar); goto out; } advance(regs); out: exception_exit(prev_state); } void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { enum ctx_state prev_state = exception_enter(); unsigned long pc = regs->tpc; unsigned long tstate = regs->tstate; u32 insn; u64 value; u8 freg; int flag; struct fpustate *f = FPUSTATE; if (tstate & TSTATE_PRIV) die_if_kernel("stdfmna from kernel", regs); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { int asi = decode_asi(insn, regs); freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); value = 0; flag = (freg < 32) ? 
FPRS_DL : FPRS_DU; if ((asi > ASI_SNFL) || (asi < ASI_P)) goto daex; save_and_clear_fpu(); if (current_thread_info()->fpsaved[0] & flag) value = *(u64 *)&f->regs[freg]; switch (asi) { case ASI_P: case ASI_S: break; case ASI_PL: case ASI_SL: value = __swab64p(&value); break; default: goto daex; } if (put_user (value >> 32, (u32 __user *) sfar) || __put_user ((u32)value, (u32 __user *)(sfar + 4))) goto daex; } else { daex: if (tlb_type == hypervisor) sun4v_data_access_exception(regs, sfar, sfsr); else spitfire_data_access_exception(regs, sfsr, sfar); goto out; } advance(regs); out: exception_exit(prev_state); }
gpl-2.0
wujiku/superstar-kernel-shooter-2.3.4gb
drivers/misc/cb710/sgbuf2.c
1794
3488
/* * cb710/sgbuf2.c * * Copyright by Michał Mirosław, 2008-2009 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/cb710.h> static bool sg_dwiter_next(struct sg_mapping_iter *miter) { if (sg_miter_next(miter)) { miter->consumed = 0; return true; } else return false; } static bool sg_dwiter_is_at_end(struct sg_mapping_iter *miter) { return miter->length == miter->consumed && !sg_dwiter_next(miter); } static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter) { size_t len, left = 4; uint32_t data; void *addr = &data; do { len = min(miter->length - miter->consumed, left); memcpy(addr, miter->addr + miter->consumed, len); miter->consumed += len; left -= len; if (!left) return data; addr += len; } while (sg_dwiter_next(miter)); memset(addr, 0, left); return data; } static inline bool needs_unaligned_copy(const void *ptr) { #ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS return false; #else return ((ptr - NULL) & 3) != 0; #endif } static bool sg_dwiter_get_next_block(struct sg_mapping_iter *miter, uint32_t **ptr) { size_t len; if (sg_dwiter_is_at_end(miter)) return true; len = miter->length - miter->consumed; if (likely(len >= 4 && !needs_unaligned_copy( miter->addr + miter->consumed))) { *ptr = miter->addr + miter->consumed; miter->consumed += 4; return true; } return false; } /** * cb710_sg_dwiter_read_next_block() - get next 32-bit word from sg buffer * @miter: sg mapping iterator used for reading * * Description: * Returns 32-bit word starting at byte pointed to by @miter@ * handling any alignment issues. Bytes past the buffer's end * are not accessed (read) but are returned as zeroes. @miter@ * is advanced by 4 bytes or to the end of buffer whichever is * closer. * * Context: * Same requirements as in sg_miter_next(). * * Returns: * 32-bit word just read. 
*/ uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter) { uint32_t *ptr = NULL; if (likely(sg_dwiter_get_next_block(miter, &ptr))) return ptr ? *ptr : 0; return sg_dwiter_read_buffer(miter); } EXPORT_SYMBOL_GPL(cb710_sg_dwiter_read_next_block); static void sg_dwiter_write_slow(struct sg_mapping_iter *miter, uint32_t data) { size_t len, left = 4; void *addr = &data; do { len = min(miter->length - miter->consumed, left); memcpy(miter->addr, addr, len); miter->consumed += len; left -= len; if (!left) return; addr += len; } while (sg_dwiter_next(miter)); } /** * cb710_sg_dwiter_write_next_block() - write next 32-bit word to sg buffer * @miter: sg mapping iterator used for writing * * Description: * Writes 32-bit word starting at byte pointed to by @miter@ * handling any alignment issues. Bytes which would be written * past the buffer's end are silently discarded. @miter@ is * advanced by 4 bytes or to the end of buffer whichever is closer. * * Context: * Same requirements as in sg_miter_next(). */ void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data) { uint32_t *ptr = NULL; if (likely(sg_dwiter_get_next_block(miter, &ptr))) { if (ptr) *ptr = data; else return; } else sg_dwiter_write_slow(miter, data); } EXPORT_SYMBOL_GPL(cb710_sg_dwiter_write_next_block);
gpl-2.0
ipaccess/linux-yocto-3.10
arch/arm/mach-omap2/board-igep0020.c
2050
18078
/* * Copyright (C) 2009 Integration Software and Electronic Engineering. * * Modified from mach-omap2/board-generic.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/usb/phy.h> #include <linux/regulator/machine.h> #include <linux/regulator/fixed.h> #include <linux/i2c/twl.h> #include <linux/mmc/host.h> #include <linux/mtd/nand.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <video/omapdss.h> #include <video/omap-panel-data.h> #include <linux/platform_data/mtd-onenand-omap2.h> #include "common.h" #include "gpmc.h" #include "mux.h" #include "hsmmc.h" #include "sdram-numonyx-m65kxxxxam.h" #include "common-board-devices.h" #include "board-flash.h" #include "control.h" #include "gpmc-onenand.h" #define IGEP2_SMSC911X_CS 5 #define IGEP2_SMSC911X_GPIO 176 #define IGEP2_GPIO_USBH_NRESET 24 #define IGEP2_GPIO_LED0_GREEN 26 #define IGEP2_GPIO_LED0_RED 27 #define IGEP2_GPIO_LED1_RED 28 #define IGEP2_GPIO_DVI_PUP 170 #define IGEP2_RB_GPIO_WIFI_NPD 94 #define IGEP2_RB_GPIO_WIFI_NRESET 95 #define IGEP2_RB_GPIO_BT_NRESET 137 #define IGEP2_RC_GPIO_WIFI_NPD 138 #define IGEP2_RC_GPIO_WIFI_NRESET 139 #define IGEP2_RC_GPIO_BT_NRESET 137 #define IGEP3_GPIO_LED0_GREEN 54 #define IGEP3_GPIO_LED0_RED 53 #define IGEP3_GPIO_LED1_RED 16 #define IGEP3_GPIO_USBH_NRESET 183 #define IGEP_SYSBOOT_MASK 0x1f #define IGEP_SYSBOOT_NAND 0x0f #define IGEP_SYSBOOT_ONENAND 0x10 /* * IGEP2 Hardware Revision Table * * -------------------------------------------------------------------------- * | Id. | Hw Rev. 
| HW0 (28) | WIFI_NPD | WIFI_NRESET | BT_NRESET | * -------------------------------------------------------------------------- * | 0 | B | high | gpio94 | gpio95 | - | * | 0 | B/C (B-compatible) | high | gpio94 | gpio95 | gpio137 | * | 1 | C | low | gpio138 | gpio139 | gpio137 | * -------------------------------------------------------------------------- */ #define IGEP2_BOARD_HWREV_B 0 #define IGEP2_BOARD_HWREV_C 1 #define IGEP3_BOARD_HWREV 2 static u8 hwrev; static void __init igep2_get_revision(void) { u8 ret; if (machine_is_igep0030()) { hwrev = IGEP3_BOARD_HWREV; return; } omap_mux_init_gpio(IGEP2_GPIO_LED1_RED, OMAP_PIN_INPUT); if (gpio_request_one(IGEP2_GPIO_LED1_RED, GPIOF_IN, "GPIO_HW0_REV")) { pr_warning("IGEP2: Could not obtain gpio GPIO_HW0_REV\n"); pr_err("IGEP2: Unknown Hardware Revision\n"); return; } ret = gpio_get_value(IGEP2_GPIO_LED1_RED); if (ret == 0) { pr_info("IGEP2: Hardware Revision C (B-NON compatible)\n"); hwrev = IGEP2_BOARD_HWREV_C; } else if (ret == 1) { pr_info("IGEP2: Hardware Revision B/C (B compatible)\n"); hwrev = IGEP2_BOARD_HWREV_B; } else { pr_err("IGEP2: Unknown Hardware Revision\n"); hwrev = -1; } gpio_free(IGEP2_GPIO_LED1_RED); } #if defined(CONFIG_MTD_ONENAND_OMAP2) || \ defined(CONFIG_MTD_ONENAND_OMAP2_MODULE) || \ defined(CONFIG_MTD_NAND_OMAP2) || \ defined(CONFIG_MTD_NAND_OMAP2_MODULE) #define ONENAND_MAP 0x20000000 /* NAND04GR4E1A ( x2 Flash built-in COMBO POP MEMORY ) * Since the device is equipped with two DataRAMs, and two-plane NAND * Flash memory array, these two component enables simultaneous program * of 4KiB. Plane1 has only even blocks such as block0, block2, block4 * while Plane2 has only odd blocks such as block1, block3, block5. 
* So MTD regards it as 4KiB page size and 256KiB block size 64*(2*2048) */ static struct mtd_partition igep_flash_partitions[] = { { .name = "X-Loader", .offset = 0, .size = 2 * (64*(2*2048)) }, { .name = "U-Boot", .offset = MTDPART_OFS_APPEND, .size = 6 * (64*(2*2048)), }, { .name = "Environment", .offset = MTDPART_OFS_APPEND, .size = 2 * (64*(2*2048)), }, { .name = "Kernel", .offset = MTDPART_OFS_APPEND, .size = 12 * (64*(2*2048)), }, { .name = "File System", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static inline u32 igep_get_sysboot_value(void) { return omap_ctrl_readl(OMAP343X_CONTROL_STATUS) & IGEP_SYSBOOT_MASK; } static void __init igep_flash_init(void) { u32 mux; mux = igep_get_sysboot_value(); if (mux == IGEP_SYSBOOT_NAND) { pr_info("IGEP: initializing NAND memory device\n"); board_nand_init(igep_flash_partitions, ARRAY_SIZE(igep_flash_partitions), 0, NAND_BUSWIDTH_16, nand_default_timings); } else if (mux == IGEP_SYSBOOT_ONENAND) { pr_info("IGEP: initializing OneNAND memory device\n"); board_onenand_init(igep_flash_partitions, ARRAY_SIZE(igep_flash_partitions), 0); } else { pr_err("IGEP: Flash: unsupported sysboot sequence found\n"); } } #else static void __init igep_flash_init(void) {} #endif #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) #include <linux/smsc911x.h> #include "gpmc-smsc911x.h" static struct omap_smsc911x_platform_data smsc911x_cfg = { .cs = IGEP2_SMSC911X_CS, .gpio_irq = IGEP2_SMSC911X_GPIO, .gpio_reset = -EINVAL, .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, }; static inline void __init igep2_init_smsc911x(void) { gpmc_smsc911x_init(&smsc911x_cfg); } #else static inline void __init igep2_init_smsc911x(void) { } #endif static struct regulator_consumer_supply igep_vmmc1_supply[] = { REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"), }; /* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */ static struct regulator_init_data igep_vmmc1 = { .constraints = { .min_uV = 1850000, .max_uV = 3150000, 
.valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(igep_vmmc1_supply), .consumer_supplies = igep_vmmc1_supply, }; static struct regulator_consumer_supply igep_vio_supply[] = { REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1"), }; static struct regulator_init_data igep_vio = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, .apply_uV = 1, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(igep_vio_supply), .consumer_supplies = igep_vio_supply, }; static struct regulator_consumer_supply igep_vmmc2_supply[] = { REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"), }; static struct regulator_init_data igep_vmmc2 = { .constraints = { .valid_modes_mask = REGULATOR_MODE_NORMAL, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(igep_vmmc2_supply), .consumer_supplies = igep_vmmc2_supply, }; static struct fixed_voltage_config igep_vwlan = { .supply_name = "vwlan", .microvolts = 3300000, .gpio = -EINVAL, .enabled_at_boot = 1, .init_data = &igep_vmmc2, }; static struct platform_device igep_vwlan_device = { .name = "reg-fixed-voltage", .id = 0, .dev = { .platform_data = &igep_vwlan, }, }; static struct omap2_hsmmc_info mmc[] = { { .mmc = 1, .caps = MMC_CAP_4_BIT_DATA, .gpio_cd = -EINVAL, .gpio_wp = -EINVAL, .deferred = true, }, #if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE) { .mmc = 2, .caps = MMC_CAP_4_BIT_DATA, .gpio_cd = -EINVAL, .gpio_wp = -EINVAL, }, #endif {} /* Terminator */ }; #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) #include <linux/leds.h> static struct gpio_led igep_gpio_leds[] = { [0] = { .name = "omap3:red:user0", .default_state = 0, }, [1] = { .name = "omap3:green:boot", .default_state = 1, }, [2] = { .name = 
"omap3:red:user1", .default_state = 0, }, [3] = { .name = "omap3:green:user1", .default_state = 0, .gpio = -EINVAL, /* gets replaced */ .active_low = 1, }, }; static struct gpio_led_platform_data igep_led_pdata = { .leds = igep_gpio_leds, .num_leds = ARRAY_SIZE(igep_gpio_leds), }; static struct platform_device igep_led_device = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &igep_led_pdata, }, }; static void __init igep_leds_init(void) { if (machine_is_igep0020()) { igep_gpio_leds[0].gpio = IGEP2_GPIO_LED0_RED; igep_gpio_leds[1].gpio = IGEP2_GPIO_LED0_GREEN; igep_gpio_leds[2].gpio = IGEP2_GPIO_LED1_RED; } else { igep_gpio_leds[0].gpio = IGEP3_GPIO_LED0_RED; igep_gpio_leds[1].gpio = IGEP3_GPIO_LED0_GREEN; igep_gpio_leds[2].gpio = IGEP3_GPIO_LED1_RED; } platform_device_register(&igep_led_device); } #else static struct gpio igep_gpio_leds[] __initdata = { { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:red:d0" }, { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:green:d0" }, { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:red:d1" }, }; static inline void igep_leds_init(void) { int i; if (machine_is_igep0020()) { igep_gpio_leds[0].gpio = IGEP2_GPIO_LED0_RED; igep_gpio_leds[1].gpio = IGEP2_GPIO_LED0_GREEN; igep_gpio_leds[2].gpio = IGEP2_GPIO_LED1_RED; } else { igep_gpio_leds[0].gpio = IGEP3_GPIO_LED0_RED; igep_gpio_leds[1].gpio = IGEP3_GPIO_LED0_GREEN; igep_gpio_leds[2].gpio = IGEP3_GPIO_LED1_RED; } if (gpio_request_array(igep_gpio_leds, ARRAY_SIZE(igep_gpio_leds))) { pr_warning("IGEP v2: Could not obtain leds gpios\n"); return; } for (i = 0; i < ARRAY_SIZE(igep_gpio_leds); i++) gpio_export(igep_gpio_leds[i].gpio, 0); } #endif static struct gpio igep2_twl_gpios[] = { { -EINVAL, GPIOF_IN, "GPIO_EHCI_NOC" }, { -EINVAL, GPIOF_OUT_INIT_LOW, "GPIO_USBH_CPEN" }, }; static int igep_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { int ret; /* gpio + 0 is "mmc0_cd" (input/IRQ) */ mmc[0].gpio_cd = gpio + 0; omap_hsmmc_late_init(mmc); /* TWL4030_GPIO_MAX + 1 == ledB (out, 
active low LED) */ #if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE) ret = gpio_request_one(gpio + TWL4030_GPIO_MAX + 1, GPIOF_OUT_INIT_HIGH, "gpio-led:green:d1"); if (ret == 0) gpio_export(gpio + TWL4030_GPIO_MAX + 1, 0); else pr_warning("IGEP: Could not obtain gpio GPIO_LED1_GREEN\n"); #else igep_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1; #endif if (machine_is_igep0030()) return 0; /* * REVISIT: need ehci-omap hooks for external VBUS * power switch and overcurrent detect */ igep2_twl_gpios[0].gpio = gpio + 1; /* TWL4030_GPIO_MAX + 0 == ledA, GPIO_USBH_CPEN (out, active low) */ igep2_twl_gpios[1].gpio = gpio + TWL4030_GPIO_MAX; ret = gpio_request_array(igep2_twl_gpios, ARRAY_SIZE(igep2_twl_gpios)); if (ret < 0) pr_err("IGEP2: Could not obtain gpio for USBH_CPEN"); return 0; }; static struct twl4030_gpio_platform_data igep_twl4030_gpio_pdata = { .use_leds = true, .setup = igep_twl_gpio_setup, }; static struct tfp410_platform_data dvi_panel = { .i2c_bus_num = 3, .power_down_gpio = IGEP2_GPIO_DVI_PUP, }; static struct omap_dss_device igep2_dvi_device = { .type = OMAP_DISPLAY_TYPE_DPI, .name = "dvi", .driver_name = "tfp410", .data = &dvi_panel, .phy.dpi.data_lines = 24, }; static struct omap_dss_device *igep2_dss_devices[] = { &igep2_dvi_device }; static struct omap_dss_board_info igep2_dss_data = { .num_devices = ARRAY_SIZE(igep2_dss_devices), .devices = igep2_dss_devices, .default_device = &igep2_dvi_device, }; static struct platform_device *igep_devices[] __initdata = { &igep_vwlan_device, }; static int igep2_keymap[] = { KEY(0, 0, KEY_LEFT), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_A), KEY(0, 3, KEY_B), KEY(1, 0, KEY_DOWN), KEY(1, 1, KEY_UP), KEY(1, 2, KEY_E), KEY(1, 3, KEY_F), KEY(2, 0, KEY_ENTER), KEY(2, 1, KEY_I), KEY(2, 2, KEY_J), KEY(2, 3, KEY_K), KEY(3, 0, KEY_M), KEY(3, 1, KEY_N), KEY(3, 2, KEY_O), KEY(3, 3, KEY_P) }; static struct matrix_keymap_data igep2_keymap_data = { .keymap = igep2_keymap, .keymap_size = ARRAY_SIZE(igep2_keymap), }; 
static struct twl4030_keypad_data igep2_keypad_pdata = { .keymap_data = &igep2_keymap_data, .rows = 4, .cols = 4, .rep = 1, }; static struct twl4030_platform_data igep_twldata = { /* platform_data for children goes here */ .gpio = &igep_twl4030_gpio_pdata, .vmmc1 = &igep_vmmc1, .vio = &igep_vio, }; static struct i2c_board_info __initdata igep2_i2c3_boardinfo[] = { { I2C_BOARD_INFO("eeprom", 0x50), }, }; static void __init igep_i2c_init(void) { int ret; omap3_pmic_get_config(&igep_twldata, TWL_COMMON_PDATA_USB, TWL_COMMON_REGULATOR_VPLL2); igep_twldata.vpll2->constraints.apply_uV = true; igep_twldata.vpll2->constraints.name = "VDVI"; if (machine_is_igep0020()) { /* * Bus 3 is attached to the DVI port where devices like the * pico DLP projector don't work reliably with 400kHz */ ret = omap_register_i2c_bus(3, 100, igep2_i2c3_boardinfo, ARRAY_SIZE(igep2_i2c3_boardinfo)); if (ret) pr_warning("IGEP2: Could not register I2C3 bus (%d)\n", ret); igep_twldata.keypad = &igep2_keypad_pdata; /* Get common pmic data */ omap3_pmic_get_config(&igep_twldata, TWL_COMMON_PDATA_AUDIO, 0); } omap3_pmic_init("twl4030", &igep_twldata); } static struct usbhs_phy_data igep2_phy_data[] __initdata = { { .port = 1, .reset_gpio = IGEP2_GPIO_USBH_NRESET, .vcc_gpio = -EINVAL, }, }; static struct usbhs_phy_data igep3_phy_data[] __initdata = { { .port = 2, .reset_gpio = IGEP3_GPIO_USBH_NRESET, .vcc_gpio = -EINVAL, }, }; static struct usbhs_omap_platform_data igep2_usbhs_bdata __initdata = { .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, }; static struct usbhs_omap_platform_data igep3_usbhs_bdata __initdata = { .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, }; #ifdef CONFIG_OMAP_MUX static struct omap_board_mux board_mux[] __initdata = { /* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */ OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), { .reg_offset = OMAP_MUX_TERMINATOR }, }; #endif #if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE) static struct gpio igep_wlan_bt_gpios[] 
__initdata = { { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_WIFI_NPD" }, { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_WIFI_NRESET" }, { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_BT_NRESET" }, }; static void __init igep_wlan_bt_init(void) { int err; /* GPIO's for WLAN-BT combo depends on hardware revision */ if (hwrev == IGEP2_BOARD_HWREV_B) { igep_wlan_bt_gpios[0].gpio = IGEP2_RB_GPIO_WIFI_NPD; igep_wlan_bt_gpios[1].gpio = IGEP2_RB_GPIO_WIFI_NRESET; igep_wlan_bt_gpios[2].gpio = IGEP2_RB_GPIO_BT_NRESET; } else if (hwrev == IGEP2_BOARD_HWREV_C || machine_is_igep0030()) { igep_wlan_bt_gpios[0].gpio = IGEP2_RC_GPIO_WIFI_NPD; igep_wlan_bt_gpios[1].gpio = IGEP2_RC_GPIO_WIFI_NRESET; igep_wlan_bt_gpios[2].gpio = IGEP2_RC_GPIO_BT_NRESET; } else return; /* Make sure that the GPIO pins are muxed correctly */ omap_mux_init_gpio(igep_wlan_bt_gpios[0].gpio, OMAP_PIN_OUTPUT); omap_mux_init_gpio(igep_wlan_bt_gpios[1].gpio, OMAP_PIN_OUTPUT); omap_mux_init_gpio(igep_wlan_bt_gpios[2].gpio, OMAP_PIN_OUTPUT); err = gpio_request_array(igep_wlan_bt_gpios, ARRAY_SIZE(igep_wlan_bt_gpios)); if (err) { pr_warning("IGEP2: Could not obtain WIFI/BT gpios\n"); return; } gpio_export(igep_wlan_bt_gpios[0].gpio, 0); gpio_export(igep_wlan_bt_gpios[1].gpio, 0); gpio_export(igep_wlan_bt_gpios[2].gpio, 0); gpio_set_value(igep_wlan_bt_gpios[1].gpio, 0); udelay(10); gpio_set_value(igep_wlan_bt_gpios[1].gpio, 1); } #else static inline void __init igep_wlan_bt_init(void) { } #endif static struct regulator_consumer_supply dummy_supplies[] = { REGULATOR_SUPPLY("vddvario", "smsc911x.0"), REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), }; static void __init igep_init(void) { regulator_register_fixed(1, dummy_supplies, ARRAY_SIZE(dummy_supplies)); omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); /* Get IGEP2 hardware revision */ igep2_get_revision(); omap_hsmmc_init(mmc); /* Register I2C busses and drivers */ igep_i2c_init(); platform_add_devices(igep_devices, ARRAY_SIZE(igep_devices)); omap_serial_init(); 
omap_sdrc_init(m65kxxxxam_sdrc_params, m65kxxxxam_sdrc_params); usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb"); usb_musb_init(NULL); igep_flash_init(); igep_leds_init(); omap_twl4030_audio_init("igep2", NULL); /* * WLAN-BT combo module from MuRata which has a Marvell WLAN * (88W8686) + CSR Bluetooth chipset. Uses SDIO interface. */ igep_wlan_bt_init(); if (machine_is_igep0020()) { omap_display_init(&igep2_dss_data); igep2_init_smsc911x(); usbhs_init_phys(igep2_phy_data, ARRAY_SIZE(igep2_phy_data)); usbhs_init(&igep2_usbhs_bdata); } else { usbhs_init_phys(igep3_phy_data, ARRAY_SIZE(igep3_phy_data)); usbhs_init(&igep3_usbhs_bdata); } } MACHINE_START(IGEP0020, "IGEP v2 board") .atag_offset = 0x100, .reserve = omap_reserve, .map_io = omap3_map_io, .init_early = omap35xx_init_early, .init_irq = omap3_init_irq, .handle_irq = omap3_intc_handle_irq, .init_machine = igep_init, .init_late = omap35xx_init_late, .init_time = omap3_sync32k_timer_init, .restart = omap3xxx_restart, MACHINE_END MACHINE_START(IGEP0030, "IGEP OMAP3 module") .atag_offset = 0x100, .reserve = omap_reserve, .map_io = omap3_map_io, .init_early = omap35xx_init_early, .init_irq = omap3_init_irq, .handle_irq = omap3_intc_handle_irq, .init_machine = igep_init, .init_late = omap35xx_init_late, .init_time = omap3_sync32k_timer_init, .restart = omap3xxx_restart, MACHINE_END
gpl-2.0
KylinMod/android_kernel_motorola_msm8960-common
drivers/net/arm/ks8695net.c
2306
43543
/* * Micrel KS8695 (Centaur) Ethernet. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Copyright 2008 Simtec Electronics * Daniel Silverstone <dsilvers@simtec.co.uk> * Vincent Sanders <vince@simtec.co.uk> */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/crc32.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/slab.h> #include <asm/irq.h> #include <mach/regs-switch.h> #include <mach/regs-misc.h> #include <asm/mach/irq.h> #include <mach/regs-irq.h> #include "ks8695net.h" #define MODULENAME "ks8695_ether" #define MODULEVERSION "1.02" /* * Transmit and device reset timeout, default 5 seconds. */ static int watchdog = 5000; /* Hardware structures */ /** * struct rx_ring_desc - Receive descriptor ring element * @status: The status of the descriptor element (E.g. who owns it) * @length: The number of bytes in the block pointed to by data_ptr * @data_ptr: The physical address of the data block to receive into * @next_desc: The physical address of the next descriptor element. 
*/ struct rx_ring_desc { __le32 status; __le32 length; __le32 data_ptr; __le32 next_desc; }; /** * struct tx_ring_desc - Transmit descriptor ring element * @owner: Who owns the descriptor * @status: The number of bytes in the block pointed to by data_ptr * @data_ptr: The physical address of the data block to receive into * @next_desc: The physical address of the next descriptor element. */ struct tx_ring_desc { __le32 owner; __le32 status; __le32 data_ptr; __le32 next_desc; }; /** * struct ks8695_skbuff - sk_buff wrapper for rx/tx rings. * @skb: The buffer in the ring * @dma_ptr: The mapped DMA pointer of the buffer * @length: The number of bytes mapped to dma_ptr */ struct ks8695_skbuff { struct sk_buff *skb; dma_addr_t dma_ptr; u32 length; }; /* Private device structure */ #define MAX_TX_DESC 8 #define MAX_TX_DESC_MASK 0x7 #define MAX_RX_DESC 16 #define MAX_RX_DESC_MASK 0xf /*napi_weight have better more than rx DMA buffers*/ #define NAPI_WEIGHT 64 #define MAX_RXBUF_SIZE 0x700 #define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC) #define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC) #define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE) /** * enum ks8695_dtype - Device type * @KS8695_DTYPE_WAN: This device is a WAN interface * @KS8695_DTYPE_LAN: This device is a LAN interface * @KS8695_DTYPE_HPNA: This device is an HPNA interface */ enum ks8695_dtype { KS8695_DTYPE_WAN, KS8695_DTYPE_LAN, KS8695_DTYPE_HPNA, }; /** * struct ks8695_priv - Private data for the KS8695 Ethernet * @in_suspend: Flag to indicate if we're suspending/resuming * @ndev: The net_device for this interface * @dev: The platform device object for this interface * @dtype: The type of this device * @io_regs: The ioremapped registers for this interface * @napi : Add support NAPI for Rx * @rx_irq_name: The textual name of the RX IRQ from the platform data * @tx_irq_name: The textual name of the TX IRQ from the platform data * @link_irq_name: The textual name of the 
link IRQ from the * platform data if available * @rx_irq: The IRQ number for the RX IRQ * @tx_irq: The IRQ number for the TX IRQ * @link_irq: The IRQ number for the link IRQ if available * @regs_req: The resource request for the registers region * @phyiface_req: The resource request for the phy/switch region * if available * @phyiface_regs: The ioremapped registers for the phy/switch if available * @ring_base: The base pointer of the dma coherent memory for the rings * @ring_base_dma: The DMA mapped equivalent of ring_base * @tx_ring: The pointer in ring_base of the TX ring * @tx_ring_used: The number of slots in the TX ring which are occupied * @tx_ring_next_slot: The next slot to fill in the TX ring * @tx_ring_dma: The DMA mapped equivalent of tx_ring * @tx_buffers: The sk_buff mappings for the TX ring * @txq_lock: A lock to protect the tx_buffers tx_ring_used etc variables * @rx_ring: The pointer in ring_base of the RX ring * @rx_ring_dma: The DMA mapped equivalent of rx_ring * @rx_buffers: The sk_buff mappings for the RX ring * @next_rx_desc_read: The next RX descriptor to read from on IRQ * @rx_lock: A lock to protect Rx irq function * @msg_enable: The flags for which messages to emit */ struct ks8695_priv { int in_suspend; struct net_device *ndev; struct device *dev; enum ks8695_dtype dtype; void __iomem *io_regs; struct napi_struct napi; const char *rx_irq_name, *tx_irq_name, *link_irq_name; int rx_irq, tx_irq, link_irq; struct resource *regs_req, *phyiface_req; void __iomem *phyiface_regs; void *ring_base; dma_addr_t ring_base_dma; struct tx_ring_desc *tx_ring; int tx_ring_used; int tx_ring_next_slot; dma_addr_t tx_ring_dma; struct ks8695_skbuff tx_buffers[MAX_TX_DESC]; spinlock_t txq_lock; struct rx_ring_desc *rx_ring; dma_addr_t rx_ring_dma; struct ks8695_skbuff rx_buffers[MAX_RX_DESC]; int next_rx_desc_read; spinlock_t rx_lock; int msg_enable; }; /* Register access */ /** * ks8695_readreg - Read from a KS8695 ethernet register * @ksp: The device to read 
* from
 * @reg: The register to read
 */
static inline u32
ks8695_readreg(struct ks8695_priv *ksp, int reg)
{
	return readl(ksp->io_regs + reg);
}

/**
 * ks8695_writereg - Write to a KS8695 ethernet register
 * @ksp: The device to write to
 * @reg: The register to write
 * @value: The value to write to the register
 */
static inline void
ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value)
{
	writel(value, ksp->io_regs + reg);
}

/* Utility functions */

/**
 * ks8695_port_type - Retrieve port-type as user-friendly string
 * @ksp: The device to return the type for
 *
 * Returns a string indicating which of the WAN, LAN or HPNA
 * ports this device is likely to represent.
 */
static const char *
ks8695_port_type(struct ks8695_priv *ksp)
{
	switch (ksp->dtype) {
	case KS8695_DTYPE_LAN:
		return "LAN";
	case KS8695_DTYPE_WAN:
		return "WAN";
	case KS8695_DTYPE_HPNA:
		return "HPNA";
	}

	return "UNKNOWN";
}

/**
 * ks8695_update_mac - Update the MAC registers in the device
 * @ksp: The device to update
 *
 * Updates the MAC registers in the KS8695 device from the address in the
 * net_device structure associated with this interface.
 */
static void
ks8695_update_mac(struct ks8695_priv *ksp)
{
	/* Update the HW with the MAC from the net_device */
	struct net_device *ndev = ksp->ndev;
	u32 machigh, maclow;

	/* MAC is split across two registers: low 4 bytes and high 2 bytes */
	maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
		  (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5] << 0));
	machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0));

	ks8695_writereg(ksp, KS8695_MAL, maclow);
	ks8695_writereg(ksp, KS8695_MAH, machigh);
}

/**
 * ks8695_refill_rxbuffers - Re-fill the RX buffer ring
 * @ksp: The device to refill
 *
 * Iterates the RX ring of the device looking for empty slots.
 * For each empty slot, we allocate and map a new SKB and give it
 * to the hardware.
 * This can be called from interrupt context safely.
*/
static void
ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
{
	/* Run around the RX ring, filling in any missing sk_buff's */
	int buff_n;

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (!ksp->rx_buffers[buff_n].skb) {
			struct sk_buff *skb = dev_alloc_skb(MAX_RXBUF_SIZE);
			dma_addr_t mapping;

			ksp->rx_buffers[buff_n].skb = skb;
			if (skb == NULL) {
				/* Failed to allocate one, perhaps
				 * we'll try again later.
				 */
				break;
			}

			mapping = dma_map_single(ksp->dev, skb->data,
						 MAX_RXBUF_SIZE,
						 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
				/* Failed to DMA map this SKB, try later */
				dev_kfree_skb_irq(skb);
				ksp->rx_buffers[buff_n].skb = NULL;
				break;
			}
			ksp->rx_buffers[buff_n].dma_ptr = mapping;
			skb->dev = ksp->ndev;
			ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;

			/* Record this into the DMA ring */
			ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
			ksp->rx_ring[buff_n].length =
				cpu_to_le32(MAX_RXBUF_SIZE);

			/* Descriptor must be fully written before the
			 * ownership bit is handed to the hardware.
			 */
			wmb();

			/* And give ownership over to the hardware */
			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
		}
	}
}

/* Maximum number of multicast addresses which the KS8695 HW supports */
#define KS8695_NR_ADDRESSES	16

/**
 * ks8695_init_partial_multicast - Init the mcast addr registers
 * @ksp: The device to initialise
 * @ndev: The net_device whose multicast list to program
 *
 * This routine is a helper for ks8695_set_multicast - it writes
 * the additional-address registers in the KS8695 ethernet device
 * and cleans up any others left behind.
 */
static void
ks8695_init_partial_multicast(struct ks8695_priv *ksp,
			      struct net_device *ndev)
{
	u32 low, high;
	int i;
	struct netdev_hw_addr *ha;

	i = 0;
	netdev_for_each_mc_addr(ha, ndev) {
		/* Ran out of space in chip? */
		BUG_ON(i == KS8695_NR_ADDRESSES);

		low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
		      (ha->addr[4] << 8) | (ha->addr[5]);
		high = (ha->addr[0] << 8) | (ha->addr[1]);

		ks8695_writereg(ksp, KS8695_AAL_(i), low);
		ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
		i++;
	}

	/* Clear the remaining Additional Station Addresses */
	for (; i < KS8695_NR_ADDRESSES; i++) {
		ks8695_writereg(ksp, KS8695_AAL_(i), 0);
		ks8695_writereg(ksp, KS8695_AAH_(i), 0);
	}
}

/* Interrupt handling */

/**
 * ks8695_tx_irq - Transmit IRQ handler
 * @irq: The IRQ which went off (ignored)
 * @dev_id: The net_device for the interrupt
 *
 * Process the TX ring, clearing out any transmitted slots.
 * Allows the net_device to pass us new packets once slots are
 * freed.
 */
static irqreturn_t
ks8695_tx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;

	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb &&
		    !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) {
			rmb();
			/* An SKB which is not owned by HW is present */
			/* Update the stats for the net_device */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length;

			/* Free the packet from the ring */
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Free the sk_buff */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
			ksp->tx_ring_used--;
		}
	}

	netif_wake_queue(ndev);

	return IRQ_HANDLED;
}

/**
 * ks8695_get_rx_enable_bit - Get rx interrupt enable/status bit
 * @ksp: Private data for the KS8695 Ethernet
 *
 * For KS8695 document:
 * Interrupt Enable Register (offset 0xE204)
 * Bit29 : WAN MAC Receive Interrupt Enable
 * Bit16 : LAN MAC Receive Interrupt Enable
 * Interrupt Status Register (Offset 0xF208)
 * Bit29: WAN MAC Receive Status
 * Bit16: LAN MAC Receive Status
 * So, this Rx interrupt
* enable/status bit number is the same
 * as the Rx IRQ number.
 */
static inline u32
ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
{
	return ksp->rx_irq;
}

/**
 * ks8695_rx_irq - Receive IRQ handler
 * @irq: The IRQ which went off (ignored)
 * @dev_id: The net_device for the interrupt
 *
 * Inform NAPI that packet reception needs to be scheduled
 */
static irqreturn_t
ks8695_rx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);

	spin_lock(&ksp->rx_lock);

	if (napi_schedule_prep(&ksp->napi)) {
		unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
		unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
		/* disable rx interrupt */
		status &= ~mask_bit;
		writel(status, KS8695_IRQ_VA + KS8695_INTEN);
		__napi_schedule(&ksp->napi);
	}

	spin_unlock(&ksp->rx_lock);
	return IRQ_HANDLED;
}

/**
 * ks8695_rx - Receive packets called by NAPI poll method
 * @ksp: Private data for the KS8695 Ethernet
 * @budget: Number of packets allowed to process
 */
static int ks8695_rx(struct ks8695_priv *ksp, int budget)
{
	struct net_device *ndev = ksp->ndev;
	struct sk_buff *skb;
	int buff_n;
	u32 flags;
	int pktlen;
	int received = 0;

	buff_n = ksp->next_rx_desc_read;
	while (received < budget &&
	       ksp->rx_buffers[buff_n].skb &&
	       (!(ksp->rx_ring[buff_n].status & cpu_to_le32(RDES_OWN)))) {
		rmb();
		flags = le32_to_cpu(ksp->rx_ring[buff_n].status);

		/* Found an SKB which we own, this means we
		 * received a packet
		 */
		if ((flags & (RDES_FS | RDES_LS)) !=
		    (RDES_FS | RDES_LS)) {
			/* This packet is not the first and
			 * the last segment.  Therefore it is
			 * a "spanning" packet and we can't
			 * handle it
			 */
			goto rx_failure;
		}

		if (flags & (RDES_ES | RDES_RE)) {
			/* It's an error packet */
			ndev->stats.rx_errors++;
			if (flags & RDES_TL)
				ndev->stats.rx_length_errors++;
			if (flags & RDES_RF)
				ndev->stats.rx_length_errors++;
			if (flags & RDES_CE)
				ndev->stats.rx_crc_errors++;
			if (flags & RDES_RE)
				ndev->stats.rx_missed_errors++;

			goto rx_failure;
		}

		pktlen = flags & RDES_FLEN;
		pktlen -= 4; /* Drop the CRC */

		/* Retrieve the sk_buff */
		skb = ksp->rx_buffers[buff_n].skb;

		/* Clear it from the ring */
		ksp->rx_buffers[buff_n].skb = NULL;
		ksp->rx_ring[buff_n].data_ptr = 0;

		/* Unmap the SKB */
		dma_unmap_single(ksp->dev,
				 ksp->rx_buffers[buff_n].dma_ptr,
				 ksp->rx_buffers[buff_n].length,
				 DMA_FROM_DEVICE);

		/* Relinquish the SKB to the network layer */
		skb_put(skb, pktlen);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);

		/* Record stats */
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += pktlen;
		goto rx_finished;

rx_failure:
		/* This ring entry is an error, but we can
		 * re-use the skb
		 */
		/* Give the ring entry back to the hardware */
		ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
rx_finished:
		received++;
		buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
	}

	/* And note which RX descriptor we last did */
	ksp->next_rx_desc_read = buff_n;

	/* And refill the buffers */
	ks8695_refill_rxbuffers(ksp);

	/* Kick the RX DMA engine, in case it became suspended */
	ks8695_writereg(ksp, KS8695_DRSC, 0);

	return received;
}

/**
 * ks8695_poll - Receive packet by NAPI poll method
 * @napi: The NAPI context of the KS8695 Ethernet
 * @budget: The remaining number of packets for the network subsystem
 *
 * Invoked by the network core when it requests for new
 * packets from the driver
 */
static int ks8695_poll(struct napi_struct *napi, int budget)
{
	struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
	unsigned long work_done;

	/* NOTE(review): the interrupt-enable register is sampled before
	 * polling and written back afterwards with only the rx bit OR'd in;
	 * verify no other context modifies INTEN in between.
	 */
	unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
	unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);

	work_done = ks8695_rx(ksp, budget);

	if (work_done < budget) {
		unsigned long flags;
		spin_lock_irqsave(&ksp->rx_lock, flags);
		__napi_complete(napi);
		/* enable rx interrupt */
		writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
		spin_unlock_irqrestore(&ksp->rx_lock, flags);
	}
	return work_done;
}

/**
 * ks8695_link_irq - Link change IRQ handler
 * @irq: The IRQ which went off (ignored)
 * @dev_id: The net_device for the interrupt
 *
 * The WAN interface can generate an IRQ when the link changes,
 * report this to the net layer and the user.
 */
static irqreturn_t
ks8695_link_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
	if (ctrl & WMC_WLS) {
		netif_carrier_on(ndev);
		if (netif_msg_link(ksp))
			dev_info(ksp->dev,
				 "%s: Link is now up (10%sMbps/%s-duplex)\n",
				 ndev->name,
				 (ctrl & WMC_WSS) ? "0" : "",
				 (ctrl & WMC_WDS) ? "Full" : "Half");
	} else {
		netif_carrier_off(ndev);
		if (netif_msg_link(ksp))
			dev_info(ksp->dev, "%s: Link is now down.\n",
				 ndev->name);
	}

	return IRQ_HANDLED;
}

/* KS8695 Device functions */

/**
 * ks8695_reset - Reset a KS8695 ethernet interface
 * @ksp: The interface to reset
 *
 * Perform an engine reset of the interface and re-program it
 * with sensible defaults.
*/
static void
ks8695_reset(struct ks8695_priv *ksp)
{
	int reset_timeout = watchdog;

	/* Issue the reset via the TX DMA control register */
	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST);
	while (reset_timeout--) {
		if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST))
			break;
		msleep(1);
	}

	if (reset_timeout < 0) {
		dev_crit(ksp->dev,
			 "Timeout waiting for DMA engines to reset\n");
		/* And blithely carry on */
	}

	/* Definitely wait long enough before attempting to program
	 * the engines
	 */
	msleep(10);

	/* RX: unicast and broadcast */
	ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB);
	/* TX: pad and add CRC */
	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC);
}

/**
 * ks8695_shutdown - Shut down a KS8695 ethernet interface
 * @ksp: The interface to shut down
 *
 * This disables packet RX/TX, cleans up IRQs, drains the rings,
 * and basically places the interface into a clean shutdown
 * state.
 */
static void
ks8695_shutdown(struct ks8695_priv *ksp)
{
	u32 ctrl;
	int buff_n;

	/* Disable packet transmission */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE);

	/* Disable packet reception */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE);

	/* Release the IRQs */
	free_irq(ksp->rx_irq, ksp->ndev);
	free_irq(ksp->tx_irq, ksp->ndev);
	if (ksp->link_irq != -1)
		free_irq(ksp->link_irq, ksp->ndev);

	/* Throw away any pending TX packets */
	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb) {
			/* Remove this SKB from the TX ring */
			ksp->tx_ring[buff_n].owner = 0;
			ksp->tx_ring[buff_n].status = 0;
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Unmap and bin this SKB */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
		}
	}

	/* Purge the RX buffers */
	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (ksp->rx_buffers[buff_n].skb) {
			/* Remove the SKB from the RX ring */
			ksp->rx_ring[buff_n].status = 0;
			ksp->rx_ring[buff_n].data_ptr = 0;

			/* Unmap and bin the SKB */
			dma_unmap_single(ksp->dev,
					 ksp->rx_buffers[buff_n].dma_ptr,
					 ksp->rx_buffers[buff_n].length,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb);
			ksp->rx_buffers[buff_n].skb = NULL;
		}
	}
}

/**
 * ks8695_setup_irq - IRQ setup helper function
 * @irq: The IRQ number to claim
 * @irq_name: The name to give the IRQ claimant
 * @handler: The function to call to handle the IRQ
 * @ndev: The net_device to pass in as the dev_id argument to the handler
 *
 * Return 0 on success.
 */
static int
ks8695_setup_irq(int irq, const char *irq_name,
		 irq_handler_t handler, struct net_device *ndev)
{
	int ret;

	ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev);

	if (ret) {
		dev_err(&ndev->dev, "failure to request IRQ %d\n", irq);
		return ret;
	}

	return 0;
}

/**
 * ks8695_init_net - Initialise a KS8695 ethernet interface
 * @ksp: The interface to initialise
 *
 * This routine fills the RX ring, initialises the DMA engines,
 * allocates the IRQs and then starts the packet TX and RX
 * engines.
*/
static int
ks8695_init_net(struct ks8695_priv *ksp)
{
	int ret;
	u32 ctrl;

	ks8695_refill_rxbuffers(ksp);

	/* Initialise the DMA engines */
	ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma);
	ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma);

	/* Request the IRQs */
	ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name,
			       ks8695_rx_irq, ksp->ndev);
	if (ret)
		return ret;
	ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name,
			       ks8695_tx_irq, ksp->ndev);
	if (ret)
		return ret;
	if (ksp->link_irq != -1) {
		ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name,
				       ks8695_link_irq, ksp->ndev);
		if (ret)
			return ret;
	}
	/* NOTE(review): on a failed request above, earlier successfully
	 * requested IRQs are not freed here; the caller appears to rely on
	 * ks8695_shutdown() for cleanup (see ks8695_open) — verify that
	 * path does not free_irq() an IRQ that was never requested.
	 */

	/* Set up the ring indices */
	ksp->next_rx_desc_read = 0;
	ksp->tx_ring_next_slot = 0;
	ksp->tx_ring_used = 0;

	/* Bring up transmission */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	/* Enable packet transmission */
	ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE);

	/* Bring up the reception */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	/* Enable packet reception */
	ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE);
	/* And start the DMA engine */
	ks8695_writereg(ksp, KS8695_DRSC, 0);

	/* All done */
	return 0;
}

/**
 * ks8695_release_device - HW resource release for KS8695 e-net
 * @ksp: The device to be freed
 *
 * This unallocates io memory regions, dma-coherent regions etc
 * which were allocated in ks8695_probe.
*/
static void
ks8695_release_device(struct ks8695_priv *ksp)
{
	/* Unmap the registers */
	iounmap(ksp->io_regs);
	if (ksp->phyiface_regs)
		iounmap(ksp->phyiface_regs);

	/* And release the request */
	release_resource(ksp->regs_req);
	kfree(ksp->regs_req);
	if (ksp->phyiface_req) {
		release_resource(ksp->phyiface_req);
		kfree(ksp->phyiface_req);
	}

	/* Free the ring buffers */
	dma_free_coherent(ksp->dev, RING_DMA_SIZE,
			  ksp->ring_base, ksp->ring_base_dma);
}

/* Ethtool support */

/**
 * ks8695_get_msglevel - Get the messages enabled for emission
 * @ndev: The network device to read from
 */
static u32
ks8695_get_msglevel(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	return ksp->msg_enable;
}

/**
 * ks8695_set_msglevel - Set the messages enabled for emission
 * @ndev: The network device to configure
 * @value: The messages to set for emission
 */
static void
ks8695_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	ksp->msg_enable = value;
}

/**
 * ks8695_wan_get_settings - Get device-specific settings.
 * @ndev: The network device to read settings from
 * @cmd: The ethtool structure to read into
 */
static int
ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	/* All ports on the KS8695 support these... */
	cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			  SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			  SUPPORTED_TP | SUPPORTED_MII);
	cmd->transceiver = XCVR_INTERNAL;

	cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
	cmd->port = PORT_MII;
	cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
	cmd->phy_address = 0;

	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
	if ((ctrl & WMC_WAND) == 0) {
		/* auto-negotiation is enabled */
		cmd->advertising |= ADVERTISED_Autoneg;
		if (ctrl & WMC_WANA100F)
			cmd->advertising |= ADVERTISED_100baseT_Full;
		if (ctrl & WMC_WANA100H)
			cmd->advertising |= ADVERTISED_100baseT_Half;
		if (ctrl & WMC_WANA10F)
			cmd->advertising |= ADVERTISED_10baseT_Full;
		if (ctrl & WMC_WANA10H)
			cmd->advertising |= ADVERTISED_10baseT_Half;
		if (ctrl & WMC_WANAP)
			cmd->advertising |= ADVERTISED_Pause;
		cmd->autoneg = AUTONEG_ENABLE;

		ethtool_cmd_speed_set(cmd,
				      (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10);
		cmd->duplex = (ctrl & WMC_WDS) ?
			DUPLEX_FULL : DUPLEX_HALF;
	} else {
		/* auto-negotiation is disabled */
		cmd->autoneg = AUTONEG_DISABLE;

		ethtool_cmd_speed_set(cmd, ((ctrl & WMC_WANF100) ?
					    SPEED_100 : SPEED_10));
		cmd->duplex = (ctrl & WMC_WANFF) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}

	return 0;
}

/**
 * ks8695_wan_set_settings - Set device-specific settings.
* @ndev: The network device to configure
 * @cmd: The settings to configure
 */
static int
ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	/* Validate the request before touching the hardware */
	if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100))
		return -EINVAL;
	if ((cmd->duplex != DUPLEX_HALF) &&
	    (cmd->duplex != DUPLEX_FULL))
		return -EINVAL;
	if (cmd->port != PORT_MII)
		return -EINVAL;
	if (cmd->transceiver != XCVR_INTERNAL)
		return -EINVAL;
	if ((cmd->autoneg != AUTONEG_DISABLE) &&
	    (cmd->autoneg != AUTONEG_ENABLE))
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if ((cmd->advertising & (ADVERTISED_10baseT_Half |
					 ADVERTISED_10baseT_Full |
					 ADVERTISED_100baseT_Half |
					 ADVERTISED_100baseT_Full)) == 0)
			return -EINVAL;

		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

		ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
			  WMC_WANA10F | WMC_WANA10H);
		if (cmd->advertising & ADVERTISED_100baseT_Full)
			ctrl |= WMC_WANA100F;
		if (cmd->advertising & ADVERTISED_100baseT_Half)
			ctrl |= WMC_WANA100H;
		if (cmd->advertising & ADVERTISED_10baseT_Full)
			ctrl |= WMC_WANA10F;
		if (cmd->advertising & ADVERTISED_10baseT_Half)
			ctrl |= WMC_WANA10H;

		/* force a re-negotiation */
		ctrl |= WMC_WANR;
		writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
	} else {
		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

		/* disable auto-negotiation */
		ctrl |= WMC_WAND;
		ctrl &= ~(WMC_WANF100 | WMC_WANFF);

		if (cmd->speed == SPEED_100)
			ctrl |= WMC_WANF100;
		if (cmd->duplex == DUPLEX_FULL)
			ctrl |= WMC_WANFF;

		writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
	}

	return 0;
}

/**
 * ks8695_wan_nwayreset - Restart the autonegotiation on the port.
* @ndev: The network device to restart autonegotiation on
 */
static int
ks8695_wan_nwayreset(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

	if ((ctrl & WMC_WAND) == 0)
		writel(ctrl | WMC_WANR,
		       ksp->phyiface_regs + KS8695_WMC);
	else
		/* auto-negotiation not enabled */
		return -EINVAL;

	return 0;
}

/**
 * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising
 * @ndev: The device to retrieve settings from
 * @param: The structure to fill out with the information
 */
static void
ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

	/* advertise Pause */
	param->autoneg = (ctrl & WMC_WANAP);

	/* current Rx Flow-control */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	param->rx_pause = (ctrl & DRXC_RFCE);

	/* current Tx Flow-control */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	param->tx_pause = (ctrl & DTXC_TFCE);
}

/**
 * ks8695_get_drvinfo - Retrieve driver information
 * @ndev: The network device to retrieve info about
 * @info: The info structure to fill out.
*/
static void
ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, MODULEVERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}

/* Minimal ethtool support for the LAN and HPNA ports */
static const struct ethtool_ops ks8695_ethtool_ops = {
	.get_msglevel	= ks8695_get_msglevel,
	.set_msglevel	= ks8695_set_msglevel,
	.get_drvinfo	= ks8695_get_drvinfo,
};

/* The WAN port additionally exposes link settings and pause info */
static const struct ethtool_ops ks8695_wan_ethtool_ops = {
	.get_msglevel	= ks8695_get_msglevel,
	.set_msglevel	= ks8695_set_msglevel,
	.get_settings	= ks8695_wan_get_settings,
	.set_settings	= ks8695_wan_set_settings,
	.nway_reset	= ks8695_wan_nwayreset,
	.get_link	= ethtool_op_get_link,
	.get_pauseparam = ks8695_wan_get_pause,
	.get_drvinfo	= ks8695_get_drvinfo,
};

/* Network device interface functions */

/**
 * ks8695_set_mac - Update MAC in net dev and HW
 * @ndev: The network device to update
 * @addr: The new MAC address to set
 */
static int
ks8695_set_mac(struct net_device *ndev, void *addr)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	struct sockaddr *address = addr;

	if (!is_valid_ether_addr(address->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);

	ks8695_update_mac(ksp);

	dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n",
		ndev->name, ndev->dev_addr);

	return 0;
}

/**
 * ks8695_set_multicast - Set up the multicast behaviour of the interface
 * @ndev: The net_device to configure
 *
 * This routine, called by the net layer, configures promiscuity
 * and multicast reception behaviour for the interface.
*/
static void
ks8695_set_multicast(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	ctrl = ks8695_readreg(ksp, KS8695_DRXC);

	if (ndev->flags & IFF_PROMISC) {
		/* enable promiscuous mode */
		ctrl |= DRXC_RA;
	} else if (ndev->flags & ~IFF_PROMISC) {
		/* disable promiscuous mode */
		/* NOTE(review): this condition tests any flag other than
		 * IFF_PROMISC and so is effectively always true for a
		 * running interface; a plain else looks intended — verify.
		 */
		ctrl &= ~DRXC_RA;
	}

	if (ndev->flags & IFF_ALLMULTI) {
		/* enable all multicast mode */
		ctrl |= DRXC_RM;
	} else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
		/* more specific multicast addresses than can be
		 * handled in hardware
		 */
		ctrl |= DRXC_RM;
	} else {
		/* enable specific multicasts */
		ctrl &= ~DRXC_RM;
		ks8695_init_partial_multicast(ksp, ndev);
	}

	ks8695_writereg(ksp, KS8695_DRXC, ctrl);
}

/**
 * ks8695_timeout - Handle a network tx/rx timeout.
 * @ndev: The net_device which timed out.
 *
 * A network transaction timed out, reset the device.
 */
static void
ks8695_timeout(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	netif_stop_queue(ndev);
	ks8695_shutdown(ksp);

	ks8695_reset(ksp);

	ks8695_update_mac(ksp);

	/* We ignore the return from this since it managed to init
	 * before it probably will be okay to init again.
	 */
	ks8695_init_net(ksp);

	/* Reconfigure promiscuity etc */
	ks8695_set_multicast(ndev);

	/* And start the TX queue once more */
	netif_start_queue(ndev);
}

/**
 * ks8695_start_xmit - Start a packet transmission
 * @skb: The packet to transmit
 * @ndev: The network device to send the packet on
 *
 * This routine, called by the net layer, takes ownership of the
 * sk_buff and adds it to the TX ring. It then kicks the TX DMA
 * engine to ensure transmission begins.
*/
static int
ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;
	dma_addr_t dmap;

	spin_lock_irq(&ksp->txq_lock);

	if (ksp->tx_ring_used == MAX_TX_DESC) {
		/* Somehow we got entered when we have no room */
		spin_unlock_irq(&ksp->txq_lock);
		return NETDEV_TX_BUSY;
	}

	buff_n = ksp->tx_ring_next_slot;

	BUG_ON(ksp->tx_buffers[buff_n].skb);

	dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ksp->dev, dmap))) {
		/* Failed to DMA map this SKB, give it back for now */
		spin_unlock_irq(&ksp->txq_lock);
		dev_dbg(ksp->dev, "%s: Could not map DMA memory for "
			"transmission, trying later\n", ndev->name);
		return NETDEV_TX_BUSY;
	}

	ksp->tx_buffers[buff_n].dma_ptr = dmap;
	/* Mapped okay, store the buffer pointer and length for later */
	ksp->tx_buffers[buff_n].skb = skb;
	ksp->tx_buffers[buff_n].length = skb->len;

	/* Fill out the TX descriptor */
	ksp->tx_ring[buff_n].data_ptr =
		cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr);
	ksp->tx_ring[buff_n].status =
		cpu_to_le32(TDES_IC | TDES_FS | TDES_LS |
			    (skb->len & TDES_TBS));

	/* Descriptor must be complete before ownership is handed over */
	wmb();

	/* Hand it over to the hardware */
	ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN);

	if (++ksp->tx_ring_used == MAX_TX_DESC)
		netif_stop_queue(ndev);

	/* Kick the TX DMA in case it decided to go IDLE */
	ks8695_writereg(ksp, KS8695_DTSC, 0);

	/* And update the next ring slot */
	ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK;

	spin_unlock_irq(&ksp->txq_lock);
	return NETDEV_TX_OK;
}

/**
 * ks8695_stop - Stop (shutdown) a KS8695 ethernet interface
 * @ndev: The net_device to stop
 *
 * This disables the TX queue and cleans up a KS8695 ethernet
 * device.
*/
static int
ks8695_stop(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&ksp->napi);

	ks8695_shutdown(ksp);

	return 0;
}

/**
 * ks8695_open - Open (bring up) a KS8695 ethernet interface
 * @ndev: The net_device to open
 *
 * This resets, configures the MAC, initialises the RX ring and
 * DMA engines and starts the TX queue for a KS8695 ethernet
 * device.
 */
static int
ks8695_open(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int ret;

	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EADDRNOTAVAIL;

	ks8695_reset(ksp);

	ks8695_update_mac(ksp);

	ret = ks8695_init_net(ksp);
	if (ret) {
		ks8695_shutdown(ksp);
		return ret;
	}

	napi_enable(&ksp->napi);
	netif_start_queue(ndev);

	return 0;
}

/* Platform device driver */

/**
 * ks8695_init_switch - Init LAN switch to known good defaults.
 * @ksp: The device to initialise
 *
 * This initialises the LAN switch in the KS8695 to a known-good
 * set of defaults.
 */
static void __devinit
ks8695_init_switch(struct ks8695_priv *ksp)
{
	u32 ctrl;

	/* Default value for SEC0 according to datasheet */
	ctrl = 0x40819e00;

	/* LED0 = Speed	 LED1 = Link/Activity */
	ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S);
	ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY);

	/* Enable Switch */
	ctrl |= SEC0_ENABLE;

	writel(ctrl, ksp->phyiface_regs + KS8695_SEC0);

	/* Defaults for SEC1 */
	writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1);
}

/**
 * ks8695_init_wan_phy - Initialise the WAN PHY to sensible defaults
 * @ksp: The device to initialise
 *
 * This initialises a KS8695's WAN phy to sensible values for
 * autonegotiation etc.
*/
static void __devinit
ks8695_init_wan_phy(struct ks8695_priv *ksp)
{
	u32 ctrl;

	/* Support auto-negotiation */
	ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H |
		WMC_WANA10F | WMC_WANA10H);

	/* LED0 = Activity , LED1 = Link */
	ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK);

	/* Restart Auto-negotiation */
	ctrl |= WMC_WANR;

	writel(ctrl, ksp->phyiface_regs + KS8695_WMC);

	writel(0, ksp->phyiface_regs + KS8695_WPPM);
	writel(0, ksp->phyiface_regs + KS8695_PPS);
}

static const struct net_device_ops ks8695_netdev_ops = {
	.ndo_open		= ks8695_open,
	.ndo_stop		= ks8695_stop,
	.ndo_start_xmit		= ks8695_start_xmit,
	.ndo_tx_timeout		= ks8695_timeout,
	.ndo_set_mac_address	= ks8695_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= ks8695_set_multicast,
};

/**
 * ks8695_probe - Probe and initialise a KS8695 ethernet interface
 * @pdev: The platform device to probe
 *
 * Initialise a KS8695 ethernet device from platform data.
 *
 * This driver requires at least one IORESOURCE_MEM for the
 * registers and two IORESOURCE_IRQ for the RX and TX IRQs
 * respectively. It can optionally take an additional
 * IORESOURCE_MEM for the switch or phy in the case of the lan or
 * wan ports, and an IORESOURCE_IRQ for the link IRQ for the wan
 * port.
*/
static int __devinit
ks8695_probe(struct platform_device *pdev)
{
	struct ks8695_priv *ksp;
	struct net_device *ndev;
	struct resource *regs_res, *phyiface_res;
	struct resource *rxirq_res, *txirq_res, *linkirq_res;
	int ret = 0;
	int buff_n;
	u32 machigh, maclow;

	/* Initialise a net_device */
	ndev = alloc_etherdev(sizeof(struct ks8695_priv));
	if (!ndev) {
		dev_err(&pdev->dev, "could not allocate device.\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "ks8695_probe() called\n");

	/* Configure our private structure a little */
	ksp = netdev_priv(ndev);

	ksp->dev = &pdev->dev;
	ksp->ndev = ndev;
	ksp->msg_enable = NETIF_MSG_LINK;

	/* Retrieve resources */
	regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);

	if (!(regs_res && rxirq_res && txirq_res)) {
		dev_err(ksp->dev, "insufficient resources\n");
		ret = -ENOENT;
		goto failure;
	}

	ksp->regs_req = request_mem_region(regs_res->start,
					   resource_size(regs_res),
					   pdev->name);

	if (!ksp->regs_req) {
		dev_err(ksp->dev, "cannot claim register space\n");
		ret = -EIO;
		goto failure;
	}

	ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res));

	if (!ksp->io_regs) {
		dev_err(ksp->dev, "failed to ioremap registers\n");
		ret = -EINVAL;
		goto failure;
	}

	if (phyiface_res) {
		ksp->phyiface_req =
			request_mem_region(phyiface_res->start,
					   resource_size(phyiface_res),
					   phyiface_res->name);

		if (!ksp->phyiface_req) {
			dev_err(ksp->dev,
				"cannot claim switch register space\n");
			ret = -EIO;
			goto failure;
		}

		ksp->phyiface_regs = ioremap(phyiface_res->start,
					     resource_size(phyiface_res));

		if (!ksp->phyiface_regs) {
			dev_err(ksp->dev,
				"failed to ioremap switch registers\n");
			ret = -EINVAL;
			goto failure;
		}
	}

	ksp->rx_irq = rxirq_res->start;
	ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX";
	ksp->tx_irq = txirq_res->start;
	ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX";
	ksp->link_irq = (linkirq_res ? linkirq_res->start : -1);
	ksp->link_irq_name = (linkirq_res && linkirq_res->name) ?
		linkirq_res->name : "Ethernet Link";

	/* driver system setup */
	ndev->netdev_ops = &ks8695_netdev_ops;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);

	netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);

	/* Retrieve the default MAC addr from the chip. */
	/* The bootloader should have left it in there for us. */
	machigh = ks8695_readreg(ksp, KS8695_MAH);
	maclow = ks8695_readreg(ksp, KS8695_MAL);

	ndev->dev_addr[0] = (machigh >> 8) & 0xFF;
	ndev->dev_addr[1] = machigh & 0xFF;
	ndev->dev_addr[2] = (maclow >> 24) & 0xFF;
	ndev->dev_addr[3] = (maclow >> 16) & 0xFF;
	ndev->dev_addr[4] = (maclow >> 8) & 0xFF;
	ndev->dev_addr[5] = maclow & 0xFF;

	if (!is_valid_ether_addr(ndev->dev_addr))
		dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please "
			 "set using ifconfig\n", ndev->name);

	/* In order to be efficient memory-wise, we allocate both
	 * rings in one go.
	 */
	ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE,
					    &ksp->ring_base_dma, GFP_KERNEL);
	if (!ksp->ring_base) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Specify the TX DMA ring buffer */
	ksp->tx_ring = ksp->ring_base;
	ksp->tx_ring_dma = ksp->ring_base_dma;

	/* And initialise the queue's lock */
	spin_lock_init(&ksp->txq_lock);
	spin_lock_init(&ksp->rx_lock);

	/* Specify the RX DMA ring buffer */
	ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
	ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE;

	/* Zero the descriptor rings */
	memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE);
	memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE);

	/* Build the rings */
	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		ksp->tx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->tx_ring_dma +
				    (sizeof(struct tx_ring_desc) *
				     ((buff_n + 1) & MAX_TX_DESC_MASK)));
	}

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		ksp->rx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->rx_ring_dma +
				    (sizeof(struct rx_ring_desc) *
				     ((buff_n + 1) & MAX_RX_DESC_MASK)));
	}

	/* Initialise the port (physically) */
	if (ksp->phyiface_regs && ksp->link_irq == -1) {
		ks8695_init_switch(ksp);
		ksp->dtype = KS8695_DTYPE_LAN;
		SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
	} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
		ks8695_init_wan_phy(ksp);
		ksp->dtype = KS8695_DTYPE_WAN;
		SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
	} else {
		/* No initialisation since HPNA does not have a PHY */
		ksp->dtype = KS8695_DTYPE_HPNA;
		SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
	}

	/* And bring up the net_device with the net core */
	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0) {
		dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
			 ks8695_port_type(ksp), ndev->dev_addr);
	} else {
		/* Report the failure to register the net_device */
		dev_err(ksp->dev, "ks8695net: failed to register netdev.\n");
		goto failure;
	}

	/* All is well */
	return 0;

	/* Error exit path */
	/* NOTE(review): ks8695_release_device() is reached even when some
	 * of the resources it releases were never acquired (e.g. regs_req
	 * NULL after a failed request_mem_region) — verify each release is
	 * safe on a NULL/unallocated resource.
	 */
failure:
	ks8695_release_device(ksp);
	free_netdev(ndev);

	return ret;
}

/**
ks8695_drv_suspend - Suspend a KS8695 ethernet platform device.
 * @pdev: The device to suspend
 * @state: The suspend state
 *
 * This routine detaches and shuts down a KS8695 ethernet device.
 */
static int
ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ks8695_priv *ksp = netdev_priv(ndev);

	/* Flag that we are suspending before touching the interface;
	 * presumably other paths consult in_suspend — TODO confirm against
	 * the rest of the driver (not visible in this chunk). */
	ksp->in_suspend = 1;

	if (netif_running(ndev)) {
		/* Stop the net core queueing packets to us, then quiesce
		 * the hardware. */
		netif_device_detach(ndev);
		ks8695_shutdown(ksp);
	}

	return 0;
}

/**
 * ks8695_drv_resume - Resume a KS8695 ethernet platform device.
 * @pdev: The device to resume
 *
 * This routine re-initialises and re-attaches a KS8695 ethernet
 * device.
 */
static int
ks8695_drv_resume(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ks8695_priv *ksp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		/* Bring the hardware back up in the same order probe used:
		 * reset, re-program, restore multicast filters, then let the
		 * net core use the interface again. */
		ks8695_reset(ksp);
		ks8695_init_net(ksp);
		ks8695_set_multicast(ndev);
		netif_device_attach(ndev);
	}

	/* Clear the suspend flag only after the device is fully restored. */
	ksp->in_suspend = 0;

	return 0;
}

/**
 * ks8695_drv_remove - Remove a KS8695 net device on driver unload.
 * @pdev: The platform device to remove
 *
 * This unregisters and releases a KS8695 ethernet device.
 */
static int __devexit
ks8695_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ks8695_priv *ksp = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);
	/* Tear down NAPI before unregistering so no poll can run against a
	 * dying device. */
	netif_napi_del(&ksp->napi);

	unregister_netdev(ndev);
	/* Release IRQs, register mappings and DMA rings claimed in probe. */
	ks8695_release_device(ksp);
	free_netdev(ndev);

	dev_dbg(&pdev->dev, "released and freed device\n");

	return 0;
}

/* Platform-bus glue: probe/remove plus legacy suspend/resume hooks. */
static struct platform_driver ks8695_driver = {
	.driver = {
		.name	= MODULENAME,
		.owner	= THIS_MODULE,
	},
	.probe		= ks8695_probe,
	.remove		= __devexit_p(ks8695_drv_remove),
	.suspend	= ks8695_drv_suspend,
	.resume		= ks8695_drv_resume,
};

/* Module interface */

static int __init
ks8695_init(void)
{
	printk(KERN_INFO "%s Ethernet driver, V%s\n",
	       MODULENAME, MODULEVERSION);

	return platform_driver_register(&ks8695_driver);
}

static void __exit
ks8695_cleanup(void)
{
	platform_driver_unregister(&ks8695_driver);
}

module_init(ks8695_init);
module_exit(ks8695_cleanup);

MODULE_AUTHOR("Simtec Electronics");
MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MODULENAME);

/* 0400 — readable by root only, settable at module load time. */
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
gpl-2.0
cometzero/cometzero_e210s
arch/arm/mach-orion5x/irq.c
2818
1216
/* * arch/arm/mach-orion5x/irq.c * * Core IRQ functions for Marvell Orion System On Chip * * Maintainer: Tzachi Perelstein <tzachi@marvell.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/io.h> #include <asm/gpio.h> #include <mach/bridge-regs.h> #include <plat/irq.h> #include "common.h" static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) { BUG_ON(irq < IRQ_ORION5X_GPIO_0_7 || irq > IRQ_ORION5X_GPIO_24_31); orion_gpio_irq_handler((irq - IRQ_ORION5X_GPIO_0_7) << 3); } void __init orion5x_init_irq(void) { orion_irq_init(0, (void __iomem *)MAIN_IRQ_MASK); /* * Initialize gpiolib for GPIOs 0-31. */ orion_gpio_init(0, 32, GPIO_VIRT_BASE, 0, IRQ_ORION5X_GPIO_START); irq_set_chained_handler(IRQ_ORION5X_GPIO_0_7, gpio_irq_handler); irq_set_chained_handler(IRQ_ORION5X_GPIO_8_15, gpio_irq_handler); irq_set_chained_handler(IRQ_ORION5X_GPIO_16_23, gpio_irq_handler); irq_set_chained_handler(IRQ_ORION5X_GPIO_24_31, gpio_irq_handler); }
gpl-2.0
mydongistiny/kernel_huawei_angler-ak
drivers/s390/cio/crw.c
4354
4147
/*
 * Channel report handling code
 *
 * Copyright IBM Corp. 2000, 2009
 * Author(s): Ingo Adlung <adlung@de.ibm.com>,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 */

#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <asm/crw.h>
#include <asm/ctl_reg.h>

/* Protects the crw_handlers[] table against concurrent (un)registration. */
static DEFINE_MUTEX(crw_handler_mutex);
/* One handler slot per reporting source code (RSC). */
static crw_handler_t crw_handlers[NR_RSCS];
/* Count of outstanding channel-report requests; the collector thread
 * sleeps while this is zero. */
static atomic_t crw_nr_req = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);

/**
 * crw_register_handler() - register a channel report word handler
 * @rsc: reporting source code to handle
 * @handler: handler to be registered
 *
 * Returns %0 on success and a negative error value otherwise.
 */
int crw_register_handler(int rsc, crw_handler_t handler)
{
	int rc = 0;

	if ((rsc < 0) || (rsc >= NR_RSCS))
		return -EINVAL;
	mutex_lock(&crw_handler_mutex);
	if (crw_handlers[rsc])
		rc = -EBUSY;	/* only one handler per source code */
	else
		crw_handlers[rsc] = handler;
	mutex_unlock(&crw_handler_mutex);
	return rc;
}

/**
 * crw_unregister_handler() - unregister a channel report word handler
 * @rsc: reporting source code to handle
 */
void crw_unregister_handler(int rsc)
{
	if ((rsc < 0) || (rsc >= NR_RSCS))
		return;
	mutex_lock(&crw_handler_mutex);
	crw_handlers[rsc] = NULL;
	mutex_unlock(&crw_handler_mutex);
}

/*
 * Retrieve CRWs and call function to handle event.
 *
 * Main loop of the "kmcheck" kthread: sleeps until
 * crw_handle_channel_report() bumps crw_nr_req, then drains CRWs with
 * stcrw().  At most two chained CRWs are supported; longer chains are
 * logged and consumed without dispatching.  On a CRW overflow every
 * registered handler is invoked with the overflow flag so it can
 * resynchronize its state.
 */
static int crw_collect_info(void *unused)
{
	struct crw crw[2];
	int ccode, signal;
	unsigned int chain;

repeat:
	signal = wait_event_interruptible(crw_handler_wait_q,
					  atomic_read(&crw_nr_req) > 0);
	if (unlikely(signal))
		/* Interrupted sleep: compensate for the decrement at the
		 * bottom of the loop so the request count stays balanced. */
		atomic_inc(&crw_nr_req);
	chain = 0;
	while (1) {
		crw_handler_t handler;

		if (unlikely(chain > 1)) {
			/* Chain longer than the two-entry buffer: log and
			 * drain the excess CRWs without dispatching them. */
			struct crw tmp_crw;

			printk(KERN_WARNING"%s: Code does not support more "
			       "than two chained crws; please report to "
			       "linux390@de.ibm.com!\n", __func__);
			ccode = stcrw(&tmp_crw);
			printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
			       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			       __func__, tmp_crw.slct, tmp_crw.oflw,
			       tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
			       tmp_crw.erc, tmp_crw.rsid);
			printk(KERN_WARNING"%s: This was crw number %x in the "
			       "chain\n", __func__, chain);
			if (ccode != 0)
				break;
			chain = tmp_crw.chn ? chain + 1 : 0;
			continue;
		}
		ccode = stcrw(&crw[chain]);
		if (ccode != 0)
			break;	/* no more CRWs pending */
		printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
		       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		       crw[chain].slct, crw[chain].oflw, crw[chain].chn,
		       crw[chain].rsc, crw[chain].anc, crw[chain].erc,
		       crw[chain].rsid);
		/* Check for overflows. */
		if (crw[chain].oflw) {
			int i;

			pr_debug("%s: crw overflow detected!\n", __func__);
			/* Tell every registered handler (overflow flag set)
			 * so it can recover from lost reports. */
			mutex_lock(&crw_handler_mutex);
			for (i = 0; i < NR_RSCS; i++) {
				if (crw_handlers[i])
					crw_handlers[i](NULL, NULL, 1);
			}
			mutex_unlock(&crw_handler_mutex);
			chain = 0;
			continue;
		}
		if (crw[0].chn && !chain) {
			/* First CRW of a chain: fetch its companion before
			 * dispatching. */
			chain++;
			continue;
		}
		mutex_lock(&crw_handler_mutex);
		handler = crw_handlers[crw[chain].rsc];
		if (handler)
			handler(&crw[0], chain ? &crw[1] : NULL, 0);
		mutex_unlock(&crw_handler_mutex);
		/* chain is always 0 or 1 here. */
		chain = crw[chain].chn ? chain + 1 : 0;
	}
	if (atomic_dec_and_test(&crw_nr_req))
		wake_up(&crw_handler_wait_q);	/* unblock waiters */
	goto repeat;
	return 0;	/* never reached; kthread loops forever */
}

/*
 * Called from machine-check handling to request processing of pending
 * channel reports; wakes the collector thread.
 */
void crw_handle_channel_report(void)
{
	atomic_inc(&crw_nr_req);
	wake_up(&crw_handler_wait_q);
}

/*
 * Synchronous variant: kick the collector and block until all
 * outstanding channel reports have been processed.
 */
void crw_wait_for_channel_report(void)
{
	crw_handle_channel_report();
	wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
}

/*
 * Machine checks for the channel subsystem must be enabled
 * after the channel subsystem is initialized
 */
static int __init crw_machine_check_init(void)
{
	struct task_struct *task;

	task = kthread_run(crw_collect_info, NULL, "kmcheck");
	if (IS_ERR(task))
		return PTR_ERR(task);
	ctl_set_bit(14, 28);	/* enable channel report MCH */
	return 0;
}
device_initcall(crw_machine_check_init);
gpl-2.0
ChangYeoun/bbbb
arch/arm/mach-omap1/leds-h2p2-debug.c
4866
3199
/*
 * linux/arch/arm/mach-omap1/leds-h2p2-debug.c
 *
 * Copyright 2003 by Texas Instruments Incorporated
 *
 * There are 16 LEDs on the debug board (all green); four may be used
 * for logical 'green', 'amber', 'red', and 'blue' (after "claiming").
 *
 * The "surfer" expansion board and H2 sample board also have two-color
 * green+red LEDs (in parallel), used here for timer and idle indicators.
 */
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/io.h>

#include <mach/hardware.h>
#include <asm/leds.h>
#include <asm/mach-types.h>

#include <plat/fpga.h>

#include "leds.h"

/* GPIO lines driving the two-color timer/idle indicator LEDs. */
#define GPIO_LED_RED		3
#define GPIO_LED_GREEN		OMAP_MPUIO(4)

/* Bits of the static led_state below. */
#define LED_STATE_ENABLED	0x01
#define LED_STATE_CLAIMED	0x02
#define LED_TIMER_ON		0x04

#define GPIO_IDLE		GPIO_LED_GREEN
#define GPIO_TIMER		GPIO_LED_RED

/*
 * LED framework event callback.  Software state and the FPGA mapping
 * persist across calls in the static locals; the FPGA LED register is
 * active-low (hence the ~ on every write).  Runs with IRQs disabled so
 * the static state cannot be corrupted by reentry.
 */
void h2p2_dbg_leds_event(led_event_t evt)
{
	unsigned long flags;

	/* fpga mapping and LED state survive between events. */
	static struct h2p2_dbg_fpga __iomem *fpga;
	static u16 led_state, hw_led_state;

	local_irq_save(flags);

	/* Ignore everything until led_start has mapped the FPGA. */
	if (!(led_state & LED_STATE_ENABLED) && evt != led_start)
		goto done;

	switch (evt) {
	case led_start:
		if (!fpga)
			fpga = ioremap(H2P2_DBG_FPGA_START,
						H2P2_DBG_FPGA_SIZE);
		if (fpga) {
			led_state |= LED_STATE_ENABLED;
			/* All ones = every LED off (active low). */
			__raw_writew(~0, &fpga->leds);
		}
		break;

	case led_stop:
	case led_halted:
		/* all leds off during suspend or shutdown */
		if (!machine_is_omap_perseus2()) {
			gpio_set_value(GPIO_TIMER, 0);
			gpio_set_value(GPIO_IDLE, 0);
		}
		__raw_writew(~0, &fpga->leds);
		led_state &= ~LED_STATE_ENABLED;
		if (evt == led_halted) {
			/* Only unmap on halt; stop may be followed by start. */
			iounmap(fpga);
			fpga = NULL;
		}
		goto done;

	case led_claim:
		led_state |= LED_STATE_CLAIMED;
		hw_led_state = 0;
		break;

	case led_release:
		led_state &= ~LED_STATE_CLAIMED;
		break;

#ifdef CONFIG_LEDS_TIMER
	case led_timer:
		/* Toggle the heartbeat; on non-Perseus boards it lives on a
		 * GPIO rather than in the FPGA register. */
		led_state ^= LED_TIMER_ON;
		if (machine_is_omap_perseus2())
			hw_led_state ^= H2P2_DBG_FPGA_P2_LED_TIMER;
		else {
			gpio_set_value(GPIO_TIMER, led_state & LED_TIMER_ON);
			goto done;
		}
		break;
#endif

#ifdef CONFIG_LEDS_CPU
	case led_idle_start:
		if (machine_is_omap_perseus2())
			hw_led_state |= H2P2_DBG_FPGA_P2_LED_IDLE;
		else {
			gpio_set_value(GPIO_IDLE, 1);
			goto done;
		}
		break;

	case led_idle_end:
		if (machine_is_omap_perseus2())
			hw_led_state &= ~H2P2_DBG_FPGA_P2_LED_IDLE;
		else {
			gpio_set_value(GPIO_IDLE, 0);
			goto done;
		}
		break;
#endif

	/* The four "claimed" logical LEDs map straight to FPGA bits. */
	case led_green_on:
		hw_led_state |= H2P2_DBG_FPGA_LED_GREEN;
		break;
	case led_green_off:
		hw_led_state &= ~H2P2_DBG_FPGA_LED_GREEN;
		break;

	case led_amber_on:
		hw_led_state |= H2P2_DBG_FPGA_LED_AMBER;
		break;
	case led_amber_off:
		hw_led_state &= ~H2P2_DBG_FPGA_LED_AMBER;
		break;

	case led_red_on:
		hw_led_state |= H2P2_DBG_FPGA_LED_RED;
		break;
	case led_red_off:
		hw_led_state &= ~H2P2_DBG_FPGA_LED_RED;
		break;

	case led_blue_on:
		hw_led_state |= H2P2_DBG_FPGA_LED_BLUE;
		break;
	case led_blue_off:
		hw_led_state &= ~H2P2_DBG_FPGA_LED_BLUE;
		break;

	default:
		break;
	}

	/*
	 *  Actually burn the LEDs
	 */
	if (led_state & LED_STATE_ENABLED)
		__raw_writew(~hw_led_state, &fpga->leds);

done:
	local_irq_restore(flags);
}
gpl-2.0
aj700/nxzimg
drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
5634
5621
/*
 * Host AP crypto routines
 *
 * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
 * Portions Copyright (C) 2004, Intel Corporation <jketreno@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation. See README and COPYING for
 * more details.
 *
 */

//#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/string.h>
#include <asm/errno.h>

#include "ieee80211.h"

MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("HostAP crypto");
MODULE_LICENSE("GPL");

/* One registered crypto algorithm, linked into hcrypt->algs. */
struct ieee80211_crypto_alg {
	struct list_head list;
	struct ieee80211_crypto_ops *ops;
};

/* Global registry of crypto algorithms, protected by its spinlock. */
struct ieee80211_crypto {
	struct list_head algs;
	spinlock_t lock;
};

static struct ieee80211_crypto *hcrypt;

/*
 * Free entries on the device's delayed-deinit list.  Entries still
 * referenced are skipped unless @force is set.  Caller is expected to
 * hold the device lock (see ieee80211_crypt_deinit_handler).
 */
void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee,
					   int force)
{
	struct list_head *ptr, *n;
	struct ieee80211_crypt_data *entry;

	for (ptr = ieee->crypt_deinit_list.next, n = ptr->next;
	     ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) {
		entry = list_entry(ptr, struct ieee80211_crypt_data, list);

		if (atomic_read(&entry->refcnt) != 0 && !force)
			continue;	/* still in use; try again later */

		list_del(ptr);

		if (entry->ops)
			entry->ops->deinit(entry->priv);
		kfree(entry);
	}
}

/*
 * Timer callback: reap the delayed-deinit list; rearm the timer if any
 * entries were still referenced and remain queued.
 */
void ieee80211_crypt_deinit_handler(unsigned long data)
{
	struct ieee80211_device *ieee = (struct ieee80211_device *)data;
	unsigned long flags;

	spin_lock_irqsave(&ieee->lock, flags);
	ieee80211_crypt_deinit_entries(ieee, 0);
	if (!list_empty(&ieee->crypt_deinit_list)) {
		printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
		       "deletion list\n", ieee->dev->name);
		ieee->crypt_deinit_timer.expires = jiffies + HZ;
		add_timer(&ieee->crypt_deinit_timer);
	}

	spin_unlock_irqrestore(&ieee->lock, flags);

}

/*
 * Detach *@crypt from its owner and queue it for deferred teardown
 * (see comment below for why deinit cannot run immediately).
 */
void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
				    struct ieee80211_crypt_data **crypt)
{
	struct ieee80211_crypt_data *tmp;
	unsigned long flags;

	if (*crypt == NULL)
		return;

	tmp = *crypt;
	*crypt = NULL;

	/* must not run ops->deinit() while there may be pending encrypt or
	 * decrypt operations. Use a list of delayed deinits to avoid needing
	 * locking. */

	spin_lock_irqsave(&ieee->lock, flags);
	list_add(&tmp->list, &ieee->crypt_deinit_list);
	if (!timer_pending(&ieee->crypt_deinit_timer)) {
		ieee->crypt_deinit_timer.expires = jiffies + HZ;
		add_timer(&ieee->crypt_deinit_timer);
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
}

/*
 * Add @ops to the global algorithm registry.
 * Returns 0 on success, -1 if the registry is not initialized,
 * -ENOMEM on allocation failure.
 */
int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
{
	unsigned long flags;
	struct ieee80211_crypto_alg *alg;

	if (hcrypt == NULL)
		return -1;

	alg = kzalloc(sizeof(*alg), GFP_KERNEL);
	if (alg == NULL)
		return -ENOMEM;

	alg->ops = ops;

	spin_lock_irqsave(&hcrypt->lock, flags);
	list_add(&alg->list, &hcrypt->algs);
	spin_unlock_irqrestore(&hcrypt->lock, flags);

	printk(KERN_DEBUG "ieee80211_crypt: registered algorithm '%s'\n",
	       ops->name);

	return 0;
}

/*
 * Remove @ops from the registry.  Returns 0 if found and removed,
 * -1 otherwise.  The kfree happens outside the spinlock on purpose.
 */
int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops)
{
	unsigned long flags;
	struct list_head *ptr;
	struct ieee80211_crypto_alg *del_alg = NULL;

	if (hcrypt == NULL)
		return -1;

	spin_lock_irqsave(&hcrypt->lock, flags);
	for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
		struct ieee80211_crypto_alg *alg =
			(struct ieee80211_crypto_alg *) ptr;
		if (alg->ops == ops) {
			list_del(&alg->list);
			del_alg = alg;
			break;
		}
	}
	spin_unlock_irqrestore(&hcrypt->lock, flags);

	if (del_alg) {
		printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm "
		       "'%s'\n", ops->name);
		kfree(del_alg);
	}

	return del_alg ? 0 : -1;
}

/*
 * Look up a registered algorithm by name.  Returns the ops pointer
 * (still owned by the registry) or NULL if not found.
 */
struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name)
{
	unsigned long flags;
	struct list_head *ptr;
	struct ieee80211_crypto_alg *found_alg = NULL;

	if (hcrypt == NULL)
		return NULL;

	spin_lock_irqsave(&hcrypt->lock, flags);
	for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
		struct ieee80211_crypto_alg *alg =
			(struct ieee80211_crypto_alg *) ptr;
		if (strcmp(alg->ops->name, name) == 0) {
			found_alg = alg;
			break;
		}
	}
	spin_unlock_irqrestore(&hcrypt->lock, flags);

	if (found_alg)
		return found_alg->ops;
	else
		return NULL;
}

/* NULL cipher: a do-nothing priv token (non-NULL so init "succeeds"). */
static void * ieee80211_crypt_null_init(int keyidx) { return (void *) 1; }
static void ieee80211_crypt_null_deinit(void *priv) {}

/* Pass-through "NULL" algorithm registered by default. */
static struct ieee80211_crypto_ops ieee80211_crypt_null = {
	.name			= "NULL",
	.init			= ieee80211_crypt_null_init,
	.deinit			= ieee80211_crypt_null_deinit,
	.encrypt_mpdu		= NULL,
	.decrypt_mpdu		= NULL,
	.encrypt_msdu		= NULL,
	.decrypt_msdu		= NULL,
	.set_key		= NULL,
	.get_key		= NULL,
	.extra_prefix_len	= 0,
	.extra_postfix_len	= 0,
	.owner			= THIS_MODULE,
};

/* Allocate the global registry and register the NULL algorithm. */
int ieee80211_crypto_init(void)
{
	int ret = -ENOMEM;

	hcrypt = kzalloc(sizeof(*hcrypt), GFP_KERNEL);
	if (!hcrypt)
		goto out;

	INIT_LIST_HEAD(&hcrypt->algs);
	spin_lock_init(&hcrypt->lock);

	ret = ieee80211_register_crypto_ops(&ieee80211_crypt_null);
	if (ret < 0) {
		kfree(hcrypt);
		hcrypt = NULL;
	}
out:
	return ret;
}

/* Tear down the registry, dropping any algorithms still registered. */
void ieee80211_crypto_deinit(void)
{
	struct list_head *ptr, *n;
	struct ieee80211_crypto_alg *alg = NULL;

	if (hcrypt == NULL)
		return;

	list_for_each_safe(ptr, n, &hcrypt->algs) {
		alg = list_entry(ptr, struct ieee80211_crypto_alg, list);
		if (alg) {
			list_del(ptr);
			printk(KERN_DEBUG
			       "ieee80211_crypt: unregistered algorithm '%s' (deinit)\n",
			       alg->ops->name);
			kfree(alg);
		}
	}
	kfree(hcrypt);
}
gpl-2.0
eoghan2t9/android_kernel_oppo_n1_test
arch/powerpc/platforms/maple/setup.c
7682
9816
/*
 *  Maple (970 eval board) setup code
 *
 *  (c) Copyright 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
 *                     IBM Corp.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/initrd.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/serial.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/of_device.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/dma.h>
#include <asm/cputable.h>
#include <asm/time.h>
#include <asm/mpic.h>
#include <asm/rtas.h>
#include <asm/udbg.h>
#include <asm/nvram.h>

#include "maple.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/*
 * Locate the AMD8111 NVRAM device in the device tree and return its
 * PIO base address, or 0 on failure.
 */
static unsigned long maple_find_nvram_base(void)
{
	struct device_node *rtcs;
	unsigned long result = 0;

	/* find NVRAM device */
	rtcs = of_find_compatible_node(NULL, "nvram", "AMD8111");
	if (rtcs) {
		struct resource r;

		if (of_address_to_resource(rtcs, 0, &r)) {
			printk(KERN_EMERG "Maple: Unable to translate NVRAM"
			       " address\n");
			goto bail;
		}
		if (!(r.flags & IORESOURCE_IO)) {
			printk(KERN_EMERG "Maple: NVRAM address isn't PIO!\n");
			goto bail;
		}
		result = r.start;
	} else
		printk(KERN_EMERG "Maple: Unable to find NVRAM\n");
 bail:
	of_node_put(rtcs);
	return result;

}

/*
 * Reboot by writing the firmware-provided restart value to the service
 * processor's NVRAM mailbox; spins forever afterwards.
 */
static void maple_restart(char *cmd)
{
	unsigned int maple_nvram_base;
	const unsigned int *maple_nvram_offset, *maple_nvram_command;
	struct device_node *sp;

	maple_nvram_base = maple_find_nvram_base();
	if (maple_nvram_base == 0)
		goto fail;

	/* find service processor device */
	sp = of_find_node_by_name(NULL, "service-processor");
	if (!sp) {
		printk(KERN_EMERG "Maple: Unable to find Service Processor\n");
		goto fail;
	}
	maple_nvram_offset = of_get_property(sp, "restart-addr", NULL);
	maple_nvram_command = of_get_property(sp, "restart-value", NULL);
	of_node_put(sp);

	/* send command */
	outb_p(*maple_nvram_command, maple_nvram_base + *maple_nvram_offset);
	for (;;) ;
 fail:
	printk(KERN_EMERG "Maple: Manual Restart Required\n");
}

/*
 * Power off via the service processor, same mailbox mechanism as
 * maple_restart() but with the power-off address/value properties.
 */
static void maple_power_off(void)
{
	unsigned int maple_nvram_base;
	const unsigned int *maple_nvram_offset, *maple_nvram_command;
	struct device_node *sp;

	maple_nvram_base = maple_find_nvram_base();
	if (maple_nvram_base == 0)
		goto fail;

	/* find service processor device */
	sp = of_find_node_by_name(NULL, "service-processor");
	if (!sp) {
		printk(KERN_EMERG "Maple: Unable to find Service Processor\n");
		goto fail;
	}
	maple_nvram_offset = of_get_property(sp, "power-off-addr", NULL);
	maple_nvram_command = of_get_property(sp, "power-off-value", NULL);
	of_node_put(sp);

	/* send command */
	outb_p(*maple_nvram_command, maple_nvram_base + *maple_nvram_offset);
	for (;;) ;
 fail:
	printk(KERN_EMERG "Maple: Manual Power-Down Required\n");
}

/* Halt is implemented as power-off on this platform. */
static void maple_halt(void)
{
	maple_power_off();
}

#ifdef CONFIG_SMP
struct smp_ops_t maple_smp_ops = {
	.probe		= smp_mpic_probe,
	.message_pass	= smp_mpic_message_pass,
	.kick_cpu	= smp_generic_kick_cpu,
	.setup_cpu	= smp_mpic_setup_cpu,
	.give_timebase	= smp_generic_give_timebase,
	.take_timebase	= smp_generic_take_timebase,
};
#endif /* CONFIG_SMP */

/*
 * Prefer RTAS-based reboot/power-off/halt when the firmware provides
 * both services; overrides the NVRAM-mailbox implementations above.
 */
static void __init maple_use_rtas_reboot_and_halt_if_present(void)
{
	if (rtas_service_present("system-reboot") &&
	    rtas_service_present("power-off")) {
		ppc_md.restart = rtas_restart;
		ppc_md.power_off = rtas_power_off;
		ppc_md.halt = rtas_halt;
	}
}

void __init maple_setup_arch(void)
{
	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

	/* Setup SMP callback */
#ifdef CONFIG_SMP
	smp_ops = &maple_smp_ops;
#endif
	/* Lookup PCI hosts */
	maple_pci_init();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif
	maple_use_rtas_reboot_and_halt_if_present();

	printk(KERN_DEBUG "Using native/NAP idle loop\n");

	mmio_nvram_init();
}

/* 
 * Early initialization.
 */
static void __init maple_init_early(void)
{
	DBG(" -> maple_init_early\n");

	iommu_init_early_dart();

	DBG(" <- maple_init_early\n");
}

/*
 * This is almost identical to pSeries and CHRP. We need to make that
 * code generic at one point, with appropriate bits in the device-tree to
 * identify the presence of an HT APIC
 */
static void __init maple_init_IRQ(void)
{
	struct device_node *root, *np, *mpic_node = NULL;
	const unsigned int *opprop;
	unsigned long openpic_addr = 0;
	int naddr, n, i, opplen, has_isus = 0;
	struct mpic *mpic;
	unsigned int flags = 0;

	/* Locate MPIC in the device-tree. Note that there is a bug
	 * in Maple device-tree where the type of the controller is
	 * open-pic and not interrupt-controller
	 */

	for_each_node_by_type(np, "interrupt-controller")
		if (of_device_is_compatible(np, "open-pic")) {
			mpic_node = np;
			break;
		}
	if (mpic_node == NULL)
		for_each_node_by_type(np, "open-pic") {
			mpic_node = np;
			break;
		}
	if (mpic_node == NULL) {
		printk(KERN_ERR
		       "Failed to locate the MPIC interrupt controller\n");
		return;
	}

	/* Find address list in /platform-open-pic */
	root = of_find_node_by_path("/");
	naddr = of_n_addr_cells(root);
	opprop = of_get_property(root, "platform-open-pic", &opplen);
	if (opprop != 0) {
		openpic_addr = of_read_number(opprop, naddr);
		has_isus = (opplen > naddr);
		printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n",
		       openpic_addr, has_isus);
	}

	BUG_ON(openpic_addr == 0);

	/* Check for a big endian MPIC */
	if (of_get_property(np, "big-endian", NULL) != NULL)
		flags |= MPIC_BIG_ENDIAN;

	/* XXX Maple specific bits */
	flags |= MPIC_U3_HT_IRQS;
	/* All U3/U4 are big-endian, older SLOF firmware doesn't encode this */
	flags |= MPIC_BIG_ENDIAN;

	/* Setup the openpic driver. More device-tree junks, we hard code no
	 * ISUs for now. I'll have to revisit some stuffs with the folks doing
	 * the firmware for those
	 */
	mpic = mpic_alloc(mpic_node, openpic_addr, flags,
			  /*has_isus ? 16 :*/ 0, 0, " MPIC     ");
	BUG_ON(mpic == NULL);

	/* Add ISUs */
	opplen /= sizeof(u32);
	for (n = 0, i = naddr; i < opplen; i += naddr, n++) {
		unsigned long isuaddr = of_read_number(opprop + i, naddr);
		mpic_assign_isu(mpic, n, isuaddr);
	}

	/* All ISUs are setup, complete initialization */
	mpic_init(mpic);
	ppc_md.get_irq = mpic_get_irq;
	of_node_put(mpic_node);
	of_node_put(root);
}

/* Boot-progress hook: prints the progress code and message. */
static void __init maple_progress(char *s, unsigned short hex)
{
	printk("*** %04x : %s\n", hex, s ? s : "");
}

/*
 * Called very early, MMU is off, device-tree isn't unflattened
 */
static int __init maple_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (!of_flat_dt_is_compatible(root, "Momentum,Maple") &&
	    !of_flat_dt_is_compatible(root, "Momentum,Apache"))
		return 0;
	/*
	 * On U3, the DART (iommu) must be allocated now since it
	 * has an impact on htab_initialize (due to the large page it
	 * occupies having to be broken up so the DART itself is not
	 * part of the cacheable linar mapping
	 */
	alloc_dart_table();

	hpte_init_native();

	return 1;
}

define_machine(maple) {
	.name			= "Maple",
	.probe			= maple_probe,
	.setup_arch		= maple_setup_arch,
	.init_early		= maple_init_early,
	.init_IRQ		= maple_init_IRQ,
	.pci_irq_fixup		= maple_pci_irq_fixup,
	.pci_get_legacy_ide_irq	= maple_pci_get_legacy_ide_irq,
	.restart		= maple_restart,
	.power_off		= maple_power_off,
	.halt			= maple_halt,
	.get_boot_time		= maple_get_boot_time,
	.set_rtc_time		= maple_set_rtc_time,
	.get_rtc_time		= maple_get_rtc_time,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= maple_progress,
	.power_save		= power4_idle,
};

#ifdef CONFIG_EDAC
/*
 * Register a platform device for CPC925 memory controller on
 * all boards with U3H (CPC925) bridge.
 */
static int __init maple_cpc925_edac_setup(void)
{
	struct platform_device *pdev;
	struct device_node *np = NULL;
	struct resource r;
	int ret;
	volatile void __iomem *mem;
	u32 rev;

	np = of_find_node_by_type(NULL, "memory-controller");
	if (!np) {
		printk(KERN_ERR "%s: Unable to find memory-controller node\n",
			__func__);
		return -ENODEV;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);

	if (ret < 0) {
		printk(KERN_ERR "%s: Unable to get memory-controller reg\n",
			__func__);
		return -ENODEV;
	}

	mem = ioremap(r.start, resource_size(&r));
	if (!mem) {
		printk(KERN_ERR "%s: Unable to map memory-controller memory\n",
				__func__);
		return -ENOMEM;
	}

	/* Read the bridge revision to confirm this really is a U3H. */
	rev = __raw_readl(mem);
	iounmap(mem);

	if (rev < 0x34 || rev > 0x3f) { /* U3H */
		printk(KERN_ERR "%s: Non-CPC925(U3H) bridge revision: %02x\n",
			__func__, rev);
		return 0;
	}

	pdev = platform_device_register_simple("cpc925_edac", 0, &r, 1);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	printk(KERN_INFO "%s: CPC925 platform device created\n", __func__);

	return 0;
}
machine_device_initcall(maple, maple_cpc925_edac_setup);
#endif
gpl-2.0
darkobas/android_kernel_oneplus_msm8974
drivers/clocksource/scx200_hrt.c
9474
2816
/*
 * Copyright (C) 2006 Jim Cromie
 *
 * This is a clocksource driver for the Geode SCx200's 1 or 27 MHz
 * high-resolution timer.  The Geode SC-1100 (at least) has a buggy
 * time stamp counter (TSC), which loses time unless 'idle=poll' is
 * given as a boot-arg. In its absence, the Generic Timekeeping code
 * will detect and de-rate the bad TSC, allowing this timer to take
 * over timekeeping duties.
 *
 * Based on work by John Stultz, and Ted Phelps (in a 2.6.12-rc6 patch)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 */

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/scx200.h>

#define NAME "scx200_hrt"

static int mhz27;
module_param(mhz27, int, 0);	/* load time only */
MODULE_PARM_DESC(mhz27, "count at 27.0 MHz (default is 1.0 MHz)");

static int ppm;
module_param(ppm, int, 0);	/* load time only */
MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)");

/* HiRes Timer configuration register address */
#define SCx200_TMCNFG_OFFSET (SCx200_TIMER_OFFSET + 5)

/* and config settings */
#define HR_TMEN (1 << 0)	/* timer interrupt enable */
#define HR_TMCLKSEL (1 << 1)	/* 1|0 counts at 27|1 MHz */
#define HR_TM27MPD (1 << 2)	/* 1 turns off input clock (power-down) */

/* The base timer frequency, * 27 if selected */
#define HRT_FREQ   1000000

/* Clocksource read callback: the free-running 32-bit counter is read
 * from an ISA I/O port within the SCx200 configuration block. */
static cycle_t read_hrt(struct clocksource *cs)
{
	/* Read the timer value */
	return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
}

static struct clocksource cs_hrt = {
	.name		= "scx200_hrt",
	.rating		= 250,
	.read		= read_hrt,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	/* mult, shift are set based on mhz27 flag */
};

/* Probe, configure and register the clocksource.  Returns -ENODEV when
 * the SCx200 config block is absent or the I/O region is busy. */
static int __init init_hrt_clocksource(void)
{
	u32 freq;
	/* Make sure scx200 has initialized the configuration block */
	if (!scx200_cb_present())
		return -ENODEV;

	/* Reserve the timer's ISA io-region for ourselves */
	if (!request_region(scx200_cb_base + SCx200_TIMER_OFFSET,
			    SCx200_TIMER_SIZE,
			    "NatSemi SCx200 High-Resolution Timer")) {
		pr_warn("unable to lock timer region\n");
		return -ENODEV;
	}

	/* write timer config */
	outb(HR_TMEN | (mhz27 ? HR_TMCLKSEL : 0),
	     scx200_cb_base + SCx200_TMCNFG_OFFSET);

	/* Nominal 1 MHz corrected by the ppm parameter, then scaled if
	 * the 27 MHz clock input was selected. */
	freq = (HRT_FREQ + ppm);
	if (mhz27)
		freq *= 27;

	pr_info("enabling scx200 high-res timer (%s MHz +%d ppm)\n", mhz27 ? "27":"1", ppm);

	return clocksource_register_hz(&cs_hrt, freq);
}

module_init(init_hrt_clocksource);

MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
MODULE_DESCRIPTION("clocksource on SCx200 HiRes Timer");
MODULE_LICENSE("GPL");
gpl-2.0
3EleVen/android_kernel_motorola_ghost
drivers/clocksource/scx200_hrt.c
9474
2816
/*
 * Copyright (C) 2006 Jim Cromie
 *
 * This is a clocksource driver for the Geode SCx200's 1 or 27 MHz
 * high-resolution timer.  The Geode SC-1100 (at least) has a buggy
 * time stamp counter (TSC), which loses time unless 'idle=poll' is
 * given as a boot-arg. In its absence, the Generic Timekeeping code
 * will detect and de-rate the bad TSC, allowing this timer to take
 * over timekeeping duties.
 *
 * Based on work by John Stultz, and Ted Phelps (in a 2.6.12-rc6 patch)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 */

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/scx200.h>

#define NAME "scx200_hrt"

/* Module parameters, evaluated once at load time. */
static int mhz27;
module_param(mhz27, int, 0);
MODULE_PARM_DESC(mhz27, "count at 27.0 MHz (default is 1.0 MHz)");

static int ppm;
module_param(ppm, int, 0);
MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)");

/* HiRes Timer configuration register address */
#define SCx200_TMCNFG_OFFSET (SCx200_TIMER_OFFSET + 5)

/* Configuration register bits. */
#define HR_TMEN (1 << 0)	/* timer interrupt enable */
#define HR_TMCLKSEL (1 << 1)	/* 1|0 counts at 27|1 MHz */
#define HR_TM27MPD (1 << 2)	/* 1 turns off input clock (power-down) */

/* Nominal counting frequency (1 MHz); multiplied by 27 when mhz27 set. */
#define HRT_FREQ   1000000

/* Fetch the current 32-bit counter value from the timer's I/O port. */
static cycle_t read_hrt(struct clocksource *cs)
{
	return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
}

static struct clocksource cs_hrt = {
	.name		= "scx200_hrt",
	.rating		= 250,
	.read		= read_hrt,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	/* mult/shift are derived by the core from the registered rate */
};

static int __init init_hrt_clocksource(void)
{
	u32 hz;

	/* Bail out unless scx200 already set up its configuration block. */
	if (!scx200_cb_present())
		return -ENODEV;

	/* Claim the timer's ISA I/O range so nothing else pokes at it. */
	if (!request_region(scx200_cb_base + SCx200_TIMER_OFFSET,
			    SCx200_TIMER_SIZE,
			    "NatSemi SCx200 High-Resolution Timer")) {
		pr_warn("unable to lock timer region\n");
		return -ENODEV;
	}

	/* Enable the timer, selecting the 27 MHz input when requested. */
	outb(mhz27 ? (HR_TMEN | HR_TMCLKSEL) : HR_TMEN,
	     scx200_cb_base + SCx200_TMCNFG_OFFSET);

	/* ppm-corrected base rate, scaled by 27 for the fast clock. */
	hz = HRT_FREQ + ppm;
	if (mhz27)
		hz *= 27;

	pr_info("enabling scx200 high-res timer (%s MHz +%d ppm)\n",
		mhz27 ? "27" : "1", ppm);

	return clocksource_register_hz(&cs_hrt, hz);
}

module_init(init_hrt_clocksource);

MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
MODULE_DESCRIPTION("clocksource on SCx200 HiRes Timer");
MODULE_LICENSE("GPL");
gpl-2.0
x456/kernel
drivers/acpi/reboot.c
12802
1376
#include <linux/pci.h> #include <linux/acpi.h> #include <acpi/reboot.h> void acpi_reboot(void) { struct acpi_generic_address *rr; struct pci_bus *bus0; u8 reset_value; unsigned int devfn; if (acpi_disabled) return; rr = &acpi_gbl_FADT.reset_register; /* ACPI reset register was only introduced with v2 of the FADT */ if (acpi_gbl_FADT.header.revision < 2) return; /* Is the reset register supported? The spec says we should be * checking the bit width and bit offset, but Windows ignores * these fields */ if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)) return; reset_value = acpi_gbl_FADT.reset_value; /* The reset register can only exist in I/O, Memory or PCI config space * on a device on bus 0. */ switch (rr->space_id) { case ACPI_ADR_SPACE_PCI_CONFIG: /* The reset register can only live on bus 0. */ bus0 = pci_find_bus(0, 0); if (!bus0) return; /* Form PCI device/function pair. */ devfn = PCI_DEVFN((rr->address >> 32) & 0xffff, (rr->address >> 16) & 0xffff); printk(KERN_DEBUG "Resetting with ACPI PCI RESET_REG."); /* Write the value that resets us. */ pci_bus_write_config_byte(bus0, devfn, (rr->address & 0xffff), reset_value); break; case ACPI_ADR_SPACE_SYSTEM_MEMORY: case ACPI_ADR_SPACE_SYSTEM_IO: printk(KERN_DEBUG "ACPI MEMORY or I/O RESET_REG.\n"); acpi_reset(); break; } }
gpl-2.0
SelfDesignRobotics/DuinoPack
win/hardware/multiplo/avr/cores/brain.M644.v1.xx_0022/wiring.c
3
8389
/* wiring.c - Partial implementation of the Wiring API for the ATmega8. Part of Arduino - http://www.arduino.cc/ Copyright (c) 2005-2006 David A. Mellis This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA $Id$ */ #include "wiring_private.h" // the prescaler is set so that timer0 ticks every 64 clock cycles, and the // the overflow handler is called every 256 ticks. #define MICROSECONDS_PER_TIMER0_OVERFLOW (clockCyclesToMicroseconds(64 * 256)) // the whole number of milliseconds per timer0 overflow #define MILLIS_INC (MICROSECONDS_PER_TIMER0_OVERFLOW / 1000) // the fractional number of milliseconds per timer0 overflow. we shift right // by three to fit these numbers into a byte. (for the clock speeds we care // about - 8 and 16 MHz - this doesn't lose precision.) 
#define FRACT_INC ((MICROSECONDS_PER_TIMER0_OVERFLOW % 1000) >> 3) #define FRACT_MAX (1000 >> 3) volatile unsigned long timer0_overflow_count = 0; volatile unsigned long timer0_millis = 0; static unsigned char timer0_fract = 0; SIGNAL(TIMER0_OVF_vect) { // copy these to local variables so they can be stored in registers // (volatile variables must be read from memory on every access) unsigned long m = timer0_millis; unsigned char f = timer0_fract; m += MILLIS_INC; f += FRACT_INC; if (f >= FRACT_MAX) { f -= FRACT_MAX; m += 1; } timer0_fract = f; timer0_millis = m; timer0_overflow_count++; } unsigned long millis() { unsigned long m; uint8_t oldSREG = SREG; // disable interrupts while we read timer0_millis or we might get an // inconsistent value (e.g. in the middle of a write to timer0_millis) cli(); m = timer0_millis; SREG = oldSREG; return m; } unsigned long micros() { unsigned long m; uint8_t oldSREG = SREG, t; cli(); m = timer0_overflow_count; #if defined(TCNT0) t = TCNT0; #elif defined(TCNT0L) t = TCNT0L; #else #error TIMER 0 not defined #endif #ifdef TIFR0 if ((TIFR0 & _BV(TOV0)) && (t < 255)) m++; #else if ((TIFR & _BV(TOV0)) && (t < 255)) m++; #endif SREG = oldSREG; return ((m << 8) + t) * (64 / clockCyclesPerMicrosecond()); } void delay(unsigned long ms) { uint16_t start = (uint16_t)micros(); while (ms > 0) { if (((uint16_t)micros() - start) >= 1000) { ms--; start += 1000; } } } /* Delay for the given number of microseconds. Assumes a 8 or 16 MHz clock. */ void delayMicroseconds(unsigned int us) { // calling avrlib's delay_us() function with low values (e.g. 1 or // 2 microseconds) gives delays longer than desired. //delay_us(us); #if F_CPU >= 16000000L // for the 16 MHz clock on most Arduino boards // for a one-microsecond delay, simply return. the overhead // of the function call yields a delay of approximately 1 1/8 us. 
if (--us == 0) return; // the following loop takes a quarter of a microsecond (4 cycles) // per iteration, so execute it four times for each microsecond of // delay requested. us <<= 2; // account for the time taken in the preceeding commands. us -= 2; #else // for the 8 MHz internal clock on the ATmega168 // for a one- or two-microsecond delay, simply return. the overhead of // the function calls takes more than two microseconds. can't just // subtract two, since us is unsigned; we'd overflow. if (--us == 0) return; if (--us == 0) return; // the following loop takes half of a microsecond (4 cycles) // per iteration, so execute it twice for each microsecond of // delay requested. us <<= 1; // partially compensate for the time taken by the preceeding commands. // we can't subtract any more than this or we'd overflow w/ small delays. us--; #endif // busy wait __asm__ __volatile__ ( "1: sbiw %0,1" "\n\t" // 2 cycles "brne 1b" : "=w" (us) : "0" (us) // 2 cycles ); } void init() { // this needs to be called before setup() or some functions won't // work there sei(); // on the ATmega168, timer 0 is also used for fast hardware pwm // (using phase-correct PWM would mean that timer 0 overflowed half as often // resulting in different millis() behavior on the ATmega8 and ATmega168) #if defined(TCCR0A) && defined(WGM01) sbi(TCCR0A, WGM01); sbi(TCCR0A, WGM00); #endif // set timer 0 prescale factor to 64 #if defined(__AVR_ATmega128__) // CPU specific: different values for the ATmega128 sbi(TCCR0, CS02); #elif defined(TCCR0) && defined(CS01) && defined(CS00) // this combination is for the standard atmega8 sbi(TCCR0, CS01); sbi(TCCR0, CS00); #elif defined(TCCR0B) && defined(CS01) && defined(CS00) // this combination is for the standard 168/328/1280/2560 sbi(TCCR0B, CS01); sbi(TCCR0B, CS00); #elif defined(TCCR0A) && defined(CS01) && defined(CS00) // this combination is for the __AVR_ATmega645__ series sbi(TCCR0A, CS01); sbi(TCCR0A, CS00); #else #error Timer 0 prescale factor 64 
not set correctly #endif // enable timer 0 overflow interrupt #if defined(TIMSK) && defined(TOIE0) sbi(TIMSK, TOIE0); #elif defined(TIMSK0) && defined(TOIE0) sbi(TIMSK0, TOIE0); #else #error Timer 0 overflow interrupt not set correctly #endif // timers 1 and 2 are used for phase-correct hardware pwm // this is better for motors as it ensures an even waveform // note, however, that fast pwm mode can achieve a frequency of up // 8 MHz (with a 16 MHz clock) at 50% duty cycle TCCR1B = 0; // set timer 1 prescale factor to 64 #if defined(TCCR1B) && defined(CS11) && defined(CS10) cbi(TCCR1B, CS11); sbi(TCCR1B, CS10); #elif defined(TCCR1) && defined(CS11) && defined(CS10) cbi(TCCR1, CS11); sbi(TCCR1, CS10); #endif // put timer 1 in 8-bit phase correct pwm mode #if defined(TCCR1A) && defined(WGM10) sbi(TCCR1A, WGM10); #elif defined(TCCR1) #warning this needs to be finished #endif // set timer 2 prescale factor to 64 #if defined(TCCR2) && defined(CS22) sbi(TCCR2, CS22); #elif defined(TCCR2B) && defined(CS22) sbi(TCCR2B, CS22); #else #warning Timer 2 not finished (may not be present on this CPU) #endif // configure timer 2 for phase correct pwm (8-bit) #if defined(TCCR2) && defined(WGM20) sbi(TCCR2, WGM20); #elif defined(TCCR2A) && defined(WGM20) sbi(TCCR2A, WGM20); #else #warning Timer 2 not finished (may not be present on this CPU) #endif #if defined(TCCR3B) && defined(CS31) && defined(WGM30) sbi(TCCR3B, CS31); // set timer 3 prescale factor to 64 sbi(TCCR3B, CS30); sbi(TCCR3A, WGM30); // put timer 3 in 8-bit phase correct pwm mode #endif #if defined(TCCR4B) && defined(CS41) && defined(WGM40) sbi(TCCR4B, CS41); // set timer 4 prescale factor to 64 sbi(TCCR4B, CS40); sbi(TCCR4A, WGM40); // put timer 4 in 8-bit phase correct pwm mode #endif #if defined(TCCR5B) && defined(CS51) && defined(WGM50) sbi(TCCR5B, CS51); // set timer 5 prescale factor to 64 sbi(TCCR5B, CS50); sbi(TCCR5A, WGM50); // put timer 5 in 8-bit phase correct pwm mode #endif #if defined(ADCSRA) // set a2d 
prescale factor to 128 // 16 MHz / 128 = 125 KHz, inside the desired 50-200 KHz range. // XXX: this will not work properly for other clock speeds, and // this code should use F_CPU to determine the prescale factor. sbi(ADCSRA, ADPS2); sbi(ADCSRA, ADPS1); sbi(ADCSRA, ADPS0); // enable a2d conversions sbi(ADCSRA, ADEN); #endif // the bootloader connects pins 0 and 1 to the USART; disconnect them // here so they can be used as normal digital i/o; they will be // reconnected in Serial.begin() #if defined(UCSRB) UCSRB = 0; #elif defined(UCSR0B) UCSR0B = 0; #endif }
gpl-2.0
ylatuya/gst-plugins-bad
sys/qtwrapper/videodecoders.c
3
26868
/* * GStreamer QuickTime video decoder codecs wrapper * Copyright <2006, 2007> Fluendo <gstreamer@fluendo.com> * Copyright <2006, 2007> Pioneers of the Inevitable <songbird@songbirdnest.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Alternatively, the contents of this file may be used under the * GNU Lesser General Public License Version 2.1 (the "LGPL"), in * which case the following provisions apply instead of the ones * mentioned above: * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. 
* * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <string.h> #include "qtwrapper.h" #include "codecmapping.h" #include "qtutils.h" #include "imagedescription.h" #define QTWRAPPER_VDEC_PARAMS_QDATA g_quark_from_static_string("qtwrapper-vdec-params") static GstStaticPadTemplate src_templ = GST_STATIC_PAD_TEMPLATE ("src", GST_PAD_SRC, GST_PAD_ALWAYS, GST_STATIC_CAPS ("video/x-raw-yuv")); typedef struct _QTWrapperVideoDecoder QTWrapperVideoDecoder; typedef struct _QTWrapperVideoDecoderClass QTWrapperVideoDecoderClass; #define MAC_LOCK(qtwrapper) g_mutex_lock (qtwrapper->lock) #define MAC_UNLOCK(qtwrapper) g_mutex_unlock (qtwrapper->lock) struct _QTWrapperVideoDecoder { GstElement parent; GstPad *sinkpad; GstPad *srcpad; GMutex *lock; ComponentInstance instance; CodecInfo codecinfo; ImageDescriptionHandle idesc; CodecDecompressParams *dparams; CodecCapabilities codeccaps; guint64 frameNumber; ICMDecompressionSessionRef decsession; GstFlowReturn lastret; guint64 outsize; guint width, height; GstClockTime last_ts; GstClockTime last_duration; GstBuffer *prevbuf; gboolean flushing; gboolean framebuffering; /* width/height of output buffer */ Rect rect; }; struct _QTWrapperVideoDecoderClass { GstElementClass parent_class; Component component; guint32 componentType; guint32 componentSubType; GstPadTemplate *sinktempl; }; typedef struct _QTWrapperVideoDecoderParams QTWrapperVideoDecoderParams; struct _QTWrapperVideoDecoderParams { Component component; GstCaps *sinkcaps; }; static GstElementClass *parent_class = NULL; static gboolean qtwrapper_video_decoder_sink_setcaps (GstPad * pad, GstCaps * caps); static GstFlowReturn qtwrapper_video_decoder_chain (GstPad * pad, GstBuffer * buf); static gboolean qtwrapper_video_decoder_sink_event (GstPad * pad, 
GstEvent * event); static void qtwrapper_video_decoder_finalize (GObject * object); static void decompressCb (void *decompressionTrackingRefCon, OSStatus result, ICMDecompressionTrackingFlags decompressionTrackingFlags, CVPixelBufferRef pixelBuffer, TimeValue64 displayTime, TimeValue64 displayDuration, ICMValidTimeFlags validTimeFlags, void *reserved, void *sourceFrameRefCon); /* * Class setup */ static void qtwrapper_video_decoder_base_init (QTWrapperVideoDecoderClass * klass) { GstElementClass *element_class = GST_ELEMENT_CLASS (klass); gchar *name = NULL; gchar *info = NULL; char *longname, *description; ComponentDescription desc; QTWrapperVideoDecoderParams *params; params = (QTWrapperVideoDecoderParams *) g_type_get_qdata (G_OBJECT_CLASS_TYPE (klass), QTWRAPPER_VDEC_PARAMS_QDATA); g_assert (params); get_name_info_from_component (params->component, &desc, &name, &info); /* Fill in details */ longname = g_strdup_printf ("QTWrapper Video Decoder : %s", GST_STR_NULL (name)); description = g_strdup_printf ("QTWrapper SCAudio wrapper for decoder: %s", GST_STR_NULL (info)); gst_element_class_set_metadata (element_class, longname, "Codec/Decoder/Video", description, "Fluendo <gstreamer@fluendo.com>, " "Pioneers of the Inevitable <songbird@songbirdnest.com>"); g_free (longname); g_free (description); g_free (name); g_free (info); klass->sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK, GST_PAD_ALWAYS, params->sinkcaps); gst_element_class_add_pad_template (element_class, klass->sinktempl); gst_element_class_add_pad_template (element_class, gst_static_pad_template_get (&src_templ)); /* Store class-global values */ klass->component = params->component; klass->componentType = desc.componentType; klass->componentSubType = desc.componentSubType; } static void qtwrapper_video_decoder_class_init (QTWrapperVideoDecoderClass * klass) { GObjectClass *gobject_class = G_OBJECT_CLASS (klass); parent_class = g_type_class_peek_parent (klass); gobject_class->finalize = 
GST_DEBUG_FUNCPTR (qtwrapper_video_decoder_finalize); } static void qtwrapper_video_decoder_init (QTWrapperVideoDecoder * qtwrapper) { QTWrapperVideoDecoderClass *oclass; ImageSubCodecDecompressCapabilities capabs; GST_LOG ("..."); oclass = (QTWrapperVideoDecoderClass *) (G_OBJECT_GET_CLASS (qtwrapper)); /* 1. Create a ocmponent instance */ if (!(qtwrapper->instance = OpenComponent (oclass->component))) { GST_ERROR_OBJECT (qtwrapper, "Couldn't create a component instance !"); return; } /* 2. Initialize decoder */ memset (&capabs, 0, sizeof (ImageSubCodecDecompressCapabilities)); if (ImageCodecInitialize (qtwrapper->instance, &capabs) != noErr) { GST_ERROR_OBJECT (qtwrapper, "Couldn't initialize the QT component !"); return; } /* 3. Get codec info */ memset (&qtwrapper->codecinfo, 0, sizeof (CodecInfo)); if (ImageCodecGetCodecInfo (qtwrapper->instance, &qtwrapper->codecinfo) != noErr) { GST_ERROR_OBJECT (qtwrapper, "Couldn't get Codec Information !"); return; } /* sink pad */ qtwrapper->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink"); gst_pad_set_setcaps_function (qtwrapper->sinkpad, GST_DEBUG_FUNCPTR (qtwrapper_video_decoder_sink_setcaps)); gst_pad_set_chain_function (qtwrapper->sinkpad, GST_DEBUG_FUNCPTR (qtwrapper_video_decoder_chain)); gst_pad_set_event_function (qtwrapper->sinkpad, GST_DEBUG_FUNCPTR (qtwrapper_video_decoder_sink_event)); gst_element_add_pad (GST_ELEMENT (qtwrapper), qtwrapper->sinkpad); /* src pad */ qtwrapper->srcpad = gst_pad_new_from_static_template (&src_templ, "src"); gst_element_add_pad (GST_ELEMENT (qtwrapper), qtwrapper->srcpad); qtwrapper->lock = g_mutex_new (); } static void qtwrapper_video_decoder_finalize (GObject * object) { QTWrapperVideoDecoder *qtwrapper; qtwrapper = (QTWrapperVideoDecoder *) object; if (qtwrapper->lock) g_mutex_free (qtwrapper->lock); if (G_OBJECT_CLASS (parent_class)->finalize) G_OBJECT_CLASS (parent_class)->finalize (object); } /* fill_image_description * Fills an ImageDescription with 
codec-specific values * * Doesn't fill in the idSize, width and height. */ static void fill_image_description (QTWrapperVideoDecoder * qtwrapper, ImageDescription * desc) { QTWrapperVideoDecoderClass *oclass; oclass = (QTWrapperVideoDecoderClass *) (G_OBJECT_GET_CLASS (qtwrapper)); desc->cType = oclass->componentSubType; desc->version = qtwrapper->codecinfo.version; desc->revisionLevel = qtwrapper->codecinfo.revisionLevel; desc->vendor = qtwrapper->codecinfo.vendor; desc->temporalQuality = codecMaxQuality; desc->spatialQuality = codecNormalQuality; desc->hRes = Long2Fix (72); desc->vRes = Long2Fix (72); desc->frameCount = 1; /* The following is a pure empiric calculation ... so there's are chances it * might not work. To be fixed when we can figure out what the exact value should * be. */ desc->depth = 24; /* no color table */ desc->clutID = -1; } /* new_image_description * * Create an ImageDescription for the given 'codec_data' buffer. */ static ImageDescription * new_image_description (QTWrapperVideoDecoder * qtwrapper, GstBuffer * buf, guint width, guint height) { QTWrapperVideoDecoderClass *oclass; ImageDescription *desc = NULL; oclass = (QTWrapperVideoDecoderClass *) (G_OBJECT_GET_CLASS (qtwrapper)); if (buf) { GST_LOG ("buf %p , size:%d", buf, GST_BUFFER_SIZE (buf)); #if DEBUG_DUMP gst_util_dump_mem (GST_BUFFER_DATA (buf), GST_BUFFER_SIZE (buf)); #endif } if (!buf) { /* standard case, no codec data */ desc = g_new0 (ImageDescription, 1); desc->idSize = sizeof (ImageDescription); fill_image_description (qtwrapper, desc); } else { if ((desc = image_description_from_codec_data (buf, oclass->componentSubType))) fill_image_description (qtwrapper, desc); else goto beach; } /* Fix up values */ desc->width = width; desc->height = height; desc->hRes = Long2Fix (72); desc->vRes = Long2Fix (72); /* if we have h264, we need frame buffering */ if ((oclass->componentSubType == QT_MAKE_FOURCC_LE ('a', 'v', 'c', '1'))) qtwrapper->framebuffering = TRUE; else 
qtwrapper->framebuffering = FALSE; beach: return desc; } /* close_decoder * * Close and free decoder */ #if 0 static void close_decoder (QTWrapperVideoDecoder * qtwrapper) { if (qtwrapper->idesc) { DisposeHandle ((Handle) qtwrapper->idesc); qtwrapper->idesc = NULL; } if (qtwrapper->prevbuf) { gst_buffer_unref (qtwrapper->prevbuf); qtwrapper->prevbuf = NULL; } if (qtwrapper->dparams) { g_free (qtwrapper->dparams); qtwrapper->dparams = NULL; } } #endif /* open_decoder * * Attempt to initialize the ImageDecompressorComponent with the given * caps. * * Returns TRUE and fills *outcaps if the decoder was properly initialized * Returns FALSE if something went wrong. */ static gboolean open_decoder (QTWrapperVideoDecoder * qtwrapper, GstCaps * caps, GstCaps ** outcaps) { ImageDescription *desc; gint width, height; GstStructure *s; const GValue *par = NULL; const GValue *rate = NULL; const GValue *cdata = NULL; OSStatus status; gboolean res = FALSE; guint32 outformat; ICMDecompressionSessionOptionsRef sessionoptions = NULL; ICMDecompressionTrackingCallbackRecord cbrecord; CFMutableDictionaryRef pixelBufferAttributes = NULL; s = gst_caps_get_structure (caps, 0); /* 1. Extract information from incoming caps */ if ((!gst_structure_get_int (s, "width", &width)) || (!gst_structure_get_int (s, "height", &height)) || (!(rate = gst_structure_get_value (s, "framerate")))) goto beach; par = gst_structure_get_value (s, "pixel-aspect-ratio"); cdata = gst_structure_get_value (s, "codec_data"); /* 2. Create ImageDescription */ if (cdata) { GstBuffer *cdatabuf; cdatabuf = gst_value_get_buffer (cdata); desc = new_image_description (qtwrapper, cdatabuf, width, height); } else { desc = new_image_description (qtwrapper, NULL, width, height); } #if DEBUG_DUMP dump_image_description (desc); #endif /* 3.a. 
Create a handle to receive the ImageDescription */ GST_LOG_OBJECT (qtwrapper, "Creating a new ImageDescriptionHandle of %" G_GSIZE_FORMAT " bytes", desc->idSize); qtwrapper->idesc = (ImageDescriptionHandle) NewHandleClear (desc->idSize); if (G_UNLIKELY (qtwrapper->idesc == NULL)) { GST_WARNING_OBJECT (qtwrapper, "Failed to create an ImageDescriptionHandle of size %" G_GSIZE_FORMAT, desc->idSize); g_free (desc); goto beach; } /* 3.b. Copy the ImageDescription to the handle */ GST_LOG_OBJECT (qtwrapper, "Copying %" G_GSIZE_FORMAT " bytes from desc [%p] to *qtwrapper->video [%p]", desc->idSize, desc, *qtwrapper->idesc); memcpy (*qtwrapper->idesc, desc, desc->idSize); g_free (desc); #if G_BYTE_ORDER == G_BIG_ENDIAN outformat = kYUVSPixelFormat; #else outformat = k2vuyPixelFormat; #endif /* 4. Put output pixel info in dictionnnary */ pixelBufferAttributes = CFDictionaryCreateMutable (NULL, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks); addSInt32ToDictionary (pixelBufferAttributes, kCVPixelBufferWidthKey, width); addSInt32ToDictionary (pixelBufferAttributes, kCVPixelBufferHeightKey, height); addSInt32ToDictionary (pixelBufferAttributes, kCVPixelBufferPixelFormatTypeKey, outformat); /* 5. fill in callback structure */ cbrecord.decompressionTrackingCallback = decompressCb; cbrecord.decompressionTrackingRefCon = qtwrapper; /* 6. create decompressionsession */ status = ICMDecompressionSessionCreate (NULL, qtwrapper->idesc, sessionoptions, pixelBufferAttributes, &cbrecord, &qtwrapper->decsession); qtwrapper->outsize = width * height * 2; qtwrapper->width = width; qtwrapper->height = height; if (status) { GST_DEBUG_OBJECT (qtwrapper, "Error when Calling ICMDecompressionSessionCreate : %ld", status); goto beach; } #if G_BYTE_ORDER == G_BIG_ENDIAN outformat = GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'); #else outformat = GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y'); #endif /* 9. 
Create output caps */ *outcaps = gst_caps_new_simple ("video/x-raw-yuv", "format", GST_TYPE_FOURCC, outformat, "width", G_TYPE_INT, width, "height", G_TYPE_INT, height, "framerate", GST_TYPE_FRACTION, gst_value_get_fraction_numerator (rate), gst_value_get_fraction_denominator (rate), NULL); if (par) gst_structure_set_value (gst_caps_get_structure (*outcaps, 0), "pixel-aspect-ratio", par); res = TRUE; beach: return res; } static gboolean qtwrapper_video_decoder_sink_setcaps (GstPad * pad, GstCaps * caps) { QTWrapperVideoDecoder *qtwrapper; gboolean ret = FALSE; GstCaps *othercaps = NULL; qtwrapper = (QTWrapperVideoDecoder *) gst_pad_get_parent (pad); GST_LOG_OBJECT (qtwrapper, "caps:%" GST_PTR_FORMAT, caps); /* Setup the decoder with the given input caps */ if (!(open_decoder (qtwrapper, caps, &othercaps))) { goto beach; } ret = gst_pad_set_caps (qtwrapper->srcpad, othercaps); if (!ret) goto beach; beach: if (othercaps) gst_caps_unref (othercaps); gst_object_unref (qtwrapper); return ret; } static void decompressCb (void *decompressionTrackingRefCon, OSStatus result, ICMDecompressionTrackingFlags decompressionTrackingFlags, CVPixelBufferRef pixelBuffer, TimeValue64 displayTime, TimeValue64 displayDuration, ICMValidTimeFlags validTimeFlags, void *reserved, void *sourceFrameRefCon) { QTWrapperVideoDecoder *qtwrapper; GstBuffer *origbuf = (GstBuffer *) sourceFrameRefCon; qtwrapper = (QTWrapperVideoDecoder *) decompressionTrackingRefCon; GST_LOG_OBJECT (qtwrapper, "result:%d, flags:0x%x, pixelBuffer:%p, displayTime:%lld, displayDuration:%lld", (guint32) result, (guint32) decompressionTrackingFlags, pixelBuffer, displayTime, displayDuration); GST_LOG_OBJECT (qtwrapper, "validTimeFlags:0x%x, reserved:%p, sourceFrameRefCon:%p", (guint32) validTimeFlags, reserved, sourceFrameRefCon); if (decompressionTrackingFlags & kICMDecompressionTracking_ReleaseSourceData) { GST_LOG ("removing previous buffer : %p", origbuf); gst_buffer_unref (origbuf); } if (decompressionTrackingFlags 
& kICMDecompressionTracking_EmittingFrame) GST_LOG ("EMITTING FRAME"); if (decompressionTrackingFlags & kICMDecompressionTracking_FrameDecoded) GST_LOG ("FRAME DECODED"); if (decompressionTrackingFlags & kICMDecompressionTracking_FrameDropped) GST_LOG ("FRAME DROPPED"); if (decompressionTrackingFlags & kICMDecompressionTracking_FrameNeedsRequeueing) GST_LOG ("FRAME NEEDS REQUEUING"); if ((decompressionTrackingFlags & kICMDecompressionTracking_EmittingFrame) && pixelBuffer) { guint8 *addr; GstBuffer *outbuf; size_t size; GstClockTime outtime; size = CVPixelBufferGetDataSize (pixelBuffer); outtime = gst_util_uint64_scale (displayTime, GST_SECOND, 600); GST_LOG ("Got a buffer ready outtime : %" GST_TIME_FORMAT, GST_TIME_ARGS (outtime)); if (qtwrapper->flushing) { CVPixelBufferRelease (pixelBuffer); goto beach; } dump_cvpixel_buffer (pixelBuffer); CVPixelBufferRetain (pixelBuffer); if (CVPixelBufferLockBaseAddress (pixelBuffer, 0)) GST_WARNING ("Couldn't lock base adress on pixel buffer !"); addr = CVPixelBufferGetBaseAddress (pixelBuffer); /* allocate buffer */ qtwrapper->lastret = gst_pad_alloc_buffer (qtwrapper->srcpad, GST_BUFFER_OFFSET_NONE, (gint) qtwrapper->outsize, GST_PAD_CAPS (qtwrapper->srcpad), &outbuf); if (G_UNLIKELY (qtwrapper->lastret != GST_FLOW_OK)) { GST_LOG ("gst_pad_alloc_buffer() returned %s", gst_flow_get_name (qtwrapper->lastret)); goto beach; } /* copy data */ GST_LOG ("copying data in buffer from %p to %p", addr, GST_BUFFER_DATA (outbuf)); if (G_UNLIKELY ((qtwrapper->width * 2) != CVPixelBufferGetBytesPerRow (pixelBuffer))) { guint i; gulong realpixels; size_t stride; stride = CVPixelBufferGetBytesPerRow (pixelBuffer); realpixels = qtwrapper->width * 2; /* special copy for stride handling */ for (i = 0; i < qtwrapper->height; i++) g_memmove (GST_BUFFER_DATA (outbuf) + realpixels * i, addr + stride * i, realpixels); } else g_memmove (GST_BUFFER_DATA (outbuf), addr, (int) qtwrapper->outsize); /* Release CVPixelBuffer */ 
CVPixelBufferUnlockBaseAddress (pixelBuffer, 0); CVPixelBufferRelease (pixelBuffer); /* Set proper timestamp ! */ gst_buffer_set_caps (outbuf, GST_PAD_CAPS (qtwrapper->srcpad)); GST_BUFFER_TIMESTAMP (outbuf) = qtwrapper->last_ts; GST_BUFFER_DURATION (outbuf) = qtwrapper->last_duration; GST_BUFFER_SIZE (outbuf) = (int) qtwrapper->outsize; /* See if we push buffer downstream */ if (G_LIKELY (!qtwrapper->framebuffering)) { GST_LOG ("No buffering needed, pushing buffer downstream"); MAC_UNLOCK (qtwrapper); qtwrapper->lastret = gst_pad_push (qtwrapper->srcpad, outbuf); MAC_LOCK (qtwrapper); } else { /* Check if we push the current buffer or the stored buffer */ if (!qtwrapper->prevbuf) { GST_LOG ("Storing buffer"); qtwrapper->prevbuf = outbuf; qtwrapper->lastret = GST_FLOW_OK; } else if (GST_BUFFER_TIMESTAMP (qtwrapper->prevbuf) > GST_BUFFER_TIMESTAMP (outbuf)) { GST_LOG ("Newly decoded buffer is earliest, pushing that one !"); MAC_UNLOCK (qtwrapper); qtwrapper->lastret = gst_pad_push (qtwrapper->srcpad, outbuf); MAC_LOCK (qtwrapper); } else { GstBuffer *tmp; tmp = qtwrapper->prevbuf; qtwrapper->prevbuf = outbuf; GST_LOG ("Stored buffer is earliest, pushing that one !"); MAC_UNLOCK (qtwrapper); qtwrapper->lastret = gst_pad_push (qtwrapper->srcpad, tmp); MAC_LOCK (qtwrapper); } } } else { qtwrapper->lastret = GST_FLOW_OK; } beach: return; } /* _chain * * Here we feed the data to the decoder and ask to decode frames. * * Known issues/questions are: * * How can we be guaranteed that one frame in automatically gives one output * frame ? * * PTS/DTS timestamp issues. With mpeg-derivate formats, the incoming order is * different from the output order. 
*/ static GstFlowReturn qtwrapper_video_decoder_chain (GstPad * pad, GstBuffer * buf) { QTWrapperVideoDecoder *qtwrapper; GstFlowReturn ret = GST_FLOW_OK; ICMFrameTimeRecord frameTime = { {0} }; OSStatus status; guint64 intime; qtwrapper = (QTWrapperVideoDecoder *) gst_pad_get_parent (pad); intime = gst_util_uint64_scale (GST_BUFFER_TIMESTAMP (buf), 600, GST_SECOND); GST_DEBUG_OBJECT (qtwrapper, "buffer:%p timestamp:%" GST_TIME_FORMAT " intime:%llu Size:%d", buf, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), intime, GST_BUFFER_SIZE (buf)); frameTime.recordSize = sizeof (ICMFrameTimeRecord); /* *(TimeValue64 *)&frameTime.value = intime; */ frameTime.value.lo = (guint32) (intime & 0xffffffff); frameTime.value.hi = (guint32) (intime >> 32); frameTime.base = 0; frameTime.scale = 600; frameTime.rate = fixed1; frameTime.duration = 1; frameTime.flags = icmFrameTimeDecodeImmediately; /* frameTime.flags = icmFrameTimeIsNonScheduledDisplayTime; */ frameTime.frameNumber = (long) (++qtwrapper->frameNumber); MAC_LOCK (qtwrapper); qtwrapper->last_ts = GST_BUFFER_TIMESTAMP (buf); qtwrapper->last_duration = GST_BUFFER_DURATION (buf); status = ICMDecompressionSessionDecodeFrame (qtwrapper->decsession, GST_BUFFER_DATA (buf), GST_BUFFER_SIZE (buf), NULL, &frameTime, buf); MAC_UNLOCK (qtwrapper); if (status) { GST_WARNING_OBJECT (qtwrapper, "Error when Calling DecodeFrame() : %ld", status); ret = GST_FLOW_ERROR; goto beach; } beach: gst_object_unref (qtwrapper); return qtwrapper->lastret; } static gboolean qtwrapper_video_decoder_sink_event (GstPad * pad, GstEvent * event) { gboolean res; QTWrapperVideoDecoder *qtwrapper; qtwrapper = (QTWrapperVideoDecoder *) gst_pad_get_parent (pad); GST_LOG_OBJECT (pad, "event : %s", GST_EVENT_TYPE_NAME (event)); switch (GST_EVENT_TYPE (event)) { case GST_EVENT_FLUSH_START: MAC_LOCK (qtwrapper); qtwrapper->flushing = TRUE; if (qtwrapper->prevbuf) { GST_LOG ("About to unref buffer %p", qtwrapper->prevbuf); gst_buffer_unref (qtwrapper->prevbuf); 
qtwrapper->prevbuf = NULL; } ICMDecompressionSessionFlush (qtwrapper->decsession); MAC_UNLOCK (qtwrapper); break; case GST_EVENT_FLUSH_STOP: MAC_LOCK (qtwrapper); qtwrapper->flushing = FALSE; qtwrapper->prevbuf = NULL; MAC_UNLOCK (qtwrapper); break; default: break; } res = gst_pad_push_event (qtwrapper->srcpad, event); gst_object_unref (qtwrapper); return res; } /* _register * * Scan through all available Image Decompressor components to find the ones we * can handle and wrap in this plugin. */ gboolean qtwrapper_video_decoders_register (GstPlugin * plugin) { gboolean res = TRUE; Component componentID = NULL; ComponentDescription desc = { 'imdc', 0, 0, 0, 0 }; GTypeInfo typeinfo = { sizeof (QTWrapperVideoDecoderClass), (GBaseInitFunc) qtwrapper_video_decoder_base_init, NULL, (GClassInitFunc) qtwrapper_video_decoder_class_init, NULL, NULL, sizeof (QTWrapperVideoDecoder), 0, (GInstanceInitFunc) qtwrapper_video_decoder_init, }; /* Find all ImageDecoders ! */ GST_DEBUG ("There are %ld decompressors available", CountComponents (&desc)); /* loop over ImageDecoders */ do { componentID = FindNextComponent (componentID, &desc); GST_LOG ("componentID : %p", componentID); if (componentID) { ComponentDescription thisdesc; gchar *name = NULL, *info = NULL; GstCaps *caps = NULL; gchar *type_name = NULL; GType type; QTWrapperVideoDecoderParams *params = NULL; if (!(get_name_info_from_component (componentID, &thisdesc, &name, &info))) goto next; if (!get_output_info_from_component (componentID)) { GST_WARNING ("Couldn't get output info from component"); goto next; } GST_LOG (" name:%s", GST_STR_NULL (name)); GST_LOG (" info:%s", GST_STR_NULL (info)); GST_LOG (" type:%" GST_FOURCC_FORMAT, QT_FOURCC_ARGS (thisdesc.componentType)); GST_LOG (" subtype:%" GST_FOURCC_FORMAT, QT_FOURCC_ARGS (thisdesc.componentSubType)); GST_LOG (" manufacturer:%" GST_FOURCC_FORMAT, QT_FOURCC_ARGS (thisdesc.componentManufacturer)); if (!(caps = fourcc_to_caps (thisdesc.componentSubType))) { GST_LOG ("We 
can't find caps for this component, switching to the next one !"); goto next; } type_name = g_strdup_printf ("qtwrappervideodec_%" GST_FOURCC_FORMAT, QT_FOURCC_ARGS (thisdesc.componentSubType)); g_strdelimit (type_name, " ", '_'); if (g_type_from_name (type_name)) { GST_WARNING ("We already have a registered plugin for %s", type_name); goto next; } params = g_new0 (QTWrapperVideoDecoderParams, 1); params->component = componentID; params->sinkcaps = gst_caps_ref (caps); GST_INFO ("Registering g_type for type_name: %s", type_name); type = g_type_register_static (GST_TYPE_ELEMENT, type_name, &typeinfo, 0); /* Store params in type qdata */ g_type_set_qdata (type, QTWRAPPER_VDEC_PARAMS_QDATA, (gpointer) params); /* register type */ if (!gst_element_register (plugin, type_name, GST_RANK_MARGINAL, type)) { g_warning ("Failed to register %s", type_name);; g_type_set_qdata (type, QTWRAPPER_VDEC_PARAMS_QDATA, NULL); g_free (params); res = FALSE; goto next; } else { GST_LOG ("Reigstered video plugin %s", type_name); } next: if (name) g_free (name); if (info) g_free (info); if (type_name) g_free (type_name); if (caps) gst_caps_unref (caps); } } while (componentID && res); return res; }
gpl-2.0
evanphx/yoke
src/VBox/Main/src-client/GuestSessionImplTasks.cpp
3
59606
/* $Id: GuestSessionImplTasks.cpp $ */ /** @file * VirtualBox Main - XXX. */ /* * Copyright (C) 2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; * you can redistribute it and/or modify it under the terms of the GNU * General Public License (GPL) as published by the Free Software * Foundation, in version 2 as it comes in the "COPYING" file of the * VirtualBox OSE distribution. VirtualBox OSE is distributed in the * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. */ /******************************************************************************* * Header Files * *******************************************************************************/ #include "GuestImpl.h" #include "GuestSessionImpl.h" #include "GuestCtrlImplPrivate.h" #include "Global.h" #include "AutoCaller.h" #include "ConsoleImpl.h" #include "MachineImpl.h" #include "ProgressImpl.h" #include <memory> /* For auto_ptr. */ #include <iprt/env.h> #include <iprt/file.h> /* For CopyTo/From. */ #ifdef LOG_GROUP #undef LOG_GROUP #endif #define LOG_GROUP LOG_GROUP_GUEST_CONTROL #include <VBox/log.h> /******************************************************************************* * Defines * *******************************************************************************/ /** * Update file flags. */ #define UPDATEFILE_FLAG_NONE 0 /** Copy over the file from host to the * guest. */ #define UPDATEFILE_FLAG_COPY_FROM_ISO RT_BIT(0) /** Execute file on the guest after it has * been successfully transfered. */ #define UPDATEFILE_FLAG_EXECUTE RT_BIT(7) /** File is optional, does not have to be * existent on the .ISO. 
*/ #define UPDATEFILE_FLAG_OPTIONAL RT_BIT(8) // session task classes ///////////////////////////////////////////////////////////////////////////// GuestSessionTask::GuestSessionTask(GuestSession *pSession) { mSession = pSession; } GuestSessionTask::~GuestSessionTask(void) { } int GuestSessionTask::getGuestProperty(const ComObjPtr<Guest> &pGuest, const Utf8Str &strPath, Utf8Str &strValue) { ComObjPtr<Console> pConsole = pGuest->getConsole(); const ComPtr<IMachine> pMachine = pConsole->machine(); Assert(!pMachine.isNull()); Bstr strTemp, strFlags; LONG64 i64Timestamp; HRESULT hr = pMachine->GetGuestProperty(Bstr(strPath).raw(), strTemp.asOutParam(), &i64Timestamp, strFlags.asOutParam()); if (SUCCEEDED(hr)) { strValue = strTemp; return VINF_SUCCESS; } return VERR_NOT_FOUND; } int GuestSessionTask::setProgress(ULONG uPercent) { if (mProgress.isNull()) /* Progress is optional. */ return VINF_SUCCESS; BOOL fCanceled; if ( SUCCEEDED(mProgress->COMGETTER(Canceled(&fCanceled))) && fCanceled) return VERR_CANCELLED; BOOL fCompleted; if ( SUCCEEDED(mProgress->COMGETTER(Completed(&fCompleted))) && fCompleted) { AssertMsgFailed(("Setting value of an already completed progress\n")); return VINF_SUCCESS; } HRESULT hr = mProgress->SetCurrentOperationProgress(uPercent); if (FAILED(hr)) return VERR_COM_UNEXPECTED; return VINF_SUCCESS; } int GuestSessionTask::setProgressSuccess(void) { if (mProgress.isNull()) /* Progress is optional. */ return VINF_SUCCESS; BOOL fCanceled; BOOL fCompleted; if ( SUCCEEDED(mProgress->COMGETTER(Canceled(&fCanceled))) && !fCanceled && SUCCEEDED(mProgress->COMGETTER(Completed(&fCompleted))) && !fCompleted) { HRESULT hr = mProgress->notifyComplete(S_OK); if (FAILED(hr)) return VERR_COM_UNEXPECTED; /** @todo Find a better rc. */ } return VINF_SUCCESS; } HRESULT GuestSessionTask::setProgressErrorMsg(HRESULT hr, const Utf8Str &strMsg) { if (mProgress.isNull()) /* Progress is optional. */ return hr; /* Return original rc. 
*/ BOOL fCanceled; BOOL fCompleted; if ( SUCCEEDED(mProgress->COMGETTER(Canceled(&fCanceled))) && !fCanceled && SUCCEEDED(mProgress->COMGETTER(Completed(&fCompleted))) && !fCompleted) { HRESULT hr2 = mProgress->notifyComplete(hr, COM_IIDOF(IGuestSession), GuestSession::getStaticComponentName(), strMsg.c_str()); if (FAILED(hr2)) return hr2; } return hr; /* Return original rc. */ } SessionTaskCopyTo::SessionTaskCopyTo(GuestSession *pSession, const Utf8Str &strSource, const Utf8Str &strDest, uint32_t uFlags) : GuestSessionTask(pSession), mSource(strSource), mSourceFile(NULL), mSourceOffset(0), mSourceSize(0), mDest(strDest) { mCopyFileFlags = uFlags; } /** @todo Merge this and the above call and let the above call do the open/close file handling so that the * inner code only has to deal with file handles. No time now ... */ SessionTaskCopyTo::SessionTaskCopyTo(GuestSession *pSession, PRTFILE pSourceFile, size_t cbSourceOffset, uint64_t cbSourceSize, const Utf8Str &strDest, uint32_t uFlags) : GuestSessionTask(pSession) { mSourceFile = pSourceFile; mSourceOffset = cbSourceOffset; mSourceSize = cbSourceSize; mDest = strDest; mCopyFileFlags = uFlags; } SessionTaskCopyTo::~SessionTaskCopyTo(void) { } int SessionTaskCopyTo::Run(void) { LogFlowThisFuncEnter(); ComObjPtr<GuestSession> pSession = mSession; Assert(!pSession.isNull()); AutoCaller autoCaller(pSession); if (FAILED(autoCaller.rc())) return autoCaller.rc(); if (mCopyFileFlags) { setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Copy flags (%#x) not implemented yet"), mCopyFileFlags)); return VERR_INVALID_PARAMETER; } int rc; RTFILE fileLocal; PRTFILE pFile = &fileLocal; if (!mSourceFile) { /* Does our source file exist? 
*/ if (!RTFileExists(mSource.c_str())) { rc = setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Source file \"%s\" does not exist or is not a file"), mSource.c_str())); } else { rc = RTFileOpen(pFile, mSource.c_str(), RTFILE_O_OPEN | RTFILE_O_READ | RTFILE_O_DENY_WRITE); if (RT_FAILURE(rc)) { rc = setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Could not open source file \"%s\" for reading: %Rrc"), mSource.c_str(), rc)); } else { rc = RTFileGetSize(*pFile, &mSourceSize); if (RT_FAILURE(rc)) { setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Could not query file size of \"%s\": %Rrc"), mSource.c_str(), rc)); } } } } else { pFile = mSourceFile; /* Size + offset are optional. */ } GuestProcessStartupInfo procInfo; procInfo.mCommand = Utf8Str(VBOXSERVICE_TOOL_CAT); procInfo.mFlags = ProcessCreateFlag_Hidden; /* Set arguments.*/ procInfo.mArguments.push_back(Utf8StrFmt("--output=%s", mDest.c_str())); /** @todo Do we need path conversion? */ /* Startup process. */ ComObjPtr<GuestProcess> pProcess; int guestRc; rc = pSession->processCreateExInteral(procInfo, pProcess); if (RT_SUCCESS(rc)) rc = pProcess->startProcess(&guestRc); if (RT_FAILURE(rc)) { switch (rc) { case VERR_GENERAL_FAILURE: /** @todo Special guest control rc needed! 
*/ setProgressErrorMsg(VBOX_E_IPRT_ERROR, GuestProcess::guestErrorToString(guestRc)); break; default: setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Error while creating guest process for copying file \"%s\" from guest to host: %Rrc"), mSource.c_str(), rc)); break; } } if (RT_SUCCESS(rc)) { ProcessWaitResult_T waitRes; BYTE byBuf[_64K]; BOOL fCanceled = FALSE; uint64_t cbWrittenTotal = 0; uint64_t cbToRead = mSourceSize; for (;;) { rc = pProcess->waitFor(ProcessWaitForFlag_StdIn, 30 * 1000 /* Timeout */, waitRes, &guestRc); if ( RT_FAILURE(rc) || ( waitRes != ProcessWaitResult_StdIn && waitRes != ProcessWaitResult_WaitFlagNotSupported)) { break; } /* If the guest does not support waiting for stdin, we now yield in * order to reduce the CPU load due to busy waiting. */ if (waitRes == ProcessWaitResult_WaitFlagNotSupported) RTThreadSleep(1); /* Optional, don't check rc. */ size_t cbRead = 0; if (mSourceSize) /* If we have nothing to write, take a shortcut. */ { /** @todo Not very efficient, but works for now. */ rc = RTFileSeek(*pFile, mSourceOffset + cbWrittenTotal, RTFILE_SEEK_BEGIN, NULL /* poffActual */); if (RT_SUCCESS(rc)) { rc = RTFileRead(*pFile, (uint8_t*)byBuf, RT_MIN(cbToRead, sizeof(byBuf)), &cbRead); /* * Some other error occured? There might be a chance that RTFileRead * could not resolve/map the native error code to an IPRT code, so just * print a generic error. */ if (RT_FAILURE(rc)) { setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Could not read from file \"%s\" (%Rrc)"), mSource.c_str(), rc)); break; } } else { setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Seeking file \"%s\" to offset %RU64 failed: %Rrc"), mSource.c_str(), cbWrittenTotal, rc)); break; } } uint32_t fFlags = ProcessInputFlag_None; /* Did we reach the end of the content we want to transfer (last chunk)? */ if ( (cbRead < sizeof(byBuf)) /* Did we reach the last block which is exactly _64K? 
*/ || (cbToRead - cbRead == 0) /* ... or does the user want to cancel? */ || ( !mProgress.isNull() && SUCCEEDED(mProgress->COMGETTER(Canceled(&fCanceled))) && fCanceled) ) { LogFlowThisFunc(("Writing last chunk cbRead=%RU64\n", cbRead)); fFlags |= ProcessInputFlag_EndOfFile; } uint32_t cbWritten; Assert(sizeof(byBuf) >= cbRead); rc = pProcess->writeData(0 /* StdIn */, fFlags, byBuf, cbRead, 30 * 1000 /* Timeout */, &cbWritten, &guestRc); if (RT_FAILURE(rc)) { switch (rc) { case VERR_GENERAL_FAILURE: /** @todo Special guest control rc needed! */ setProgressErrorMsg(VBOX_E_IPRT_ERROR, GuestProcess::guestErrorToString(guestRc)); break; default: setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Writing to file \"%s\" (offset %RU64) failed: %Rrc"), mDest.c_str(), cbWrittenTotal, rc)); break; } break; } /* Only subtract bytes reported written by the guest. */ Assert(cbToRead >= cbWritten); cbToRead -= cbWritten; /* Update total bytes written to the guest. */ cbWrittenTotal += cbWritten; Assert(cbWrittenTotal <= mSourceSize); LogFlowThisFunc(("rc=%Rrc, cbWritten=%RU32, cbToRead=%RU64, cbWrittenTotal=%RU64, cbFileSize=%RU64\n", rc, cbWritten, cbToRead, cbWrittenTotal, mSourceSize)); /* Did the user cancel the operation above? */ if (fCanceled) break; /* Update the progress. * Watch out for division by zero. */ mSourceSize > 0 ? rc = setProgress((ULONG)(cbWrittenTotal * 100 / mSourceSize)) : rc = setProgress(100); if (RT_FAILURE(rc)) break; /* End of file reached? */ if (!cbToRead) break; } /* for */ LogFlowThisFunc(("Copy loop ended with rc=%Rrc\n" ,rc)); if ( !fCanceled || RT_SUCCESS(rc)) { /* * Even if we succeeded until here make sure to check whether we really transfered * everything. */ if ( mSourceSize > 0 && cbWrittenTotal == 0) { /* If nothing was transfered but the file size was > 0 then "vbox_cat" wasn't able to write * to the destination -> access denied. 
*/ setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Access denied when copying file \"%s\" to \"%s\""), mSource.c_str(), mDest.c_str())); rc = VERR_GENERAL_FAILURE; /* Fudge. */ } else if (cbWrittenTotal < mSourceSize) { /* If we did not copy all let the user know. */ setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Copying file \"%s\" failed (%RU64/%RU64 bytes transfered)"), mSource.c_str(), cbWrittenTotal, mSourceSize)); rc = VERR_GENERAL_FAILURE; /* Fudge. */ } else { rc = pProcess->waitFor(ProcessWaitForFlag_Terminate, 30 * 1000 /* Timeout */, waitRes, &guestRc); if ( RT_FAILURE(rc) || waitRes != ProcessWaitResult_Terminate) { if (RT_FAILURE(rc)) setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Waiting on termination for copying file \"%s\" failed: %Rrc"), mSource.c_str(), rc)); else { setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Waiting on termination for copying file \"%s\" failed with wait result %ld"), mSource.c_str(), waitRes)); rc = VERR_GENERAL_FAILURE; /* Fudge. */ } } if (RT_SUCCESS(rc)) { ProcessStatus_T procStatus; LONG exitCode; if ( ( SUCCEEDED(pProcess->COMGETTER(Status(&procStatus))) && procStatus != ProcessStatus_TerminatedNormally) || ( SUCCEEDED(pProcess->COMGETTER(ExitCode(&exitCode))) && exitCode != 0) ) { setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Copying file \"%s\" failed with status %ld, exit code %ld"), mSource.c_str(), procStatus, exitCode)); /**@todo Add stringify methods! */ rc = VERR_GENERAL_FAILURE; /* Fudge. */ } } if (RT_SUCCESS(rc)) rc = setProgressSuccess(); } } if (!pProcess.isNull()) pProcess->uninit(); } /* processCreateExInteral */ if (!mSourceFile) /* Only close locally opened files. 
*/ RTFileClose(*pFile); LogFlowFuncLeaveRC(rc); return rc; } int SessionTaskCopyTo::RunAsync(const Utf8Str &strDesc, ComObjPtr<Progress> &pProgress) { LogFlowThisFunc(("strDesc=%s, strSource=%s, strDest=%s, mCopyFileFlags=%x\n", strDesc.c_str(), mSource.c_str(), mDest.c_str(), mCopyFileFlags)); mDesc = strDesc; mProgress = pProgress; int rc = RTThreadCreate(NULL, SessionTaskCopyTo::taskThread, this, 0, RTTHREADTYPE_MAIN_HEAVY_WORKER, 0, "gctlCpyTo"); LogFlowFuncLeaveRC(rc); return rc; } /* static */ int SessionTaskCopyTo::taskThread(RTTHREAD Thread, void *pvUser) { std::auto_ptr<SessionTaskCopyTo> task(static_cast<SessionTaskCopyTo*>(pvUser)); AssertReturn(task.get(), VERR_GENERAL_FAILURE); LogFlowFunc(("pTask=%p\n", task.get())); return task->Run(); } SessionTaskCopyFrom::SessionTaskCopyFrom(GuestSession *pSession, const Utf8Str &strSource, const Utf8Str &strDest, uint32_t uFlags) : GuestSessionTask(pSession) { mSource = strSource; mDest = strDest; mFlags = uFlags; } SessionTaskCopyFrom::~SessionTaskCopyFrom(void) { } int SessionTaskCopyFrom::Run(void) { LogFlowThisFuncEnter(); ComObjPtr<GuestSession> pSession = mSession; Assert(!pSession.isNull()); AutoCaller autoCaller(pSession); if (FAILED(autoCaller.rc())) return autoCaller.rc(); /* * Note: There will be races between querying file size + reading the guest file's * content because we currently *do not* lock down the guest file when doing the * actual operations. ** @todo Implement guest file locking! */ GuestFsObjData objData; int guestRc; int rc = pSession->fileQueryInfoInternal(Utf8Str(mSource), objData, &guestRc); if (RT_FAILURE(rc)) { setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Querying guest file information for \"%s\" failed: %Rrc"), mSource.c_str(), rc)); } else if (objData.mType != FsObjType_File) /* Only single files are supported at the moment. 
*/ { setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Object \"%s\" on the guest is not a file"), mSource.c_str())); rc = VERR_GENERAL_FAILURE; /* Fudge. */ } if (RT_SUCCESS(rc)) { RTFILE fileDest; rc = RTFileOpen(&fileDest, mDest.c_str(), RTFILE_O_WRITE | RTFILE_O_OPEN_CREATE | RTFILE_O_DENY_WRITE); /** @todo Use the correct open modes! */ if (RT_FAILURE(rc)) { setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Error opening destination file \"%s\": %Rrc"), mDest.c_str(), rc)); } else { GuestProcessStartupInfo procInfo; procInfo.mName = Utf8StrFmt(GuestSession::tr("Copying file \"%s\" from guest to the host to \"%s\" (%RI64 bytes)"), mSource.c_str(), mDest.c_str(), objData.mObjectSize); procInfo.mCommand = Utf8Str(VBOXSERVICE_TOOL_CAT); procInfo.mFlags = ProcessCreateFlag_Hidden | ProcessCreateFlag_WaitForStdOut; /* Set arguments.*/ procInfo.mArguments.push_back(mSource); /* Which file to output? */ /* Startup process. */ ComObjPtr<GuestProcess> pProcess; rc = pSession->processCreateExInteral(procInfo, pProcess); if (RT_SUCCESS(rc)) rc = pProcess->startProcess(&guestRc); if (RT_FAILURE(rc)) { switch (rc) { case VERR_GENERAL_FAILURE: /** @todo Special guest control rc needed! */ setProgressErrorMsg(VBOX_E_IPRT_ERROR, GuestProcess::guestErrorToString(guestRc)); break; default: setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Error while creating guest process for copying file \"%s\" from guest to host: %Rrc"), mSource.c_str(), rc)); break; } } else { ProcessWaitResult_T waitRes; BYTE byBuf[_64K]; BOOL fCanceled = FALSE; uint64_t cbWrittenTotal = 0; uint64_t cbToRead = objData.mObjectSize; for (;;) { rc = pProcess->waitFor(ProcessWaitForFlag_StdOut, 30 * 1000 /* Timeout */, waitRes, &guestRc); if (RT_FAILURE(rc)) { switch (rc) { case VERR_GENERAL_FAILURE: /** @todo Special guest control rc needed! 
*/ setProgressErrorMsg(VBOX_E_IPRT_ERROR, GuestProcess::guestErrorToString(guestRc)); break; default: setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Error while creating guest process for copying file \"%s\" from guest to host: %Rrc"), mSource.c_str(), rc)); break; } break; } if ( waitRes == ProcessWaitResult_StdOut || waitRes == ProcessWaitResult_WaitFlagNotSupported) { /* If the guest does not support waiting for stdin, we now yield in * order to reduce the CPU load due to busy waiting. */ if (waitRes == ProcessWaitResult_WaitFlagNotSupported) RTThreadSleep(1); /* Optional, don't check rc. */ size_t cbRead; rc = pProcess->readData(OUTPUT_HANDLE_ID_STDOUT, sizeof(byBuf), 30 * 1000 /* Timeout */, byBuf, sizeof(byBuf), &cbRead, &guestRc); if (RT_FAILURE(rc)) { switch (rc) { case VERR_GENERAL_FAILURE: /** @todo Special guest control rc needed! */ setProgressErrorMsg(VBOX_E_IPRT_ERROR, GuestProcess::guestErrorToString(guestRc)); break; default: setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Reading from file \"%s\" (offset %RU64) failed: %Rrc"), mSource.c_str(), cbWrittenTotal, rc)); break; } break; } if (cbRead) { rc = RTFileWrite(fileDest, byBuf, cbRead, NULL /* No partial writes */); if (RT_FAILURE(rc)) { setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Error writing to file \"%s\" (%RU64 bytes left): %Rrc"), mDest.c_str(), cbToRead, rc)); break; } /* Only subtract bytes reported written by the guest. */ Assert(cbToRead >= cbRead); cbToRead -= cbRead; /* Update total bytes written to the guest. */ cbWrittenTotal += cbRead; Assert(cbWrittenTotal <= (uint64_t)objData.mObjectSize); /* Did the user cancel the operation above? 
*/ if ( SUCCEEDED(mProgress->COMGETTER(Canceled(&fCanceled))) && fCanceled) break; rc = setProgress((ULONG)(cbWrittenTotal / ((uint64_t)objData.mObjectSize / 100.0))); if (RT_FAILURE(rc)) break; } } else { break; } } /* for */ LogFlowThisFunc(("rc=%Rrc, guestrc=%Rrc, waitRes=%ld, cbWrittenTotal=%RU64, cbSize=%RI64, cbToRead=%RU64\n", rc, guestRc, waitRes, cbWrittenTotal, objData.mObjectSize, cbToRead)); if ( !fCanceled || RT_SUCCESS(rc)) { /* * Even if we succeeded until here make sure to check whether we really transfered * everything. */ if ( objData.mObjectSize > 0 && cbWrittenTotal == 0) { /* If nothing was transfered but the file size was > 0 then "vbox_cat" wasn't able to write * to the destination -> access denied. */ setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Access denied when copying file \"%s\" to \"%s\""), mSource.c_str(), mDest.c_str())); rc = VERR_GENERAL_FAILURE; /* Fudge. */ } else if (cbWrittenTotal < (uint64_t)objData.mObjectSize) { /* If we did not copy all let the user know. */ setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Copying file \"%s\" failed (%RU64/%RI64 bytes transfered)"), mSource.c_str(), cbWrittenTotal, objData.mObjectSize)); rc = VERR_GENERAL_FAILURE; /* Fudge. */ } else { ProcessStatus_T procStatus; LONG exitCode; if ( ( SUCCEEDED(pProcess->COMGETTER(Status(&procStatus))) && procStatus != ProcessStatus_TerminatedNormally) || ( SUCCEEDED(pProcess->COMGETTER(ExitCode(&exitCode))) && exitCode != 0) ) { setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Copying file \"%s\" failed with status %ld, exit code %d"), mSource.c_str(), procStatus, exitCode)); /**@todo Add stringify methods! */ rc = VERR_GENERAL_FAILURE; /* Fudge. */ } else /* Yay, success! 
*/ rc = setProgressSuccess(); } } if (!pProcess.isNull()) pProcess->uninit(); } RTFileClose(fileDest); } } LogFlowFuncLeaveRC(rc); return rc; } int SessionTaskCopyFrom::RunAsync(const Utf8Str &strDesc, ComObjPtr<Progress> &pProgress) { LogFlowThisFunc(("strDesc=%s, strSource=%s, strDest=%s, uFlags=%x\n", strDesc.c_str(), mSource.c_str(), mDest.c_str(), mFlags)); mDesc = strDesc; mProgress = pProgress; int rc = RTThreadCreate(NULL, SessionTaskCopyFrom::taskThread, this, 0, RTTHREADTYPE_MAIN_HEAVY_WORKER, 0, "gctlCpyFrom"); LogFlowFuncLeaveRC(rc); return rc; } /* static */ int SessionTaskCopyFrom::taskThread(RTTHREAD Thread, void *pvUser) { std::auto_ptr<SessionTaskCopyFrom> task(static_cast<SessionTaskCopyFrom*>(pvUser)); AssertReturn(task.get(), VERR_GENERAL_FAILURE); LogFlowFunc(("pTask=%p\n", task.get())); return task->Run(); } SessionTaskUpdateAdditions::SessionTaskUpdateAdditions(GuestSession *pSession, const Utf8Str &strSource, uint32_t uFlags) : GuestSessionTask(pSession) { mSource = strSource; mFlags = uFlags; } SessionTaskUpdateAdditions::~SessionTaskUpdateAdditions(void) { } int SessionTaskUpdateAdditions::copyFileToGuest(GuestSession *pSession, PRTISOFSFILE pISO, Utf8Str const &strFileSource, const Utf8Str &strFileDest, bool fOptional, uint32_t *pcbSize) { AssertPtrReturn(pSession, VERR_INVALID_POINTER); AssertPtrReturn(pISO, VERR_INVALID_POINTER); /* pcbSize is optional. */ uint32_t cbOffset; size_t cbSize; int rc = RTIsoFsGetFileInfo(pISO, strFileSource.c_str(), &cbOffset, &cbSize); if (RT_FAILURE(rc)) { if (fOptional) return VINF_SUCCESS; return rc; } Assert(cbOffset); Assert(cbSize); rc = RTFileSeek(pISO->file, cbOffset, RTFILE_SEEK_BEGIN, NULL); /* Copy over the Guest Additions file to the guest. 
*/ if (RT_SUCCESS(rc)) { LogFlowThisFunc(("Copying Guest Additions installer file \"%s\" to \"%s\" on guest ...\n", strFileSource.c_str(), strFileDest.c_str())); if (RT_SUCCESS(rc)) { SessionTaskCopyTo *pTask = new SessionTaskCopyTo(pSession /* GuestSession */, &pISO->file, cbOffset, cbSize, strFileDest, CopyFileFlag_None); AssertPtrReturn(pTask, VERR_NO_MEMORY); ComObjPtr<Progress> pProgressCopyTo; rc = pSession->startTaskAsync(Utf8StrFmt(GuestSession::tr("Copying Guest Additions installer file \"%s\" to \"%s\" on guest"), mSource.c_str(), strFileDest.c_str()), pTask, pProgressCopyTo); if (RT_SUCCESS(rc)) { BOOL fCanceled = FALSE; HRESULT hr = pProgressCopyTo->WaitForCompletion(-1); if ( SUCCEEDED(pProgressCopyTo->COMGETTER(Canceled)(&fCanceled)) && fCanceled) { rc = VERR_GENERAL_FAILURE; /* Fudge. */ } else if (FAILED(hr)) { Assert(FAILED(hr)); rc = VERR_GENERAL_FAILURE; /* Fudge. */ } } } } /** @todo Note: Since there is no file locking involved at the moment, there can be modifications * between finished copying, the verification and the actual execution. */ /* Determine where the installer image ended up and if it has the correct size. */ if (RT_SUCCESS(rc)) { LogFlowThisFunc(("Verifying Guest Additions installer file \"%s\" ...\n", strFileDest.c_str())); GuestFsObjData objData; int64_t cbSizeOnGuest; int guestRc; rc = pSession->fileQuerySizeInternal(strFileDest, &cbSizeOnGuest, &guestRc); if ( RT_SUCCESS(rc) && cbSize == (uint64_t)cbSizeOnGuest) { LogFlowThisFunc(("Guest Additions installer file \"%s\" successfully verified\n", strFileDest.c_str())); } else { if (RT_SUCCESS(rc)) /* Size does not match. */ { LogFlowThisFunc(("Size of Guest Additions installer file \"%s\" does not match: %RI64bytes copied, %RU64bytes expected\n", strFileDest.c_str(), cbSizeOnGuest, cbSize)); rc = VERR_BROKEN_PIPE; /** @todo Find a better error. */ } else { switch (rc) { case VERR_GENERAL_FAILURE: /** @todo Special guest control rc needed! 
*/ setProgressErrorMsg(VBOX_E_IPRT_ERROR, GuestProcess::guestErrorToString(guestRc)); break; default: setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Error while querying size for file \"%s\": %Rrc"), strFileDest.c_str(), rc)); break; } } } if (RT_SUCCESS(rc)) { if (pcbSize) *pcbSize = cbSizeOnGuest; } } return rc; } int SessionTaskUpdateAdditions::runFileOnGuest(GuestSession *pSession, GuestProcessStartupInfo &procInfo) { AssertPtrReturn(pSession, VERR_INVALID_POINTER); LogRel(("Running %s ...\n", procInfo.mName.c_str())); LONG exitCode; GuestProcessTool procTool; int guestRc; int vrc = procTool.Init(pSession, procInfo, false /* Async */, &guestRc); if (RT_SUCCESS(vrc)) { if (RT_SUCCESS(guestRc)) vrc = procTool.Wait(GUESTPROCESSTOOL_FLAG_NONE, &guestRc); if (RT_SUCCESS(vrc)) vrc = procTool.TerminatedOk(&exitCode); } if (RT_FAILURE(vrc)) { switch (vrc) { case VERR_NOT_EQUAL: /** @todo Special guest control rc needed! */ setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Running update file \"%s\" on guest terminated with exit code %ld"), procInfo.mCommand.c_str(), exitCode)); break; case VERR_GENERAL_FAILURE: /** @todo Special guest control rc needed! */ setProgressErrorMsg(VBOX_E_IPRT_ERROR, GuestProcess::guestErrorToString(guestRc)); break; case VERR_INVALID_STATE: /** @todo Special guest control rc needed! 
*/ setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Update file \"%s\" reported invalid running state"), procInfo.mCommand.c_str())); break; default: setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Error while running update file \"%s\" on guest: %Rrc"), procInfo.mCommand.c_str(), vrc)); break; } } return vrc; } int SessionTaskUpdateAdditions::Run(void) { LogFlowThisFuncEnter(); ComObjPtr<GuestSession> pSession = mSession; Assert(!pSession.isNull()); AutoCaller autoCaller(pSession); if (FAILED(autoCaller.rc())) return autoCaller.rc(); int rc = setProgress(10); if (RT_FAILURE(rc)) return rc; HRESULT hr = S_OK; LogRel(("Automatic update of Guest Additions started, using \"%s\"\n", mSource.c_str())); ComObjPtr<Guest> pGuest(mSession->getParent()); #if 0 /* * Wait for the guest being ready within 30 seconds. */ AdditionsRunLevelType_T addsRunLevel; uint64_t tsStart = RTTimeSystemMilliTS(); while ( SUCCEEDED(hr = pGuest->COMGETTER(AdditionsRunLevel)(&addsRunLevel)) && ( addsRunLevel != AdditionsRunLevelType_Userland && addsRunLevel != AdditionsRunLevelType_Desktop)) { if ((RTTimeSystemMilliTS() - tsStart) > 30 * 1000) { rc = VERR_TIMEOUT; break; } RTThreadSleep(100); /* Wait a bit. */ } if (FAILED(hr)) rc = VERR_TIMEOUT; if (rc == VERR_TIMEOUT) hr = setProgressErrorMsg(VBOX_E_NOT_SUPPORTED, Utf8StrFmt(GuestSession::tr("Guest Additions were not ready within time, giving up"))); #else /* * For use with the GUI we don't want to wait, just return so that the manual .ISO mounting * can continue. 
*/ AdditionsRunLevelType_T addsRunLevel; if ( FAILED(hr = pGuest->COMGETTER(AdditionsRunLevel)(&addsRunLevel)) || ( addsRunLevel != AdditionsRunLevelType_Userland && addsRunLevel != AdditionsRunLevelType_Desktop)) { if (addsRunLevel == AdditionsRunLevelType_System) hr = setProgressErrorMsg(VBOX_E_NOT_SUPPORTED, Utf8StrFmt(GuestSession::tr("Guest Additions are installed but not fully loaded yet, aborting automatic update"))); else hr = setProgressErrorMsg(VBOX_E_NOT_SUPPORTED, Utf8StrFmt(GuestSession::tr("Guest Additions not installed or ready, aborting automatic update"))); rc = VERR_NOT_SUPPORTED; } #endif if (RT_SUCCESS(rc)) { /* * Determine if we are able to update automatically. This only works * if there are recent Guest Additions installed already. */ Utf8Str strAddsVer; rc = getGuestProperty(pGuest, "/VirtualBox/GuestAdd/Version", strAddsVer); if ( RT_SUCCESS(rc) && RTStrVersionCompare(strAddsVer.c_str(), "4.1") < 0) { hr = setProgressErrorMsg(VBOX_E_NOT_SUPPORTED, Utf8StrFmt(GuestSession::tr("Guest has too old Guest Additions (%s) installed for automatic updating, please update manually"), strAddsVer.c_str())); rc = VERR_NOT_SUPPORTED; } } Utf8Str strOSVer; eOSType osType; if (RT_SUCCESS(rc)) { /* * Determine guest OS type and the required installer image. */ Utf8Str strOSType; rc = getGuestProperty(pGuest, "/VirtualBox/GuestInfo/OS/Product", strOSType); if (RT_SUCCESS(rc)) { if ( strOSType.contains("Microsoft", Utf8Str::CaseInsensitive) || strOSType.contains("Windows", Utf8Str::CaseInsensitive)) { osType = eOSType_Windows; /* * Determine guest OS version. */ rc = getGuestProperty(pGuest, "/VirtualBox/GuestInfo/OS/Release", strOSVer); if (RT_FAILURE(rc)) { hr = setProgressErrorMsg(VBOX_E_NOT_SUPPORTED, Utf8StrFmt(GuestSession::tr("Unable to detected guest OS version, please update manually"))); rc = VERR_NOT_SUPPORTED; } /* Because Windows 2000 + XP and is bitching with WHQL popups even if we have signed drivers we * can't do automated updates here. 
*/ /* Windows XP 64-bit (5.2) is a Windows 2003 Server actually, so skip this here. */ if ( RT_SUCCESS(rc) && ( strOSVer.startsWith("5.0") /* Exclude the build number. */ || strOSVer.startsWith("5.1")) /* Exclude the build number. */ ) { /* If we don't have AdditionsUpdateFlag_WaitForUpdateStartOnly set we can't continue * because the Windows Guest Additions installer will fail because of WHQL popups. If the * flag is set this update routine ends successfully as soon as the installer was started * (and the user has to deal with it in the guest). */ if (!(mFlags & AdditionsUpdateFlag_WaitForUpdateStartOnly)) { hr = setProgressErrorMsg(VBOX_E_NOT_SUPPORTED, Utf8StrFmt(GuestSession::tr("Windows 2000 and XP are not supported for automatic updating due to WHQL interaction, please update manually"))); rc = VERR_NOT_SUPPORTED; } } } else if (strOSType.contains("Solaris", Utf8Str::CaseInsensitive)) { osType = eOSType_Solaris; } else /* Everything else hopefully means Linux :-). */ osType = eOSType_Linux; #if 1 /* Only Windows is supported (and tested) at the moment. */ if (osType != eOSType_Windows) { hr = setProgressErrorMsg(VBOX_E_NOT_SUPPORTED, Utf8StrFmt(GuestSession::tr("Detected guest OS (%s) does not support automatic Guest Additions updating, please update manually"), strOSType.c_str())); rc = VERR_NOT_SUPPORTED; } #endif } } RTISOFSFILE iso; if (RT_SUCCESS(rc)) { /* * Try to open the .ISO file to extract all needed files. */ rc = RTIsoFsOpen(&iso, mSource.c_str()); if (RT_FAILURE(rc)) { hr = setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Unable to open Guest Additions .ISO file \"%s\": %Rrc"), mSource.c_str(), rc)); } else { /* Set default installation directories. */ Utf8Str strUpdateDir = "/tmp/"; if (osType == eOSType_Windows) strUpdateDir = "C:\\Temp\\"; rc = setProgress(5); /* Try looking up the Guest Additions installation directory. 
*/ if (RT_SUCCESS(rc)) { /* Try getting the installed Guest Additions version to know whether we * can install our temporary Guest Addition data into the original installation * directory. * * Because versions prior to 4.2 had bugs wrt spaces in paths we have to choose * a different location then. */ bool fUseInstallDir = false; Utf8Str strAddsVer; rc = getGuestProperty(pGuest, "/VirtualBox/GuestAdd/Version", strAddsVer); if ( RT_SUCCESS(rc) && RTStrVersionCompare(strAddsVer.c_str(), "4.2r80329") > 0) { fUseInstallDir = true; } if (fUseInstallDir) { if (RT_SUCCESS(rc)) rc = getGuestProperty(pGuest, "/VirtualBox/GuestAdd/InstallDir", strUpdateDir); if (RT_SUCCESS(rc)) { if (osType == eOSType_Windows) { strUpdateDir.findReplace('/', '\\'); strUpdateDir.append("\\Update\\"); } else strUpdateDir.append("/update/"); } } } if (RT_SUCCESS(rc)) LogRel(("Guest Additions update directory is: %s\n", strUpdateDir.c_str())); /* Create the installation directory. */ int guestRc; rc = pSession->directoryCreateInternal(strUpdateDir, 755 /* Mode */, DirectoryCreateFlag_Parents, &guestRc); if (RT_FAILURE(rc)) { switch (rc) { case VERR_GENERAL_FAILURE: /** @todo Special guest control rc needed! */ setProgressErrorMsg(VBOX_E_IPRT_ERROR, GuestProcess::guestErrorToString(guestRc)); break; default: setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Error creating installation directory \"%s\" on the guest: %Rrc"), strUpdateDir.c_str(), rc)); break; } } if (RT_SUCCESS(rc)) rc = setProgress(10); if (RT_SUCCESS(rc)) { /* Prepare the file(s) we want to copy over to the guest and * (maybe) want to run. */ switch (osType) { case eOSType_Windows: { /* Do we need to install our certificates? We do this for W2K and up. */ bool fInstallCert = false; /* Only Windows 2000 and up need certificates to be installed. 
*/ if (RTStrVersionCompare(strOSVer.c_str(), "5.0") >= 0) { fInstallCert = true; LogRel(("Certificates for auto updating WHQL drivers will be installed\n")); } else LogRel(("Skipping installation of certificates for WHQL drivers\n")); if (fInstallCert) { /* Our certificate. */ mFiles.push_back(InstallerFile("CERT/ORACLE_VBOX.CER", strUpdateDir + "oracle-vbox.cer", UPDATEFILE_FLAG_COPY_FROM_ISO | UPDATEFILE_FLAG_OPTIONAL)); /* Our certificate installation utility. */ /* First pass: Copy over the file + execute it to remove any existing * VBox certificates. */ GuestProcessStartupInfo siCertUtilRem; siCertUtilRem.mName = "VirtualBox Certificate Utility, removing old VirtualBox certificates"; siCertUtilRem.mArguments.push_back(Utf8Str("remove-trusted-publisher")); siCertUtilRem.mArguments.push_back(Utf8Str("--root")); /* Add root certificate as well. */ siCertUtilRem.mArguments.push_back(Utf8Str(strUpdateDir + "oracle-vbox.cer")); siCertUtilRem.mArguments.push_back(Utf8Str(strUpdateDir + "oracle-vbox.cer")); mFiles.push_back(InstallerFile("CERT/VBOXCERTUTIL.EXE", strUpdateDir + "VBoxCertUtil.exe", UPDATEFILE_FLAG_COPY_FROM_ISO | UPDATEFILE_FLAG_EXECUTE | UPDATEFILE_FLAG_OPTIONAL, siCertUtilRem)); /* Second pass: Only execute (but don't copy) again, this time installng the * recent certificates just copied over. */ GuestProcessStartupInfo siCertUtilAdd; siCertUtilAdd.mName = "VirtualBox Certificate Utility, installing VirtualBox certificates"; siCertUtilAdd.mArguments.push_back(Utf8Str("add-trusted-publisher")); siCertUtilAdd.mArguments.push_back(Utf8Str("--root")); /* Add root certificate as well. 
*/ siCertUtilAdd.mArguments.push_back(Utf8Str(strUpdateDir + "oracle-vbox.cer")); siCertUtilAdd.mArguments.push_back(Utf8Str(strUpdateDir + "oracle-vbox.cer")); mFiles.push_back(InstallerFile("CERT/VBOXCERTUTIL.EXE", strUpdateDir + "VBoxCertUtil.exe", UPDATEFILE_FLAG_EXECUTE | UPDATEFILE_FLAG_OPTIONAL, siCertUtilAdd)); } /* The installers in different flavors, as we don't know (and can't assume) * the guest's bitness. */ mFiles.push_back(InstallerFile("VBOXWINDOWSADDITIONS_X86.EXE", strUpdateDir + "VBoxWindowsAdditions-x86.exe", UPDATEFILE_FLAG_COPY_FROM_ISO)); mFiles.push_back(InstallerFile("VBOXWINDOWSADDITIONS_AMD64.EXE", strUpdateDir + "VBoxWindowsAdditions-amd64.exe", UPDATEFILE_FLAG_COPY_FROM_ISO)); /* The stub loader which decides which flavor to run. */ GuestProcessStartupInfo siInstaller; siInstaller.mName = "VirtualBox Windows Guest Additions Installer"; /* Set a running timeout of 5 minutes -- the Windows Guest Additions * setup can take quite a while, so be on the safe side. */ siInstaller.mTimeoutMS = 5 * 60 * 1000; siInstaller.mArguments.push_back(Utf8Str("/S")); /* We want to install in silent mode. */ siInstaller.mArguments.push_back(Utf8Str("/l")); /* ... and logging enabled. */ /* Don't quit VBoxService during upgrade because it still is used for this * piece of code we're in right now (that is, here!) ... */ siInstaller.mArguments.push_back(Utf8Str("/no_vboxservice_exit")); /* Tell the installer to report its current installation status * using a running VBoxTray instance via balloon messages in the * Windows taskbar. */ siInstaller.mArguments.push_back(Utf8Str("/post_installstatus")); /* If the caller does not want to wait for out guest update process to end, * complete the progress object now so that the caller can do other work. 
*/ if (mFlags & AdditionsUpdateFlag_WaitForUpdateStartOnly) siInstaller.mFlags |= ProcessCreateFlag_WaitForProcessStartOnly; mFiles.push_back(InstallerFile("VBOXWINDOWSADDITIONS.EXE", strUpdateDir + "VBoxWindowsAdditions.exe", UPDATEFILE_FLAG_COPY_FROM_ISO | UPDATEFILE_FLAG_EXECUTE, siInstaller)); break; } case eOSType_Linux: /** @todo Add Linux support. */ break; case eOSType_Solaris: /** @todo Add Solaris support. */ break; default: AssertReleaseMsgFailed(("Unsupported guest type: %d\n", osType)); break; } } if (RT_SUCCESS(rc)) { /* We want to spend 40% total for all copying operations. So roughly * calculate the specific percentage step of each copied file. */ uint8_t uOffset = 20; /* Start at 20%. */ uint8_t uStep = 40 / mFiles.size(); LogRel(("Copying over Guest Additions update files to the guest ...\n")); std::vector<InstallerFile>::const_iterator itFiles = mFiles.begin(); while (itFiles != mFiles.end()) { if (itFiles->fFlags & UPDATEFILE_FLAG_COPY_FROM_ISO) { bool fOptional = false; if (itFiles->fFlags & UPDATEFILE_FLAG_OPTIONAL) fOptional = true; rc = copyFileToGuest(pSession, &iso, itFiles->strSource, itFiles->strDest, fOptional, NULL /* cbSize */); if (RT_FAILURE(rc)) { hr = setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Error while copying file \"%s\" to \"%s\" on the guest: %Rrc"), itFiles->strSource.c_str(), itFiles->strDest.c_str(), rc)); break; } } rc = setProgress(uOffset); if (RT_FAILURE(rc)) break; uOffset += uStep; itFiles++; } } /* Done copying, close .ISO file. */ RTIsoFsClose(&iso); if (RT_SUCCESS(rc)) { /* We want to spend 35% total for all copying operations. So roughly * calculate the specific percentage step of each copied file. */ uint8_t uOffset = 60; /* Start at 60%. 
*/ uint8_t uStep = 35 / mFiles.size(); LogRel(("Executing Guest Additions update files ...\n")); std::vector<InstallerFile>::iterator itFiles = mFiles.begin(); while (itFiles != mFiles.end()) { if (itFiles->fFlags & UPDATEFILE_FLAG_EXECUTE) { rc = runFileOnGuest(pSession, itFiles->mProcInfo); if (RT_FAILURE(rc)) break; } rc = setProgress(uOffset); if (RT_FAILURE(rc)) break; uOffset += uStep; itFiles++; } } if (RT_SUCCESS(rc)) { LogRel(("Automatic update of Guest Additions succeeded\n")); rc = setProgressSuccess(); } } } if (RT_FAILURE(rc)) { if (rc == VERR_CANCELLED) { LogRel(("Automatic update of Guest Additions was canceled\n")); hr = setProgressErrorMsg(VBOX_E_IPRT_ERROR, Utf8StrFmt(GuestSession::tr("Installation was canceled"))); } else { Utf8Str strError = Utf8StrFmt("No further error information available (%Rrc)", rc); if (!mProgress.isNull()) /* Progress object is optional. */ { ComPtr<IVirtualBoxErrorInfo> pError; hr = mProgress->COMGETTER(ErrorInfo)(pError.asOutParam()); Assert(!pError.isNull()); if (SUCCEEDED(hr)) { Bstr strVal; hr = pError->COMGETTER(Text)(strVal.asOutParam()); if ( SUCCEEDED(hr) && strVal.isNotEmpty()) strError = strVal; } } LogRel(("Automatic update of Guest Additions failed: %s\n", strError.c_str())); } LogRel(("Please install Guest Additions manually\n")); } LogFlowFuncLeaveRC(rc); return rc; } int SessionTaskUpdateAdditions::RunAsync(const Utf8Str &strDesc, ComObjPtr<Progress> &pProgress) { LogFlowThisFunc(("strDesc=%s, strSource=%s, uFlags=%x\n", strDesc.c_str(), mSource.c_str(), mFlags)); mDesc = strDesc; mProgress = pProgress; int rc = RTThreadCreate(NULL, SessionTaskUpdateAdditions::taskThread, this, 0, RTTHREADTYPE_MAIN_HEAVY_WORKER, 0, "gctlUpGA"); LogFlowFuncLeaveRC(rc); return rc; } /* static */ int SessionTaskUpdateAdditions::taskThread(RTTHREAD Thread, void *pvUser) { std::auto_ptr<SessionTaskUpdateAdditions> task(static_cast<SessionTaskUpdateAdditions*>(pvUser)); AssertReturn(task.get(), VERR_GENERAL_FAILURE); 
LogFlowFunc(("pTask=%p\n", task.get())); return task->Run(); }
gpl-2.0
Kittnz/ZeroServer
src/game/OutdoorPvP/OutdoorPvPMgr.cpp
3
3905
/*
 * This file is part of the CMaNGOS Project. See AUTHORS file for Copyright information
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "OutdoorPvPMgr.h"
#include "Policies/Singleton.h"
#include "OutdoorPvP.h"
#include "World.h"
#include "Log.h"
#include "OutdoorPvPEP.h"
#include "OutdoorPvPSI.h"

INSTANTIATE_SINGLETON_1(OutdoorPvPMgr);

OutdoorPvPMgr::OutdoorPvPMgr()
{
    m_updateTimer.SetInterval(TIMER_OPVP_MGR_UPDATE);
    memset(&m_scripts, 0, sizeof(m_scripts));
}

OutdoorPvPMgr::~OutdoorPvPMgr()
{
    // Tear down every script that was instantiated in InitOutdoorPvP()
    // (unloaded slots are NULL; deleting NULL is a no-op).
    for (uint8 idx = 0; idx < MAX_OPVP_ID; ++idx)
        delete m_scripts[idx];
}

// Instantiate an outdoor PvP script iff its world-config switch is enabled.
#define LOAD_OPVP_ZONE(a) \
    if (sWorld.getConfig(CONFIG_BOOL_OUTDOORPVP_##a##_ENABLED)) \
    { \
        m_scripts[OPVP_ID_##a] = new OutdoorPvP##a(); \
        ++loadedCount; \
    }

/**
  Function which loads all outdoor pvp scripts
 */
void OutdoorPvPMgr::InitOutdoorPvP()
{
    uint8 loadedCount = 0;

    LOAD_OPVP_ZONE(SI);
    LOAD_OPVP_ZONE(EP);

    sLog.outString();
    sLog.outString(">> Loaded %u Outdoor PvP zones", loadedCount);
}

OutdoorPvP* OutdoorPvPMgr::GetScript(uint32 zoneId)
{
    // Map a "main" outdoor PvP zone directly to its script.
    switch (zoneId)
    {
        case ZONE_ID_SILITHUS:
            return m_scripts[OPVP_ID_SI];
        case ZONE_ID_EASTERN_PLAGUELANDS:
            return m_scripts[OPVP_ID_EP];
        default:
            return NULL;
    }
}

OutdoorPvP* OutdoorPvPMgr::GetScriptOfAffectedZone(uint32 zoneId)
{
    // Zones that are influenced by a neighbouring outdoor PvP area
    // but are not the main battleground zone themselves.
    switch (zoneId)
    {
        case ZONE_ID_TEMPLE_OF_AQ:
        case ZONE_ID_RUINS_OF_AQ:
        case ZONE_ID_GATES_OF_AQ:
            return m_scripts[OPVP_ID_SI];
        case ZONE_ID_STRATHOLME:
        case ZONE_ID_SCHOLOMANCE:
            return m_scripts[OPVP_ID_EP];
        default:
            return NULL;
    }
}

/**
  Function that handles the players which enters a specific zone

  @param   player to be handled in the event
  @param   zone id used for the current outdoor pvp script
 */
void OutdoorPvPMgr::HandlePlayerEnterZone(Player* player, uint32 zoneId)
{
    OutdoorPvP* script = GetScript(zoneId);
    if (script)
    {
        script->HandlePlayerEnterZone(player, true);
        return;
    }

    script = GetScriptOfAffectedZone(zoneId);
    if (script)
        script->HandlePlayerEnterZone(player, false);
}

/**
  Function that handles the player who leaves a specific zone

  @param   player to be handled in the event
  @param   zone id used for the current outdoor pvp script
 */
void OutdoorPvPMgr::HandlePlayerLeaveZone(Player* player, uint32 zoneId)
{
    // teleport: called once from Player::CleanupsBeforeDelete, once from Player::UpdateZone
    OutdoorPvP* script = GetScript(zoneId);
    if (script)
    {
        script->HandlePlayerLeaveZone(player, true);
        return;
    }

    script = GetScriptOfAffectedZone(zoneId);
    if (script)
        script->HandlePlayerLeaveZone(player, false);
}

void OutdoorPvPMgr::Update(uint32 diff)
{
    m_updateTimer.Update(diff);
    if (!m_updateTimer.Passed())
        return;

    // Tick every loaded script with the elapsed interval, then rearm the timer.
    for (uint8 idx = 0; idx < MAX_OPVP_ID; ++idx)
    {
        if (m_scripts[idx])
            m_scripts[idx]->Update(m_updateTimer.GetCurrent());
    }

    m_updateTimer.Reset();
}
gpl-2.0
flexydial/libpri
pridump.c
3
3688
/* * libpri: An implementation of Primary Rate ISDN * * Written by Mark Spencer <markster@digium.com> * * Copyright (C) 2001-2005, Digium, Inc. * All Rights Reserved. */ /* * See http://www.asterisk.org for more information about * the Asterisk project. Please do not directly contact * any of the maintainers of this project for assistance; * the project provides a web site, mailing lists and IRC * channels for your use. * * This program is free software, distributed under the terms of * the GNU General Public License Version 2 as published by the * Free Software Foundation. See the LICENSE file included with * this program for more details. * * In addition, when this program is distributed with Asterisk in * any form that would qualify as a 'combined work' or as a * 'derivative work' (but not mere aggregation), you can redistribute * and/or modify the combination under the terms of the license * provided with that copy of Asterisk, instead of the license * terms granted here. */ /* * This program tests libpri call reception using a zaptel interface. * Its state machines are setup for RECEIVING CALLS ONLY, so if you * are trying to both place and receive calls you have to a bit more. 
*/ #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <fcntl.h> #include <errno.h> #include <string.h> #include <sys/ioctl.h> #include <sys/select.h> #include <sys/types.h> #include <dahdi/user.h> #include "libpri.h" #include "pri_q921.h" #include "pri_q931.h" static int pri_open(char *dev) { int dfd; struct dahdi_params p; dfd = open(dev, O_RDWR); if (dfd < 0) { fprintf(stderr, "Failed to open dchannel '%s': %s\n", dev, strerror(errno)); return -1; } if (ioctl(dfd, DAHDI_GET_PARAMS, &p)) { fprintf(stderr, "Unable to get parameters on '%s': %s\n", dev, strerror(errno)); return -1; } if ((p.sigtype != DAHDI_SIG_HDLCRAW) && (p.sigtype != DAHDI_SIG_HDLCFCS)) { fprintf(stderr, "%s is in %d signalling, not FCS HDLC or RAW HDLC mode\n", dev, p.sigtype); return -1; } return dfd; } static void dump_packet(struct pri *pri, char *buf, int len, int txrx) { q921_h *h = (q921_h *)buf; q921_dump(pri, h, len, PRI_DEBUG_ALL, txrx); if (!((h->h.data[0] & Q921_FRAMETYPE_MASK) & 0x3)) { q931_dump(pri, h->h.tei, (q931_h *)(h->i.data), len - 4 - 2 /* FCS */, txrx); } fflush(stdout); fflush(stderr); } static void pri_bridge(int d1, int d2) { char buf[1024]; fd_set fds; int max; int e; int res; for(;;) { FD_ZERO(&fds); FD_SET(d1, &fds); FD_SET(d2, &fds); max = d1; if (max < d2) max = d2; ioctl(d1, DAHDI_GETEVENT, &e); ioctl(d2, DAHDI_GETEVENT, &e); res = select(max + 1, &fds, NULL, NULL, NULL); if (res < 0) { fprintf(stderr, "Select returned %d: %s\n", res, strerror(errno)); continue; }; if (FD_ISSET(d1, &fds)) { /* Copy from d1 to d2 */ res = read(d1, buf, sizeof(buf)); dump_packet((struct pri *)NULL, buf, res, 1); res = write(d2, buf, res); } if (FD_ISSET(d2, &fds)) { /* Copy from d2 to d1 */ res = read(d2, buf, sizeof(buf)); dump_packet((struct pri *)NULL, buf, res, 0); res = write(d1, buf, res); } } } static void my_pri_message(struct pri *pri, char *stuff) { fprintf(stdout, "%s", stuff); } static void my_pri_error(struct pri *pri, char *stuff) { fprintf(stderr, 
"%s", stuff); } int main(int argc, char *argv[]) { int d1, d2; if (argc < 3) { fprintf(stderr, "Usage: pridump <dev1> <dev2>\n"); exit(1); } pri_set_message(my_pri_message); pri_set_error(my_pri_error); d1 = pri_open(argv[1]); if (d1 < 0) exit(1); d2 = pri_open(argv[2]); if (d2 < 0) exit(1); pri_bridge(d1, d2); return 0; }
gpl-2.0
celebdor/libvirt
src/openvz/openvz_driver.c
3
77778
/* * openvz_driver.c: core driver methods for managing OpenVZ VEs * * Copyright (C) 2010-2015 Red Hat, Inc. * Copyright (C) 2006, 2007 Binary Karma * Copyright (C) 2006 Shuveb Hussain * Copyright (C) 2007 Anoop Joe Cyriac * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library. If not, see * <http://www.gnu.org/licenses/>. * * Authors: * Shuveb Hussain <shuveb@binarykarma.com> * Anoop Joe Cyriac <anoop@binarykarma.com> * */ #include <config.h> #include <sys/types.h> #include <sys/poll.h> #include <limits.h> #include <string.h> #include <stdio.h> #include <stdarg.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <sys/stat.h> #include <fcntl.h> #include <paths.h> #include <pwd.h> #include <sys/wait.h> #include "virerror.h" #include "datatypes.h" #include "openvz_driver.h" #include "openvz_util.h" #include "virbuffer.h" #include "openvz_conf.h" #include "nodeinfo.h" #include "viralloc.h" #include "virfile.h" #include "virtypedparam.h" #include "virlog.h" #include "vircommand.h" #include "viruri.h" #include "virstats.h" #include "virstring.h" #define VIR_FROM_THIS VIR_FROM_OPENVZ VIR_LOG_INIT("openvz.openvz_driver"); #define OPENVZ_MAX_ARG 28 #define CMDBUF_LEN 1488 #define CMDOP_LEN 288 #define OPENVZ_NB_MEM_PARAM 3 static int openvzGetProcessInfo(unsigned long long *cpuTime, int vpsid); static int openvzConnectGetMaxVcpus(virConnectPtr conn, const char *type); static int 
openvzDomainGetMaxVcpus(virDomainPtr dom); static int openvzDomainSetVcpusInternal(virDomainObjPtr vm, unsigned int nvcpus); static int openvzDomainSetMemoryInternal(virDomainObjPtr vm, unsigned long long memory); static int openvzGetVEStatus(virDomainObjPtr vm, int *status, int *reason); static void openvzDriverLock(struct openvz_driver *driver) { virMutexLock(&driver->lock); } static void openvzDriverUnlock(struct openvz_driver *driver) { virMutexUnlock(&driver->lock); } struct openvz_driver ovz_driver; static int openvzDomainDefPostParse(virDomainDefPtr def, virCapsPtr caps ATTRIBUTE_UNUSED, void *opaque ATTRIBUTE_UNUSED) { /* fill the init path */ if (STREQ(def->os.type, "exe") && !def->os.init) return VIR_STRDUP(def->os.init, "/sbin/init") < 0 ? -1 : 0; return 0; } static int openvzDomainDeviceDefPostParse(virDomainDeviceDefPtr dev, const virDomainDef *def ATTRIBUTE_UNUSED, virCapsPtr caps ATTRIBUTE_UNUSED, void *opaque ATTRIBUTE_UNUSED) { if (dev->type == VIR_DOMAIN_DEVICE_CHR && dev->data.chr->deviceType == VIR_DOMAIN_CHR_DEVICE_TYPE_CONSOLE && dev->data.chr->targetType == VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_NONE) dev->data.chr->targetType = VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_OPENVZ; /* forbid capabilities mode hostdev in this kind of hypervisor */ if (dev->type == VIR_DOMAIN_DEVICE_HOSTDEV && dev->data.hostdev->mode == VIR_DOMAIN_HOSTDEV_MODE_CAPABILITIES) { virReportError(VIR_ERR_CONFIG_UNSUPPORTED, _("hostdev mode 'capabilities' is not " "supported in %s"), virDomainVirtTypeToString(def->virtType)); return -1; } return 0; } virDomainDefParserConfig openvzDomainDefParserConfig = { .domainPostParseCallback = openvzDomainDefPostParse, .devicesPostParseCallback = openvzDomainDeviceDefPostParse, }; /* generate arguments to create OpenVZ container return -1 - error 0 - OK Caller has to free the cmd */ static virCommandPtr openvzDomainDefineCmd(virDomainDefPtr vmdef) { virCommandPtr cmd = virCommandNewArgList(VZCTL, "--quiet", "create", NULL); if (vmdef == NULL) 
{ virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Container is not defined")); virCommandFree(cmd); return NULL; } virCommandAddArgList(cmd, vmdef->name, "--name", vmdef->name, NULL); if (vmdef->nfss == 1 && vmdef->fss[0]->type == VIR_DOMAIN_FS_TYPE_TEMPLATE) { virCommandAddArgList(cmd, "--ostemplate", vmdef->fss[0]->src, NULL); } return cmd; } static int openvzSetInitialConfig(virDomainDefPtr vmdef) { int ret = -1; int vpsid; char * confdir = NULL; virCommandPtr cmd = NULL; if (vmdef->nfss > 1) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("only one filesystem supported")); goto cleanup; } if (vmdef->nfss == 1 && vmdef->fss[0]->type != VIR_DOMAIN_FS_TYPE_TEMPLATE && vmdef->fss[0]->type != VIR_DOMAIN_FS_TYPE_MOUNT) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("filesystem is not of type 'template' or 'mount'")); goto cleanup; } if (vmdef->nfss == 1 && vmdef->fss[0]->type == VIR_DOMAIN_FS_TYPE_MOUNT) { if (virStrToLong_i(vmdef->name, NULL, 10, &vpsid) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Could not convert domain name to VEID")); goto cleanup; } if (openvzCopyDefaultConfig(vpsid) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Could not copy default config")); goto cleanup; } if (openvzWriteVPSConfigParam(vpsid, "VE_PRIVATE", vmdef->fss[0]->src) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Could not set the source dir for the filesystem")); goto cleanup; } } else { cmd = openvzDomainDefineCmd(vmdef); if (virCommandRun(cmd, NULL) < 0) goto cleanup; } ret = 0; cleanup: VIR_FREE(confdir); virCommandFree(cmd); return ret; } static int openvzSetDiskQuota(virDomainDefPtr vmdef, virDomainFSDefPtr fss, bool persist) { int ret = -1; unsigned long long sl, hl; virCommandPtr cmd = virCommandNewArgList(VZCTL, "--quiet", "set", vmdef->name, NULL); if (persist) virCommandAddArg(cmd, "--save"); if (fss->type == VIR_DOMAIN_FS_TYPE_TEMPLATE) { if (fss->space_hard_limit) { hl = VIR_DIV_UP(fss->space_hard_limit, 1024); virCommandAddArg(cmd, 
"--diskspace"); if (fss->space_soft_limit) { sl = VIR_DIV_UP(fss->space_soft_limit, 1024); virCommandAddArgFormat(cmd, "%lld:%lld", sl, hl); } else { virCommandAddArgFormat(cmd, "%lld", hl); } } else if (fss->space_soft_limit) { virReportError(VIR_ERR_INVALID_ARG, "%s", _("Can't set soft limit without hard limit")); goto cleanup; } if (virCommandRun(cmd, NULL) < 0) goto cleanup; } ret = 0; cleanup: virCommandFree(cmd); return ret; } static char * openvzDomainGetHostname(virDomainPtr dom, unsigned int flags) { char *hostname = NULL; struct openvz_driver *driver = dom->conn->privateData; virDomainObjPtr vm; virCheckFlags(0, NULL); openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, dom->uuid); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, "%s", _("no domain with matching uuid")); goto cleanup; } hostname = openvzVEGetStringParam(dom, "hostname"); if (hostname == NULL) goto error; /* vzlist prints an unset hostname as '-' */ if (STREQ(hostname, "-")) { virReportError(VIR_ERR_OPERATION_FAILED, _("Hostname of '%s' is unset"), vm->def->name); goto error; } cleanup: if (vm) virObjectUnlock(vm); return hostname; error: VIR_FREE(hostname); goto cleanup; } static virDomainPtr openvzDomainLookupByID(virConnectPtr conn, int id) { struct openvz_driver *driver = conn->privateData; virDomainObjPtr vm; virDomainPtr dom = NULL; openvzDriverLock(driver); vm = virDomainObjListFindByID(driver->domains, id); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, NULL); goto cleanup; } dom = virGetDomain(conn, vm->def->name, vm->def->uuid); if (dom) dom->id = vm->def->id; cleanup: if (vm) virObjectUnlock(vm); return dom; } static int openvzConnectGetVersion(virConnectPtr conn, unsigned long *version) { struct openvz_driver *driver = conn->privateData; openvzDriverLock(driver); *version = driver->version; openvzDriverUnlock(driver); return 0; } static char *openvzConnectGetHostname(virConnectPtr conn ATTRIBUTE_UNUSED) { 
return virGetHostname(); } static char *openvzDomainGetOSType(virDomainPtr dom) { struct openvz_driver *driver = dom->conn->privateData; virDomainObjPtr vm; char *ret = NULL; openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, dom->uuid); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, NULL); goto cleanup; } ignore_value(VIR_STRDUP(ret, vm->def->os.type)); cleanup: if (vm) virObjectUnlock(vm); return ret; } static virDomainPtr openvzDomainLookupByUUID(virConnectPtr conn, const unsigned char *uuid) { struct openvz_driver *driver = conn->privateData; virDomainObjPtr vm; virDomainPtr dom = NULL; openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, uuid); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, NULL); goto cleanup; } dom = virGetDomain(conn, vm->def->name, vm->def->uuid); if (dom) dom->id = vm->def->id; cleanup: if (vm) virObjectUnlock(vm); return dom; } static virDomainPtr openvzDomainLookupByName(virConnectPtr conn, const char *name) { struct openvz_driver *driver = conn->privateData; virDomainObjPtr vm; virDomainPtr dom = NULL; openvzDriverLock(driver); vm = virDomainObjListFindByName(driver->domains, name); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, NULL); goto cleanup; } dom = virGetDomain(conn, vm->def->name, vm->def->uuid); if (dom) dom->id = vm->def->id; cleanup: if (vm) virObjectUnlock(vm); return dom; } static int openvzDomainGetInfo(virDomainPtr dom, virDomainInfoPtr info) { struct openvz_driver *driver = dom->conn->privateData; virDomainObjPtr vm; int state; int ret = -1; openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, dom->uuid); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, "%s", _("no domain with matching uuid")); goto cleanup; } if (openvzGetVEStatus(vm, &state, NULL) == -1) goto cleanup; info->state = state; if (info->state != VIR_DOMAIN_RUNNING) { info->cpuTime = 0; } 
else { if (openvzGetProcessInfo(&(info->cpuTime), dom->id) < 0) { virReportError(VIR_ERR_OPERATION_FAILED, _("cannot read cputime for domain %d"), dom->id); goto cleanup; } } info->maxMem = vm->def->mem.max_balloon; info->memory = vm->def->mem.cur_balloon; info->nrVirtCpu = vm->def->vcpus; ret = 0; cleanup: if (vm) virObjectUnlock(vm); return ret; } static int openvzDomainGetState(virDomainPtr dom, int *state, int *reason, unsigned int flags) { struct openvz_driver *driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; virCheckFlags(0, -1); openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, dom->uuid); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, "%s", _("no domain with matching uuid")); goto cleanup; } ret = openvzGetVEStatus(vm, state, reason); cleanup: if (vm) virObjectUnlock(vm); return ret; } static int openvzDomainIsActive(virDomainPtr dom) { struct openvz_driver *driver = dom->conn->privateData; virDomainObjPtr obj; int ret = -1; openvzDriverLock(driver); obj = virDomainObjListFindByUUID(driver->domains, dom->uuid); openvzDriverUnlock(driver); if (!obj) { virReportError(VIR_ERR_NO_DOMAIN, NULL); goto cleanup; } ret = virDomainObjIsActive(obj); cleanup: if (obj) virObjectUnlock(obj); return ret; } static int openvzDomainIsPersistent(virDomainPtr dom) { struct openvz_driver *driver = dom->conn->privateData; virDomainObjPtr obj; int ret = -1; openvzDriverLock(driver); obj = virDomainObjListFindByUUID(driver->domains, dom->uuid); openvzDriverUnlock(driver); if (!obj) { virReportError(VIR_ERR_NO_DOMAIN, NULL); goto cleanup; } ret = obj->persistent; cleanup: if (obj) virObjectUnlock(obj); return ret; } static int openvzDomainIsUpdated(virDomainPtr dom ATTRIBUTE_UNUSED) { return 0; } static char *openvzDomainGetXMLDesc(virDomainPtr dom, unsigned int flags) { struct openvz_driver *driver = dom->conn->privateData; virDomainObjPtr vm; char *ret = NULL; /* Flags checked by virDomainDefFormat */ 
openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, dom->uuid); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, "%s", _("no domain with matching uuid")); goto cleanup; } ret = virDomainDefFormat(vm->def, virDomainDefFormatConvertXMLFlags(flags)); cleanup: if (vm) virObjectUnlock(vm); return ret; } /* * Convenient helper to target a command line argv * and fill in an empty slot with the supplied * key value. This lets us declare the argv on the * stack and just splice in the domain name after */ #define PROGRAM_SENTINEL ((char *)0x1) static void openvzSetProgramSentinal(const char **prog, const char *key) { const char **tmp = prog; while (tmp && *tmp) { if (*tmp == PROGRAM_SENTINEL) { *tmp = key; break; } tmp++; } } static int openvzDomainSuspend(virDomainPtr dom) { struct openvz_driver *driver = dom->conn->privateData; virDomainObjPtr vm; const char *prog[] = {VZCTL, "--quiet", "chkpnt", PROGRAM_SENTINEL, "--suspend", NULL}; int ret = -1; openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, dom->uuid); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, "%s", _("no domain with matching uuid")); goto cleanup; } if (!virDomainObjIsActive(vm)) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("Domain is not running")); goto cleanup; } if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_PAUSED) { openvzSetProgramSentinal(prog, vm->def->name); if (virRun(prog, NULL) < 0) goto cleanup; virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER); } ret = 0; cleanup: if (vm) virObjectUnlock(vm); return ret; } static int openvzDomainResume(virDomainPtr dom) { struct openvz_driver *driver = dom->conn->privateData; virDomainObjPtr vm; const char *prog[] = {VZCTL, "--quiet", "chkpnt", PROGRAM_SENTINEL, "--resume", NULL}; int ret = -1; openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, dom->uuid); openvzDriverUnlock(driver); if (!vm) { 
virReportError(VIR_ERR_NO_DOMAIN, "%s", _("no domain with matching uuid")); goto cleanup; } if (!virDomainObjIsActive(vm)) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("Domain is not running")); goto cleanup; } if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) { openvzSetProgramSentinal(prog, vm->def->name); if (virRun(prog, NULL) < 0) goto cleanup; virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, VIR_DOMAIN_RUNNING_UNPAUSED); } ret = 0; cleanup: if (vm) virObjectUnlock(vm); return ret; } static int openvzDomainShutdownFlags(virDomainPtr dom, unsigned int flags) { struct openvz_driver *driver = dom->conn->privateData; virDomainObjPtr vm; const char *prog[] = {VZCTL, "--quiet", "stop", PROGRAM_SENTINEL, NULL}; int ret = -1; int status; virCheckFlags(0, -1); openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, dom->uuid); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, "%s", _("no domain with matching uuid")); goto cleanup; } if (openvzGetVEStatus(vm, &status, NULL) == -1) goto cleanup; openvzSetProgramSentinal(prog, vm->def->name); if (status != VIR_DOMAIN_RUNNING) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("domain is not in running state")); goto cleanup; } if (virRun(prog, NULL) < 0) goto cleanup; vm->def->id = -1; virDomainObjSetState(vm, VIR_DOMAIN_SHUTOFF, VIR_DOMAIN_SHUTOFF_SHUTDOWN); dom->id = -1; ret = 0; cleanup: if (vm) virObjectUnlock(vm); return ret; } static int openvzDomainShutdown(virDomainPtr dom) { return openvzDomainShutdownFlags(dom, 0); } static int openvzDomainDestroy(virDomainPtr dom) { return openvzDomainShutdownFlags(dom, 0); } static int openvzDomainDestroyFlags(virDomainPtr dom, unsigned int flags) { return openvzDomainShutdownFlags(dom, flags); } static int openvzDomainReboot(virDomainPtr dom, unsigned int flags) { struct openvz_driver *driver = dom->conn->privateData; virDomainObjPtr vm; const char *prog[] = {VZCTL, "--quiet", "restart", PROGRAM_SENTINEL, NULL}; int ret = -1; 
/* Pick the next free in-container interface name ("ethN").
 *
 * Reads the container's NETIF config parameter; if absent, the first
 * interface is "eth0".  Otherwise the parameter is a ';'-separated list
 * of "ifname=ethN,..." entries: take the highest N seen and return
 * "eth<N+1>".
 *
 * Returns a heap-allocated name (caller frees) or NULL on OOM or if an
 * entry does not match the expected "ifname=ethN" pattern.
 *
 * Fix vs. original: the early "return NULL" on a malformed entry leaked
 * the 'temp' buffer returned by openvzReadVPSConfigParam; route that
 * path through the common cleanup so temp is always freed.
 */
static char *
openvzGenerateContainerVethName(int veid)
{
    char *temp = NULL;
    char *name = NULL;

    /* try to get line "^NETIF=..." from config */
    if (openvzReadVPSConfigParam(veid, "NETIF", &temp) <= 0) {
        ignore_value(VIR_STRDUP(name, "eth0"));
    } else {
        char *saveptr = NULL;
        char *s;
        int max = 0;

        /* get maximum interface number (actually, it is the last one) */
        for (s = strtok_r(temp, ";", &saveptr); s;
             s = strtok_r(NULL, ";", &saveptr)) {
            int x;

            if (sscanf(s, "ifname=eth%d", &x) != 1)
                goto cleanup;  /* malformed entry: fail, but free temp */
            if (x > max)
                max = x;
        }

        /* set new name */
        ignore_value(virAsprintf(&name, "eth%d", max + 1));
    }

 cleanup:
    VIR_FREE(temp);

    return name;
}
/* Apply all of a domain definition's network interfaces to the container.
 *
 * For each net, delegates to openvzDomainSetNetwork(), which either runs
 * "vzctl set --netif_add/--ipadd" directly (new enough vzctl) or appends
 * a legacy NETIF entry into 'buf'.  With pre-bridge vzctl
 * (driver->version < VZCTL_BRIDGE_MIN_VERSION) the accumulated buffer is
 * written back to the container config as the NETIF parameter.
 *
 * Returns 0 on success, -1 on failure (error already reported).
 */
static int
openvzDomainSetNetworkConfig(virConnectPtr conn,
                             virDomainDefPtr def)
{
    size_t i;
    virBuffer buf = VIR_BUFFER_INITIALIZER;
    char *param;
    int first = 1;
    struct openvz_driver *driver = conn->privateData;

    for (i = 0; i < def->nnets; i++) {
        /* Legacy NETIF entries are ';'-separated: emit the separator
         * before every entry except the first. */
        if (driver->version < VZCTL_BRIDGE_MIN_VERSION &&
            def->nets[i]->type == VIR_DOMAIN_NET_TYPE_BRIDGE) {
            if (first)
                first = 0;
            else
                virBufferAddLit(&buf, ";");
        }

        if (openvzDomainSetNetwork(conn, def->name, def->nets[i], &buf) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("Could not configure network"));
            goto exit;
        }
    }

    if (driver->version < VZCTL_BRIDGE_MIN_VERSION && def->nnets) {
        /* virBufferContentAndReset transfers ownership of the string and
         * leaves buf empty, so only 'param' needs freeing below. */
        param = virBufferContentAndReset(&buf);
        if (param) {
            if (openvzWriteVPSConfigParam(strtoI(def->name), "NETIF", param) < 0) {
                VIR_FREE(param);
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("cannot replace NETIF config"));
                return -1;
            }
            VIR_FREE(param);
        }
    }

    return 0;

 exit:
    /* Error path: discard any partially-built NETIF string. */
    virBufferFreeAndReset(&buf);
    return -1;
}
/* Create and immediately start a container from an XML definition.
 *
 * Parses the XML, refuses a name collision with an existing domain,
 * registers the new definition, writes the initial vzctl config (plus
 * optional disk quota, UUID and network config), then boots the
 * container with "vzctl start" and applies the vCPU count.
 *
 * The driver lock is held for the entire operation.
 * Returns the new virDomainPtr on success, NULL on failure.
 */
static virDomainPtr
openvzDomainCreateXML(virConnectPtr conn, const char *xml,
                      unsigned int flags)
{
    struct openvz_driver *driver = conn->privateData;
    virDomainDefPtr vmdef = NULL;
    virDomainObjPtr vm = NULL;
    virDomainPtr dom = NULL;
    const char *progstart[] = {VZCTL, "--quiet", "start", PROGRAM_SENTINEL, NULL};
    unsigned int parse_flags = VIR_DOMAIN_DEF_PARSE_INACTIVE;

    virCheckFlags(VIR_DOMAIN_START_VALIDATE, NULL);

    if (flags & VIR_DOMAIN_START_VALIDATE)
        parse_flags |= VIR_DOMAIN_DEF_PARSE_VALIDATE;

    openvzDriverLock(driver);
    if ((vmdef = virDomainDefParseString(xml, driver->caps, driver->xmlopt,
                                         1 << VIR_DOMAIN_VIRT_OPENVZ,
                                         parse_flags)) == NULL)
        goto cleanup;

    vm = virDomainObjListFindByName(driver->domains, vmdef->name);
    if (vm) {
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("Already an OPENVZ VM defined with the id '%s'"),
                       vmdef->name);
        goto cleanup;
    }
    if (!(vm = virDomainObjListAdd(driver->domains, vmdef,
                                   driver->xmlopt,
                                   VIR_DOMAIN_OBJ_LIST_ADD_CHECK_LIVE,
                                   NULL)))
        goto cleanup;
    /* Ownership of vmdef moved into vm; don't double-free in cleanup. */
    vmdef = NULL;
    /* All OpenVZ domains seem to be persistent - this is a bit of a violation
     * of this libvirt API which is intended for transient domain creation */
    vm->persistent = 1;

    if (openvzSetInitialConfig(vm->def) < 0) {
        VIR_ERROR(_("Error creating initial configuration"));
        goto cleanup;
    }

    /* OpenVZ supports at most one filesystem per container here. */
    if (vm->def->nfss == 1) {
        if (openvzSetDiskQuota(vm->def, vm->def->fss[0], true) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("Could not set disk quota"));
            goto cleanup;
        }
    }

    if (openvzSetDefinedUUID(strtoI(vm->def->name), vm->def->uuid) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Could not set UUID"));
        goto cleanup;
    }

    if (openvzDomainSetNetworkConfig(conn, vm->def) < 0)
        goto cleanup;

    openvzSetProgramSentinal(progstart, vm->def->name);

    if (virRun(progstart, NULL) < 0)
        goto cleanup;

    /* The container "name" is its numeric VEID, which doubles as the
     * live domain id / pid here. */
    vm->pid = strtoI(vm->def->name);
    vm->def->id = vm->pid;
    virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, VIR_DOMAIN_RUNNING_BOOTED);

    if (vm->def->maxvcpus > 0) {
        if (openvzDomainSetVcpusInternal(vm, vm->def->maxvcpus) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("Could not set number of vCPUs"));
            goto cleanup;
        }
    }

    dom = virGetDomain(conn, vm->def->name, vm->def->uuid);
    if (dom)
        dom->id = vm->def->id;

 cleanup:
    virDomainDefFree(vmdef);
    if (vm)
        virObjectUnlock(vm);
    openvzDriverUnlock(driver);
    return dom;
}
/* Undefine (destroy) a container's configuration with "vzctl destroy".
 *
 * If the domain is still active after the destroy command, it is kept in
 * the list but marked non-persistent; otherwise it is removed from the
 * domain list entirely.
 *
 * The driver lock is held for the whole function (unlike most other
 * entry points here) because the list may be modified.
 * Returns 0 on success, -1 on error.  No flags are supported.
 */
static int
openvzDomainUndefineFlags(virDomainPtr dom,
                          unsigned int flags)
{
    struct openvz_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    const char *prog[] = { VZCTL, "--quiet", "destroy", PROGRAM_SENTINEL, NULL };
    int ret = -1;
    int status;

    virCheckFlags(0, -1);

    openvzDriverLock(driver);
    vm = virDomainObjListFindByUUID(driver->domains, dom->uuid);
    if (!vm) {
        virReportError(VIR_ERR_NO_DOMAIN, "%s",
                       _("no domain with matching uuid"));
        goto cleanup;
    }

    /* NOTE(review): 'status' is fetched but never examined afterwards;
     * the call appears to serve only as an existence/liveness probe of
     * the container via vzlist — confirm before removing. */
    if (openvzGetVEStatus(vm, &status, NULL) == -1)
        goto cleanup;

    openvzSetProgramSentinal(prog, vm->def->name);
    if (virRun(prog, NULL) < 0)
        goto cleanup;

    if (virDomainObjIsActive(vm)) {
        vm->persistent = 0;
    } else {
        /* Fully gone: drop it from the list; vm is owned by the list,
         * so NULL it to skip the unlock below. */
        virDomainObjListRemove(driver->domains, vm);
        vm = NULL;
    }

    ret = 0;

 cleanup:
    if (vm)
        virObjectUnlock(vm);
    openvzDriverUnlock(driver);
    return ret;
}
"yes" : "no", "--save", NULL }; int ret = -1; openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, dom->uuid); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, "%s", _("no domain with matching uuid")); goto cleanup; } openvzSetProgramSentinal(prog, vm->def->name); if (virRun(prog, NULL) < 0) goto cleanup; ret = 0; cleanup: if (vm) virObjectUnlock(vm); return ret; } static int openvzDomainGetAutostart(virDomainPtr dom, int *autostart) { struct openvz_driver *driver = dom->conn->privateData; virDomainObjPtr vm; char *value = NULL; int ret = -1; openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, dom->uuid); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, "%s", _("no domain with matching uuid")); goto cleanup; } if (openvzReadVPSConfigParam(strtoI(vm->def->name), "ONBOOT", &value) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Could not read container config")); goto cleanup; } *autostart = 0; if (STREQ(value, "yes")) *autostart = 1; ret = 0; cleanup: VIR_FREE(value); if (vm) virObjectUnlock(vm); return ret; } static int openvzConnectGetMaxVcpus(virConnectPtr conn ATTRIBUTE_UNUSED, const char *type) { if (type == NULL || STRCASEEQ(type, "openvz")) return 1028; /* OpenVZ has no limitation */ virReportError(VIR_ERR_INVALID_ARG, _("unknown type '%s'"), type); return -1; } static int openvzDomainGetVcpusFlags(virDomainPtr dom ATTRIBUTE_UNUSED, unsigned int flags) { if (flags != (VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_VCPU_MAXIMUM)) { virReportError(VIR_ERR_INVALID_ARG, _("unsupported flags (0x%x)"), flags); return -1; } return openvzConnectGetMaxVcpus(NULL, "openvz"); } static int openvzDomainGetMaxVcpus(virDomainPtr dom) { return openvzDomainGetVcpusFlags(dom, (VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_VCPU_MAXIMUM)); } static int openvzDomainSetVcpusInternal(virDomainObjPtr vm, unsigned int nvcpus) { char str_vcpus[32]; const char *prog[] = { VZCTL, "--quiet", "set", 
PROGRAM_SENTINEL, "--cpus", str_vcpus, "--save", NULL }; unsigned int pcpus; pcpus = openvzGetNodeCPUs(); if (pcpus > 0 && pcpus < nvcpus) nvcpus = pcpus; snprintf(str_vcpus, 31, "%d", nvcpus); str_vcpus[31] = '\0'; openvzSetProgramSentinal(prog, vm->def->name); if (virRun(prog, NULL) < 0) return -1; vm->def->maxvcpus = vm->def->vcpus = nvcpus; return 0; } static int openvzDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus, unsigned int flags) { virDomainObjPtr vm; struct openvz_driver *driver = dom->conn->privateData; int ret = -1; if (flags != VIR_DOMAIN_AFFECT_LIVE) { virReportError(VIR_ERR_INVALID_ARG, _("unsupported flags (0x%x)"), flags); return -1; } openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, dom->uuid); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, "%s", _("no domain with matching uuid")); goto cleanup; } if (nvcpus <= 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Number of vCPUs should be >= 1")); goto cleanup; } if (openvzDomainSetVcpusInternal(vm, nvcpus) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Could not set number of vCPUs")); goto cleanup; } ret = 0; cleanup: if (vm) virObjectUnlock(vm); return ret; } static int openvzDomainSetVcpus(virDomainPtr dom, unsigned int nvcpus) { return openvzDomainSetVcpusFlags(dom, nvcpus, VIR_DOMAIN_AFFECT_LIVE); } static virDrvOpenStatus openvzConnectOpen(virConnectPtr conn, virConnectAuthPtr auth ATTRIBUTE_UNUSED, unsigned int flags) { struct openvz_driver *driver; virCheckFlags(VIR_CONNECT_RO, VIR_DRV_OPEN_ERROR); if (conn->uri == NULL) { if (!virFileExists("/proc/vz")) return VIR_DRV_OPEN_DECLINED; if (access("/proc/vz", W_OK) < 0) return VIR_DRV_OPEN_DECLINED; if (!(conn->uri = virURIParse("openvz:///system"))) return VIR_DRV_OPEN_ERROR; } else { /* If scheme isn't 'openvz', then its for another driver */ if (conn->uri->scheme == NULL || STRNEQ(conn->uri->scheme, "openvz")) return VIR_DRV_OPEN_DECLINED; /* If server name is given, 
its for remote driver */ if (conn->uri->server != NULL) return VIR_DRV_OPEN_DECLINED; /* If path isn't /system, then they typoed, so tell them correct path */ if (conn->uri->path == NULL || STRNEQ(conn->uri->path, "/system")) { virReportError(VIR_ERR_INTERNAL_ERROR, _("unexpected OpenVZ URI path '%s', try openvz:///system"), conn->uri->path); return VIR_DRV_OPEN_ERROR; } if (!virFileExists("/proc/vz")) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("OpenVZ control file /proc/vz does not exist")); return VIR_DRV_OPEN_ERROR; } if (access("/proc/vz", W_OK) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("OpenVZ control file /proc/vz is not accessible")); return VIR_DRV_OPEN_ERROR; } } /* We now know the URI is definitely for this driver, so beyond * here, don't return DECLINED, always use ERROR */ if (VIR_ALLOC(driver) < 0) return VIR_DRV_OPEN_ERROR; if (!(driver->domains = virDomainObjListNew())) goto cleanup; if (!(driver->caps = openvzCapsInit())) goto cleanup; if (!(driver->xmlopt = virDomainXMLOptionNew(&openvzDomainDefParserConfig, NULL, NULL))) goto cleanup; if (openvzLoadDomains(driver) < 0) goto cleanup; if (openvzExtractVersion(driver) < 0) goto cleanup; conn->privateData = driver; return VIR_DRV_OPEN_SUCCESS; cleanup: openvzFreeDriver(driver); return VIR_DRV_OPEN_ERROR; }; static int openvzConnectClose(virConnectPtr conn) { struct openvz_driver *driver = conn->privateData; openvzFreeDriver(driver); conn->privateData = NULL; return 0; } static const char *openvzConnectGetType(virConnectPtr conn ATTRIBUTE_UNUSED) { return "OpenVZ"; } static int openvzConnectIsEncrypted(virConnectPtr conn ATTRIBUTE_UNUSED) { /* Encryption is not relevant / applicable to way we talk to openvz */ return 0; } static int openvzConnectIsSecure(virConnectPtr conn ATTRIBUTE_UNUSED) { /* We run CLI tools directly so this is secure */ return 1; } static int openvzConnectIsAlive(virConnectPtr conn ATTRIBUTE_UNUSED) { return 1; } static char 
/* List IDs of running containers by parsing "vzlist -ovpsid -H" output.
 *
 * Fills 'ids' with up to 'nids' numeric VEIDs, one per output line.
 * Unparseable lines are reported and skipped (the loop still advances
 * because openvz_readline consumes the line).
 *
 * Returns the number of IDs collected, or -1 on error.
 */
static int openvzConnectListDomains(virConnectPtr conn ATTRIBUTE_UNUSED,
                                    int *ids, int nids)
{
    int got = 0;
    int veid;
    int outfd = -1;
    int rc = -1;
    int ret;
    char buf[32];
    char *endptr;
    virCommandPtr cmd = virCommandNewArgList(VZLIST, "-ovpsid", "-H", NULL);

    /* Stream vzlist's stdout through outfd while it runs. */
    virCommandSetOutputFD(cmd, &outfd);
    if (virCommandRunAsync(cmd, NULL) < 0)
        goto cleanup;

    while (got < nids) {
        ret = openvz_readline(outfd, buf, 32);
        if (!ret)
            break;
        if (virStrToLong_i(buf, &endptr, 10, &veid) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Could not parse VPS ID %s"), buf);
            continue;
        }
        ids[got] = veid;
        got ++;
    }

    if (virCommandWait(cmd, NULL) < 0)
        goto cleanup;

    /* VIR_CLOSE sets outfd to -1 on success, so the VIR_FORCE_CLOSE in
     * cleanup does not double-close. */
    if (VIR_CLOSE(outfd) < 0) {
        virReportSystemError(errno, "%s", _("failed to close file"));
        goto cleanup;
    }

    rc = got;
 cleanup:
    VIR_FORCE_CLOSE(outfd);
    virCommandFree(cmd);
    return rc;
}
/* Fetch the accumulated CPU time of one container from /proc/vz/vestat.
 *
 * Scans the file for the line whose first column equals 'vpsid' and
 * returns user+nice+system time converted from clock ticks to
 * nanoseconds in *cpuTime.
 *
 * Returns 0 on success; -1 if the file can't be opened, a read error
 * occurs, or the VEID is not present.
 */
static int
openvzGetProcessInfo(unsigned long long *cpuTime, int vpsid)
{
    FILE *fp;
    char *line = NULL;
    size_t line_size = 0;
    unsigned long long usertime, systime, nicetime;
    int readvps = vpsid + 1;  /* ensure readvps is initially different */
    ssize_t ret;
    int err = 0;

/* read statistic from /proc/vz/vestat.
 sample:
Version: 2.2
   VEID     user      nice     system     uptime                 idle   other..
     33       78         0       1330   59454597      142650441835148   other..
     55      178         0       5340   59424597      542650441835148   other..
*/

    if ((fp = fopen("/proc/vz/vestat", "r")) == NULL)
        return -1;

    /*search line with VEID=vpsid*/
    while (1) {
        ret = getline(&line, &line_size, fp);
        if (ret < 0) {
            /* getline < 0 at EOF is normal termination, anything else
             * (ferror) is a real read failure. */
            err = !feof(fp);
            break;
        }

        /* Header/version lines fail the 4-field scan and are skipped. */
        if (sscanf(line, "%d %llu %llu %llu",
                   &readvps, &usertime, &nicetime, &systime) == 4
            && readvps == vpsid) { /*found vpsid*/
            /* convert jiffies to nanoseconds */
            *cpuTime = (1000ull * 1000ull * 1000ull
                        * (usertime + nicetime + systime)
                        / (unsigned long long)sysconf(_SC_CLK_TCK));
            break;
        }
    }

    VIR_FREE(line);
    VIR_FORCE_FCLOSE(fp);
    if (err)
        return -1;

    if (readvps != vpsid) /*not found*/
        return -1;

    return 0;
}
openvzSetProgramSentinal(prog, vm->def->name); if (virRun(prog, NULL) < 0) goto cleanup; return 0; cleanup: return -1; } static int openvzDomainGetBarrierLimit(virDomainPtr domain, const char *param, unsigned long long *barrier, unsigned long long *limit) { int ret = -1; char *endp, *output = NULL; const char *tmp; virCommandPtr cmd = virCommandNewArgList(VZLIST, "--no-header", NULL); virCommandSetOutputBuffer(cmd, &output); virCommandAddArgFormat(cmd, "-o%s.b,%s.l", param, param); virCommandAddArg(cmd, domain->name); if (virCommandRun(cmd, NULL) < 0) goto cleanup; tmp = output; virSkipSpaces(&tmp); if (virStrToLong_ull(tmp, &endp, 10, barrier) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("Can't parse limit from "VZLIST" output '%s'"), output); goto cleanup; } tmp = endp; virSkipSpaces(&tmp); if (virStrToLong_ull(tmp, &endp, 10, limit) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("Can't parse barrier from "VZLIST" output '%s'"), output); goto cleanup; } ret = 0; cleanup: VIR_FREE(output); virCommandFree(cmd); return ret; } static int openvzDomainSetBarrierLimit(virDomainPtr domain, const char *param, unsigned long long barrier, unsigned long long limit) { int ret = -1; virCommandPtr cmd = virCommandNewArgList(VZCTL, "--quiet", "set", NULL); /* LONG_MAX indicates unlimited so reject larger values */ if (barrier > LONG_MAX || limit > LONG_MAX) { virReportError(VIR_ERR_OPERATION_FAILED, _("Failed to set %s for %s: value too large"), param, domain->name); goto cleanup; } virCommandAddArg(cmd, domain->name); virCommandAddArgFormat(cmd, "--%s", param); virCommandAddArgFormat(cmd, "%llu:%llu", barrier, limit); virCommandAddArg(cmd, "--save"); if (virCommandRun(cmd, NULL) < 0) goto cleanup; ret = 0; cleanup: virCommandFree(cmd); return ret; } static int openvzDomainGetMemoryParameters(virDomainPtr domain, virTypedParameterPtr params, int *nparams, unsigned int flags) { size_t i; int result = -1; const char *name; long kb_per_pages; unsigned long long barrier, limit, 
val; virCheckFlags(0, -1); kb_per_pages = openvzKBPerPages(); if (kb_per_pages < 0) goto cleanup; if (*nparams == 0) { *nparams = OPENVZ_NB_MEM_PARAM; return 0; } for (i = 0; i <= *nparams; i++) { virMemoryParameterPtr param = &params[i]; switch (i) { case 0: name = "privvmpages"; if (openvzDomainGetBarrierLimit(domain, name, &barrier, &limit) < 0) goto cleanup; val = (limit == LONG_MAX) ? 0ull : limit * kb_per_pages; if (virTypedParameterAssign(param, VIR_DOMAIN_MEMORY_HARD_LIMIT, VIR_TYPED_PARAM_ULLONG, val) < 0) goto cleanup; break; case 1: name = "privvmpages"; if (openvzDomainGetBarrierLimit(domain, name, &barrier, &limit) < 0) goto cleanup; val = (barrier == LONG_MAX) ? 0ull : barrier * kb_per_pages; if (virTypedParameterAssign(param, VIR_DOMAIN_MEMORY_SOFT_LIMIT, VIR_TYPED_PARAM_ULLONG, val) < 0) goto cleanup; break; case 2: name = "vmguarpages"; if (openvzDomainGetBarrierLimit(domain, name, &barrier, &limit) < 0) goto cleanup; val = (barrier == LONG_MAX) ? 0ull : barrier * kb_per_pages; if (virTypedParameterAssign(param, VIR_DOMAIN_MEMORY_MIN_GUARANTEE, VIR_TYPED_PARAM_ULLONG, val) < 0) goto cleanup; break; } } if (*nparams > OPENVZ_NB_MEM_PARAM) *nparams = OPENVZ_NB_MEM_PARAM; result = 0; cleanup: return result; } static int openvzDomainSetMemoryParameters(virDomainPtr domain, virTypedParameterPtr params, int nparams, unsigned int flags) { size_t i; int result = -1; long kb_per_pages; kb_per_pages = openvzKBPerPages(); if (kb_per_pages < 0) goto cleanup; virCheckFlags(0, -1); if (virTypedParamsValidate(params, nparams, VIR_DOMAIN_MEMORY_HARD_LIMIT, VIR_TYPED_PARAM_ULLONG, VIR_DOMAIN_MEMORY_SOFT_LIMIT, VIR_TYPED_PARAM_ULLONG, VIR_DOMAIN_MEMORY_MIN_GUARANTEE, VIR_TYPED_PARAM_ULLONG, NULL) < 0) return -1; for (i = 0; i < nparams; i++) { virTypedParameterPtr param = &params[i]; unsigned long long barrier, limit; if (STREQ(param->field, VIR_DOMAIN_MEMORY_HARD_LIMIT)) { if (openvzDomainGetBarrierLimit(domain, "privvmpages", &barrier, &limit) < 0) goto cleanup; 
/* Query a container's run state via "vzlist <name> -ostatus -H".
 *
 * Sets *status to VIR_DOMAIN_RUNNING or VIR_DOMAIN_SHUTOFF; a cached
 * PAUSED state is preserved because vzlist cannot distinguish paused
 * from running.  'reason' is passed through to virDomainObjGetState.
 *
 * Returns 0 on success, -1 on command or parse failure.
 */
static int
openvzGetVEStatus(virDomainObjPtr vm, int *status, int *reason)
{
    virCommandPtr cmd;
    char *outbuf;
    char *line;
    int state;
    int ret = -1;

    cmd = virCommandNewArgList(VZLIST, vm->def->name, "-ostatus", "-H", NULL);
    virCommandSetOutputBuffer(cmd, &outbuf);
    if (virCommandRun(cmd, NULL) < 0)
        goto cleanup;

    if ((line = strchr(outbuf, '\n')) == NULL) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Failed to parse vzlist output"));
        goto cleanup;
    }
    /* Truncate outbuf at the first newline so it compares as a single
     * status word below. */
    *line++ = '\0';

    state = virDomainObjGetState(vm, reason);

    if (STREQ(outbuf, "running")) {
        /* There is no way to detect whether a domain is paused or not
         * with vzlist */
        if (state == VIR_DOMAIN_PAUSED)
            *status = state;
        else
            *status = VIR_DOMAIN_RUNNING;
    } else {
        *status = VIR_DOMAIN_SHUTOFF;
    }

    ret = 0;

 cleanup:
    virCommandFree(cmd);
    VIR_FREE(outbuf);
    return ret;
}
if (!virDomainObjIsActive(vm)) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto cleanup; } /* Check the path is one of the domain's network interfaces. */ for (i = 0; i < vm->def->nnets; i++) { if (vm->def->nets[i]->ifname && STREQ(vm->def->nets[i]->ifname, path)) { ret = 0; break; } } if (ret == 0) ret = virNetInterfaceStats(path, stats); else virReportError(VIR_ERR_INVALID_ARG, _("invalid path, '%s' is not a known interface"), path); cleanup: if (vm) virObjectUnlock(vm); return ret; } static int openvzUpdateDevice(virDomainDefPtr vmdef, virDomainDeviceDefPtr dev, bool persist) { virDomainFSDefPtr fs, cur; int pos; if (dev->type == VIR_DOMAIN_DEVICE_FS) { fs = dev->data.fs; pos = virDomainFSIndexByName(vmdef, fs->dst); if (pos < 0) { virReportError(VIR_ERR_INVALID_ARG, _("target %s doesn't exist."), fs->dst); return -1; } cur = vmdef->fss[pos]; /* We only allow updating the quota */ if (!STREQ(cur->src, fs->src) || cur->type != fs->type || cur->accessmode != fs->accessmode || cur->wrpolicy != fs->wrpolicy || cur->readonly != fs->readonly) { virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s", _("Can only modify disk quota")); return -1; } if (openvzSetDiskQuota(vmdef, fs, persist) < 0) return -1; cur->space_hard_limit = fs->space_hard_limit; cur->space_soft_limit = fs->space_soft_limit; } else { virReportError(VIR_ERR_CONFIG_UNSUPPORTED, _("Can't modify device type '%s'"), virDomainDeviceTypeToString(dev->type)); return -1; } return 0; } static int openvzDomainUpdateDeviceFlags(virDomainPtr dom, const char *xml, unsigned int flags) { int ret = -1; int veid; struct openvz_driver *driver = dom->conn->privateData; virDomainDeviceDefPtr dev = NULL; virDomainObjPtr vm = NULL; virDomainDefPtr vmdef = NULL; bool persist = false; virCheckFlags(VIR_DOMAIN_DEVICE_MODIFY_LIVE | VIR_DOMAIN_DEVICE_MODIFY_CONFIG, -1); openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, dom->uuid); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, 
"%s", _("no domain with matching uuid")); goto cleanup; } vmdef = vm->def; if (virStrToLong_i(vmdef->name, NULL, 10, &veid) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Could not convert domain name to VEID")); goto cleanup; } if (virDomainLiveConfigHelperMethod(driver->caps, driver->xmlopt, vm, &flags, &vmdef) < 0) goto cleanup; dev = virDomainDeviceDefParse(xml, vmdef, driver->caps, driver->xmlopt, VIR_DOMAIN_DEF_PARSE_INACTIVE); if (!dev) goto cleanup; if (flags & VIR_DOMAIN_AFFECT_CONFIG) persist = true; if (openvzUpdateDevice(vmdef, dev, persist) < 0) goto cleanup; ret = 0; cleanup: openvzDriverUnlock(driver); virDomainDeviceDefFree(dev); if (vm) virObjectUnlock(vm); return ret; } static int openvzConnectListAllDomains(virConnectPtr conn, virDomainPtr **domains, unsigned int flags) { struct openvz_driver *driver = conn->privateData; int ret = -1; virCheckFlags(VIR_CONNECT_LIST_DOMAINS_FILTERS_ALL, -1); openvzDriverLock(driver); ret = virDomainObjListExport(driver->domains, conn, domains, NULL, flags); openvzDriverUnlock(driver); return ret; } static int openvzNodeGetInfo(virConnectPtr conn ATTRIBUTE_UNUSED, virNodeInfoPtr nodeinfo) { return nodeGetInfo(nodeinfo); } static int openvzNodeGetCPUStats(virConnectPtr conn ATTRIBUTE_UNUSED, int cpuNum, virNodeCPUStatsPtr params, int *nparams, unsigned int flags) { return nodeGetCPUStats(cpuNum, params, nparams, flags); } static int openvzNodeGetMemoryStats(virConnectPtr conn ATTRIBUTE_UNUSED, int cellNum, virNodeMemoryStatsPtr params, int *nparams, unsigned int flags) { return nodeGetMemoryStats(cellNum, params, nparams, flags); } static int openvzNodeGetCellsFreeMemory(virConnectPtr conn ATTRIBUTE_UNUSED, unsigned long long *freeMems, int startCell, int maxCells) { return nodeGetCellsFreeMemory(freeMems, startCell, maxCells); } static unsigned long long openvzNodeGetFreeMemory(virConnectPtr conn ATTRIBUTE_UNUSED) { unsigned long long freeMem; if (nodeGetMemory(NULL, &freeMem) < 0) return 0; return freeMem; 
} static int openvzNodeGetCPUMap(virConnectPtr conn ATTRIBUTE_UNUSED, unsigned char **cpumap, unsigned int *online, unsigned int flags) { return nodeGetCPUMap(cpumap, online, flags); } static int openvzConnectSupportsFeature(virConnectPtr conn ATTRIBUTE_UNUSED, int feature) { switch (feature) { case VIR_DRV_FEATURE_MIGRATION_PARAMS: case VIR_DRV_FEATURE_MIGRATION_V3: return 1; default: return 0; } } static char * openvzDomainMigrateBegin3Params(virDomainPtr domain, virTypedParameterPtr params, int nparams, char **cookieout ATTRIBUTE_UNUSED, int *cookieoutlen ATTRIBUTE_UNUSED, unsigned int flags) { virDomainObjPtr vm = NULL; struct openvz_driver *driver = domain->conn->privateData; char *xml = NULL; int status; virCheckFlags(OPENVZ_MIGRATION_FLAGS, NULL); if (virTypedParamsValidate(params, nparams, OPENVZ_MIGRATION_PARAMETERS) < 0) return NULL; openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, domain->uuid); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, "%s", _("no domain with matching uuid")); goto cleanup; } if (!virDomainObjIsActive(vm)) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto cleanup; } if (openvzGetVEStatus(vm, &status, NULL) == -1) goto cleanup; if (status != VIR_DOMAIN_RUNNING) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("domain is not in running state")); goto cleanup; } xml = virDomainDefFormat(vm->def, VIR_DOMAIN_DEF_FORMAT_SECURE); cleanup: if (vm) virObjectUnlock(vm); return xml; } static int openvzDomainMigratePrepare3Params(virConnectPtr dconn, virTypedParameterPtr params, int nparams, const char *cookiein ATTRIBUTE_UNUSED, int cookieinlen ATTRIBUTE_UNUSED, char **cookieout ATTRIBUTE_UNUSED, int *cookieoutlen ATTRIBUTE_UNUSED, char **uri_out, unsigned int fflags ATTRIBUTE_UNUSED) { struct openvz_driver *driver = dconn->privateData; const char *dom_xml = NULL; const char *uri_in = NULL; virDomainDefPtr def = NULL; virDomainObjPtr vm = NULL; char 
*my_hostname = NULL; const char *hostname = NULL; virURIPtr uri = NULL; int ret = -1; if (virTypedParamsValidate(params, nparams, OPENVZ_MIGRATION_PARAMETERS) < 0) goto error; if (virTypedParamsGetString(params, nparams, VIR_MIGRATE_PARAM_DEST_XML, &dom_xml) < 0 || virTypedParamsGetString(params, nparams, VIR_MIGRATE_PARAM_URI, &uri_in) < 0) goto error; if (!dom_xml) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("no domain XML passed")); goto error; } if (!(def = virDomainDefParseString(dom_xml, driver->caps, driver->xmlopt, 1 << VIR_DOMAIN_VIRT_OPENVZ, VIR_DOMAIN_DEF_PARSE_INACTIVE))) goto error; if (!(vm = virDomainObjListAdd(driver->domains, def, driver->xmlopt, VIR_DOMAIN_OBJ_LIST_ADD_LIVE | VIR_DOMAIN_OBJ_LIST_ADD_CHECK_LIVE, NULL))) goto error; def = NULL; if (!uri_in) { if ((my_hostname = virGetHostname()) == NULL) goto error; if (STRPREFIX(my_hostname, "localhost")) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("hostname on destination resolved to localhost," " but migration requires an FQDN")); goto error; } } else { uri = virURIParse(uri_in); if (uri == NULL) { virReportError(VIR_ERR_INVALID_ARG, _("unable to parse URI: %s"), uri_in); goto error; } if (uri->server == NULL) { virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration URI: %s"), uri_in); goto error; } else { hostname = uri->server; } } if (virAsprintf(uri_out, "ssh://%s", hostname) < 0) goto error; ret = 0; goto done; error: virDomainDefFree(def); if (vm) { virDomainObjListRemove(driver->domains, vm); vm = NULL; } done: VIR_FREE(my_hostname); virURIFree(uri); if (vm) virObjectUnlock(vm); return ret; } static int openvzDomainMigratePerform3Params(virDomainPtr domain, const char *dconnuri ATTRIBUTE_UNUSED, virTypedParameterPtr params, int nparams, const char *cookiein ATTRIBUTE_UNUSED, int cookieinlen ATTRIBUTE_UNUSED, char **cookieout ATTRIBUTE_UNUSED, int *cookieoutlen ATTRIBUTE_UNUSED, unsigned int flags) { struct openvz_driver *driver = domain->conn->privateData; 
virDomainObjPtr vm = NULL; const char *uri_str = NULL; virURIPtr uri = NULL; virCommandPtr cmd = NULL; int ret = -1; virCheckFlags(OPENVZ_MIGRATION_FLAGS, -1); if (virTypedParamsValidate(params, nparams, OPENVZ_MIGRATION_PARAMETERS) < 0) goto cleanup; if (virTypedParamsGetString(params, nparams, VIR_MIGRATE_PARAM_URI, &uri_str) < 0) goto cleanup; openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, domain->uuid); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, "%s", _("no domain with matching uuid")); goto cleanup; } /* parse dst host:port from uri */ uri = virURIParse(uri_str); if (uri == NULL || uri->server == NULL) goto cleanup; cmd = virCommandNew(VZMIGRATE); if (flags & VIR_MIGRATE_LIVE) virCommandAddArg(cmd, "--live"); virCommandAddArg(cmd, uri->server); virCommandAddArg(cmd, vm->def->name); if (virCommandRun(cmd, NULL) < 0) goto cleanup; ret = 0; cleanup: virCommandFree(cmd); virURIFree(uri); if (vm) virObjectUnlock(vm); return ret; } static virDomainPtr openvzDomainMigrateFinish3Params(virConnectPtr dconn, virTypedParameterPtr params, int nparams, const char *cookiein ATTRIBUTE_UNUSED, int cookieinlen ATTRIBUTE_UNUSED, char **cookieout ATTRIBUTE_UNUSED, int *cookieoutlen ATTRIBUTE_UNUSED, unsigned int flags, int cancelled) { struct openvz_driver *driver = dconn->privateData; virDomainObjPtr vm = NULL; const char *dname = NULL; virDomainPtr dom = NULL; int status; if (cancelled) goto cleanup; virCheckFlags(OPENVZ_MIGRATION_FLAGS, NULL); if (virTypedParamsValidate(params, nparams, OPENVZ_MIGRATION_PARAMETERS) < 0) goto cleanup; if (virTypedParamsGetString(params, nparams, VIR_MIGRATE_PARAM_DEST_NAME, &dname) < 0) goto cleanup; if (!dname || !(vm = virDomainObjListFindByName(driver->domains, dname))) { /* Migration obviously failed if the domain doesn't exist */ virReportError(VIR_ERR_OPERATION_FAILED, _("Migration failed. 
No domain on destination host " "with matching name '%s'"), NULLSTR(dname)); goto cleanup; } if (openvzGetVEStatus(vm, &status, NULL) == -1) goto cleanup; if (status != VIR_DOMAIN_RUNNING) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("domain is not running on destination host")); goto cleanup; } vm->def->id = strtoI(vm->def->name); virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, VIR_DOMAIN_RUNNING_MIGRATED); dom = virGetDomain(dconn, vm->def->name, vm->def->uuid); if (dom) dom->id = vm->def->id; cleanup: if (vm) virObjectUnlock(vm); return dom; } static int openvzDomainMigrateConfirm3Params(virDomainPtr domain, virTypedParameterPtr params, int nparams, const char *cookiein ATTRIBUTE_UNUSED, int cookieinlen ATTRIBUTE_UNUSED, unsigned int flags, int cancelled) { struct openvz_driver *driver = domain->conn->privateData; virDomainObjPtr vm = NULL; int status; int ret = -1; virCheckFlags(OPENVZ_MIGRATION_FLAGS, -1); if (virTypedParamsValidate(params, nparams, OPENVZ_MIGRATION_PARAMETERS) < 0) goto cleanup; openvzDriverLock(driver); vm = virDomainObjListFindByUUID(driver->domains, domain->uuid); openvzDriverUnlock(driver); if (!vm) { virReportError(VIR_ERR_NO_DOMAIN, "%s", _("no domain with matching uuid")); goto cleanup; } if (cancelled) { if (openvzGetVEStatus(vm, &status, NULL) == -1) goto cleanup; if (status == VIR_DOMAIN_RUNNING) { ret = 0; } else { VIR_DEBUG("Domain '%s' does not recover after failed migration", vm->def->name); } goto cleanup; } vm->def->id = -1; VIR_DEBUG("Domain '%s' successfully migrated", vm->def->name); virDomainObjListRemove(driver->domains, vm); vm = NULL; ret = 0; cleanup: if (vm) virObjectUnlock(vm); return ret; } static int openvzDomainHasManagedSaveImage(virDomainPtr dom, unsigned int flags) { struct openvz_driver *driver = dom->conn->privateData; virDomainObjPtr obj; int ret = -1; virCheckFlags(0, -1); openvzDriverLock(driver); obj = virDomainObjListFindByUUID(driver->domains, dom->uuid); openvzDriverUnlock(driver); if (!obj) { 
virReportError(VIR_ERR_NO_DOMAIN, NULL); goto cleanup; } ret = 0; cleanup: if (obj) virObjectUnlock(obj); return ret; } static virHypervisorDriver openvzHypervisorDriver = { .name = "OPENVZ", .connectOpen = openvzConnectOpen, /* 0.3.1 */ .connectClose = openvzConnectClose, /* 0.3.1 */ .connectGetType = openvzConnectGetType, /* 0.3.1 */ .connectGetVersion = openvzConnectGetVersion, /* 0.5.0 */ .connectGetHostname = openvzConnectGetHostname, /* 0.9.12 */ .connectGetMaxVcpus = openvzConnectGetMaxVcpus, /* 0.4.6 */ .nodeGetInfo = openvzNodeGetInfo, /* 0.3.2 */ .nodeGetCPUStats = openvzNodeGetCPUStats, /* 0.9.12 */ .nodeGetMemoryStats = openvzNodeGetMemoryStats, /* 0.9.12 */ .nodeGetCellsFreeMemory = openvzNodeGetCellsFreeMemory, /* 0.9.12 */ .nodeGetFreeMemory = openvzNodeGetFreeMemory, /* 0.9.12 */ .nodeGetCPUMap = openvzNodeGetCPUMap, /* 1.0.0 */ .connectGetCapabilities = openvzConnectGetCapabilities, /* 0.4.6 */ .connectListDomains = openvzConnectListDomains, /* 0.3.1 */ .connectNumOfDomains = openvzConnectNumOfDomains, /* 0.3.1 */ .connectListAllDomains = openvzConnectListAllDomains, /* 0.9.13 */ .domainCreateXML = openvzDomainCreateXML, /* 0.3.3 */ .domainLookupByID = openvzDomainLookupByID, /* 0.3.1 */ .domainLookupByUUID = openvzDomainLookupByUUID, /* 0.3.1 */ .domainLookupByName = openvzDomainLookupByName, /* 0.3.1 */ .domainSuspend = openvzDomainSuspend, /* 0.8.3 */ .domainResume = openvzDomainResume, /* 0.8.3 */ .domainShutdown = openvzDomainShutdown, /* 0.3.1 */ .domainShutdownFlags = openvzDomainShutdownFlags, /* 0.9.10 */ .domainReboot = openvzDomainReboot, /* 0.3.1 */ .domainDestroy = openvzDomainDestroy, /* 0.3.1 */ .domainDestroyFlags = openvzDomainDestroyFlags, /* 0.9.4 */ .domainGetOSType = openvzDomainGetOSType, /* 0.3.1 */ .domainGetMemoryParameters = openvzDomainGetMemoryParameters, /* 0.9.12 */ .domainSetMemoryParameters = openvzDomainSetMemoryParameters, /* 0.9.12 */ .domainGetInfo = openvzDomainGetInfo, /* 0.3.1 */ .domainGetState = 
openvzDomainGetState, /* 0.9.2 */ .domainSetVcpus = openvzDomainSetVcpus, /* 0.4.6 */ .domainSetVcpusFlags = openvzDomainSetVcpusFlags, /* 0.8.5 */ .domainGetVcpusFlags = openvzDomainGetVcpusFlags, /* 0.8.5 */ .domainGetMaxVcpus = openvzDomainGetMaxVcpus, /* 0.4.6 */ .domainGetXMLDesc = openvzDomainGetXMLDesc, /* 0.4.6 */ .connectListDefinedDomains = openvzConnectListDefinedDomains, /* 0.3.1 */ .connectNumOfDefinedDomains = openvzConnectNumOfDefinedDomains, /* 0.3.1 */ .domainCreate = openvzDomainCreate, /* 0.3.1 */ .domainCreateWithFlags = openvzDomainCreateWithFlags, /* 0.8.2 */ .domainDefineXML = openvzDomainDefineXML, /* 0.3.3 */ .domainDefineXMLFlags = openvzDomainDefineXMLFlags, /* 1.2.12 */ .domainUndefine = openvzDomainUndefine, /* 0.3.3 */ .domainUndefineFlags = openvzDomainUndefineFlags, /* 0.9.4 */ .domainGetAutostart = openvzDomainGetAutostart, /* 0.4.6 */ .domainSetAutostart = openvzDomainSetAutostart, /* 0.4.6 */ .domainInterfaceStats = openvzDomainInterfaceStats, /* 0.9.12 */ .connectIsEncrypted = openvzConnectIsEncrypted, /* 0.7.3 */ .connectIsSecure = openvzConnectIsSecure, /* 0.7.3 */ .domainIsActive = openvzDomainIsActive, /* 0.7.3 */ .domainIsPersistent = openvzDomainIsPersistent, /* 0.7.3 */ .domainIsUpdated = openvzDomainIsUpdated, /* 0.8.6 */ .connectIsAlive = openvzConnectIsAlive, /* 0.9.8 */ .domainUpdateDeviceFlags = openvzDomainUpdateDeviceFlags, /* 0.9.13 */ .domainGetHostname = openvzDomainGetHostname, /* 0.10.0 */ .connectSupportsFeature = openvzConnectSupportsFeature, /* 1.2.8 */ .domainMigrateBegin3Params = openvzDomainMigrateBegin3Params, /* 1.2.8 */ .domainMigratePrepare3Params = openvzDomainMigratePrepare3Params, /* 1.2.8 */ .domainMigratePerform3Params = openvzDomainMigratePerform3Params, /* 1.2.8 */ .domainMigrateFinish3Params = openvzDomainMigrateFinish3Params, /* 1.2.8 */ .domainMigrateConfirm3Params = openvzDomainMigrateConfirm3Params, /* 1.2.8 */ .domainHasManagedSaveImage = openvzDomainHasManagedSaveImage, /* 1.2.13 */ }; 
/* Connect-driver wrapper: exposes the OpenVZ hypervisor driver table to
 * libvirt's connection-driver registration machinery. */
static virConnectDriver openvzConnectDriver = {
    .hypervisorDriver = &openvzHypervisorDriver,
};

/**
 * openvzRegister:
 *
 * Register the OpenVZ driver with libvirt.  Invoked once during daemon
 * startup.  Returns the result of virRegisterConnectDriver (0 on
 * success, negative on failure).
 *
 * NOTE(review): the second argument (false) is passed straight through
 * to virRegisterConnectDriver; presumably it indicates this driver does
 * not rely on the shared secondary drivers — confirm against the
 * libvirt headers for this release.
 */
int openvzRegister(void)
{
    return virRegisterConnectDriver(&openvzConnectDriver, false);
}
gpl-2.0
Elite-Kernels/HTC-10
drivers/char/diag/diagfwd_peripheral.c
3
29102
/* Copyright (c) 2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/slab.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/ratelimit.h> #include <linux/workqueue.h> #include <linux/diagchar.h> #include <linux/of.h> #include <linux/kmemleak.h> #include <linux/delay.h> #include <linux/atomic.h> #include "diagchar.h" #include "diagchar_hdlc.h" #include "diagfwd_peripheral.h" #include "diagfwd_cntl.h" #include "diag_masks.h" #include "diag_dci.h" #include "diagfwd.h" #include "diagfwd_smd.h" #include "diagfwd_socket.h" #include "diag_mux.h" #include "diag_ipc_logging.h" #include <linux/htc_flags.h> int diag_initialized; struct data_header { uint8_t control_char; uint8_t version; uint16_t length; }; static struct diagfwd_info *early_init_info[NUM_TRANSPORT]; static void diagfwd_queue_read(struct diagfwd_info *fwd_info); static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info); static void diagfwd_cntl_open(struct diagfwd_info *fwd_info); static void diagfwd_cntl_close(struct diagfwd_info *fwd_info); static void diagfwd_dci_open(struct diagfwd_info *fwd_info); static void diagfwd_dci_close(struct diagfwd_info *fwd_info); static void diagfwd_data_read_done(struct diagfwd_info *fwd_info, unsigned char *buf, int len); static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info, unsigned char *buf, int len); static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info, unsigned char *buf, int len); struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS]; static struct 
diag_channel_ops data_ch_ops = { .open = NULL, .close = NULL, .read_done = diagfwd_data_read_done }; static struct diag_channel_ops cntl_ch_ops = { .open = diagfwd_cntl_open, .close = diagfwd_cntl_close, .read_done = diagfwd_cntl_read_done }; static struct diag_channel_ops dci_ch_ops = { .open = diagfwd_dci_open, .close = diagfwd_dci_close, .read_done = diagfwd_dci_read_done }; static void diagfwd_cntl_open(struct diagfwd_info *fwd_info) { if (!fwd_info) return; diag_cntl_channel_open(fwd_info); } static void diagfwd_cntl_close(struct diagfwd_info *fwd_info) { if (!fwd_info) return; diag_cntl_channel_close(fwd_info); } static void diagfwd_dci_open(struct diagfwd_info *fwd_info) { if (!fwd_info) return; diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral), DIAG_STATUS_OPEN, DCI_LOCAL_PROC); } static void diagfwd_dci_close(struct diagfwd_info *fwd_info) { if (!fwd_info) return; diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral), DIAG_STATUS_CLOSED, DCI_LOCAL_PROC); } static int diag_add_hdlc_encoding(unsigned char *dest_buf, int *dest_len, unsigned char *buf, int len) { struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 }; struct diag_hdlc_dest_type enc = { NULL, NULL, 0 }; struct data_header *header; int header_size = sizeof(struct data_header); uint8_t *end_control_char = NULL; uint8_t *payload = NULL; uint8_t *temp_buf = NULL; uint8_t *temp_encode_buf = NULL; int src_pkt_len; int encoded_pkt_length; int max_size; int total_processed = 0; int bytes_remaining; int err = 0; uint8_t loop_count = 0; if (!dest_buf || !dest_len || !buf) return -EIO; temp_buf = buf; temp_encode_buf = dest_buf; bytes_remaining = *dest_len; while (total_processed < len) { loop_count++; header = (struct data_header *)temp_buf; if (header->control_char != CONTROL_CHAR || header->version != 1) { err = -EINVAL; break; } if (header->length >= bytes_remaining) break; payload = temp_buf + header_size; end_control_char = payload + header->length; if 
(*end_control_char != CONTROL_CHAR) { err = -EINVAL; break; } max_size = 2 * header->length + 3; if (bytes_remaining < max_size) { err = -EINVAL; break; } send.state = DIAG_STATE_START; send.pkt = payload; send.last = (void *)(payload + header->length - 1); send.terminate = 1; enc.dest = temp_encode_buf; enc.dest_last = (void *)(temp_encode_buf + max_size); enc.crc = 0; diag_hdlc_encode(&send, &enc); src_pkt_len = (header_size + header->length + 1); total_processed += src_pkt_len; temp_buf += src_pkt_len; encoded_pkt_length = (uint8_t *)enc.dest - temp_encode_buf; bytes_remaining -= encoded_pkt_length; temp_encode_buf = enc.dest; } *dest_len = (int)(temp_encode_buf - dest_buf); return err; } static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len) { uint32_t max_size = 0; unsigned char *temp_buf = NULL; if (!buf || len == 0) return -EINVAL; max_size = (2 * len) + 3; if (max_size > PERIPHERAL_BUF_SZ) { if (max_size > MAX_PERIPHERAL_HDLC_BUF_SZ) { pr_err("diag: In %s, max_size is going beyond limit %d\n", __func__, max_size); max_size = MAX_PERIPHERAL_HDLC_BUF_SZ; } if (buf->len < max_size) { temp_buf = krealloc(buf->data, max_size + APF_DIAG_PADDING, GFP_KERNEL); if (!temp_buf) return -ENOMEM; buf->data = temp_buf; buf->len = max_size; } } return buf->len; } static void diagfwd_data_read_done(struct diagfwd_info *fwd_info, unsigned char *buf, int len) { int err = 0; int write_len = 0; unsigned char *write_buf = NULL; struct diagfwd_buf_t *temp_buf = NULL; int ret = 0; struct diag_md_session_t *session_info = NULL; uint8_t hdlc_disabled = 0; if (!fwd_info || !buf || len <= 0) { diag_ws_release(); return; } switch (fwd_info->type) { case TYPE_DATA: case TYPE_CMD: break; default: pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n", __func__, fwd_info->type, fwd_info->peripheral); diag_ws_release(); return; } mutex_lock(&driver->hdlc_disable_mutex); mutex_lock(&fwd_info->data_mutex); session_info = 
diag_md_session_get_peripheral(fwd_info->peripheral); if (session_info) hdlc_disabled = session_info->hdlc_disabled; else hdlc_disabled = driver->hdlc_disabled; if (!driver->feature[fwd_info->peripheral].encode_hdlc) { if (fwd_info->buf_1 && fwd_info->buf_1->data == buf) { temp_buf = fwd_info->buf_1; write_buf = fwd_info->buf_1->data; } else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf) { temp_buf = fwd_info->buf_2; write_buf = fwd_info->buf_2->data; } else { pr_err("diag: In %s, no match for buffer %p, peripheral %d, type: %d\n", __func__, buf, fwd_info->peripheral, fwd_info->type); goto end; } write_len = len; } else if (hdlc_disabled) { if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) { temp_buf = fwd_info->buf_1; } else if (fwd_info->buf_2 && fwd_info->buf_2->data_raw == buf) { temp_buf = fwd_info->buf_2; } else { pr_err("diag: In %s, no match for non encode buffer %p, peripheral %d, type: %d\n", __func__, buf, fwd_info->peripheral, fwd_info->type); goto end; } if (len > PERIPHERAL_BUF_SZ) { pr_err("diag: In %s, Incoming buffer too large %d, peripheral %d, type: %d\n", __func__, len, fwd_info->peripheral, fwd_info->type); goto end; } write_len = len; write_buf = buf; } else { if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) { temp_buf = fwd_info->buf_1; } else if (fwd_info->buf_2 && fwd_info->buf_2->data_raw == buf) { temp_buf = fwd_info->buf_2; } else { pr_err("diag: In %s, no match for non encode buffer %p, peripheral %d, type: %d\n", __func__, buf, fwd_info->peripheral, fwd_info->type); goto end; } write_len = check_bufsize_for_encoding(temp_buf, len); if (write_len <= 0) { pr_err("diag: error in checking buf for encoding\n"); goto end; } write_buf = temp_buf->data; err = diag_add_hdlc_encoding(write_buf, &write_len, buf, len); if (err) { pr_err("diag: error in adding hdlc encoding\n"); goto end; } } if (fwd_info->peripheral == PERIPHERAL_MODEM) { DIAGFWD_7K_RAWDATA(buf, "modem", DIAG_DBG_READ); #if DIAG_XPST && 
!defined(CONFIG_DIAGFWD_BRIDGE_CODE) ret = checkcmd_modem_epst(buf); if (ret) { modem_to_userspace(buf, len, ret, 0); if (!(get_radio_ex2_flag() & 0x80000000)) goto end; } #endif } if (write_len > 0) { err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len, temp_buf->ctxt); if (err) { pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n", __func__, err); goto end; } } mutex_unlock(&fwd_info->data_mutex); mutex_unlock(&driver->hdlc_disable_mutex); diagfwd_queue_read(fwd_info); return; end: diag_ws_release(); mutex_unlock(&fwd_info->data_mutex); mutex_unlock(&driver->hdlc_disable_mutex); if (temp_buf) { diagfwd_write_done(fwd_info->peripheral, fwd_info->type, GET_BUF_NUM(temp_buf->ctxt)); } diagfwd_queue_read(fwd_info); return; } static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info, unsigned char *buf, int len) { if (!fwd_info) { diag_ws_release(); return; } if (fwd_info->type != TYPE_CNTL) { pr_err("diag: In %s, invalid type %d for peripheral %d\n", __func__, fwd_info->type, fwd_info->peripheral); diag_ws_release(); return; } diag_ws_on_read(DIAG_WS_MUX, len); diag_cntl_process_read_data(fwd_info, buf, len); diag_ws_on_copy_fail(DIAG_WS_MUX); if (fwd_info->buf_1) atomic_set(&fwd_info->buf_1->in_busy, 0); diagfwd_queue_read(fwd_info); diagfwd_queue_read(&peripheral_info[TYPE_DATA][fwd_info->peripheral]); diagfwd_queue_read(&peripheral_info[TYPE_CMD][fwd_info->peripheral]); } static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info, unsigned char *buf, int len) { if (!fwd_info) return; switch (fwd_info->type) { case TYPE_DCI: case TYPE_DCI_CMD: break; default: pr_err("diag: In %s, invalid type %d for peripheral %d\n", __func__, fwd_info->type, fwd_info->peripheral); return; } diag_dci_process_peripheral_data(fwd_info, (void *)buf, len); if (fwd_info->buf_1) atomic_set(&fwd_info->buf_1->in_busy, 0); diagfwd_queue_read(fwd_info); } static void diagfwd_reset_buffers(struct diagfwd_info *fwd_info, unsigned char *buf) { if (!fwd_info 
|| !buf) return; if (!driver->feature[fwd_info->peripheral].encode_hdlc) { if (fwd_info->buf_1 && fwd_info->buf_1->data == buf) atomic_set(&fwd_info->buf_1->in_busy, 0); else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf) atomic_set(&fwd_info->buf_2->in_busy, 0); } else { if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) atomic_set(&fwd_info->buf_1->in_busy, 0); else if (fwd_info->buf_2 && fwd_info->buf_2->data_raw == buf) atomic_set(&fwd_info->buf_2->in_busy, 0); } } int diagfwd_peripheral_init(void) { uint8_t peripheral; uint8_t transport; uint8_t type; struct diagfwd_info *fwd_info = NULL; for (transport = 0; transport < NUM_TRANSPORT; transport++) { early_init_info[transport] = kzalloc( sizeof(struct diagfwd_info) * NUM_PERIPHERALS, GFP_KERNEL); if (!early_init_info[transport]) return -ENOMEM; kmemleak_not_leak(early_init_info[transport]); } for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) { for (transport = 0; transport < NUM_TRANSPORT; transport++) { fwd_info = &early_init_info[transport][peripheral]; fwd_info->peripheral = peripheral; fwd_info->type = TYPE_CNTL; fwd_info->transport = transport; fwd_info->ctxt = NULL; fwd_info->p_ops = NULL; fwd_info->ch_open = 0; fwd_info->inited = 1; fwd_info->read_bytes = 0; fwd_info->write_bytes = 0; spin_lock_init(&fwd_info->buf_lock); mutex_init(&fwd_info->data_mutex); } } for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) { for (type = 0; type < NUM_TYPES; type++) { fwd_info = &peripheral_info[type][peripheral]; fwd_info->peripheral = peripheral; fwd_info->type = type; fwd_info->ctxt = NULL; fwd_info->p_ops = NULL; fwd_info->ch_open = 0; fwd_info->read_bytes = 0; fwd_info->write_bytes = 0; spin_lock_init(&fwd_info->buf_lock); mutex_init(&fwd_info->data_mutex); if (type != TYPE_CNTL) fwd_info->inited = 1; } driver->diagfwd_data[peripheral] = &peripheral_info[TYPE_DATA][peripheral]; driver->diagfwd_cntl[peripheral] = &peripheral_info[TYPE_CNTL][peripheral]; 
driver->diagfwd_dci[peripheral] = &peripheral_info[TYPE_DCI][peripheral]; driver->diagfwd_cmd[peripheral] = &peripheral_info[TYPE_CMD][peripheral]; driver->diagfwd_dci_cmd[peripheral] = &peripheral_info[TYPE_DCI_CMD][peripheral]; } diag_smd_init(); if (driver->supports_sockets) diag_socket_init(); return 0; } void diagfwd_peripheral_exit(void) { uint8_t peripheral; uint8_t type; struct diagfwd_info *fwd_info = NULL; diag_smd_exit(); diag_socket_exit(); for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) { for (type = 0; type < NUM_TYPES; type++) { fwd_info = &peripheral_info[type][peripheral]; fwd_info->ctxt = NULL; fwd_info->p_ops = NULL; fwd_info->ch_open = 0; diagfwd_buffers_exit(fwd_info); } } for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) { driver->diagfwd_data[peripheral] = NULL; driver->diagfwd_cntl[peripheral] = NULL; driver->diagfwd_dci[peripheral] = NULL; driver->diagfwd_cmd[peripheral] = NULL; driver->diagfwd_dci_cmd[peripheral] = NULL; } kfree(early_init_info); } int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt, struct diag_peripheral_ops *ops, struct diagfwd_info **fwd_ctxt) { struct diagfwd_info *fwd_info = NULL; if (!ctxt || !ops) return -EIO; if (transport >= NUM_TRANSPORT || peripheral >= NUM_PERIPHERALS) return -EINVAL; fwd_info = &early_init_info[transport][peripheral]; *fwd_ctxt = &early_init_info[transport][peripheral]; fwd_info->ctxt = ctxt; fwd_info->p_ops = ops; fwd_info->c_ops = &cntl_ch_ops; return 0; } int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type, void *ctxt, struct diag_peripheral_ops *ops, struct diagfwd_info **fwd_ctxt) { struct diagfwd_info *fwd_info = NULL; if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES || !ctxt || !ops || transport >= NUM_TRANSPORT) { pr_err("diag: In %s, returning error\n", __func__); return -EIO; } fwd_info = &peripheral_info[type][peripheral]; *fwd_ctxt = &peripheral_info[type][peripheral]; fwd_info->ctxt = ctxt; 
fwd_info->p_ops = ops; fwd_info->transport = transport; fwd_info->ch_open = 0; switch (type) { case TYPE_DATA: case TYPE_CMD: fwd_info->c_ops = &data_ch_ops; break; case TYPE_DCI: case TYPE_DCI_CMD: fwd_info->c_ops = &dci_ch_ops; break; default: pr_err("diag: In %s, invalid type: %d\n", __func__, type); return -EINVAL; } if (atomic_read(&fwd_info->opened) && fwd_info->p_ops && fwd_info->p_ops->open) { fwd_info->p_ops->open(fwd_info->ctxt); } return 0; } void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt) { struct diagfwd_info *fwd_info = NULL; if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES || !ctxt) return; fwd_info = &peripheral_info[type][peripheral]; if (fwd_info->ctxt != ctxt) { pr_err("diag: In %s, unable to find a match for p: %d t: %d\n", __func__, peripheral, type); return; } fwd_info->ctxt = NULL; fwd_info->p_ops = NULL; fwd_info->ch_open = 0; diagfwd_buffers_exit(fwd_info); switch (type) { case TYPE_DATA: driver->diagfwd_data[peripheral] = NULL; break; case TYPE_CNTL: driver->diagfwd_cntl[peripheral] = NULL; break; case TYPE_DCI: driver->diagfwd_dci[peripheral] = NULL; break; case TYPE_CMD: driver->diagfwd_cmd[peripheral] = NULL; break; case TYPE_DCI_CMD: driver->diagfwd_dci_cmd[peripheral] = NULL; break; } } void diagfwd_close_transport(uint8_t transport, uint8_t peripheral) { struct diagfwd_info *fwd_info = NULL; struct diagfwd_info *dest_info = NULL; int (*init_fn)(uint8_t) = NULL; void (*invalidate_fn)(void *, struct diagfwd_info *) = NULL; int (*check_channel_state)(void *) = NULL; uint8_t transport_open = 0; if (peripheral >= NUM_PERIPHERALS) return; switch (transport) { case TRANSPORT_SMD: transport_open = TRANSPORT_SOCKET; init_fn = diag_socket_init_peripheral; invalidate_fn = diag_socket_invalidate; check_channel_state = diag_socket_check_state; break; case TRANSPORT_SOCKET: transport_open = TRANSPORT_SMD; init_fn = diag_smd_init_peripheral; invalidate_fn = diag_smd_invalidate; check_channel_state = diag_smd_check_state; 
break; default: return; } fwd_info = &early_init_info[transport][peripheral]; if (fwd_info->p_ops && fwd_info->p_ops->close) fwd_info->p_ops->close(fwd_info->ctxt); fwd_info = &early_init_info[transport_open][peripheral]; dest_info = &peripheral_info[TYPE_CNTL][peripheral]; dest_info->inited = 1; dest_info->ctxt = fwd_info->ctxt; dest_info->p_ops = fwd_info->p_ops; dest_info->c_ops = fwd_info->c_ops; dest_info->ch_open = fwd_info->ch_open; dest_info->read_bytes = fwd_info->read_bytes; dest_info->write_bytes = fwd_info->write_bytes; dest_info->inited = fwd_info->inited; dest_info->buf_1 = fwd_info->buf_1; dest_info->buf_2 = fwd_info->buf_2; dest_info->transport = fwd_info->transport; invalidate_fn(dest_info->ctxt, dest_info); if (!check_channel_state(dest_info->ctxt)) diagfwd_late_open(dest_info); diagfwd_cntl_open(dest_info); init_fn(peripheral); diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]); diagfwd_queue_read(&peripheral_info[TYPE_CMD][peripheral]); } int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len) { struct diagfwd_info *fwd_info = NULL; int err = 0; uint8_t retry_count = 0; uint8_t max_retries = 3; if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES) return -EINVAL; if (type == TYPE_CMD || type == TYPE_DCI_CMD) { if (!driver->feature[peripheral].rcvd_feature_mask || !driver->feature[peripheral].sent_feature_mask) { DIAGFWD_DBUG("diag: In %s, feature mask for peripheral: %d not received or sent yet\n", __func__, peripheral); return 0; } if (!driver->feature[peripheral].separate_cmd_rsp) type = (type == TYPE_CMD) ? 
TYPE_DATA : TYPE_DCI; } fwd_info = &peripheral_info[type][peripheral]; if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) return -ENODEV; if (!(fwd_info->p_ops && fwd_info->p_ops->write && fwd_info->ctxt)) return -EIO; while (retry_count < max_retries) { err = 0; err = fwd_info->p_ops->write(fwd_info->ctxt, buf, len); if (err && err != -ENODEV) { usleep_range(100000, 101000); retry_count++; continue; } break; } if (!err) fwd_info->write_bytes += len; return err; } static void __diag_fwd_open(struct diagfwd_info *fwd_info) { if (!fwd_info) return; atomic_set(&fwd_info->opened, 1); if (!fwd_info->inited) return; if (fwd_info->buf_1) atomic_set(&fwd_info->buf_1->in_busy, 0); if (fwd_info->buf_2) atomic_set(&fwd_info->buf_2->in_busy, 0); if (fwd_info->p_ops && fwd_info->p_ops->open) fwd_info->p_ops->open(fwd_info->ctxt); diagfwd_queue_read(fwd_info); } void diagfwd_early_open(uint8_t peripheral) { uint8_t transport = 0; struct diagfwd_info *fwd_info = NULL; if (peripheral >= NUM_PERIPHERALS) return; for (transport = 0; transport < NUM_TRANSPORT; transport++) { fwd_info = &early_init_info[transport][peripheral]; __diag_fwd_open(fwd_info); } } void diagfwd_open(uint8_t peripheral, uint8_t type) { struct diagfwd_info *fwd_info = NULL; if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES) return; fwd_info = &peripheral_info[type][peripheral]; __diag_fwd_open(fwd_info); } void diagfwd_late_open(struct diagfwd_info *fwd_info) { __diag_fwd_open(fwd_info); } void diagfwd_close(uint8_t peripheral, uint8_t type) { struct diagfwd_info *fwd_info = NULL; if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES) return; fwd_info = &peripheral_info[type][peripheral]; atomic_set(&fwd_info->opened, 0); if (!fwd_info->inited) return; if (fwd_info->p_ops && fwd_info->p_ops->close) fwd_info->p_ops->close(fwd_info->ctxt); if (fwd_info->buf_1) atomic_set(&fwd_info->buf_1->in_busy, 1); if (fwd_info->buf_2) atomic_set(&fwd_info->buf_2->in_busy, 1); } int diagfwd_channel_open(struct 
diagfwd_info *fwd_info) { if (!fwd_info) return -EIO; if (!fwd_info->inited) { DIAGFWD_DBUG("diag: In %s, channel is not inited, p: %d, t: %d\n", __func__, fwd_info->peripheral, fwd_info->type); return -EINVAL; } if (fwd_info->ch_open) { DIAGFWD_DBUG("diag: In %s, channel is already open, p: %d, t: %d\n", __func__, fwd_info->peripheral, fwd_info->type); return 0; } fwd_info->ch_open = 1; diagfwd_buffers_init(fwd_info); if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->open) fwd_info->c_ops->open(fwd_info); diagfwd_queue_read(fwd_info); DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered opened\n", fwd_info->peripheral, fwd_info->type); if (atomic_read(&fwd_info->opened)) { if (fwd_info->p_ops && fwd_info->p_ops->open) fwd_info->p_ops->open(fwd_info->ctxt); } if (fwd_info->peripheral == PERIPHERAL_MODEM) diag_initialized = 1; return 0; } int diagfwd_channel_close(struct diagfwd_info *fwd_info) { if (!fwd_info) return -EIO; fwd_info->ch_open = 0; if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close) fwd_info->c_ops->close(fwd_info); if (fwd_info->buf_1 && fwd_info->buf_1->data) atomic_set(&fwd_info->buf_1->in_busy, 0); if (fwd_info->buf_2 && fwd_info->buf_2->data) atomic_set(&fwd_info->buf_2->in_busy, 0); DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered closed\n", fwd_info->peripheral, fwd_info->type); if (fwd_info->peripheral == PERIPHERAL_MODEM) diag_initialized = 0; return 0; } int diagfwd_channel_read_done(struct diagfwd_info *fwd_info, unsigned char *buf, uint32_t len) { if (!fwd_info) { diag_ws_release(); return -EIO; } if (len == 0) { diagfwd_reset_buffers(fwd_info, buf); diag_ws_release(); return 0; } if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->read_done) fwd_info->c_ops->read_done(fwd_info, buf, len); fwd_info->read_bytes += len; return 0; } void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt) { struct diagfwd_info *fwd_info = NULL; if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES) return; fwd_info = 
&peripheral_info[type][peripheral]; if (ctxt == 1 && fwd_info->buf_1) atomic_set(&fwd_info->buf_1->in_busy, 0); else if (ctxt == 2 && fwd_info->buf_2) atomic_set(&fwd_info->buf_2->in_busy, 0); else pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt); diagfwd_queue_read(fwd_info); } void diagfwd_channel_read(struct diagfwd_info *fwd_info) { int err = 0; uint32_t read_len = 0; unsigned char *read_buf = NULL; struct diagfwd_buf_t *temp_buf = NULL; if (!fwd_info) { diag_ws_release(); return; } if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) { DIAGFWD_DBUG("diag: In %s, p: %d, t: %d, inited: %d, opened: %d ch_open: %d\n", __func__, fwd_info->peripheral, fwd_info->type, fwd_info->inited, atomic_read(&fwd_info->opened), fwd_info->ch_open); diag_ws_release(); return; } if (fwd_info->buf_1 && !atomic_read(&fwd_info->buf_1->in_busy)) { temp_buf = fwd_info->buf_1; atomic_set(&temp_buf->in_busy, 1); if (driver->feature[fwd_info->peripheral].encode_hdlc && (fwd_info->type == TYPE_DATA || fwd_info->type == TYPE_CMD)) { read_buf = fwd_info->buf_1->data_raw; read_len = fwd_info->buf_1->len_raw; } else { read_buf = fwd_info->buf_1->data; read_len = fwd_info->buf_1->len; } } else if (fwd_info->buf_2 && !atomic_read(&fwd_info->buf_2->in_busy)) { temp_buf = fwd_info->buf_2; atomic_set(&temp_buf->in_busy, 1); if (driver->feature[fwd_info->peripheral].encode_hdlc && (fwd_info->type == TYPE_DATA || fwd_info->type == TYPE_CMD)) { read_buf = fwd_info->buf_2->data_raw; read_len = fwd_info->buf_2->len_raw; } else { read_buf = fwd_info->buf_2->data; read_len = fwd_info->buf_2->len; } } else { DIAGFWD_DBUG("diag: In %s, both buffers are empty for p: %d, t: %d\n", __func__, fwd_info->peripheral, fwd_info->type); } if (!read_buf) { diag_ws_release(); return; } if (!(fwd_info->p_ops && fwd_info->p_ops->read && fwd_info->ctxt)) goto fail_return; DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "issued a read p: %d t: %d buf: %p\n", fwd_info->peripheral, fwd_info->type, read_buf); err = 
fwd_info->p_ops->read(fwd_info->ctxt, read_buf, read_len); if (err) goto fail_return; return; fail_return: diag_ws_release(); atomic_set(&temp_buf->in_busy, 0); return; } static void diagfwd_queue_read(struct diagfwd_info *fwd_info) { if (!fwd_info) return; if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) { DIAGFWD_DBUG("diag: In %s, p: %d, t: %d, inited: %d, opened: %d ch_open: %d\n", __func__, fwd_info->peripheral, fwd_info->type, fwd_info->inited, atomic_read(&fwd_info->opened), fwd_info->ch_open); return; } if ((!driver->feature[fwd_info->peripheral].rcvd_feature_mask) && (fwd_info->type != TYPE_CNTL)) { return; } if (fwd_info->p_ops && fwd_info->p_ops->queue_read && fwd_info->ctxt) fwd_info->p_ops->queue_read(fwd_info->ctxt); } void diagfwd_buffers_init(struct diagfwd_info *fwd_info) { unsigned long flags; if (!fwd_info) return; if (!fwd_info->inited) { pr_err("diag: In %s, channel not inited, p: %d, t: %d\n", __func__, fwd_info->peripheral, fwd_info->type); return; } spin_lock_irqsave(&fwd_info->buf_lock, flags); if (!fwd_info->buf_1) { fwd_info->buf_1 = kzalloc(sizeof(struct diagfwd_buf_t), GFP_ATOMIC); if (!fwd_info->buf_1) goto err; kmemleak_not_leak(fwd_info->buf_1); } if (!fwd_info->buf_1->data) { fwd_info->buf_1->data = kzalloc(PERIPHERAL_BUF_SZ + APF_DIAG_PADDING, GFP_ATOMIC); if (!fwd_info->buf_1->data) goto err; fwd_info->buf_1->len = PERIPHERAL_BUF_SZ; kmemleak_not_leak(fwd_info->buf_1->data); fwd_info->buf_1->ctxt = SET_BUF_CTXT(fwd_info->peripheral, fwd_info->type, 1); } if (fwd_info->type == TYPE_DATA) { if (!fwd_info->buf_2) { fwd_info->buf_2 = kzalloc(sizeof(struct diagfwd_buf_t), GFP_ATOMIC); if (!fwd_info->buf_2) goto err; kmemleak_not_leak(fwd_info->buf_2); } if (!fwd_info->buf_2->data) { fwd_info->buf_2->data = kzalloc(PERIPHERAL_BUF_SZ + APF_DIAG_PADDING, GFP_ATOMIC); if (!fwd_info->buf_2->data) goto err; fwd_info->buf_2->len = PERIPHERAL_BUF_SZ; kmemleak_not_leak(fwd_info->buf_2->data); fwd_info->buf_2->ctxt = SET_BUF_CTXT( 
fwd_info->peripheral, fwd_info->type, 2); } if (driver->supports_apps_hdlc_encoding) { if (!fwd_info->buf_1->data_raw) { fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ + APF_DIAG_PADDING, GFP_ATOMIC); if (!fwd_info->buf_1->data_raw) goto err; fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ; kmemleak_not_leak(fwd_info->buf_1->data_raw); } if (!fwd_info->buf_2->data_raw) { fwd_info->buf_2->data_raw = kzalloc(PERIPHERAL_BUF_SZ + APF_DIAG_PADDING, GFP_ATOMIC); if (!fwd_info->buf_2->data_raw) goto err; fwd_info->buf_2->len_raw = PERIPHERAL_BUF_SZ; kmemleak_not_leak(fwd_info->buf_2->data_raw); } } } if (fwd_info->type == TYPE_CMD && driver->supports_apps_hdlc_encoding) { if (!fwd_info->buf_1->data_raw) { fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ + APF_DIAG_PADDING, GFP_ATOMIC); if (!fwd_info->buf_1->data_raw) goto err; fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ; kmemleak_not_leak(fwd_info->buf_1->data_raw); } } spin_unlock_irqrestore(&fwd_info->buf_lock, flags); return; err: spin_unlock_irqrestore(&fwd_info->buf_lock, flags); diagfwd_buffers_exit(fwd_info); return; } static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info) { unsigned long flags; if (!fwd_info) return; spin_lock_irqsave(&fwd_info->buf_lock, flags); if (fwd_info->buf_1) { kfree(fwd_info->buf_1->data); fwd_info->buf_1->data = NULL; kfree(fwd_info->buf_1->data_raw); fwd_info->buf_1->data_raw = NULL; kfree(fwd_info->buf_1); fwd_info->buf_1 = NULL; } if (fwd_info->buf_2) { kfree(fwd_info->buf_2->data); fwd_info->buf_2->data = NULL; kfree(fwd_info->buf_2->data_raw); fwd_info->buf_2->data_raw = NULL; kfree(fwd_info->buf_2); fwd_info->buf_2 = NULL; } spin_unlock_irqrestore(&fwd_info->buf_lock, flags); }
gpl-2.0
teamfx/openjfx-8u-dev-rt
modules/graphics/src/main/native-glass/lens/wm/LensWindowManager.c
3
56215
/* * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
*/ #include "input/LensInput.h" #include "wm/LensWindowManager.h" #include "com_sun_glass_events_ViewEvent.h" #include "com_sun_glass_events_WindowEvent.h" #include "com_sun_glass_events_MouseEvent.h" #include "com_sun_glass_events_TouchEvent.h" #include "lensRFB/lensRFB.h" #include <stdlib.h> #include <stdio.h> #include <pthread.h> static pthread_mutex_t renderMutex = PTHREAD_MUTEX_INITIALIZER; static int _mousePosX; static int _mousePosY; static jboolean _onDraggingAction = JNI_FALSE; static NativeWindow _dragGrabbingWindow = NULL; static int _mousePressedButton = com_sun_glass_events_MouseEvent_BUTTON_NONE; static NativeWindow touchWindow = NULL; static jboolean isDnDStarted = JNI_FALSE; static inline void render_lock() { pthread_mutex_lock(&renderMutex); } static inline void render_unlock() { pthread_mutex_unlock(&renderMutex); } static NativeScreen mainScreen; static void lens_wm_rfbNotifyClearScreen(); static void lens_wm_clearScreen(); static void lens_wm_initRFB(JNIEnv *env); static void lens_wm_rfbNotifyWindowUpdate(NativeWindow window, int width, int height); static void lens_wm_windowCacheBounds(NativeWindow window); static void lens_wm_windowUncacheBounds(NativeWindow window); NativeWindow lens_wm_unsetFocusedWindow(JNIEnv *env, NativeWindow window); //service functions to handle window state static void lens_wm_windowMinimize(JNIEnv *env, NativeWindow window); static void lens_wm_windowRestore(JNIEnv *env, NativeWindow window); static void lens_wm_windowMaximize(JNIEnv *env, NativeWindow window); static void lens_wm_windowEnterFullscreen(JNIEnv *env, NativeWindow window); static void lens_wm_notifyEnterExitEvents(JNIEnv *env, NativeWindow *windowFound, int *relX, int *relY); jboolean lens_wm_initialize(JNIEnv *env) { jboolean result; GLASS_LOG_FINE("Init device"); result = glass_application_initialize(env); if (result) { GLASS_LOG_FINE("Init screen"); mainScreen = lens_screen_initialize(env); if (mainScreen) { GLASS_LOG_FINE("Clearing screen"); 
lens_wm_clearScreen(); GLASS_LOG_FINE("Cursor init"); fbCursorInitialize(mainScreen->width, mainScreen->height, mainScreen->depth); lens_wm_initRFB(env); GLASS_LOG_FINE("Init input devices"); result = lens_input_initialize(env); if (!result) { GLASS_LOG_SEVERE("lens_input_initialize failed"); } } else { GLASS_LOG_SEVERE("lens_screen_initialize() failed"); result = JNI_FALSE; } } else { GLASS_LOG_SEVERE("glass_application_initialize() failed"); } return result; } NativeScreen glass_screen_getMainScreen() { return mainScreen; } void lens_wm_getPointerPosition(int *pX, int *pY) { *pX = _mousePosX; *pY = _mousePosY; } void lens_wm_setPointerPosition(int x, int y) { _mousePosX = x; _mousePosY = y; fbCursorSetPosition(_mousePosX, _mousePosY); } LensResult lens_wm_notifyPlatformWindowRelease(JNIEnv *env, NativeWindow window) { GLASS_LOG_FINE("WM Window Relase window [%i]%p", window->id, window); if (window == lens_wm_getMouseWindow()) { // allow the next mouse motion to generate the ENTER lens_wm_setMouseWindow(NULL); } if (window == lens_wm_getGrabbedWindow()) { lens_wm_setGrabbedWindow(NULL); // don't bother with an event } if (window == glass_window_getFocusedWindow()) { glass_window_setFocusedWindow(NULL); } glass_window_list_lock(); NativeWindow head = glass_window_list_getHead(); glass_window_list_unlock(); if (head && head->view) { lens_wm_repaint(env, head); } return LENS_OK; } void lens_wm_repaint_all(JNIEnv *env) { render_lock(); glass_window_list_lock(); NativeWindow w = glass_window_list_getHead(); while (w) { if (w && w->view) { glass_application_notifyViewEvent(env, w->view, com_sun_glass_events_ViewEvent_REPAINT, w->currentBounds.x, w->currentBounds.y, w->currentBounds.width, w->currentBounds.height); } w = w->nextWindow; } glass_window_list_unlock(); render_unlock(); } void lens_wm_repaint(JNIEnv *env, NativeWindow window) { render_lock(); // remember clear could actually write pixels... 
lens_wm_clearScreen(); if (window && window->view) { glass_application_notifyViewEvent(env, window->view, com_sun_glass_events_ViewEvent_REPAINT, window->currentBounds.x, window->currentBounds.y, window->currentBounds.width, window->currentBounds.height); } render_unlock(); } //////////////////////////// WINDOW STATE MACHINE ////////////////////// static void lens_wm_windowMinimize(JNIEnv *env, NativeWindow window) { //"undo" previous state, if needed switch (window->state) { case NWS_MINIMIZED: GLASS_LOG_FINE("Nothing to do, skipping"); return; case NWS_NORMAL: case NWS_MAXIMIZED: //NOOP break; case NWS_FULLSCREEN: lens_wm_windowRestore(env, window); break; default: GLASS_LOG_SEVERE("Window is in unsupported NativeWindowState (%i)", window->state); } //cache window bounds for restoration lens_wm_windowCacheBounds(window); //if supported let platform do the minimization lens_platform_windowMinimize(env, window, JNI_TRUE); //update state window->state = NWS_MINIMIZED; //if window hold the focus, release it lens_wm_unsetFocusedWindow(env, window); //Stop rendering this window, because its minimized glass_application_RemoveWindowFromVisibleWindowList(env,window); //notify glass_application_notifyWindowEvent_resize(env, window, com_sun_glass_events_WindowEvent_MINIMIZE, window->cachedBounds.width, window->cachedBounds.height); } static void lens_wm_windowRestore(JNIEnv *env, NativeWindow window){ //"undo" previous state, if needed switch (window->state) { case NWS_MINIMIZED: GLASS_LOG_FINE("Window is minimized -notifying platform minimize(false)"); //notify platform lens_platform_windowMinimize(env, window, JNI_FALSE); if (window->isVisible) { //the window is restored and visible, add it to the window list //to resume rendering glass_application_addWindowToVisibleWindowList(env,window); } break; case NWS_NORMAL: GLASS_LOG_FINE("Nothing to do, skipping"); return; case NWS_MAXIMIZED: //NOOP break; case NWS_FULLSCREEN: GLASS_LOG_FINE("Window in full screen notify 
FULLSCREEN_EXIT" " (x=%i, y=%i, w=%i, h=%i)", window->cachedBounds.x, window->cachedBounds.y, window->cachedBounds.width, window->cachedBounds.height); //notify view it has existed full screen glass_application_notifyViewEvent(env, window->view, com_sun_glass_events_ViewEvent_FULLSCREEN_EXIT, window->cachedBounds.x, window->cachedBounds.y, window->cachedBounds.width, window->cachedBounds.height); break; default: GLASS_LOG_SEVERE("Window is in unsupported NativeWindowState (%i)", window->state); } //update state window->state = NWS_NORMAL; //resize and relocate window to previous bounds glass_window_setBoundsImpl(env, window, window->cachedBounds.x, window->cachedBounds.y, window->cachedBounds.width, window->cachedBounds.height, JNI_TRUE, JNI_TRUE, JNI_FALSE); //restore bounds lens_wm_windowUncacheBounds(window); GLASS_LOG_FINE("notify window it has been restored"); glass_application_notifyWindowEvent_resize(env, window, com_sun_glass_events_WindowEvent_RESTORE, window->currentBounds.width, window->currentBounds.height); GLASS_LOG_FINE("make sure window has the focus"); lens_wm_setFocusedWindow(env, window); } static void lens_wm_windowMaximize(JNIEnv *env, NativeWindow window){ //"undo" previous state, if needed switch (window->state) { case NWS_MINIMIZED: lens_wm_windowRestore(env, window); break; case NWS_NORMAL: //NOOP break; case NWS_MAXIMIZED: GLASS_LOG_FINE("Nothing to do, skipping"); return; case NWS_FULLSCREEN: lens_wm_windowRestore(env, window); break; default: GLASS_LOG_SEVERE("Window is in unsupported NativeWindowState (%i)", window->state); } /** * Window's max size can be limited, so try to extend the window * to the buttom right corner of the screen from the current x,y * coordinates. If the window will be extended beyond the screen * boundaries, push the window towards the top left corner of the * screen. If no limits applied to the window it will capture the * entire screen. 
*/ //cache current window bounds for restoration lens_wm_windowCacheBounds(window); //get screen size NativeScreen screen = glass_screen_getMainScreen(); int width = screen->width; int height = screen->height; int x = window->currentBounds.x; int y = window->currentBounds.y; //check if window can occupy the entire screen if (glass_window_check_bounds(window, &width, &height)) { //window can be fully maximized, so we need to move it to //the top left corner x = 0; y = 0; } else { //window is restricted, check if new bounds are bigger //from current bounds if (width > window->currentBounds.width || height > window->currentBounds.height){ //calculate new x,y x = screen->width - width -1; y = screen->height - height -1; } } GLASS_LOG_FINE("Maximized window bounds x=%i, y=%i, width =%i, height=%i", x, y, width, height); //notify for bounds update glass_window_setBoundsImpl(env, window, x, y,width, height, JNI_TRUE, //update location JNI_TRUE, // update size JNI_FALSE /* update content */); //update state window->state = NWS_MAXIMIZED; //notify glass_application_notifyWindowEvent_resize(env, window, com_sun_glass_events_WindowEvent_MAXIMIZE, width, height); //make sure window has the focus lens_wm_setFocusedWindow(env, window); } static void lens_wm_windowEnterFullscreen(JNIEnv *env, NativeWindow window){ //"undo" previous state, if needed switch (window->state) { case NWS_MINIMIZED: GLASS_LOG_FINE("Window is minimized - restoring"); lens_wm_windowRestore(env, window); break; case NWS_NORMAL: case NWS_MAXIMIZED: //NOOP break; case NWS_FULLSCREEN: GLASS_LOG_FINE("Nothing to do, skipping"); return; default: GLASS_LOG_SEVERE("Window is in unsupported NativeWindowState (%i)", window->state); } //get screen NativeScreen screen = glass_screen_getMainScreen(); //cache current window bounds for restoration lens_wm_windowCacheBounds(window); //set full screen dimensions glass_window_setBoundsImpl(env,window, 0, 0, screen->width, screen->height, JNI_TRUE, // update position 
JNI_TRUE, // update size JNI_FALSE); // update content GLASS_LOG_FINE("Notifying FULLSCREEN_ENTER on view[%p] window %i[%p]" " x=%i, y=%i, w=%i, h=%i", window->view, window->id, window, window->currentBounds.x, window->currentBounds.y, window->currentBounds.width, window->currentBounds.height); //notify view glass_application_notifyViewEvent(env, window->view, com_sun_glass_events_ViewEvent_FULLSCREEN_ENTER, window->currentBounds.x, window->currentBounds.y, window->currentBounds.width, window->currentBounds.height); //make sure window has the focus lens_wm_setFocusedWindow(env, window); window->state = NWS_FULLSCREEN; } void glass_window_setBoundsImpl(JNIEnv *env, NativeWindow window, jint x, jint y, jint width, jint height, jboolean needToUpdatePostion, jboolean needToUpdateSize, jboolean isContentSize) { jboolean windowHasBeenUpdated = JNI_FALSE; GLASS_LOG_FINE("setBoundsImpl on window %i[%p] x=%i y=%i w=%i h=%i" " needToUpdatePostion=%s needToUpdateSize=%s isContentSize=%s" " state=%s", window->id, window, x, y, width, height, (needToUpdatePostion)?"true":"false", (needToUpdateSize)?"true":"false", (isContentSize)?"true":"false", lens_window_getNativeStateName(window->state)); if (isContentSize && !needToUpdateSize) { GLASS_LOG_FINE("Treating content size change as window size change"); needToUpdateSize = isContentSize; } GLASS_LOG_FINER("currentW(%i) != newW(%i) || currentH(%i)!=newH(%i)", window->currentBounds.width, width, window->currentBounds.height, height ); //handle resize if needed if (needToUpdateSize && (window->currentBounds.width != width || window->currentBounds.height != height)) { GLASS_LOG_FINE("Updatating window %i[%p] size from %iX%i to %iX%i", window->id, window, window->currentBounds.width, window->currentBounds.height, width, height); window->currentBounds.width = width; window->currentBounds.height = height; glass_application_notifyWindowEvent_resize(env,window, com_sun_glass_events_WindowEvent_RESIZE, width, height); windowHasBeenUpdated 
= JNI_TRUE; } GLASS_LOG_FINER("curentX(%i) != newX(%i) || currentY(%i)!=newY(%i)", window->currentBounds.x, x, window->currentBounds.y, y); //handle move if needed if (needToUpdatePostion && (window->currentBounds.x != x || window->currentBounds.y != y)) { GLASS_LOG_FINE("Updating window %i[%p] location from %iX%i to %iX%i", window->id, window, window->currentBounds.x, window->currentBounds.y, x, y); window->currentBounds.x = x; window->currentBounds.y = y; glass_application_notifyWindowEvent_move(env, window, x, y); windowHasBeenUpdated = JNI_TRUE; lens_wm_repaint(env, window); } if (!windowHasBeenUpdated) { GLASS_LOG_FINE("Nothing to do"); } } jboolean glass_window_setVisible(JNIEnv *env, NativeWindow window, jboolean visible) { GLASS_LOG_FINE("Setting window %i[%p](owner %i[%p]) from %s, to %s", window->id, window, window->owner? window->owner->id : -1, window->owner, (window->isVisible)?"visible":"invisible", (visible)?"visible":"invisible"); lens_platform_windowSetVisible(env, window, visible); window->isVisible = visible; if (!visible) { //lose focus and grab lens_wm_unsetFocusedWindow(env, window); } else { if (window->isFocusable && window->isEnabled) { //window become visible, grant it the focus lens_wm_setFocusedWindow(env, window); } } //no need to send an event to confirm window is visible return JNI_TRUE; } jboolean glass_view_drawBegin(NativeView view) { GLASS_LOG_FINER("glass_view_drawBegin"); render_lock(); return JNI_TRUE; } void glass_view_drawEnd(NativeView view) { GLASS_LOG_FINER("glass_view_drawEnd"); render_unlock(); } jboolean glass_window_requestFocus(JNIEnv *env, NativeWindow window, jint focusType) { NativeWindow focusWindow = glass_window_getFocusedWindow(); GLASS_LOG_FINE("requestFocus on window %d[%p], event %d", window?window->id:-1, window, focusType); if (!window) { GLASS_LOG_WARNING("requestFocus on a null window"); return JNI_FALSE; } if (window == focusWindow) { // no change, no notification ? 
GLASS_LOG_FINE("Focus requested on current focus window - ignore"); return JNI_TRUE; } if (!window->isFocusable) { GLASS_LOG_WARNING("Focus requested on isFocusable=false - ignore"); return JNI_FALSE; } if (!window->isEnabled) { GLASS_LOG_WARNING("Focus requested on isEnabled=false - ignore"); return JNI_FALSE; } if (!window->isVisible) { GLASS_LOG_WARNING("Focus requested on isVisible=false - ignore"); return JNI_FALSE; } //this function will release the grab if someone holds it lens_wm_setFocusedWindow(env, window); return JNI_TRUE; } jboolean glass_window_setFocusable(JNIEnv *env, NativeWindow window, jboolean isFocusable) { NativeWindow focusWindow; if (window->isFocusable == isFocusable) { // no change, so we can punt return JNI_TRUE; } focusWindow = glass_window_getFocusedWindow(); if (!isFocusable && focusWindow == window) { lens_wm_setFocusedWindow(env, NULL); GLASS_LOG_WARNING("isFocusable(false) on focus owner, cascade ?"); } window->isFocusable = isFocusable; return JNI_TRUE; } jboolean glass_window_setBackground(NativeWindow window, jfloat red, jfloat green, jfloat blue) { GLASS_LOG_WARNING("unimplemented glass_window_setBackground\n"); return JNI_TRUE; } void glass_window_toFront(JNIEnv *env, NativeWindow window) { if (glass_window_list_toFront(window)) { lens_wm_repaint(env, window); } } void glass_window_toBack(JNIEnv *env, NativeWindow window) { if (glass_window_list_toBack(window)) { lens_wm_repaint(env, window); } } jboolean glass_window_grabFocus(JNIEnv *env, NativeWindow window) { if (window == lens_wm_getGrabbedWindow()) { //this is OK per spec GLASS_LOG_FINE("RE-GRAB on %d[%p] root %d[%p]", window?window->id:-1, window, window->root?window->root->id:-1, window->root); return JNI_TRUE; } if (NULL == lens_wm_getGrabbedWindow() && window == glass_window_getFocusedWindow()) { // we allow the grab, note: focus is also checked in Java. 
GLASS_LOG_FINE("GRAB on %d[%p] (root %d[%p])", window?window->id:-1, window, window->root?window->root->id:-1, window->root); lens_wm_setGrabbedWindow(window); return JNI_TRUE; } // should not be able to happen GLASS_LOG_SEVERE("ERROR NO-GRAB on %d[%p]\n", window?window->id:-1, window); return JNI_FALSE; } /** * This functions will check if the given window is grabbed and * ungrab it if necessary. Note: may also be called from mouse * handling */ void glass_window_ungrabFocus(JNIEnv *env, NativeWindow window) { NativeWindow grabbedWindow = lens_wm_getGrabbedWindow(); GLASS_LOG_FINE("ungrab request on window %i[%p], current grabbed window %i[%p]", window?window->id:-1, window, grabbedWindow?grabbedWindow->id:-1, grabbedWindow ); if (window == NULL) { GLASS_LOG_FINE("window=NULL - Nothing to do"); return; } if (window != grabbedWindow) { GLASS_LOG_FINE("Window %d[%p] doesn't hold the grab, ignore", window?window->id:-1, window); return; } GLASS_LOG_FINE("Ungrabbing window %i[%p]", window?window->id : -1, window); lens_wm_setGrabbedWindow(NULL); //notify the UNGRAB glass_application_notifyWindowEvent(env, window, com_sun_glass_events_WindowEvent_FOCUS_UNGRAB); } void glass_view_setParent(JNIEnv *env, NativeWindow parent, NativeView view) { NativeWindow oldParent = view->parent; if (oldParent && oldParent->view) { GLASS_LOG_FINE("Notifying old view removed"); glass_application_notifyViewEvent(env, oldParent->view, com_sun_glass_events_ViewEvent_REMOVE, 0, 0, 0, 0); view->parent = NULL; } GLASS_LOG_FINE("Setting new owner, window %d [%p], for view %p", parent ? 
(signed int)parent->id : - 1, parent, view); view->parent = parent; //may be null if (parent && parent->view) { GLASS_LOG_FINE("Notifying view it has been added %p", parent->view); glass_application_notifyViewEvent(env, parent->view, com_sun_glass_events_ViewEvent_ADD, 0, 0, 0, 0); } } void lens_wm_shutdown(JNIEnv *env) { lens_platform_shutdown(env); } jboolean glass_window_setLevel(NativeWindow window, int level) { GLASS_LOG_WARNING("unimplemented glass_window_setLevel\n"); return JNI_TRUE; } jboolean glass_window_setMinimumSize(JNIEnv *env, NativeWindow window, jint width, jint height) { window->minWidth = width; window->minHeight = height; width = window->currentBounds.width; height = window->currentBounds.height; glass_window_check_bounds(window, &width, &height); if (width != window->currentBounds.width || height != window->currentBounds.height) { glass_window_setBoundsImpl(env, window, 0, 0, width, height, JNI_FALSE, // position JNI_TRUE, // size JNI_FALSE); // contentSize } return JNI_TRUE; } jboolean glass_window_setMaximumSize(JNIEnv *env, NativeWindow window, jint width, jint height) { window->maxWidth = width; window->maxHeight = height; width = window->currentBounds.width; height = window->currentBounds.height; glass_window_check_bounds(window, &width, &height); if (width != window->currentBounds.width || height != window->currentBounds.height) { glass_window_setBoundsImpl(env, window, 0, 0, width, height, JNI_FALSE, // position JNI_TRUE, // size JNI_FALSE);// contentSize } return JNI_TRUE; } jboolean glass_view_enterFullscreen(JNIEnv *env, NativeView view, jboolean animate, jboolean keepRatio, jboolean hideCursor) { NativeWindow window = view->parent; if (window == NULL) { GLASS_LOG_WARNING("Full screen request on a view(%p) with no parent window, abort", view); return JNI_FALSE; } GLASS_LOG_FINE("Enter full screen request on view %p, window %i[%p]", view, window->id, window); /** * animate, keepRatio ration and hideCursor are currently stubbed * to 
false in WindowStage.java, which is the only caller for * this API. * Ignoring them for now */ lens_wm_windowEnterFullscreen(env, window); return JNI_TRUE; } jboolean glass_view_exitFullscreen(JNIEnv *env, NativeView view, jboolean animate) { NativeWindow window = view->parent; if (window == NULL) { GLASS_LOG_WARNING("Exit full screen request on a view(%p) with no parent" " window, abort", view); return JNI_FALSE; } GLASS_LOG_FINE("Exit full screen request on view %p, window %i[%p]", view, window->id, window); /** * WindowStage.applyFullScreen() always sets the animate * parameter to false when calling enterFullScreen on its View, * in WindowStage.java, which is the only caller for this API. * Ignoring it for now. */ lens_wm_windowRestore(env, window); return JNI_TRUE; } jboolean glass_window_minimize(JNIEnv *env, NativeWindow window, jboolean toMinimize) { GLASS_LOG_FINE("Minimize window %i[%p] toMinimize=%s", window->id, window, (toMinimize)?"true":"false"); if (toMinimize) { lens_wm_windowMinimize(env, window); } else { lens_wm_windowRestore(env, window); } return JNI_TRUE; } jboolean glass_window_maximize(JNIEnv *env, NativeWindow window, jboolean toMaximize, jboolean isMaximized) { GLASS_LOG_FINE("Maximize window %i[%p] toMaximize=%s isMaximized=%s", window->id, window, (toMaximize)?"true":"false", (isMaximized)?"true":"false"); jboolean result = JNI_TRUE; if (toMaximize && !isMaximized) { lens_wm_windowMaximize(env, window); } else if (!toMaximize && isMaximized) { lens_wm_windowRestore(env, window); } else { GLASS_LOG_WARNING("Maximize request with bad arguments"); result = JNI_FALSE; } return result; } NativeWindow glass_window_findWindowAtLocation(int absX, int absY, int *pRelX, int *pRelY) { glass_window_list_lock(); NativeWindow w = glass_window_list_getTail(); while (w) { GLASS_LOG_FINEST("Window %d[%p] isVisible=%s, state=%s", w->id, w, w->isVisible?"true" : "false", lens_window_getNativeStateName(w->state)); if (w->isVisible && w->state != 
NWS_MINIMIZED) { if (absX >= w->currentBounds.x && absX < w->currentBounds.x + w->currentBounds.width && absY >= w->currentBounds.y && absY < w->currentBounds.y + w->currentBounds.height && w->isEnabled) { *pRelX = absX - w->currentBounds.x; *pRelY = absY - w->currentBounds.y; GLASS_LOG_FINER( "Absolute coordinates %i,%i are on window %i[%p] " "as relative coordinates %i,%i", absX, absY, w->id, w, *pRelX, *pRelY); glass_window_list_unlock(); return w; } } else { GLASS_LOG_FINER("Skipping invisible window %i[%p]", w->id, w); } w = w->previousWindow; } glass_window_list_unlock(); GLASS_LOG_FINER("Absolute coordinates %i,%i are not on a window", absX, absY); return NULL; } NativeWindow grabbedWindow = NULL; NativeWindow lens_wm_getGrabbedWindow() { return grabbedWindow; } void lens_wm_setGrabbedWindow(NativeWindow window) { grabbedWindow = window; } static void handleClickOrTouchEvent(JNIEnv *env, int xabs, int yabs) { int relX, relY; NativeWindow window = glass_window_findWindowAtLocation(xabs, yabs, &relX, &relY); // if we have a grabbed window, check to see if this breaks the grab if (grabbedWindow != NULL) { if ((window == NULL) || (window->root != grabbedWindow->root)) { glass_window_ungrabFocus(env, grabbedWindow); } } if (window != NULL) { NativeWindow focusedWindow = glass_window_getFocusedWindow(); // Will this cause a focus change ? if (focusedWindow && window->root != focusedWindow->root) { lens_wm_setFocusedWindow(env, window); } } } void lens_wm_notifyScrollEvent(JNIEnv *env, int xabs, int yabs, int step) { int relX, relY; NativeWindow window = glass_window_findWindowAtLocation(xabs, yabs, &relX, &relY); if (window != NULL) { glass_application_notifyScrollEvent(env, window, relX, relY, xabs, yabs, 0.0, step); } } // check for window grab then forward event to application. // check for focus changes and handle them. 
/**
 * Handle a mouse button press/release at absolute position (xabs, yabs).
 *
 * Tracks the "pressed button" / native-drag state machine in the globals
 * _mousePressedButton, _onDraggingAction and _dragGrabbingWindow, sends
 * the DOWN/UP event to the proper window and finally runs the common
 * grab/focus bookkeeping via handleClickOrTouchEvent().
 */
void lens_wm_notifyButtonEvent(JNIEnv *env,
                               jboolean pressed,
                               int button,
                               int xabs, int yabs) {

    int relX, relY;
    NativeWindow window;

    //cache new coordinates
    _mousePosX = xabs;
    _mousePosY = yabs;

    //in case this function was called due to a mouse event, lens_wm_notifyEnterExitEvents()
    // will have no effect as the ENTER/EXIT were already notified from prior mouse motion events.
    //in case this function was called due to a touch event, lens_wm_notifyEnterExitEvents()
    //will guarantee we have the proper window's view state
    lens_wm_notifyEnterExitEvents(env, &window, &relX, &relY);

    //save current window
    lens_wm_setMouseWindow(window);

    GLASS_LOG_FINEST("button event on window %d[%p], pressed %s, button %d, abs (%d,%d) rel (%d,%d)",
                     window?window->id:-1, window,
                     pressed?"true":"false",
                     button, xabs, yabs, relX, relY);

    // NOTE(review): this branch does not check 'pressed', so a stray
    // release with no tracked button would also be recorded as a "first
    // press" - confirm against the event sources before changing.
    if (_mousePressedButton == com_sun_glass_events_MouseEvent_BUTTON_NONE) {
        if (_onDraggingAction) {
            GLASS_LOG_SEVERE("bad native mouse drag state - Press event while on drag, resetting");
            _onDraggingAction = JNI_FALSE;
            _dragGrabbingWindow = NULL;
        }
        GLASS_LOG_FINEST("first press (button %d)", button);
        _mousePressedButton = button;
    } else if (!pressed && button == _mousePressedButton) {
        //the button that started the drag was released - end the drag
        GLASS_LOG_FINEST("pressed button %d released - stopping native mouse drag", button);
        _mousePressedButton = com_sun_glass_events_MouseEvent_BUTTON_NONE;
        _onDraggingAction = JNI_FALSE;
        _dragGrabbingWindow = NULL;
    }

    if (_onDraggingAction) {
        //while dragging, events are delivered to the window the drag
        //started on, with coordinates relative to it
        if (_dragGrabbingWindow != NULL) {
            relX = xabs - _dragGrabbingWindow->currentBounds.x;
            relY = yabs - _dragGrabbingWindow->currentBounds.y;
            // NOTE(review): the event type is always UP here, even when
            // 'pressed' is true (e.g. a second button pressed mid-drag);
            // presumably it should be pressed ? DOWN : UP - verify
            // against upstream before changing.
            glass_application_notifyMouseEvent(env,
                                               _dragGrabbingWindow,
                                               com_sun_glass_events_MouseEvent_UP,
                                               relX, relY, xabs, yabs,
                                               button);
        }

        if (button == _mousePressedButton) {
            _onDraggingAction = JNI_FALSE;
            _dragGrabbingWindow = NULL;
        }

    } else {
        if (window != NULL) {
            GLASS_LOG_FINEST("glass_wm_notifyButtonEvent sending to %p pressed=%d, button=%d %d,%d, %d, %d ",
                             window, pressed, button,
                             relX, relY, xabs, yabs);
            // pass on the event to Java.
            glass_application_notifyMouseEvent(env,
                                               window,
                                               pressed ? com_sun_glass_events_MouseEvent_DOWN : com_sun_glass_events_MouseEvent_UP,
                                               relX, relY, xabs, yabs,
                                               button);
        }
    }

    handleClickOrTouchEvent(env, xabs, yabs);
}

// check for window grab then forward event to application.
// check for focus changes and handle them.
/**
 * Handle a multi-touch event with 'count' touch points.
 *
 * On the first touch of a sequence the target window (global
 * 'touchWindow') is resolved from the primary point's location; a
 * sequence that does not start with all points PRESSED (e.g. a finger
 * dragged in from outside the window) is ignored. The primary point is
 * also synthesized into mouse events further below.
 */
void lens_wm_notifyMultiTouchEvent(JNIEnv *env,
                                   jint count,
                                   jint *states,
                                   jlong *ids,
                                   int *xabs, int *yabs,
                                   int primaryPointIndex) {

    int i;
    int dx, dy, relX, relY, absX, absY;
    jboolean allReleased;

    //set the touch window on first touch event
    if (touchWindow == NULL && primaryPointIndex >= 0 && !_onDraggingAction) {
        //find the touch window for first event
        touchWindow = glass_window_findWindowAtLocation(xabs[primaryPointIndex],
                                                        yabs[primaryPointIndex],
                                                        &relX, &relY);
        if (touchWindow) {
            GLASS_IF_LOG_FINEST("[touch event -> window] touch event on window %d[%p]",
                                touchWindow->id, touchWindow);
            //we have a touch point over a window, we need to check that its the
            //starting point of the touch event sequence (all points pressed and
            // we are not in the a middle of a drag), if not ignore the event.
            //
            //example scenario - touch outside a window and drag the finger
            //into the window - same as press with a mouse outside a window, hold the
            //button and drag mouse into the window
            for (i = 0; i < count; i++) {
                if (states[i] != com_sun_glass_events_TouchEvent_TOUCH_PRESSED) {
                    GLASS_IF_LOG_FINEST("[touch event -> window] in middle of touch sequence (states[%d]=%d) - ignore",
                                        i, states[i]);
                    touchWindow = NULL;
                    break;
                }
            }
        }
    }

    GLASS_LOG_FINEST("touch window %d, indexPoint = %d",
                     touchWindow? touchWindow->id : -1,
                     primaryPointIndex);

    if (touchWindow == NULL && primaryPointIndex == -1) {
        GLASS_LOG_FINER("Touch event outside a window");
    }

    //synthesize mouse events. handling of grab, exit/enter events etc.
// is done by the mouse handlers
    if (primaryPointIndex == -1) {
        //all points released, release button
        GLASS_LOG_FINEST("touch -> mouse - release");
        //use last location
        absX = _mousePosX;
        absY = _mousePosY;
        lens_wm_notifyButtonEvent(env,
                                  JNI_FALSE, //pressed
                                  com_sun_glass_events_MouseEvent_BUTTON_LEFT,
                                  absX, absY);
    } else {
        //use new location
        absX = xabs[primaryPointIndex];
        absY = yabs[primaryPointIndex];

        switch (states[primaryPointIndex]) {
            case com_sun_glass_events_TouchEvent_TOUCH_PRESSED:
                if (absX != _mousePosX || absY != _mousePosY) {
                    //RT-34624 - need to report move before press (if not already reported)
                    lens_wm_notifyMotionEvent(env, absX, absY);
                }
                //send button pressed
                GLASS_LOG_FINEST("touch -> mouse - pressed");
                lens_wm_notifyButtonEvent(env,
                                          JNI_TRUE, //pressed
                                          com_sun_glass_events_MouseEvent_BUTTON_LEFT,
                                          absX, absY);
                //explicitly update the cursor as lens_wm_notifyButtonEvent doesn't
                fbCursorSetPosition(absX, absY);
                break;
            case com_sun_glass_events_TouchEvent_TOUCH_MOVED:
                GLASS_LOG_FINEST("touch -> mouse - move");
                lens_wm_notifyMotionEvent(env, absX, absY);
                break;
            case com_sun_glass_events_TouchEvent_TOUCH_STILL:
                //nothing to do
                _mousePosX = absX;
                _mousePosY = absY;
                GLASS_LOG_FINEST("touch -> mouse - still, ignoring");
                break;
            case com_sun_glass_events_TouchEvent_TOUCH_RELEASED:
                //if more then one fingers is used, then a new primary point will
                //be assigned and we will not get TOUCH_RELEASED , if a single
                //point is used then then all points will be released
                //primaryPointIndex will be -1 and we shouldn't got here
                GLASS_LOG_WARNING("touch -> mouse - release, illegal state");
                break;
        }
    }

    if (touchWindow != NULL) {
        //Check that touchWindow is still valid before using it.
        glass_window_list_lock();
        if (glass_window_isExist(touchWindow)) {
            //forward the full touch-point set with window-relative offsets
            dx = -touchWindow->currentBounds.x;
            dy = -touchWindow->currentBounds.y;
            glass_application_notifyMultiTouchEvent(env,
                                                    touchWindow,
                                                    count,
                                                    states,
                                                    ids,
                                                    xabs, yabs,
                                                    dx, dy);
            if (primaryPointIndex == -1) {
                //all released
                touchWindow = NULL;
            }
        }
        glass_window_list_unlock();
    }
}

/**
 * Handle a pointer motion to absolute position (mousePosX, mousePosY).
 *
 * Updates the cached pointer position and the framebuffer cursor, starts
 * a native drag when a button is held (and no DnD session is active),
 * lets lens_wm_notifyEnterExitEvents() emit ENTER/EXIT transitions, and
 * finally delivers the MOVE event either to the drag-grabbing window or
 * to the window under the pointer.
 */
void lens_wm_notifyMotionEvent(JNIEnv *env, int mousePosX, int mousePosY) {

    int relX, relY;
    int reportMove = 0;
    GLASS_LOG_FINEST("Motion event: x=%03d, y=%03d", mousePosX, mousePosY);
    //cache new coordinates
    _mousePosX = mousePosX;
    _mousePosY = mousePosY;

    NativeWindow window = NULL;

    fbCursorSetPosition(mousePosX, mousePosY);

    //a held button plus motion starts a native drag (unless DnD owns it)
    if (_mousePressedButton != com_sun_glass_events_MouseEvent_BUTTON_NONE &&
            !_onDraggingAction &&
            !isDnDStarted) {
        _onDraggingAction = JNI_TRUE;
        _dragGrabbingWindow = lens_wm_getMouseWindow();
        GLASS_LOG_FINE("Starting native mouse drag on windown %d[%p]",
                       _dragGrabbingWindow?_dragGrabbingWindow->id : -1,
                       _dragGrabbingWindow);
    }

    lens_wm_notifyEnterExitEvents(env, &window, &relX, &relY);

    GLASS_LOG_FINER("Motion event on window %i[%p] absX=%i absY=%i, relX=%i, relY=%i",
                    (window)?window->id:-1, window,
                    _mousePosX, _mousePosY, relX, relY);

    //save current window
    lens_wm_setMouseWindow(window);

    //Send the move event
    if (_onDraggingAction && _dragGrabbingWindow != NULL) {

        relX = _mousePosX - _dragGrabbingWindow->currentBounds.x;
        relY = _mousePosY - _dragGrabbingWindow->currentBounds.y;

        GLASS_LOG_FINEST("MouseEvent_MOVE on window %i[%p]",
                         (_dragGrabbingWindow)?_dragGrabbingWindow->id:-1,
                         _dragGrabbingWindow);
        glass_application_notifyMouseEvent(env,
                                           _dragGrabbingWindow,
                                           com_sun_glass_events_MouseEvent_MOVE,
                                           relX, relY,
                                           _mousePosX, _mousePosY,
                                           com_sun_glass_events_MouseEvent_BUTTON_NONE);

    } else if (!_onDraggingAction && window != NULL) {

        GLASS_LOG_FINEST("MouseEvent_MOVE on window %i[%p]",
                         (window)?window->id:-1,
                         window);
        glass_application_notifyMouseEvent(env,
                                           window,
                                           com_sun_glass_events_MouseEvent_MOVE,
                                           relX, relY,
_mousePosX, _mousePosY,
                                           com_sun_glass_events_MouseEvent_BUTTON_NONE);
    }
}

/**
 * This function will search for a window using current mouse
 * location (_mousePosX, _mousePosY) and report the current and
 * last window's view for MouseEvent.ENTER and MouseEvent.EXIT
 * events as needed.
 *
 * The window's params will be saved in the supplied pointers
 * according to the information returned from
 * glass_window_findWindowAtLocation()
 *
 * @param windowFound the NativeWindow as was found by
 *                    glass_window_findWindowAtLocation().
 *                    pointer must not be NULL
 * @param relX the relative coordinate X on the found window as found by
 *             glass_window_findWindowAtLocation().
 *             pointer must not be NULL
 * @param relY the relative coordinate Y on the found window as
 *             found by glass_window_findWindowAtLocation().
 *             pointer must not be NULL
 */
static void lens_wm_notifyEnterExitEvents(JNIEnv *env,
                                          NativeWindow *windowFound,
                                          int *relX, int *relY) {

    *windowFound = glass_window_findWindowAtLocation(_mousePosX, _mousePosY,
                                                     relX, relY);

    NativeWindow window = *windowFound;
    NativeWindow lastMouseWindow = lens_wm_getMouseWindow();

    GLASS_LOG_FINER("_dragGrabbingWindow = %i[%p], windowFound = %i[%p] lastMouseWindow = %i[%p]",
                    (_dragGrabbingWindow)?_dragGrabbingWindow->id:-1,
                    _dragGrabbingWindow,
                    (*windowFound)?(*windowFound)->id:-1,
                    *windowFound,
                    (lastMouseWindow)?lastMouseWindow->id:-1,
                    lastMouseWindow);

    //Send EXIT/ENTER events
    //While dragging, ENTER/EXIT are reported only for the drag-grabbing
    //window (leaving/re-entering it), never for other windows.
    if (_onDraggingAction && _dragGrabbingWindow != NULL) {
        if (window != _dragGrabbingWindow &&
                _dragGrabbingWindow == lastMouseWindow) {
            *relX = _mousePosX - _dragGrabbingWindow->currentBounds.x;
            *relY = _mousePosY - _dragGrabbingWindow->currentBounds.y;
            GLASS_LOG_FINER("MouseEvent_EXIT on dragGrabbingWindow %i[%p]",
                            (_dragGrabbingWindow)?_dragGrabbingWindow->id:-1,
                            _dragGrabbingWindow);
            glass_application_notifyMouseEvent(env,
                                               _dragGrabbingWindow,
                                               com_sun_glass_events_MouseEvent_EXIT,
                                               *relX, *relY,
                                               _mousePosX, _mousePosY,
                                               com_sun_glass_events_MouseEvent_BUTTON_NONE);
        }

        if (window == _dragGrabbingWindow &&
                window != lastMouseWindow) {
            GLASS_LOG_FINER("MouseEvent_ENTER on dragGrabbingWindow %i[%p]",
                            (_dragGrabbingWindow)?_dragGrabbingWindow->id:-1,
                            _dragGrabbingWindow);
            glass_application_notifyMouseEvent(env,
                                               _dragGrabbingWindow,
                                               com_sun_glass_events_MouseEvent_ENTER,
                                               *relX, *relY,
                                               _mousePosX, _mousePosY,
                                               com_sun_glass_events_MouseEvent_BUTTON_NONE);
        }
    }

    //Not dragging: plain EXIT from the previous window, ENTER into the new
    if (!_onDraggingAction) {
        if (window != lastMouseWindow) {
            if (lastMouseWindow) {
                // Exited from lastMouseWindow
                int _relX = _mousePosX - lastMouseWindow->currentBounds.x;
                int _relY = _mousePosY - lastMouseWindow->currentBounds.y;

                GLASS_LOG_FINER("MouseEvent_EXIT on lastMouseWindow %i[%p]",
                                (lastMouseWindow)?lastMouseWindow->id:-1,
                                lastMouseWindow);
                glass_application_notifyMouseEvent(env,
                                                   lastMouseWindow,
                                                   com_sun_glass_events_MouseEvent_EXIT,
                                                   _relX, _relY,
                                                   _mousePosX, _mousePosY,
                                                   com_sun_glass_events_MouseEvent_BUTTON_NONE);
            }
            if (window) {
                // Enter into window
                GLASS_LOG_FINER("MouseEvent_ENTER on window %i[%p]",
                                (window)?window->id:-1,
                                window);
                glass_application_notifyMouseEvent(env,
                                                   window,
                                                   com_sun_glass_events_MouseEvent_ENTER,
                                                   *relX, *relY,
                                                   _mousePosX, _mousePosY,
                                                   com_sun_glass_events_MouseEvent_BUTTON_NONE);
            }
        }
    }
}

/*
 * set focus to the specified window,
 * providing FOCUS_LOST as needed to previous
 */
void lens_wm_setFocusedWindow(JNIEnv *env, NativeWindow window) {

    NativeWindow _focusedWindow = glass_window_getFocusedWindow();

    if (window != _focusedWindow) {
        GLASS_LOG_FINE("Window %i[%p] is focused. Window %i[%p] requesting focus",
                       (_focusedWindow)?_focusedWindow->id:-1,
                       _focusedWindow,
                       (window)?window->id:-1,
                       window);

        if (_focusedWindow) {
            //Release the grab if the focused window holds it
            glass_window_ungrabFocus(env, _focusedWindow); /* function will print the result*/

            GLASS_LOG_FINE("Notifying window %i[%p] focus lost ",
                           _focusedWindow->id, _focusedWindow);
            glass_application_notifyWindowEvent(env,
                                                _focusedWindow,
                                                com_sun_glass_events_WindowEvent_FOCUS_LOST);
        }

        glass_window_setFocusedWindow(window);

        if (window != NULL) {
            GLASS_LOG_FINE("Notifying window %i[%p] focus gained ",
                           window->id, window);
            glass_application_notifyWindowEvent(env,
                                                window,
                                                com_sun_glass_events_WindowEvent_FOCUS_GAINED);
        }
    } else {
        GLASS_LOG_FINE("Window %i[%p] is already focused - ignore",
                       (window)?window->id:-1, window);
    }
}

/**
 * Check if this window hold the focus or the grab. Loose them
 * if required and give focus to the next focusable and visible
 * window
 *
 * @param env
 * @param window the window to unset
 * @return the new focused window (may be NULL)
 */
NativeWindow lens_wm_unsetFocusedWindow(JNIEnv *env, NativeWindow window){

    GLASS_LOG_FINE("unsetting focus for window %i[%p]", window->id, window);

    NativeWindow _focusedWindow = glass_window_getFocusedWindow();

    if (window == _focusedWindow) {
        //if this window hold the grab, release it
        GLASS_LOG_FINE("Check if this window holds the grab");
        glass_window_ungrabFocus(env, window); /* function will print the result*/

        GLASS_LOG_FINE("Releasing the focus");
        lens_wm_setFocusedWindow(env, NULL);
        _focusedWindow = NULL;

        //search for the next focusable window, from top of Z-order down;
        //owned windows (w->owner set) are skipped
        glass_window_list_lock();
        NativeWindow w = glass_window_list_getTail();
        while (w) {
            if (w->isVisible && w->state != NWS_MINIMIZED) {
                if (!w->owner && w->isFocusable) {
                    GLASS_LOG_FINE("Granting window %i[%p] the focus",
                                   w->id, w);
                    _focusedWindow = w;
                    break;
                }
            }
            w = w->previousWindow;
        }
        glass_window_list_unlock();

        if (_focusedWindow != NULL) {
            lens_wm_setFocusedWindow(env, _focusedWindow);
}
    } else {
        GLASS_LOG_FINE("Window %i[%p] doesn't have the focus",
                       window?window->id : -1, window);
    }

    return _focusedWindow;
}

/*
 * MouseWindow
 * The window that currently has the mouse in it.
 * Note, this may be NULL.
 */
static NativeWindow mouseWindow = NULL;

NativeWindow lens_wm_getMouseWindow() {
    return mouseWindow;
}

LensResult lens_wm_setMouseWindow(NativeWindow window) {
    mouseWindow = window;
    return LENS_OK;
}

// Clear the framebuffer and tell the RFB mirror (if enabled) about it.
static void lens_wm_clearScreen() {
    glass_screen_clear();
    lens_wm_rfbNotifyClearScreen();
}

// Forward a window repaint notification to the RFB mirror (if enabled).
void lens_wm_notifyWindowUpdate(NativeWindow window, int width, int height) {
    lens_wm_rfbNotifyWindowUpdate(window, width, height);
}

// Save the window's current bounds (used around maximize/fullscreen).
static void lens_wm_windowCacheBounds(NativeWindow window) {
    window->cachedBounds.x = window->currentBounds.x;
    window->cachedBounds.y = window->currentBounds.y;
    window->cachedBounds.width = window->currentBounds.width;
    window->cachedBounds.height = window->currentBounds.height;
}

// Restore the window's bounds previously saved by lens_wm_windowCacheBounds().
static void lens_wm_windowUncacheBounds(NativeWindow window) {
    window->currentBounds.x = window->cachedBounds.x;
    window->currentBounds.y = window->cachedBounds.y;
    window->currentBounds.width = window->cachedBounds.width;
    window->currentBounds.height = window->cachedBounds.height;
}

void notify_lens_wm_DnDStarted() {
    isDnDStarted = JNI_TRUE;
    GLASS_LOG_FINE("DnD is active");
    //reset mouse drag as DnD events has higher priority
    _onDraggingAction = JNI_FALSE;
    _dragGrabbingWindow = NULL;
}

void notify_lens_wm_DnDEnded() {
    isDnDStarted = JNI_FALSE;
    GLASS_LOG_FINE("DnD has ended");
}

//// RFB support

static void lens_wm_initRFB(JNIEnv *env) {
#ifdef USE_RFB
    lens_rfb_init(env);
#endif
}

static void lens_wm_rfbNotifyClearScreen() {
#ifdef USE_RFB
    NativeScreen screen = glass_screen_getMainScreen();
    lens_rfb_notifyDirtyRegion(0, 0, screen->width, screen->height);
#endif
}

// Report the window's updated region to RFB, clipped to the screen edges.
static void lens_wm_rfbNotifyWindowUpdate(NativeWindow window,
                                          int width, int height) {
#ifdef USE_RFB
    NativeScreen screen = glass_screen_getMainScreen();
    int x = window->currentBounds.x;
    int y = window->currentBounds.y;
    width = x + width > screen->width ? screen->width - x : width;
    height = y + height > screen->height ? screen->height - y : height;
    lens_rfb_notifyDirtyRegion(x, y, width, height);
#endif
}
gpl-2.0
ziozzang/rhel6-kernel-src
drivers/base/sys.c
3
15972
/*
 * sys.c - pseudo-bus for system 'devices' (cpus, PICs, timers, etc)
 *
 * Copyright (c) 2002-3 Patrick Mochel
 *               2002-3 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * This exports a 'system' bus type.
 * By default, a 'sys' bus gets added to the root of the system. There will
 * always be core system devices. Devices can use sysdev_register() to
 * add themselves as children of the system bus.
 */

#include <linux/sysdev.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>

#include "base.h"

#define to_sysdev(k) container_of(k, struct sys_device, kobj)
#define to_sysdev_attr(a) container_of(a, struct sysdev_attribute, attr)

/* sysfs show dispatcher: forward to the attribute's own show method. */
static ssize_t
sysdev_show(struct kobject *kobj, struct attribute *attr, char *buffer)
{
	struct sys_device *sysdev = to_sysdev(kobj);
	struct sysdev_attribute *sysdev_attr = to_sysdev_attr(attr);

	if (sysdev_attr->show)
		return sysdev_attr->show(sysdev, sysdev_attr, buffer);
	return -EIO;
}

/* sysfs store dispatcher: forward to the attribute's own store method. */
static ssize_t
sysdev_store(struct kobject *kobj, struct attribute *attr,
	     const char *buffer, size_t count)
{
	struct sys_device *sysdev = to_sysdev(kobj);
	struct sysdev_attribute *sysdev_attr = to_sysdev_attr(attr);

	if (sysdev_attr->store)
		return sysdev_attr->store(sysdev, sysdev_attr, buffer, count);
	return -EIO;
}

static const struct sysfs_ops sysfs_ops = {
	.show	= sysdev_show,
	.store	= sysdev_store,
};

static struct kobj_type ktype_sysdev = {
	.sysfs_ops	= &sysfs_ops,
};

/* Create a sysfs file for one attribute of a system device. */
int sysdev_create_file(struct sys_device *s, struct sysdev_attribute *a)
{
	return sysfs_create_file(&s->kobj, &a->attr);
}

/* Remove a sysfs file previously created by sysdev_create_file(). */
void sysdev_remove_file(struct sys_device *s, struct sysdev_attribute *a)
{
	sysfs_remove_file(&s->kobj, &a->attr);
}

EXPORT_SYMBOL_GPL(sysdev_create_file);
EXPORT_SYMBOL_GPL(sysdev_remove_file);

#define to_sysdev_class(k) container_of(k, struct sysdev_class, kset.kobj)
#define to_sysdev_class_attr(a) container_of(a, \
	struct sysdev_class_attribute, attr)

/* sysfs show dispatcher for class-level attributes. */
static ssize_t sysdev_class_show(struct kobject *kobj, struct attribute *attr,
				 char *buffer)
{
	struct sysdev_class *class = to_sysdev_class(kobj);
	struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr);

	if (class_attr->show)
		return class_attr->show(class, buffer);
	return -EIO;
}

/* sysfs store dispatcher for class-level attributes. */
static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
				  const char *buffer, size_t count)
{
	struct sysdev_class *class = to_sysdev_class(kobj);
	struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr);

	if (class_attr->store)
		return class_attr->store(class, buffer, count);
	return -EIO;
}

static const struct sysfs_ops sysfs_class_ops = {
	.show	= sysdev_class_show,
	.store	= sysdev_class_store,
};

static struct kobj_type ktype_sysdev_class = {
	.sysfs_ops	= &sysfs_class_ops,
};

int sysdev_class_create_file(struct sysdev_class *c,
			     struct sysdev_class_attribute *a)
{
	return sysfs_create_file(&c->kset.kobj, &a->attr);
}
EXPORT_SYMBOL_GPL(sysdev_class_create_file);

void sysdev_class_remove_file(struct sysdev_class *c,
			      struct sysdev_class_attribute *a)
{
	sysfs_remove_file(&c->kset.kobj, &a->attr);
}
EXPORT_SYMBOL_GPL(sysdev_class_remove_file);

/* Parent kset of all sysdev classes; created in system_bus_init(). */
static struct kset *system_kset;

/* Register a sysdev class: its kset becomes a child of /sys/devices/system. */
int sysdev_class_register(struct sysdev_class *cls)
{
	int retval;

	pr_debug("Registering sysdev class '%s'\n", cls->name);

	INIT_LIST_HEAD(&cls->drivers);
	/* zero the kobject in case it had previously been used */
	memset(&cls->kset.kobj, 0x00, sizeof(struct kobject));
	cls->kset.kobj.parent = &system_kset->kobj;
	cls->kset.kobj.ktype = &ktype_sysdev_class;
	cls->kset.kobj.kset = system_kset;

	retval = kobject_set_name(&cls->kset.kobj, "%s", cls->name);
	if (retval)
		return retval;

	return kset_register(&cls->kset);
}

void sysdev_class_unregister(struct sysdev_class *cls)
{
	pr_debug("Unregistering sysdev class '%s'\n",
		 kobject_name(&cls->kset.kobj));
	kset_unregister(&cls->kset);
}

EXPORT_SYMBOL_GPL(sysdev_class_register);
EXPORT_SYMBOL_GPL(sysdev_class_unregister);

/* Protects every class's driver list and the driver add/remove callbacks. */
static DEFINE_MUTEX(sysdev_drivers_lock);

/**
 * sysdev_driver_register - Register auxiliary driver
 * @cls: Device class driver belongs to.
 * @drv: Driver.
 *
 * @drv is inserted into @cls->drivers to be
 * called on each operation on devices of that class. The refcount
 * of @cls is incremented.
 */
int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv)
{
	int err = 0;

	if (!cls) {
		WARN(1, KERN_WARNING "sysdev: invalid class passed to "
			"sysdev_driver_register!\n");
		return -EINVAL;
	}

	/* Check whether this driver has already been added to a class. */
	if (drv->entry.next && !list_empty(&drv->entry))
		WARN(1, KERN_WARNING "sysdev: class %s: driver (%p) has already"
			" been registered to a class, something is wrong, but "
			"will forge on!\n", cls->name, drv);

	mutex_lock(&sysdev_drivers_lock);
	if (cls && kset_get(&cls->kset)) {
		list_add_tail(&drv->entry, &cls->drivers);

		/* If devices of this class already exist, tell the driver */
		if (drv->add) {
			struct sys_device *dev;
			list_for_each_entry(dev, &cls->kset.list, kobj.entry)
				drv->add(dev);
		}
	} else {
		err = -EINVAL;
		WARN(1, KERN_ERR "%s: invalid device class\n", __func__);
	}
	mutex_unlock(&sysdev_drivers_lock);
	return err;
}

/**
 * sysdev_driver_unregister - Remove an auxiliary driver.
 * @cls: Class driver belongs to.
 * @drv: Driver.
 */
void sysdev_driver_unregister(struct sysdev_class *cls,
			      struct sysdev_driver *drv)
{
	mutex_lock(&sysdev_drivers_lock);
	list_del_init(&drv->entry);
	if (cls) {
		/* give the driver a chance to detach from each device */
		if (drv->remove) {
			struct sys_device *dev;
			list_for_each_entry(dev, &cls->kset.list, kobj.entry)
				drv->remove(dev);
		}
		/* drop the reference taken by sysdev_driver_register() */
		kset_put(&cls->kset);
	}
	mutex_unlock(&sysdev_drivers_lock);
}

EXPORT_SYMBOL_GPL(sysdev_driver_register);
EXPORT_SYMBOL_GPL(sysdev_driver_unregister);

/* Prepare the device's kobject for registration with its class's kset. */
int sysdev_initialize(struct sys_device *sysdev)
{
	struct sysdev_class *cls = sysdev->cls;

	if (!cls)
		return -EINVAL;

	/* initialize the kobject to 0, in case it had previously been used */
	memset(&sysdev->kobj, 0x00, sizeof(struct kobject));

	/* Make sure the kset is set */
	sysdev->kobj.kset = &cls->kset;

	/* Register the object */
	kobject_init(&sysdev->kobj, &ktype_sysdev);

	return 0;
}
EXPORT_SYMBOL_GPL(sysdev_initialize);

/* Add an initialized device to sysfs and notify the class's drivers. */
int sysdev_add(struct sys_device *sysdev)
{
	int error;
	struct sysdev_class *cls = sysdev->cls;

	if (!cls)
		return -EINVAL;

	error = kobject_add(&sysdev->kobj, NULL, "%s%d",
		     kobject_name(&cls->kset.kobj), sysdev->id);

	if (!error) {
		struct sysdev_driver *drv;

		pr_debug("Registering sys device '%s'\n",
			 kobject_name(&sysdev->kobj));

		mutex_lock(&sysdev_drivers_lock);
		/* Generic notification is implicit, because it's that
		 * code that should have called us.
		 */

		/* Notify class auxiliary drivers */
		list_for_each_entry(drv, &cls->drivers, entry) {
			if (drv->add)
				drv->add(sysdev);
		}
		mutex_unlock(&sysdev_drivers_lock);

		kobject_uevent(&sysdev->kobj, KOBJ_ADD);
	}

	return error;
}
EXPORT_SYMBOL_GPL(sysdev_add);

/* Same as sysdev_add() but without emitting the KOBJ_ADD uevent. */
int sysdev_add_hack(struct sys_device *sysdev)
{
	int error;
	struct sysdev_class *cls = sysdev->cls;

	if (!cls)
		return -EINVAL;

	error = kobject_add(&sysdev->kobj, NULL, "%s%d",
		     kobject_name(&cls->kset.kobj), sysdev->id);

	if (!error) {
		struct sysdev_driver *drv;

		pr_debug("Registering sys device '%s'\n",
			 kobject_name(&sysdev->kobj));

		mutex_lock(&sysdev_drivers_lock);
		/* Generic notification is implicit, because it's that
		 * code that should have called us.
		 */

		/* Notify class auxiliary drivers */
		list_for_each_entry(drv, &cls->drivers, entry) {
			if (drv->add)
				drv->add(sysdev);
		}
		mutex_unlock(&sysdev_drivers_lock);

		/*
		 * FIXME: init_memory_block() issues the event
		 * once its sysfs files are visible as well.
		 * This is a hack only!!!
		 */
		/* XXX: kobject_uevent(&sysdev->kobj, KOBJ_ADD); */
	}

	return error;
}
EXPORT_SYMBOL_GPL(sysdev_add_hack);

/**
 * sysdev_register - add a system device to the tree
 * @sysdev: device in question
 *
 */
int sysdev_register(struct sys_device *sysdev)
{
	struct sysdev_class *cls = sysdev->cls;

	if (sysdev_initialize(sysdev))
		return -EINVAL;

	pr_debug("Registering sys device of class '%s'\n",
		 kobject_name(&cls->kset.kobj));

	return sysdev_add(sysdev);
}

/* Variant of sysdev_register() that uses sysdev_add_hack() (no uevent). */
int sysdev_register_hack(struct sys_device *sysdev)
{
	struct sysdev_class *cls = sysdev->cls;

	if (sysdev_initialize(sysdev))
		return -EINVAL;

	pr_debug("Registering sys device of class '%s'\n",
		 kobject_name(&cls->kset.kobj));

	return sysdev_add_hack(sysdev);
}

/* Detach the device from its class drivers and drop its kobject ref. */
void sysdev_unregister(struct sys_device *sysdev)
{
	struct sysdev_driver *drv;

	mutex_lock(&sysdev_drivers_lock);
	list_for_each_entry(drv, &sysdev->cls->drivers, entry) {
		if (drv->remove)
			drv->remove(sysdev);
	}
	mutex_unlock(&sysdev_drivers_lock);

	kobject_put(&sysdev->kobj);
}

/**
 * sysdev_shutdown - Shut down all system devices.
 *
 * Loop over each class of system devices, and the devices in each
 * of those classes. For each device, we call the shutdown method for
 * each driver registered for the device - the auxiliaries,
 * and the class driver.
 *
 * Note: The list is iterated in reverse order, so that we shut down
 * child devices before we shut down their parents. The list ordering
 * is guaranteed by virtue of the fact that child devices are registered
 * after their parents.
 */
void sysdev_shutdown(void)
{
	struct sysdev_class *cls;

	pr_debug("Shutting Down System Devices\n");

	mutex_lock(&sysdev_drivers_lock);
	list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) {
		struct sys_device *sysdev;

		pr_debug("Shutting down type '%s':\n",
			 kobject_name(&cls->kset.kobj));

		list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) {
			struct sysdev_driver *drv;
			pr_debug(" %s\n", kobject_name(&sysdev->kobj));

			/* Call auxiliary drivers first */
			list_for_each_entry(drv, &cls->drivers, entry) {
				if (drv->shutdown)
					drv->shutdown(sysdev);
			}

			/* Now call the generic one */
			if (cls->shutdown)
				cls->shutdown(sysdev);
		}
	}
	mutex_unlock(&sysdev_drivers_lock);
}

/* Resume one device: class callback first, then the auxiliary drivers. */
static void __sysdev_resume(struct sys_device *dev)
{
	struct sysdev_class *cls = dev->cls;
	struct sysdev_driver *drv;

	/* First, call the class-specific one */
	if (cls->resume)
		cls->resume(dev);
	WARN_ONCE(!irqs_disabled(),
		"Interrupts enabled after %pF\n", cls->resume);

	/* Call auxiliary drivers next. */
	list_for_each_entry(drv, &cls->drivers, entry) {
		if (drv->resume)
			drv->resume(dev);
		WARN_ONCE(!irqs_disabled(),
			"Interrupts enabled after %pF\n", drv->resume);
	}
}

/**
 * sysdev_suspend - Suspend all system devices.
 * @state: Power state to enter.
 *
 * We perform an almost identical operation as sysdev_shutdown()
 * above, though calling ->suspend() instead. Interrupts are disabled
 * when this called. Devices are responsible for both saving state and
 * quiescing or powering down the device.
 *
 * This is only called by the device PM core, so we let them handle
 * all synchronization.
 */
int sysdev_suspend(pm_message_t state)
{
	struct sysdev_class *cls;
	struct sys_device *sysdev, *err_dev;
	struct sysdev_driver *drv, *err_drv;
	int ret;

	pr_debug("Checking wake-up interrupts\n");

	/* Return error code if there are any wake-up interrupts pending */
	ret = check_wakeup_irqs();
	if (ret)
		return ret;

	WARN_ONCE(!irqs_disabled(),
		"Interrupts enabled while suspending system devices\n");

	pr_debug("Suspending System Devices\n");

	list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) {
		pr_debug("Suspending type '%s':\n",
			 kobject_name(&cls->kset.kobj));

		list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) {
			pr_debug(" %s\n", kobject_name(&sysdev->kobj));

			/* Call auxiliary drivers first */
			list_for_each_entry(drv, &cls->drivers, entry) {
				if (drv->suspend) {
					ret = drv->suspend(sysdev, state);
					if (ret)
						goto aux_driver;
				}
				WARN_ONCE(!irqs_disabled(),
					"Interrupts enabled after %pF\n",
					drv->suspend);
			}

			/* Now call the generic one */
			if (cls->suspend) {
				ret = cls->suspend(sysdev, state);
				if (ret)
					goto cls_driver;
				WARN_ONCE(!irqs_disabled(),
					"Interrupts enabled after %pF\n",
					cls->suspend);
			}
		}
	}
	return 0;
	/* resume current sysdev */
cls_driver:
	drv = NULL;
	printk(KERN_ERR "Class suspend failed for %s\n",
		kobject_name(&sysdev->kobj));

aux_driver:
	if (drv)
		printk(KERN_ERR "Class driver suspend failed for %s\n",
				kobject_name(&sysdev->kobj));
	/* unwind: resume the drivers that did suspend on this device */
	list_for_each_entry(err_drv, &cls->drivers, entry) {
		if (err_drv == drv)
			break;
		if (err_drv->resume)
			err_drv->resume(sysdev);
	}

	/* resume other sysdevs in current class */
	list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) {
		if (err_dev == sysdev)
			break;
		pr_debug(" %s\n", kobject_name(&err_dev->kobj));
		__sysdev_resume(err_dev);
	}

	/* resume other classes */
	list_for_each_entry_continue(cls, &system_kset->list,
					kset.kobj.entry) {
		list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) {
			pr_debug(" %s\n", kobject_name(&err_dev->kobj));
			__sysdev_resume(err_dev);
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sysdev_suspend);

/**
 * sysdev_resume - Bring system devices back to life.
 *
 * Similar to sysdev_suspend(), but we iterate the list forwards
 * to guarantee that parent devices are resumed before their children.
 *
 * Note: Interrupts are disabled when called.
 */
int sysdev_resume(void)
{
	struct sysdev_class *cls;

	WARN_ONCE(!irqs_disabled(),
		"Interrupts enabled while resuming system devices\n");

	pr_debug("Resuming System Devices\n");

	list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) {
		struct sys_device *sysdev;

		pr_debug("Resuming type '%s':\n",
			 kobject_name(&cls->kset.kobj));

		list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) {
			pr_debug(" %s\n", kobject_name(&sysdev->kobj));

			__sysdev_resume(sysdev);
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sysdev_resume);

/* Create the /sys/devices/system kset that hosts all sysdev classes. */
int __init system_bus_init(void)
{
	system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
	if (!system_kset)
		return -ENOMEM;
	return 0;
}

EXPORT_SYMBOL_GPL(sysdev_register);
EXPORT_SYMBOL_GPL(sysdev_register_hack);
EXPORT_SYMBOL_GPL(sysdev_unregister);

#define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr)

/* Generic store helper: parse an unsigned long into ea->var. */
ssize_t sysdev_store_ulong(struct sys_device *sysdev,
			   struct sysdev_attribute *attr,
			   const char *buf, size_t size)
{
	struct sysdev_ext_attribute *ea = to_ext_attr(attr);
	char *end;
	unsigned long new = simple_strtoul(buf, &end, 0);
	if (end == buf)
		return -EINVAL;
	*(unsigned long *)(ea->var) = new;
	/* Always return full write size even if we didn't consume all */
	return size;
}
EXPORT_SYMBOL_GPL(sysdev_store_ulong);

/* Generic show helper: print ea->var as hex. */
ssize_t sysdev_show_ulong(struct sys_device *sysdev,
			  struct sysdev_attribute *attr,
			  char *buf)
{
	struct sysdev_ext_attribute *ea = to_ext_attr(attr);
	return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(sysdev_show_ulong);

/* Generic store helper: parse a range-checked int into ea->var. */
ssize_t sysdev_store_int(struct sys_device *sysdev,
			 struct sysdev_attribute *attr,
			 const char *buf, size_t size)
{
	struct sysdev_ext_attribute *ea = to_ext_attr(attr);
	char *end;
	long new = simple_strtol(buf, &end, 0);
	if (end == buf || new > INT_MAX || new < INT_MIN)
		return -EINVAL;
	*(int *)(ea->var) = new;
	/* Always return full write size even if we didn't consume all */
	return size;
}
EXPORT_SYMBOL_GPL(sysdev_store_int);

/* Generic show helper: print ea->var as a signed decimal. */
ssize_t sysdev_show_int(struct sys_device *sysdev,
			struct sysdev_attribute *attr,
			char *buf)
{
	struct sysdev_ext_attribute *ea = to_ext_attr(attr);
	return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(sysdev_show_int);
gpl-2.0
tblume/systemd-suse-devel
src/resolve/test-resolved-packet.c
3
1759
/* SPDX-License-Identifier: LGPL-2.1+ */
/***
  This file is part of systemd

  Copyright 2017 Zbigniew Jędrzejewski-Szmek

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include "log.h"
#include "resolved-dns-packet.h"

/* Exercise dns_packet_new(): every accepted minimum size must yield an
 * allocation of at least min(size, DNS_PACKET_SIZE_MAX), and a request
 * beyond DNS_PACKET_SIZE_MAX must be rejected with -EFBIG. The loop
 * index jumps geometrically through the middle of the range so the test
 * stays fast while still covering both ends densely. */
static void test_dns_packet_new(void) {
        _cleanup_(dns_packet_unrefp) DnsPacket *oversized = NULL;
        size_t sz = 0;

        while (sz <= DNS_PACKET_SIZE_MAX) {
                _cleanup_(dns_packet_unrefp) DnsPacket *pkt = NULL;

                assert_se(dns_packet_new(&pkt, DNS_PROTOCOL_DNS, sz, DNS_PACKET_SIZE_MAX) == 0);

                log_debug("dns_packet_new: %zu → %zu", sz, pkt->allocated);
                assert_se(pkt->allocated >= MIN(DNS_PACKET_SIZE_MAX, sz));

                /* skip ahead through the uninteresting middle of the range */
                if (sz > DNS_PACKET_SIZE_START + 10 && sz < DNS_PACKET_SIZE_MAX - 10)
                        sz = MIN(sz * 2, DNS_PACKET_SIZE_MAX - 10);

                sz++;
        }

        /* one byte over the hard limit must be refused */
        assert_se(dns_packet_new(&oversized, DNS_PROTOCOL_DNS, DNS_PACKET_SIZE_MAX + 1, DNS_PACKET_SIZE_MAX) == -EFBIG);
}

int main(int argc, char **argv) {
        /* verbose logging so the allocation progression is visible */
        log_set_max_level(LOG_DEBUG);
        log_parse_environment();
        log_open();

        test_dns_packet_new();

        return 0;
}
gpl-2.0
igors/xfce4-settings
dialogs/appearance-settings/main.c
3
39192
/* * Copyright (c) 2008 Stephan Arts <stephan@xfce.org> * Copyright (c) 2008 Jannis Pohlmann <jannis@xfce.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Library General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #ifdef HAVE_SYS_WAIT_H #include <sys/wait.h> #endif #include <glib.h> #include <gtk/gtk.h> #include <libxfce4ui/libxfce4ui.h> #include <libxfce4util/libxfce4util.h> #include <xfconf/xfconf.h> #include "appearance-dialog_ui.h" #include "images.h" #define INCH_MM 25.4 /* Use a fallback DPI of 96 which should be ok-ish on most systems * and is only applied on rare occasions */ #define FALLBACK_DPI 96 /* Increase this number if new gtk settings have been added */ #define INITIALIZE_UINT (1) enum { COLUMN_THEME_NAME, COLUMN_THEME_DISPLAY_NAME, COLUMN_THEME_COMMENT, N_THEME_COLUMNS }; enum { COLUMN_RGBA_PIXBUF, COLUMN_RGBA_NAME, N_RGBA_COLUMNS }; /* String arrays with the settings in combo boxes */ static const gchar* toolbar_styles_array[] = { "icons", "text", "both", "both-horiz" }; static const gchar* xft_hint_styles_array[] = { "hintnone", "hintslight", "hintmedium", "hintfull" }; static const gchar* xft_rgba_array[] = { "none", "rgb", "bgr", "vrgb", "vbgr" }; static const GtkTargetEntry theme_drop_targets[] = { { 
"text/uri-list", 0, 0 } }; /* Option entries */ static GdkNativeWindow opt_socket_id = 0; static gboolean opt_version = FALSE; static GOptionEntry option_entries[] = { { "socket-id", 's', G_OPTION_FLAG_IN_MAIN, G_OPTION_ARG_INT, &opt_socket_id, N_("Settings manager socket"), N_("SOCKET ID") }, { "version", 'v', 0, G_OPTION_ARG_NONE, &opt_version, N_("Version information"), NULL }, { NULL } }; /* Global xfconf channel */ static XfconfChannel *xsettings_channel; static int compute_xsettings_dpi (GtkWidget *widget) { GdkScreen *screen; int width_mm, height_mm; int width, height; int dpi; screen = gtk_widget_get_screen (widget); width_mm = gdk_screen_get_width_mm (screen); height_mm = gdk_screen_get_height_mm (screen); dpi = FALLBACK_DPI; if (width_mm > 0 && height_mm > 0) { width = gdk_screen_get_width (screen); height = gdk_screen_get_height (screen); dpi = MIN (INCH_MM * width / width_mm, INCH_MM * height / height_mm); } return dpi; } static void cb_theme_tree_selection_changed (GtkTreeSelection *selection, const gchar *property) { GtkTreeModel *model; gboolean has_selection; gchar *name; GtkTreeIter iter; /* Get the selected list iter */ has_selection = gtk_tree_selection_get_selected (selection, &model, &iter); if (G_LIKELY (has_selection)) { /* Get the theme name */ gtk_tree_model_get (model, &iter, COLUMN_THEME_NAME, &name, -1); /* Store the new theme */ xfconf_channel_set_string (xsettings_channel, property, name); /* Cleanup */ g_free (name); } } static void cb_icon_theme_tree_selection_changed (GtkTreeSelection *selection) { /* Set the new icon theme */ cb_theme_tree_selection_changed (selection, "/Net/IconThemeName"); } static void cb_ui_theme_tree_selection_changed (GtkTreeSelection *selection) { /* Set the new UI theme */ cb_theme_tree_selection_changed (selection, "/Net/ThemeName"); } static void cb_toolbar_style_combo_changed (GtkComboBox *combo) { gint active; /* Get active item, prevent number outside the array */ active = CLAMP 
(gtk_combo_box_get_active (combo), 0, (gint) G_N_ELEMENTS (toolbar_styles_array)); /* Save setting */ xfconf_channel_set_string (xsettings_channel, "/Gtk/ToolbarStyle", toolbar_styles_array[active]); } static void cb_antialias_check_button_toggled (GtkToggleButton *toggle) { gint active; /* Don't allow an inconsistent button anymore */ gtk_toggle_button_set_inconsistent (toggle, FALSE); /* Get active */ active = gtk_toggle_button_get_active (toggle) ? 1 : 0; /* Save setting */ xfconf_channel_set_int (xsettings_channel, "/Xft/Antialias", active); } static void cb_hinting_style_combo_changed (GtkComboBox *combo) { gint active; /* Get active item, prevent number outside the array */ active = CLAMP (gtk_combo_box_get_active (combo), 0, (gint) G_N_ELEMENTS (xft_hint_styles_array)); /* Save setting */ xfconf_channel_set_string (xsettings_channel, "/Xft/HintStyle", xft_hint_styles_array[active]); } static void cb_rgba_style_combo_changed (GtkComboBox *combo) { gint active; /* Get active item, prevent number outside the array */ active = CLAMP (gtk_combo_box_get_active (combo), 0, (gint) G_N_ELEMENTS (xft_rgba_array)); /* Save setting */ xfconf_channel_set_string (xsettings_channel, "/Xft/RGBA", xft_rgba_array[active]); } static void cb_custom_dpi_check_button_toggled (GtkToggleButton *custom_dpi_toggle, GtkSpinButton *custom_dpi_spin) { gint dpi; if (gtk_toggle_button_get_active (custom_dpi_toggle)) { /* Custom DPI is activated, so restore the last custom DPI we know about */ dpi = xfconf_channel_get_int (xsettings_channel, "/Xfce/LastCustomDPI", -1); /* Unfortunately, we don't have a valid custom DPI value to use, so compute it */ if (dpi <= 0) dpi = compute_xsettings_dpi (GTK_WIDGET (custom_dpi_toggle)); /* Apply the computed custom DPI value */ xfconf_channel_set_int (xsettings_channel, "/Xft/DPI", dpi); gtk_widget_set_sensitive (GTK_WIDGET (custom_dpi_spin), TRUE); } else { /* Custom DPI is deactivated, so remember the current value as the last custom DPI */ dpi = 
gtk_spin_button_get_value_as_int (custom_dpi_spin); xfconf_channel_set_int (xsettings_channel, "/Xfce/LastCustomDPI", dpi); /* Tell xfsettingsd to compute the value itself */ xfconf_channel_set_int (xsettings_channel, "/Xft/DPI", -1); /* Make the spin button insensitive */ gtk_widget_set_sensitive (GTK_WIDGET (custom_dpi_spin), FALSE); } } static void cb_custom_dpi_spin_button_changed (GtkSpinButton *custom_dpi_spin, GtkToggleButton *custom_dpi_toggle) { gint dpi = gtk_spin_button_get_value_as_int (custom_dpi_spin); if (GTK_WIDGET_IS_SENSITIVE (custom_dpi_spin) && gtk_toggle_button_get_active (custom_dpi_toggle)) { /* Custom DPI is turned on and the spin button has changed, so remember the value */ xfconf_channel_set_int (xsettings_channel, "/Xfce/LastCustomDPI", dpi); } /* Tell xfsettingsd to apply the custom DPI value */ xfconf_channel_set_int (xsettings_channel, "/Xft/DPI", dpi); } #ifdef ENABLE_SOUND_SETTINGS static void cb_enable_event_sounds_check_button_toggled (GtkToggleButton *toggle, GtkWidget *button) { gboolean active; active = gtk_toggle_button_get_active (toggle); gtk_widget_set_sensitive (button, active); gtk_toggle_button_set_active (GTK_TOGGLE_BUTTON (button), active); } #endif static void appearance_settings_load_icon_themes (GtkListStore *list_store, GtkTreeView *tree_view) { GDir *dir; GtkTreePath *tree_path; GtkTreeIter iter; XfceRc *index_file; const gchar *file; gchar **icon_theme_dirs; gchar *index_filename; const gchar *theme_name; const gchar *theme_comment; gchar *comment_escaped; gchar *active_theme_name; gint i; GSList *check_list = NULL; /* Determine current theme */ active_theme_name = xfconf_channel_get_string (xsettings_channel, "/Net/IconThemeName", "Rodent"); /* Determine directories to look in for icon themes */ xfce_resource_push_path (XFCE_RESOURCE_ICONS, DATADIR G_DIR_SEPARATOR_S "icons"); icon_theme_dirs = xfce_resource_dirs (XFCE_RESOURCE_ICONS); xfce_resource_pop_path (XFCE_RESOURCE_ICONS); /* Iterate over all base 
directories */ for (i = 0; icon_theme_dirs[i] != NULL; ++i) { /* Open directory handle */ dir = g_dir_open (icon_theme_dirs[i], 0, NULL); /* Try next base directory if this one cannot be read */ if (G_UNLIKELY (dir == NULL)) continue; /* Iterate over filenames in the directory */ while ((file = g_dir_read_name (dir)) != NULL) { /* Build filename for the index.theme of the current icon theme directory */ index_filename = g_build_filename (icon_theme_dirs[i], file, "index.theme", NULL); /* Try to open the theme index file */ index_file = xfce_rc_simple_open (index_filename, TRUE); if (index_file != NULL && g_slist_find_custom (check_list, file, (GCompareFunc) g_utf8_collate) == NULL) { /* Set the icon theme group */ xfce_rc_set_group (index_file, "Icon Theme"); /* Check if the icon theme is valid and visible to the user */ if (G_LIKELY (xfce_rc_has_entry (index_file, "Directories") && !xfce_rc_read_bool_entry (index_file, "Hidden", FALSE))) { /* Insert the theme in the check list */ check_list = g_slist_prepend (check_list, g_strdup (file)); /* Get translated icon theme name and comment */ theme_name = xfce_rc_read_entry (index_file, "Name", file); theme_comment = xfce_rc_read_entry (index_file, "Comment", NULL); /* Escape the comment, since tooltips are markup, not text */ comment_escaped = theme_comment ? 
g_markup_escape_text (theme_comment, -1) : NULL; /* Append icon theme to the list store */ gtk_list_store_append (list_store, &iter); gtk_list_store_set (list_store, &iter, COLUMN_THEME_NAME, file, COLUMN_THEME_DISPLAY_NAME, theme_name, COLUMN_THEME_COMMENT, comment_escaped, -1); /* Cleanup */ g_free (comment_escaped); /* Check if this is the active theme, if so, select it */ if (G_UNLIKELY (g_utf8_collate (file, active_theme_name) == 0)) { tree_path = gtk_tree_model_get_path (GTK_TREE_MODEL (list_store), &iter); gtk_tree_selection_select_path (gtk_tree_view_get_selection (tree_view), tree_path); gtk_tree_view_scroll_to_cell (tree_view, tree_path, NULL, TRUE, 0.5, 0); gtk_tree_path_free (tree_path); } } } /* Close theme index file */ if (G_LIKELY (index_file)) xfce_rc_close (index_file); /* Free theme index filename */ g_free (index_filename); } /* Close directory handle */ g_dir_close (dir); } /* Free active theme name */ g_free (active_theme_name); /* Free list of base directories */ g_strfreev (icon_theme_dirs); /* Free the check list */ if (G_LIKELY (check_list)) { g_slist_foreach (check_list, (GFunc) g_free, NULL); g_slist_free (check_list); } } static void appearance_settings_load_ui_themes (GtkListStore *list_store, GtkTreeView *tree_view) { GDir *dir; GtkTreePath *tree_path; GtkTreeIter iter; XfceRc *index_file; const gchar *file; gchar **ui_theme_dirs; gchar *index_filename; const gchar *theme_name; const gchar *theme_comment; gchar *active_theme_name; gchar *gtkrc_filename; gchar *comment_escaped; gint i; GSList *check_list = NULL; /* Determine current theme */ active_theme_name = xfconf_channel_get_string (xsettings_channel, "/Net/ThemeName", "Default"); /* Determine directories to look in for ui themes */ xfce_resource_push_path (XFCE_RESOURCE_THEMES, DATADIR G_DIR_SEPARATOR_S "themes"); ui_theme_dirs = xfce_resource_dirs (XFCE_RESOURCE_THEMES); xfce_resource_pop_path (XFCE_RESOURCE_THEMES); /* Iterate over all base directories */ for (i = 0; 
ui_theme_dirs[i] != NULL; ++i) { /* Open directory handle */ dir = g_dir_open (ui_theme_dirs[i], 0, NULL); /* Try next base directory if this one cannot be read */ if (G_UNLIKELY (dir == NULL)) continue; /* Iterate over filenames in the directory */ while ((file = g_dir_read_name (dir)) != NULL) { /* Build the theme style filename */ gtkrc_filename = g_build_filename (ui_theme_dirs[i], file, "gtk-2.0", "gtkrc", NULL); /* Check if the gtkrc file exists and the theme is not already in the list */ if (g_file_test (gtkrc_filename, G_FILE_TEST_EXISTS) && g_slist_find_custom (check_list, file, (GCompareFunc) g_utf8_collate) == NULL) { /* Insert the theme in the check list */ check_list = g_slist_prepend (check_list, g_strdup (file)); /* Build filename for the index.theme of the current ui theme directory */ index_filename = g_build_filename (ui_theme_dirs[i], file, "index.theme", NULL); /* Try to open the theme index file */ index_file = xfce_rc_simple_open (index_filename, TRUE); if (G_LIKELY (index_file != NULL)) { /* Get translated ui theme name and comment */ theme_name = xfce_rc_read_entry (index_file, "Name", file); theme_comment = xfce_rc_read_entry (index_file, "Comment", NULL); /* Escape the comment because tooltips are markup, not text */ comment_escaped = theme_comment ? 
g_markup_escape_text (theme_comment, -1) : NULL; } else { /* Set defaults */ theme_name = file; comment_escaped = NULL; } /* Append ui theme to the list store */ gtk_list_store_append (list_store, &iter); gtk_list_store_set (list_store, &iter, COLUMN_THEME_NAME, file, COLUMN_THEME_DISPLAY_NAME, theme_name, COLUMN_THEME_COMMENT, comment_escaped, -1); /* Cleanup */ if (G_LIKELY (index_file != NULL)) xfce_rc_close (index_file); g_free (comment_escaped); /* Check if this is the active theme, if so, select it */ if (G_UNLIKELY (g_utf8_collate (file, active_theme_name) == 0)) { tree_path = gtk_tree_model_get_path (GTK_TREE_MODEL (list_store), &iter); gtk_tree_selection_select_path (gtk_tree_view_get_selection (tree_view), tree_path); gtk_tree_view_scroll_to_cell (tree_view, tree_path, NULL, TRUE, 0.5, 0); gtk_tree_path_free (tree_path); } /* Free theme index filename */ g_free (index_filename); } /* Free gtkrc filename */ g_free (gtkrc_filename); } /* Close directory handle */ g_dir_close (dir); } /* Free active theme name */ g_free (active_theme_name); /* Free list of base directories */ g_strfreev (ui_theme_dirs); /* Free the check list */ if (G_LIKELY (check_list)) { g_slist_foreach (check_list, (GFunc) g_free, NULL); g_slist_free (check_list); } } static void appearance_settings_dialog_channel_property_changed (XfconfChannel *channel, const gchar *property_name, const GValue *value, GtkBuilder *builder) { GObject *object; gchar *str; guint i; gint antialias, dpi, custom_dpi; GtkTreeModel *model; g_return_if_fail (property_name != NULL); g_return_if_fail (GTK_IS_BUILDER (builder)); if (strcmp (property_name, "/Xft/RGBA") == 0) { str = xfconf_channel_get_string (xsettings_channel, property_name, xft_rgba_array[0]); for (i = 0; i < G_N_ELEMENTS (xft_rgba_array); i++) { if (strcmp (str, xft_rgba_array[i]) == 0) { object = gtk_builder_get_object (builder, "xft_rgba_combo_box"); gtk_combo_box_set_active (GTK_COMBO_BOX (object), i); break; } } g_free (str); } else if 
(strcmp (property_name, "/Gtk/ToolbarStyle") == 0) { str = xfconf_channel_get_string (xsettings_channel, property_name, toolbar_styles_array[2]); for (i = 0; i < G_N_ELEMENTS (toolbar_styles_array); i++) { if (strcmp (str, toolbar_styles_array[i]) == 0) { object = gtk_builder_get_object (builder, "gtk_toolbar_style_combo_box"); gtk_combo_box_set_active (GTK_COMBO_BOX (object), i); break; } } g_free (str); } else if (strcmp (property_name, "/Xft/HintStyle") == 0) { str = xfconf_channel_get_string (xsettings_channel, property_name, xft_hint_styles_array[0]); for (i = 0; i < G_N_ELEMENTS (xft_hint_styles_array); i++) { if (strcmp (str, xft_hint_styles_array[i]) == 0) { object = gtk_builder_get_object (builder, "xft_hinting_style_combo_box"); gtk_combo_box_set_active (GTK_COMBO_BOX (object), i); break; } } g_free (str); } else if (strcmp (property_name, "/Xft/Antialias") == 0) { object = gtk_builder_get_object (builder, "xft_antialias_check_button"); antialias = xfconf_channel_get_int (xsettings_channel, property_name, -1); switch (antialias) { case 1: gtk_toggle_button_set_active (GTK_TOGGLE_BUTTON (object), TRUE); break; case 0: gtk_toggle_button_set_active (GTK_TOGGLE_BUTTON (object), FALSE); break; default: /* -1 */ gtk_toggle_button_set_inconsistent (GTK_TOGGLE_BUTTON (object), TRUE); break; } } else if (strcmp (property_name, "/Xft/DPI") == 0) { /* The DPI has changed, so get its value and the last known custom value */ dpi = xfconf_channel_get_int (xsettings_channel, property_name, FALLBACK_DPI); custom_dpi = xfconf_channel_get_int (xsettings_channel, "/Xfce/LastCustomDPI", -1); /* Activate the check button if we're using a custom DPI */ object = gtk_builder_get_object (builder, "xft_custom_dpi_check_button"); gtk_toggle_button_set_active (GTK_TOGGLE_BUTTON (object), dpi >= 0); /* If we're not using a custom DPI, compute the future custom DPI automatically */ if (custom_dpi == -1) custom_dpi = compute_xsettings_dpi (GTK_WIDGET (object)); object = 
gtk_builder_get_object (builder, "xft_custom_dpi_spin_button"); if (dpi > 0) { /* We're using a custom DPI, so use the current DPI setting for the spin value */ gtk_spin_button_set_value (GTK_SPIN_BUTTON (object), dpi); } else { /* Set the spin button value to the last custom DPI */ gtk_spin_button_set_value (GTK_SPIN_BUTTON (object), custom_dpi); } } else if (strcmp (property_name, "/Net/ThemeName") == 0) { GtkTreeIter iter; gboolean reload; object = gtk_builder_get_object (builder, "gtk_theme_treeview"); model = gtk_tree_view_get_model (GTK_TREE_VIEW (object)); reload = TRUE; if (gtk_tree_selection_get_selected (gtk_tree_view_get_selection (GTK_TREE_VIEW (object)), &model, &iter)) { gchar *selected_name; gchar *new_name; gtk_tree_model_get (model, &iter, COLUMN_THEME_NAME, &selected_name, -1); new_name = xfconf_channel_get_string (channel, property_name, NULL); reload = (strcmp (new_name, selected_name) != 0); g_free (selected_name); g_free (new_name); } if (reload) { gtk_list_store_clear (GTK_LIST_STORE (model)); appearance_settings_load_ui_themes (GTK_LIST_STORE (model), GTK_TREE_VIEW (object)); } } else if (strcmp (property_name, "/Net/IconThemeName") == 0) { GtkTreeIter iter; gboolean reload; reload = TRUE; object = gtk_builder_get_object (builder, "icon_theme_treeview"); model = gtk_tree_view_get_model (GTK_TREE_VIEW (object)); if (gtk_tree_selection_get_selected (gtk_tree_view_get_selection (GTK_TREE_VIEW (object)), &model, &iter)) { gchar *selected_name; gchar *new_name; gtk_tree_model_get (model, &iter, COLUMN_THEME_NAME, &selected_name, -1); new_name = xfconf_channel_get_string (channel, property_name, NULL); reload = (strcmp (new_name, selected_name) != 0); g_free (selected_name); g_free (new_name); } if (reload) { gtk_list_store_clear (GTK_LIST_STORE (model)); appearance_settings_load_icon_themes (GTK_LIST_STORE (model), GTK_TREE_VIEW (object)); } } } static void cb_theme_uri_dropped (GtkWidget *widget, GdkDragContext *drag_context, gint x, gint y, 
GtkSelectionData *data, guint info, guint timestamp, GtkBuilder *builder) { gchar **uris; gchar *argv[3]; guint i; GError *error = NULL; gint status; GtkWidget *toplevel = gtk_widget_get_toplevel (widget); gchar *filename; GdkCursor *cursor; GdkWindow *gdkwindow; gboolean something_installed = FALSE; GObject *object; GtkTreeModel *model; uris = gtk_selection_data_get_uris (data); if (uris == NULL) return; argv[0] = HELPERDIR G_DIR_SEPARATOR_S "appearance-install-theme"; argv[2] = NULL; /* inform the user we are installing the theme */ gdkwindow = gtk_widget_get_window (widget); cursor = gdk_cursor_new_for_display (gtk_widget_get_display (widget), GDK_WATCH); gdk_window_set_cursor (gdkwindow, cursor); /* iterate main loop to show cursor */ while (gtk_events_pending ()) gtk_main_iteration (); for (i = 0; uris[i] != NULL; i++) { filename = g_filename_from_uri (uris[i], NULL, NULL); if (filename == NULL) continue; argv[1] = filename; if (g_spawn_sync (NULL, argv, NULL, 0, NULL, NULL, NULL, NULL, &status, &error) && status > 0) { switch (WEXITSTATUS (status)) { case 2: g_set_error (&error, G_SPAWN_ERROR, 0, _("File is larger than %d MB, installation aborted"), 50); break; case 3: g_set_error_literal (&error, G_SPAWN_ERROR, 0, _("Failed to create temporary directory")); break; case 4: g_set_error_literal (&error, G_SPAWN_ERROR, 0, _("Failed to extract archive")); break; case 5: g_set_error_literal (&error, G_SPAWN_ERROR, 0, _("Unknown format, only archives and directories are supported")); break; default: g_set_error (&error, G_SPAWN_ERROR, 0, _("An unknown error, exit code is %d"), WEXITSTATUS (status)); break; } } if (error != NULL) { xfce_dialog_show_error (GTK_WINDOW (toplevel), error, _("Failed to install theme")); g_clear_error (&error); } else { something_installed = TRUE; } g_free (filename); } g_strfreev (uris); gdk_window_set_cursor (gdkwindow, NULL); gdk_cursor_unref (cursor); if (something_installed) { /* reload icon theme treeview */ object = 
gtk_builder_get_object (builder, "icon_theme_treeview"); model = gtk_tree_view_get_model (GTK_TREE_VIEW (object)); gtk_list_store_clear (GTK_LIST_STORE (model)); appearance_settings_load_icon_themes (GTK_LIST_STORE (model), GTK_TREE_VIEW (object)); /* reload gtk theme treeview */ object = gtk_builder_get_object (builder, "gtk_theme_treeview"); model = gtk_tree_view_get_model (GTK_TREE_VIEW (object)); gtk_list_store_clear (GTK_LIST_STORE (model)); appearance_settings_load_ui_themes (GTK_LIST_STORE (model), GTK_TREE_VIEW (object)); } } static void appearance_settings_dialog_configure_widgets (GtkBuilder *builder) { GObject *object, *object2; GtkListStore *list_store; GtkCellRenderer *renderer; GdkPixbuf *pixbuf; GtkTreeSelection *selection; /* Icon themes list */ object = gtk_builder_get_object (builder, "icon_theme_treeview"); list_store = gtk_list_store_new (N_THEME_COLUMNS, G_TYPE_STRING, G_TYPE_STRING, G_TYPE_STRING); gtk_tree_sortable_set_sort_column_id (GTK_TREE_SORTABLE (list_store), COLUMN_THEME_DISPLAY_NAME, GTK_SORT_ASCENDING); gtk_tree_view_set_model (GTK_TREE_VIEW (object), GTK_TREE_MODEL (list_store)); gtk_tree_view_set_tooltip_column (GTK_TREE_VIEW (object), COLUMN_THEME_COMMENT); renderer = gtk_cell_renderer_text_new (); gtk_tree_view_insert_column_with_attributes (GTK_TREE_VIEW (object), 0, "", renderer, "text", COLUMN_THEME_DISPLAY_NAME, NULL); appearance_settings_load_icon_themes (list_store, GTK_TREE_VIEW (object)); g_object_unref (G_OBJECT (list_store)); selection = gtk_tree_view_get_selection (GTK_TREE_VIEW (object)); gtk_tree_selection_set_mode (selection, GTK_SELECTION_SINGLE); g_signal_connect (G_OBJECT (selection), "changed", G_CALLBACK (cb_icon_theme_tree_selection_changed), NULL); gtk_drag_dest_set (GTK_WIDGET (object), GTK_DEST_DEFAULT_ALL, theme_drop_targets, G_N_ELEMENTS (theme_drop_targets), GDK_ACTION_COPY); g_signal_connect (G_OBJECT (object), "drag-data-received", G_CALLBACK (cb_theme_uri_dropped), builder); /* Gtk (UI) themes */ 
object = gtk_builder_get_object (builder, "gtk_theme_treeview"); list_store = gtk_list_store_new (N_THEME_COLUMNS, G_TYPE_STRING, G_TYPE_STRING, G_TYPE_STRING); gtk_tree_sortable_set_sort_column_id (GTK_TREE_SORTABLE (list_store), COLUMN_THEME_DISPLAY_NAME, GTK_SORT_ASCENDING); gtk_tree_view_set_model (GTK_TREE_VIEW (object), GTK_TREE_MODEL (list_store)); gtk_tree_view_set_tooltip_column (GTK_TREE_VIEW (object), COLUMN_THEME_COMMENT); renderer = gtk_cell_renderer_text_new(); gtk_tree_view_insert_column_with_attributes (GTK_TREE_VIEW (object), 0, "", renderer, "text", COLUMN_THEME_DISPLAY_NAME, NULL); appearance_settings_load_ui_themes (list_store, GTK_TREE_VIEW (object)); g_object_unref (G_OBJECT (list_store)); selection = gtk_tree_view_get_selection (GTK_TREE_VIEW (object)); gtk_tree_selection_set_mode (selection, GTK_SELECTION_SINGLE); g_signal_connect (G_OBJECT (selection), "changed", G_CALLBACK (cb_ui_theme_tree_selection_changed), NULL); gtk_drag_dest_set (GTK_WIDGET (object), GTK_DEST_DEFAULT_ALL, theme_drop_targets, G_N_ELEMENTS (theme_drop_targets), GDK_ACTION_COPY); g_signal_connect (G_OBJECT (object), "drag-data-received", G_CALLBACK (cb_theme_uri_dropped), builder); /* Subpixel (rgba) hinting Combo */ object = gtk_builder_get_object (builder, "xft_rgba_store"); pixbuf = gdk_pixbuf_new_from_xpm_data (rgba_image_none_xpm); gtk_list_store_insert_with_values (GTK_LIST_STORE (object), NULL, 0, 0, pixbuf, 1, _("None"), -1); g_object_unref (G_OBJECT (pixbuf)); pixbuf = gdk_pixbuf_new_from_xpm_data (rgba_image_rgb_xpm); gtk_list_store_insert_with_values (GTK_LIST_STORE (object), NULL, 1, 0, pixbuf, 1, _("RGB"), -1); g_object_unref (G_OBJECT (pixbuf)); pixbuf = gdk_pixbuf_new_from_xpm_data (rgba_image_bgr_xpm); gtk_list_store_insert_with_values (GTK_LIST_STORE (object), NULL, 2, 0, pixbuf, 1, _("BGR"), -1); g_object_unref (G_OBJECT (pixbuf)); pixbuf = gdk_pixbuf_new_from_xpm_data (rgba_image_vrgb_xpm); gtk_list_store_insert_with_values (GTK_LIST_STORE (object), 
NULL, 3, 0, pixbuf, 1, _("Vertical RGB"), -1); g_object_unref (G_OBJECT (pixbuf)); pixbuf = gdk_pixbuf_new_from_xpm_data (rgba_image_vbgr_xpm); gtk_list_store_insert_with_values (GTK_LIST_STORE (object), NULL, 4, 0, pixbuf, 1, _("Vertical BGR"), -1); g_object_unref (G_OBJECT (pixbuf)); object = gtk_builder_get_object (builder, "xft_rgba_combo_box"); appearance_settings_dialog_channel_property_changed (xsettings_channel, "/Xft/RGBA", NULL, builder); g_signal_connect (G_OBJECT (object), "changed", G_CALLBACK (cb_rgba_style_combo_changed), NULL); /* Enable editable menu accelerators */ object = gtk_builder_get_object (builder, "gtk_caneditaccels_check_button"); xfconf_g_property_bind (xsettings_channel, "/Gtk/CanChangeAccels", G_TYPE_BOOLEAN, G_OBJECT (object), "active"); /* Show menu images */ object = gtk_builder_get_object (builder, "gtk_menu_images_check_button"); xfconf_g_property_bind (xsettings_channel, "/Gtk/MenuImages", G_TYPE_BOOLEAN, G_OBJECT (object), "active"); /* Show button images */ object = gtk_builder_get_object (builder, "gtk_button_images_check_button"); xfconf_g_property_bind (xsettings_channel, "/Gtk/ButtonImages", G_TYPE_BOOLEAN, G_OBJECT (object), "active"); /* Font name */ object = gtk_builder_get_object (builder, "gtk_fontname_button"); xfconf_g_property_bind (xsettings_channel, "/Gtk/FontName", G_TYPE_STRING, G_OBJECT (object), "font-name"); /* Toolbar style */ object = gtk_builder_get_object (builder, "gtk_toolbar_style_combo_box"); appearance_settings_dialog_channel_property_changed (xsettings_channel, "/Gtk/ToolbarStyle", NULL, builder); g_signal_connect (G_OBJECT (object), "changed", G_CALLBACK(cb_toolbar_style_combo_changed), NULL); /* Hinting style */ object = gtk_builder_get_object (builder, "xft_hinting_style_combo_box"); appearance_settings_dialog_channel_property_changed (xsettings_channel, "/Xft/HintStyle", NULL, builder); g_signal_connect (G_OBJECT (object), "changed", G_CALLBACK (cb_hinting_style_combo_changed), NULL); /* 
Hinting */ object = gtk_builder_get_object (builder, "xft_antialias_check_button"); appearance_settings_dialog_channel_property_changed (xsettings_channel, "/Xft/Antialias", NULL, builder); g_signal_connect (G_OBJECT (object), "toggled", G_CALLBACK (cb_antialias_check_button_toggled), NULL); /* DPI */ object = gtk_builder_get_object (builder, "xft_custom_dpi_check_button"); object2 = gtk_builder_get_object (builder, "xft_custom_dpi_spin_button"); appearance_settings_dialog_channel_property_changed (xsettings_channel, "/Xft/DPI", NULL, builder); gtk_widget_set_sensitive (GTK_WIDGET (object2), gtk_toggle_button_get_active (GTK_TOGGLE_BUTTON (object))); g_signal_connect (G_OBJECT (object), "toggled", G_CALLBACK (cb_custom_dpi_check_button_toggled), object2); g_signal_connect (G_OBJECT (object2), "value-changed", G_CALLBACK (cb_custom_dpi_spin_button_changed), object); #ifdef ENABLE_SOUND_SETTINGS /* Sounds */ object = gtk_builder_get_object (builder, "event_sounds_frame"); gtk_widget_show (GTK_WIDGET (object)); object = gtk_builder_get_object (builder, "enable_event_sounds_check_button"); object2 = gtk_builder_get_object (builder, "enable_input_feedback_sounds_button"); g_signal_connect (G_OBJECT (object), "toggled", G_CALLBACK (cb_enable_event_sounds_check_button_toggled), object2); xfconf_g_property_bind (xsettings_channel, "/Net/EnableEventSounds", G_TYPE_BOOLEAN, G_OBJECT (object), "active"); xfconf_g_property_bind (xsettings_channel, "/Net/EnableInputFeedbackSounds", G_TYPE_BOOLEAN, G_OBJECT (object2), "active"); gtk_widget_set_sensitive (GTK_WIDGET (object2), gtk_toggle_button_get_active (GTK_TOGGLE_BUTTON (object))); #endif } static void appearance_settings_dialog_response (GtkWidget *dialog, gint response_id) { if (response_id == GTK_RESPONSE_HELP) xfce_dialog_show_help (GTK_WINDOW (dialog), "xfce4-settings", "appearance", NULL); else gtk_main_quit (); } gint main (gint argc, gchar **argv) { GObject *dialog, *plug_child; GtkWidget *plug; GtkBuilder *builder; 
GError *error = NULL; /* setup translation domain */ xfce_textdomain (GETTEXT_PACKAGE, LOCALEDIR, "UTF-8"); /* initialize Gtk+ */ if (!gtk_init_with_args (&argc, &argv, "", option_entries, GETTEXT_PACKAGE, &error)) { if (G_LIKELY (error)) { /* print error */ g_print ("%s: %s.\n", G_LOG_DOMAIN, error->message); g_print (_("Type '%s --help' for usage."), G_LOG_DOMAIN); g_print ("\n"); /* cleanup */ g_error_free (error); } else { g_error ("Unable to open display."); } return EXIT_FAILURE; } /* print version information */ if (G_UNLIKELY (opt_version)) { g_print ("%s %s (Xfce %s)\n\n", G_LOG_DOMAIN, PACKAGE_VERSION, xfce_version_string ()); g_print ("%s\n", "Copyright (c) 2008-2011"); g_print ("\t%s\n\n", _("The Xfce development team. All rights reserved.")); g_print (_("Please report bugs to <%s>."), PACKAGE_BUGREPORT); g_print ("\n"); return EXIT_SUCCESS; } /* initialize xfconf */ if (!xfconf_init (&error)) { /* print error and exit */ g_error ("Failed to connect to xfconf daemon: %s.", error->message); g_error_free (error); return EXIT_FAILURE; } /* open the xsettings channel */ xsettings_channel = xfconf_channel_new ("xsettings"); if (G_LIKELY (xsettings_channel)) { /* hook to make sure the libxfce4ui library is linked */ if (xfce_titled_dialog_get_type () == 0) return EXIT_FAILURE; /* load the gtk user interface file*/ builder = gtk_builder_new (); if (gtk_builder_add_from_string (builder, appearance_dialog_ui, appearance_dialog_ui_length, &error) != 0) { /* connect signal to monitor the channel */ g_signal_connect (G_OBJECT (xsettings_channel), "property-changed", G_CALLBACK (appearance_settings_dialog_channel_property_changed), builder); appearance_settings_dialog_configure_widgets (builder); if (G_UNLIKELY (opt_socket_id == 0)) { /* build the dialog */ dialog = gtk_builder_get_object (builder, "dialog"); g_signal_connect (dialog, "response", G_CALLBACK (appearance_settings_dialog_response), NULL); gtk_window_present (GTK_WINDOW (dialog)); /* To prevent the 
settings dialog to be saved in the session */ gdk_set_sm_client_id ("FAKE ID"); gtk_main (); } else { /* Create plug widget */ plug = gtk_plug_new (opt_socket_id); g_signal_connect (plug, "delete-event", G_CALLBACK (gtk_main_quit), NULL); gtk_widget_show (plug); /* Stop startup notification */ gdk_notify_startup_complete (); /* Get plug child widget */ plug_child = gtk_builder_get_object (builder, "plug-child"); gtk_widget_reparent (GTK_WIDGET (plug_child), plug); gtk_widget_show (GTK_WIDGET (plug_child)); /* To prevent the settings dialog to be saved in the session */ gdk_set_sm_client_id ("FAKE ID"); /* Enter main loop */ gtk_main (); } } else { g_error ("Failed to load the UI file: %s.", error->message); g_error_free (error); } /* Release Builder */ g_object_unref (G_OBJECT (builder)); /* release the channel */ g_object_unref (G_OBJECT (xsettings_channel)); } /* shutdown xfconf */ xfconf_shutdown (); return EXIT_SUCCESS; }
gpl-2.0
turbhrus/XCSoar
src/Android/NativeLeScanCallback.cpp
3
2363
/* Copyright_License {

  XCSoar Glide Computer - http://www.xcsoar.org/
  Copyright (C) 2000-2015 The XCSoar Project
  A detailed list of copyright holders can be found in the file "AUTHORS".

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License
  as published by
  the Free Software Foundation; either version 2
  of the License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
}
*/

#include "NativeLeScanCallback.hpp"
#include "LeScanCallback.hpp"
#include "Main.hpp"
#include "Java/Class.hxx"
#include "Java/String.hxx"
#include "org_xcsoar_NativeLeScanCallback.h"

namespace NativeLeScanCallback {
  /* cached JNI handles for org.xcsoar.NativeLeScanCallback,
     filled in by Initialise() */
  static Java::TrivialClass cls;
  static jmethodID ctor;
  static jfieldID ptr_field;
};

/*
 * JNI entry point, called from Java for each Bluetooth LE scan result.
 * Forwards the device address and name to the native LeScanCallback
 * whose pointer is stored in the Java object's "ptr" field.
 */
JNIEXPORT void JNICALL
Java_org_xcsoar_NativeLeScanCallback_onLeScan(JNIEnv *env, jobject obj,
                                              jstring _address, jstring _name)
{
  jlong ptr = env->GetLongField(obj, NativeLeScanCallback::ptr_field);
  if (ptr == 0)
    /* no native callback attached; ignore this scan result */
    return;

  /* copy the Java strings into fixed-size native buffers */
  char address[64], name[256];
  Java::String::CopyTo(env, _address, address, sizeof(address));
  Java::String::CopyTo(env, _name, name, sizeof(name));

  LeScanCallback &cb = *(LeScanCallback *)(void *)ptr;
  cb.OnLeScan(address, name);
}

/*
 * Look up the Java class and cache its constructor and "ptr" field.
 * A no-op when the API level is too old or the class is missing.
 */
void
NativeLeScanCallback::Initialise(JNIEnv *env)
{
  if (android_api_level < 18 ||
      !cls.FindOptional(env, "org/xcsoar/NativeLeScanCallback"))
    /* Bluetooth LE not supported on this Android version */
    return;

  ctor = env->GetMethodID(cls, "<init>", "(J)V");
  ptr_field = env->GetFieldID(cls, "ptr", "J");
}

/* Release the cached class reference (safe if it was never found). */
void
NativeLeScanCallback::Deinitialise(JNIEnv *env)
{
  cls.ClearOptional(env);
}
jobject NativeLeScanCallback::Create(JNIEnv *env, LeScanCallback &cb) { if (!cls.IsDefined()) /* Bluetooth LE not supported on this Android version */ return nullptr; return env->NewObject(cls, ctor, (jlong)&cb); }
gpl-2.0
Jahdere/TBC
src/bindings/ScriptDev2/scripts/eastern_kingdoms/naxxramas/boss_patchwerk.cpp
3
5973
/* Copyright (C) 2006 - 2013 ScriptDev2 <http://www.scriptdev2.com/>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* ScriptData
SDName: Boss_Patchwerk
SD%Complete: 100
SDComment:
SDCategory: Naxxramas
EndScriptData */

#include "precompiled.h"
#include "naxxramas.h"

enum
{
    SAY_AGGRO1            = -1533017,
    SAY_AGGRO2            = -1533018,
    SAY_SLAY              = -1533019,
    SAY_DEATH             = -1533020,
    EMOTE_GENERIC_BERSERK = -1000004,
    EMOTE_GENERIC_ENRAGED = -1000003,

    SPELL_HATEFULSTRIKE   = 28308,
    SPELL_ENRAGE          = 28131,
    SPELL_BERSERK         = 26662,
    SPELL_SLIMEBOLT       = 32309
};

struct MANGOS_DLL_DECL boss_patchwerkAI : public ScriptedAI
{
    boss_patchwerkAI(Creature* pCreature) : ScriptedAI(pCreature)
    {
        m_pInstance = (instance_naxxramas*)pCreature->GetInstanceData();
        Reset();
    }

    instance_naxxramas* m_pInstance;

    uint32 m_uiHatefulStrikeTimer;          // pulses every second in combat
    uint32 m_uiBerserkTimer;                // hard enrage countdown
    uint32 m_uiSlimeboltTimer;              // used only while berserk
    bool m_bEnraged;                        // soft enrage (below 5% HP) done
    bool m_bBerserk;                        // hard enrage done

    void Reset() override
    {
        m_uiHatefulStrikeTimer = 1000;                          // 1 second
        m_uiBerserkTimer       = MINUTE * 6 * IN_MILLISECONDS;  // 6 minutes
        m_uiSlimeboltTimer     = 10000;
        m_bEnraged             = false;
        m_bBerserk             = false;
    }

    void KilledUnit(Unit* /*pVictim*/) override
    {
        // taunt on roughly one in five kills
        if (!urand(0, 4))
            DoScriptText(SAY_SLAY, m_creature);
    }

    void JustDied(Unit* /*pKiller*/) override
    {
        DoScriptText(SAY_DEATH, m_creature);

        if (m_pInstance)
            m_pInstance->SetData(TYPE_PATCHWERK, DONE);
    }

    void Aggro(Unit* /*pWho*/) override
    {
        DoScriptText(urand(0, 1) ? SAY_AGGRO1 : SAY_AGGRO2, m_creature);

        if (m_pInstance)
            m_pInstance->SetData(TYPE_PATCHWERK, IN_PROGRESS);
    }

    void JustReachedHome() override
    {
        if (m_pInstance)
            m_pInstance->SetData(TYPE_PATCHWERK, FAIL);
    }

    void DoHatefulStrike()
    {
        // The ability is used on highest HP target choosen of the top 2
        // (3 heroic) targets on threat list being in melee range
        Unit* pStrikeTarget = NULL;
        uint32 uiMaxHealth = 0;
        uint32 uiCandidatesLeft = 2;

        ThreatList const& threatList = m_creature->getThreatManager().getThreatList();
        if (threatList.size() > 1)
        {
            // start with the second-most aggro entry; the main tank is
            // the fallback below anyway
            ThreatList::const_iterator itr = threatList.begin();
            std::advance(itr, 1);

            for (; itr != threatList.end() && uiCandidatesLeft; ++itr)
            {
                Unit* pCandidate = m_creature->GetMap()->GetUnit((*itr)->getUnitGuid());
                if (!pCandidate || !m_creature->CanReachWithMeleeAttack(pCandidate))
                    continue;

                if (pCandidate->GetHealth() > uiMaxHealth)
                {
                    uiMaxHealth = pCandidate->GetHealth();
                    pStrikeTarget = pCandidate;
                }
                --uiCandidatesLeft;
            }
        }

        // nobody eligible besides the current victim
        if (!pStrikeTarget)
            pStrikeTarget = m_creature->getVictim();

        DoCastSpellIfCan(pStrikeTarget, SPELL_HATEFULSTRIKE);
    }

    void UpdateAI(const uint32 uiDiff) override
    {
        if (!m_creature->SelectHostileTarget() || !m_creature->getVictim())
            return;

        // Hateful Strike, once per second
        if (m_uiHatefulStrikeTimer < uiDiff)
        {
            DoHatefulStrike();
            m_uiHatefulStrikeTimer = 1000;
        }
        else
            m_uiHatefulStrikeTimer -= uiDiff;

        // Soft Enrage at 5% (one-shot)
        if (!m_bEnraged && m_creature->GetHealthPercent() < 5.0f)
        {
            if (DoCastSpellIfCan(m_creature, SPELL_ENRAGE) == CAST_OK)
            {
                DoScriptText(EMOTE_GENERIC_ENRAGED, m_creature);
                m_bEnraged = true;
            }
        }

        if (!m_bBerserk)
        {
            // Berserk after 6 minutes
            if (m_uiBerserkTimer < uiDiff)
            {
                if (DoCastSpellIfCan(m_creature, SPELL_BERSERK) == CAST_OK)
                {
                    DoScriptText(EMOTE_GENERIC_BERSERK, m_creature);
                    m_bBerserk = true;
                }
            }
            else
                m_uiBerserkTimer -= uiDiff;
        }
        else if (m_uiSlimeboltTimer < uiDiff)
        {
            // Slimebolt - casted only while Berserking to prevent kiting
            DoCastSpellIfCan(m_creature->getVictim(), SPELL_SLIMEBOLT);
            m_uiSlimeboltTimer = 5000;
        }
        else
            m_uiSlimeboltTimer -= uiDiff;

        DoMeleeAttackIfReady();
    }
};

CreatureAI* GetAI_boss_patchwerk(Creature* pCreature)
{
    return new boss_patchwerkAI(pCreature);
}

void AddSC_boss_patchwerk()
{
    Script* pNewScript = new Script;
    pNewScript->Name = "boss_patchwerk";
    pNewScript->GetAI = &GetAI_boss_patchwerk;
    pNewScript->RegisterSelf();
}
gpl-2.0
slade87/ALE21-Kernel
drivers/huawei_platform/connectivity/hisi/hw-pm/wireless_patch.c
3
73741
#ifdef __cplusplus #if __cplusplus extern "C" { #endif #endif /* * 1 Header File Including */ #include <linux/init.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/printk.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/fcntl.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/printk.h> #include <linux/miscdevice.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/moduleparam.h> #include <linux/time.h> #include <linux/delay.h> #include <linux/rtc.h> #include <linux/syscalls.h> #include <linux/dma-mapping.h> #include "wireless_patch.h" #include "wireless_board.h" /* * 2 Global Variable Definition */ PATCH_GLOBALS_STUR g_st_global[ENUM_INFO_TOTAL]; RINGBUF_STRU g_stringbuf; uint8 *g_pucDataBuf; /* xmodem Ë÷Òý */ uint8 g_index = 1; /* log ´òÓ¡¼¶±ð */ int32 g_debuglevel_patch = 2; int32 g_usemalloc = 0; unsigned short CRC_table[256] = { /* CRC Óàʽ±í */ 0X0000, 0X1021, 0X2042, 0X3063, 0X4084, 0X50A5, 0X60C6, 0X70E7, 0X8108, 0X9129, 0XA14A, 0XB16B, 0XC18C, 0XD1AD, 0XE1CE, 0XF1EF, 0X1231, 0X0210, 0X3273, 0X2252, 0X52B5, 0X4294, 0X72F7, 0X62D6, 0X9339, 0X8318, 0XB37B, 0XA35A, 0XD3BD, 0XC39C, 0XF3FF, 0XE3DE, 0X2462, 0X3443, 0X0420, 0X1401, 0X64E6, 0X74C7, 0X44A4, 0X5485, 0XA56A, 0XB54B, 0X8528, 0X9509, 0XE5EE, 0XF5CF, 0XC5AC, 0XD58D, 0X3653, 0X2672, 0X1611, 0X0630, 0X76D7, 0X66F6, 0X5695, 0X46B4, 0XB75B, 0XA77A, 0X9719, 0X8738, 0XF7DF, 0XE7FE, 0XD79D, 0XC7BC, 0X48C4, 0X58E5, 0X6886, 0X78A7, 0X0840, 0X1861, 0X2802, 0X3823, 0XC9CC, 0XD9ED, 0XE98E, 0XF9AF, 0X8948, 0X9969, 0XA90A, 0XB92B, 0X5AF5, 0X4AD4, 0X7AB7, 0X6A96, 0X1A71, 0X0A50, 0X3A33, 0X2A12, 0XDBFD, 0XCBDC, 0XFBBF, 0XEB9E, 0X9B79, 0X8B58, 0XBB3B, 0XAB1A, 0X6CA6, 0X7C87, 0X4CE4, 0X5CC5, 0X2C22, 0X3C03, 0X0C60, 0X1C41, 0XEDAE, 0XFD8F, 0XCDEC, 0XDDCD, 0XAD2A, 0XBD0B, 0X8D68, 0X9D49, 0X7E97, 0X6EB6, 0X5ED5, 0X4EF4, 0X3E13, 0X2E32, 0X1E51, 0X0E70, 0XFF9F, 0XEFBE, 0XDFDD, 0XCFFC, 0XBF1B, 0XAF3A, 0X9F59, 0X8F78, 0X9188, 0X81A9, 0XB1CA, 0XA1EB, 0XD10C, 
0XC12D, 0XF14E, 0XE16F, 0X1080, 0X00A1, 0X30C2, 0X20E3, 0X5004, 0X4025, 0X7046, 0X6067, 0X83B9, 0X9398, 0XA3FB, 0XB3DA, 0XC33D, 0XD31C, 0XE37F, 0XF35E, 0X02B1, 0X1290, 0X22F3, 0X32D2, 0X4235, 0X5214, 0X6277, 0X7256, 0XB5EA, 0XA5CB, 0X95A8, 0X8589, 0XF56E, 0XE54F, 0XD52C, 0XC50D, 0X34E2, 0X24C3, 0X14A0, 0X0481, 0X7466, 0X6447, 0X5424, 0X4405, 0XA7DB, 0XB7FA, 0X8799, 0X97B8, 0XE75F, 0XF77E, 0XC71D, 0XD73C, 0X26D3, 0X36F2, 0X0691, 0X16B0, 0X6657, 0X7676, 0X4615, 0X5634, 0XD94C, 0XC96D, 0XF90E, 0XE92F, 0X99C8, 0X89E9, 0XB98A, 0XA9AB, 0X5844, 0X4865, 0X7806, 0X6827, 0X18C0, 0X08E1, 0X3882, 0X28A3, 0XCB7D, 0XDB5C, 0XEB3F, 0XFB1E, 0X8BF9, 0X9BD8, 0XABBB, 0XBB9A, 0X4A75, 0X5A54, 0X6A37, 0X7A16, 0X0AF1, 0X1AD0, 0X2AB3, 0X3A92, 0XFD2E, 0XED0F, 0XDD6C, 0XCD4D, 0XBDAA, 0XAD8B, 0X9DE8, 0X8DC9, 0X7C26, 0X6C07, 0X5C64, 0X4C45, 0X3CA2, 0X2C83, 0X1CE0, 0X0CC1, 0XEF1F, 0XFF3E, 0XCF5D, 0XDF7C, 0XAF9B, 0XBFBA, 0X8FD9, 0X9FF8, 0X6E17, 0X7E36, 0X4E55, 0X5E74, 0X2E93, 0X3EB2, 0X0ED1, 0X1EF0 }; extern uint8 powerpin_state; module_param(g_debuglevel_patch, int, 0); /* * 3 Function Definition */ extern int32 pm_sdio_send( uint8 *buf, int32 len); extern int32 pm_uart_send(uint8 *date, int32 len); extern int32 pm_uart_set_baudrate(int32 baudrate); extern int32 wlan_patch_recv(uint8 * data, int32 count); /***************************************************************************** Prototype : patch_Crc16 Description : CRCУÑé Input : int8 *ptr uint16 l_count Output : Return Value : uint16 Calls : Called By : History : 1.Date : 2012/11/9 Author : kf74033 Modification : Created function *****************************************************************************/ unsigned short do_crc_table_1(uint8 *data, uint16 length) { uint16 crc = 0; uint8 dataTmp; while(length > 0) { dataTmp = (uint8)(crc >> 8); crc = crc <<8; crc = crc^CRC_table[dataTmp^(*data)]; length--; data++; } return crc; } uint16 patch_Crc16 (int8 *ptr, uint16 l_count) { uint16 us_crc; int32 i; us_crc = 0; #if 1 while(l_count--) { 
us_crc = us_crc ^ (int) *ptr++ << 8; for (i = 0; i < 8; i++) { if(us_crc & 0x8000) { us_crc = us_crc << 1 ^ 0x1021; } else { us_crc = us_crc << 1; } } } #endif return (us_crc & 0xFFFF); } /***************************************************************************** Prototype : patch_send Description : send message to device,by sdio or uart Input : uint8 *data int32 len uint8 expect int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/12 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_send(uint8 *data, int32 len, uint8 expect, int32 type) { int32 i; int32 l_ret; PS_PRINT_DBG("len = %d, data = %x %x %x %x %x %x %x %x \n", len, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]); for(i = 0; i < HOST_DEV_TIMEOUT; i++) { l_ret = send_msg(data, len, type); if (0 > l_ret) { continue; //return -EFAIL; } l_ret = recv_expect_result(expect, type); if (0 == l_ret) { return SUCC; } } return -EFAIL; } /***************************************************************************** Prototype : patch_xmodem_send Description : xmodem protocol encapsulation for down file Input : uint8 *data int32 len uint8 expect Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/12 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_xmodem_send(uint8 *data, int32 len, uint8 expect) { XMODEM_HEAD_PKT_STRU st_patch_pkt; uint16 us_CRCValue; int32 l_ret; int32 l_sendlen; int32 l_datalen; int32 i; uint8 *flag; uint8 CRCValue_H; uint8 CRCValue_L; PS_PRINT_DBG("enter"); if (NULL == data) { return -EFAIL; } l_datalen = len; flag = data; // pst_patch_pkt = OS_KMALLOC_GFP(XMODEM_PAK_LEN); while(l_datalen > 0) { l_sendlen = MIN(XMODE_DATA_LEN, l_datalen); l_datalen = l_datalen - l_sendlen ; // OS_MEM_SET((void *)pst_patch_pkt, 0, 
sizeof(XMODEM_CRC_PKT_STRU)); st_patch_pkt.Head = SOH; st_patch_pkt.PacketNum = g_index; /* ¹¹°ü */ // OS_MEM_CPY(pst_patch_pkt->packet_data, flag, l_sendlen); // flag = flag + l_sendlen; /* Êý¾Ý³¤¶È²»¹»128¸ö */ if (XMODE_DATA_LEN > l_sendlen) { PS_PRINT_DBG("data_len %d\n", l_sendlen); OS_MEM_SET(&flag[l_sendlen], 0x00, (XMODE_DATA_LEN - l_sendlen)); } // us_CRCValue = patch_Crc16((int8 *)flag, XMODE_DATA_LEN); us_CRCValue = do_crc_table_1(flag, XMODE_DATA_LEN); CRCValue_H = (us_CRCValue & 0xFF00)>>8; CRCValue_L = us_CRCValue & 0xFF; st_patch_pkt.PacketAnt = ~(st_patch_pkt.PacketNum); for (i = 0; i < HOST_DEV_TIMEOUT; i++) { l_ret = pm_uart_send((uint8 *)&st_patch_pkt, 3); l_ret = pm_uart_send(flag, XMODE_DATA_LEN); l_ret = pm_uart_send(&CRCValue_H, 1); l_ret = pm_uart_send(&CRCValue_L, 1); l_ret = recv_expect_result(expect, ENUM_INFO_UART); if (0 > l_ret) { continue; } #if 0 if (0 <= l_ret) { break; } #endif /* Ìí¼Ó³¬Ê±Í˳ö»úÖÆºóÐ޸ģ¬byl00225847 */ if (SUCC == l_ret) { break; } } if (i >= HOST_DEV_TIMEOUT) { // OS_MEM_KFREE(pst_patch_pkt); return -EFAIL; } flag = flag + l_sendlen; g_index++; } // OS_MEM_KFREE(pst_patch_pkt); return SUCC; } /***************************************************************************** Prototype : patch_send_file Description : send file to device,by sdio or uart Input : uint8 *data int32 len uint8 expect int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/12 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_send_file(uint8 *data, int32 len, uint8 expect, int32 type) { int32 l_ret; PS_PRINT_DBG("len = %d, data = %x %x %x %x %x %x %x %x \n", len, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]); switch (type) { case ENUM_INFO_SDIO: { l_ret = patch_send(data, len, expect, type); } break; case ENUM_INFO_UART:/* uart READ */ { l_ret = patch_xmodem_send(data, len, expect); } break; default: 
l_ret = -1; PS_PRINT_ERR("info type error[%d]!!!\n", type); break; } return l_ret; } /***************************************************************************** Prototype : patch_send_data Description : send data to device,by sdio or uart Input : uint8 *data int32 len uint8 expect int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/12 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_send_data(uint8 *data, int32 len, uint8 expect, int32 type) { return patch_send(data, len, expect, type); } static void ringbuf_flush(void) { /* writing maybe still work when interrupt by flush */ g_stringbuf.ptail = g_stringbuf.phead; } /***************************************************************************** Prototype : uart_recv_data Description : receiver data form device,by uart interface Input : uint8 *data int32 len Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/12 Author : kf74033 Modification : Created function *****************************************************************************/ int32 uart_recv_data(const uint8 *data, int32 len) { uint32 ulbuflen = 0; uint32 ulheadtoendlen = 0; uint8 *ptail; if (unlikely((NULL == data))) { PS_PRINT_ERR("data is NULL\n "); return -EFAIL; } if ((NULL == g_stringbuf.pbufstart) || (g_stringbuf.pbufstart > g_stringbuf.pbufend)) { PS_PRINT_ERR("buf is NULL,write failed\n "); PS_PRINT_ERR("pbufstart=%p,pbufend=%p\n", g_stringbuf.pbufstart, g_stringbuf.pbufend); return -EFAIL; } ptail = g_stringbuf.ptail; if ((g_stringbuf.phead < g_stringbuf.pbufstart) || (g_stringbuf.phead > g_stringbuf.pbufend) || (ptail < g_stringbuf.pbufstart) || (ptail > g_stringbuf.pbufend)) { PS_PRINT_ERR("phead or ptail is out of range,write failed\n"); PS_PRINT_ERR("pbufstart=%p,pbufend=%p\n", g_stringbuf.pbufstart, g_stringbuf.pbufend); PS_PRINT_ERR("phead=%p,ptail=%p\n", g_stringbuf.phead, 
g_stringbuf.ptail); return -EFAIL; } ulbuflen = g_stringbuf.pbufend - g_stringbuf.pbufstart; PS_PRINT_DBG("len [%d],\n ", len); /* phead and ptail are in the same cycle */ if (g_stringbuf.phead >= ptail) { /* still in the same cycle */ if ((g_stringbuf.pbufend - g_stringbuf.phead) >= len) { OS_MEM_CPY(g_stringbuf.phead, data, len); g_stringbuf.phead += len; if (OS_WAITQUEUE_ACTIVE(g_st_global[ENUM_INFO_UART].pst_wait)) { OS_WAKE_UP_INTERRUPTIBLE(g_st_global[ENUM_INFO_UART].pst_wait); PS_PRINT_DBG("wake up ok"); } } else if ((ulbuflen - (g_stringbuf.phead - ptail)) > len) { ulheadtoendlen = g_stringbuf.pbufend - g_stringbuf.phead; OS_MEM_CPY(g_stringbuf.phead, data, ulheadtoendlen); OS_MEM_CPY(g_stringbuf.pbufstart, data + ulheadtoendlen, len - ulheadtoendlen); g_stringbuf.phead = g_stringbuf.pbufstart + (len - ulheadtoendlen); PS_PRINT_INFO("phead back\n"); if (OS_WAITQUEUE_ACTIVE(g_st_global[ENUM_INFO_UART].pst_wait)) { OS_WAKE_UP_INTERRUPTIBLE(g_st_global[ENUM_INFO_UART].pst_wait); PS_PRINT_DBG("wake up ok"); } } else { PS_PRINT_ERR("Not enough mem,len=%d.\n ", len); } } /* phead is in the next cycle */ /* "ptail - phead = 1" means the buffer is full */ else if ((ptail - g_stringbuf.phead - 1) > len) { OS_MEM_CPY(g_stringbuf.phead, data, len); g_stringbuf.phead += len; if (OS_WAITQUEUE_ACTIVE(g_st_global[ENUM_INFO_UART].pst_wait)) { OS_WAKE_UP_INTERRUPTIBLE(g_st_global[ENUM_INFO_UART].pst_wait); PS_PRINT_DBG("wake up ok"); } } else { PS_PRINT_ERR("Not enough mem,len=%d.\n ", len); } if (g_stringbuf.phead >= g_stringbuf.pbufend) { g_stringbuf.phead = g_stringbuf.pbufstart; PS_PRINT_INFO("phead back\n"); } return SUCC; } /***************************************************************************** Prototype : sdio_recv_data Description : receiver data form device,by sdio interface Input : uint8 *data int32 len Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/12 Author : kf74033 Modification : Created function 
*****************************************************************************/ int32 sdio_recv_data(uint8 *data, int32 len) { /* * patch¼ÓÔØÊ±£¬²ÉÓõÄÊÇÒ»ÎÊÒ»´ðʽ£¬µ«Êǵ±device¶Ôijһ¸ö²Ù×÷Íê³Éʱ£¬ * »á»Ø¸´HOST²àÒ»¸öGÖ¸Áî, Õâ¸öʱºò¿ÉÄܻḲ¸ÇÇ°ÃæµÄCÖ¸ÁËùÒÔÕâÀïÓÐÒ»¸ö±¸ÓÃbuf */ if(0 == g_st_global[ENUM_INFO_SDIO].l_Recvbuf1_len) { g_st_global[ENUM_INFO_SDIO].l_Recvbuf1_len = MIN(RECV_BUF_LEN, len); OS_MEM_CPY(g_st_global[ENUM_INFO_SDIO].auc_Recvbuf1, data, g_st_global[ENUM_INFO_SDIO].l_Recvbuf1_len); PS_PRINT_DBG("save auc_Recvbuf1"); if (OS_WAITQUEUE_ACTIVE(g_st_global[ENUM_INFO_SDIO].pst_wait)) { OS_WAKE_UP_INTERRUPTIBLE(g_st_global[ENUM_INFO_SDIO].pst_wait); PS_PRINT_DBG("wake up ok"); } } else { /* auc_Recvbuf2ÊDZ¸ÓÃbuf£¬µ± auc_Recvbuf1 Êý¾ÝûÓб»¶ÁÈ¡µÄʱºòʹÓÃ*/ g_st_global[ENUM_INFO_SDIO].l_Recvbuf2_len = MIN(RECV_BUF_LEN, len); OS_MEM_CPY(g_st_global[ENUM_INFO_SDIO].auc_Recvbuf2, data, g_st_global[ENUM_INFO_SDIO].l_Recvbuf2_len); PS_PRINT_DBG("save auc_Recvbuf2"); } return SUCC; } /***************************************************************************** Prototype : read_msg Description : read msg Input : uint8 *data int32 len int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/12 Author : kf74033 Modification : Created function *****************************************************************************/ int32 read_msg(uint8 *data, int32 len, int32 type) { int32 l_len; uint32 ultailtoendlen; uint8 *phead; PS_PRINT_DBG(" entry\n"); if (unlikely((NULL == data))) { PS_PRINT_ERR("data is NULL\n "); return -EFAIL; } if (ENUM_INFO_UART == type) { if ((NULL == g_stringbuf.pbufstart) || (g_stringbuf.pbufstart > g_stringbuf.pbufend)) { PS_PRINT_ERR("buf is NULL,read failed\n "); PS_PRINT_ERR("pbufstart=%p,pbufend=%p\n", g_stringbuf.pbufstart, g_stringbuf.pbufend); return -EFAIL; } if ((g_stringbuf.phead < g_stringbuf.pbufstart) || (g_stringbuf.phead > g_stringbuf.pbufend) || (g_stringbuf.ptail < g_stringbuf.pbufstart) || (g_stringbuf.ptail > 
g_stringbuf.pbufend)) { PS_PRINT_ERR("phead or ptail is out of range, read failed\n"); PS_PRINT_ERR("pbufstart=%p,pbufend=%p\n", g_stringbuf.pbufstart, g_stringbuf.pbufend); PS_PRINT_ERR("phead=%p,ptail=%p\n", g_stringbuf.phead, g_stringbuf.ptail); return -EFAIL; } } switch (type) { case ENUM_INFO_SDIO: #if 0 { if ((g_st_global[ENUM_INFO_SDIO].l_Recvbuf2_len != 0) && ( g_st_global[ENUM_INFO_SDIO].l_Recvbuf1_len == 0)) { l_len = MIN(len, g_st_global[ENUM_INFO_SDIO].l_Recvbuf2_len); OS_MEM_CPY(data, g_st_global[ENUM_INFO_SDIO].auc_Recvbuf2, l_len); g_st_global[ENUM_INFO_SDIO].l_Recvbuf2_len = 0; } else { OS_WAIT_EVENT_INTERRUPTIBLE(*g_st_global[ENUM_INFO_SDIO].pst_wait, (0 < g_st_global[ENUM_INFO_SDIO].l_Recvbuf1_len)); l_len = MIN(len, g_st_global[ENUM_INFO_SDIO].l_Recvbuf1_len); OS_MEM_CPY(data, g_st_global[ENUM_INFO_SDIO].auc_Recvbuf1, l_len); g_st_global[ENUM_INFO_SDIO].l_Recvbuf1_len = 0; } } #endif l_len = wlan_patch_recv(data, len); PS_PRINT_DBG("Receive l_len=%d\n",l_len); break; case ENUM_INFO_UART: { OS_WAIT_EVENT_INTERRUPTIBLE_TIMEOUT(*g_st_global[ENUM_INFO_UART].pst_wait, (g_stringbuf.phead != g_stringbuf.ptail), PATCH_INTEROP_TIMEOUT); phead = g_stringbuf.phead; /* phead and ptail are in the same cycle */ if (phead > g_stringbuf.ptail) { if ((phead - g_stringbuf.ptail) > len) { OS_MEM_CPY(data, g_stringbuf.ptail, len); l_len = len; g_stringbuf.ptail += len; } /* not enough data */ else { l_len = phead - g_stringbuf.ptail; OS_MEM_CPY(data, g_stringbuf.ptail, l_len); g_stringbuf.ptail += l_len; } } /* phead is in the next cycle */ else if (phead < g_stringbuf.ptail) { ultailtoendlen = g_stringbuf.pbufend - g_stringbuf.ptail; if (ultailtoendlen > len) { OS_MEM_CPY(data, g_stringbuf.ptail, len); l_len = len; g_stringbuf.ptail += len; } else { OS_MEM_CPY(data, g_stringbuf.ptail, ultailtoendlen); if ((phead - g_stringbuf.pbufstart) > (len - ultailtoendlen)) { OS_MEM_CPY(data + ultailtoendlen, g_stringbuf.pbufstart, len - ultailtoendlen); g_stringbuf.ptail = 
g_stringbuf.pbufstart + (len - ultailtoendlen); l_len = len; PS_PRINT_INFO("ptail back\n"); } else { OS_MEM_CPY(data + ultailtoendlen, g_stringbuf.pbufstart, phead - g_stringbuf.pbufstart); l_len = ultailtoendlen + (phead - g_stringbuf.pbufstart); g_stringbuf.ptail = phead; } } } else { l_len = -1; PS_PRINT_WARNING("No data.\n"); } if (g_stringbuf.ptail >= g_stringbuf.pbufend) { g_stringbuf.ptail = g_stringbuf.pbufstart; } } break; default: l_len = -1; PS_PRINT_ERR("info type error[%d]!!!\n", type); break; } return l_len; } /***************************************************************************** Prototype : send_msg Description : send message to device,by sdio or uart Input : uint8 *data int32 len int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/12 Author : kf74033 Modification : Created function *****************************************************************************/ int32 send_msg(uint8 *data, int32 len, int32 type) { int32 l_ret; PS_PRINT_DBG("len = %d, data = %x %x %x %x %x %x %x %x \n", len, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]); switch (type) { case ENUM_INFO_SDIO: l_ret = pm_sdio_send(data, len); break; case ENUM_INFO_UART: l_ret = pm_uart_send(data, len); break; default: l_ret = -EFAIL; PS_PRINT_ERR("info type error[%d]!!!\n", type); break; } return l_ret; } /***************************************************************************** Prototype : recv_expect_result Description : receive result form device Input : uint8 expect int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ int32 recv_expect_result(uint8 expect, int32 type) { uint8 auc_buf[RECV_BUF_LEN]; int32 l_len; int32 i; PS_PRINT_DBG(" entry\n"); OS_MEM_SET(auc_buf, 0, RECV_BUF_LEN); for (i = 0; i < HOST_DEV_TIMEOUT; i++) { l_len = read_msg(auc_buf, 1, 
type); if (0 > l_len) { PS_PRINT_ERR("recv result fail\n"); return -EFAIL; } if (auc_buf[0] == expect) { PS_PRINT_DBG(" send SUCC [%x]\n", expect); return SUCC; } /* * NAK: Îļþ´«ÊäÊ±ÖØ·¢±êʶ * MSG_FORM_DRV_N:ÆäËûÖØ·¢±êʶ */ else if ((MSG_FORM_DRV_N == auc_buf[0]) || (NAK == auc_buf[0]) || (MSG_FORM_DRV_C == auc_buf[0])) { PS_PRINT_ERR(" send again [0x%x]\n", auc_buf[0]); return -EFAIL; } else { /* ¶ÔÓÚ´íÎóµÄ½á¹û£¬ÓÐÊ®´ÎµÄ»ú»á£¬ */ if (ENUM_INFO_SDIO == type) { PATCH_SEND_N_SDIO; } else { PATCH_SEND_N_UART; } PS_PRINT_WARNING(" error result[0x%x], expect [0x%x], read result again\n", auc_buf[0], expect); } } return -EFAIL; } /***************************************************************************** Prototype : patch_string_to_num Description : string to number Input : int8 *string Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/14 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_string_to_num(uint8 *string) { int32 i; int32 l_num; PS_PRINT_DBG(" entry\n"); if (NULL == string) { return -EFAIL; } l_num = 0; for (i = 0; (string[i] >= '0') && (string[i] <= '9'); i++) { l_num = (l_num * 10) + (string[i] - '0'); } return l_num; } /***************************************************************************** Prototype : patch_wait_g_form_dev Description : wait go'command form device Input : int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/14 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_wait_g_form_dev(int32 type) { int32 l_ret; int32 i; PS_PRINT_DBG(" entry\n"); for (i = 0; i < HOST_DEV_TIMEOUT; i++) { l_ret = recv_expect_result(MSG_FORM_DRV_G, type); if (0 == l_ret) { PS_PRINT_DBG(" device finish G\n" ); return SUCC; } } return -EFAIL; } /***************************************************************************** 
Prototype : patch_wait_g_form_dev Description : wait go'command form device Input : int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/14 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_wait_g_retry_form_dev(int32 type) { int32 l_ret; int32 i; PS_PRINT_DBG(" entry\n"); for (i = 0; i < HOST_DEV_TIMEOUT; i++) { l_ret = recv_expect_result(MSG_FORM_DRV_G, type); if (0 == l_ret) { PS_PRINT_DBG(" device finish G\n" ); return SUCC; } else { if (ENUM_INFO_UART == type) { PATCH_SEND_N_UART; } PS_PRINT_WARNING("receive G failed\n" ); } } return -EFAIL; } /***************************************************************************** Prototype : patch_send_char Description : send char to device Input : int8 num int32 wait int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/14 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_send_char(int8 num, int32 wait, int32 type) { int32 l_ret; uint8 auc_buf[8]; int32 i; PS_PRINT_DBG("enter"); OS_MEM_SET(auc_buf, num, 8); PS_PRINT_DBG("send [0x%x], wait[%d]\n", num, wait); for(i = 0; i < HOST_DEV_TIMEOUT; i++) { /* * sdio ½Ó¿Ú·¢ËÍʱ£¬»áËÄ×Ö½Ú¶ÔÆë£¬·¢ËÍËĸö * uart ½Ó¿Ú·¢ËÍʱ£¬Ö»·¢ËÍÒ»¸ö */ l_ret = send_msg(auc_buf, 1, type); if (0 > l_ret) { PS_PRINT_ERR("Send fail\n"); return l_ret; } if (WAIT_RESPONSE == wait) { l_ret = recv_expect_result(ACK, type); if (0 > l_ret) { continue; } } return l_ret; } return -EFAIL;; } /***************************************************************************** Prototype : patch_read_patch Description : read patch Input : int32 len OS_KERNEL_FILE_STRU *fp Output : int8 *buf Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/14 Author : kf74033 Modification : Created function 
*****************************************************************************/ int32 patch_read_patch(int8 *buf, int32 len, OS_KERNEL_FILE_STRU *fp) { int32 rdlen; if ((IS_ERR(fp)) || (NULL == buf)) { fp = NULL; PS_PRINT_ERR("buf/fp is NULL\n"); return -EFAIL; } rdlen = kernel_read(fp, fp->f_pos, buf, len); if (rdlen > 0) { fp->f_pos += rdlen; } return rdlen; } /***************************************************************************** Prototype : patch_down_file Description : begin download patch file Input : uint8 *puc_file int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_down_file(uint8 *puc_file, int32 type) { OS_KERNEL_FILE_STRU *fp; uint8 *auc_buf; int32 l_len; int32 l_ret; int32 l_count; PS_PRINT_DBG("enter"); if (NULL == puc_file) { return -EFAIL; } fp = filp_open(puc_file, O_RDONLY, 0); if (IS_ERR(fp)) { fp = NULL; PS_PRINT_ERR("filp_open %s fail!!\n", puc_file); return -EFAIL; } // auc_buf = OS_KMALLOC_GFP(READ_PATCH_BUF_LEN); // auc_buf = g_pucDataBuf; if (NULL == g_pucDataBuf) { filp_close(fp, NULL); fp = NULL; return -EFAIL; } l_count = 1; g_index = 1; while(1) { // OS_MEM_SET(g_pucDataBuf, 0, READ_PATCH_BUF_LEN); #if 1 l_len = patch_read_patch(g_pucDataBuf, READ_PATCH_BUF_LEN, fp); #else l_len = 128; #endif PS_PRINT_DBG("kernel_read len[%d] [%d]\n", l_len, l_count); /* Õý³£¶ÁÈ¡Îļþ */ if ((0 < l_len) && (l_len <= READ_PATCH_BUF_LEN)) { l_ret = patch_send_file(g_pucDataBuf, l_len, ACK, type); if(-EFAIL == l_ret) { PS_PRINT_ERR(" uart send data[%d] fail\n", l_count); break; } } /* ÎļþÒѾ­¶ÁÈ¡Íê³É*/ else if(0 == l_len) { if (ENUM_INFO_SDIO == type) { PATCH_SEND_EOT_SDIO; } else { PATCH_SEND_EOT_UART; g_index = 1; } l_ret = SUCC; PS_PRINT_DBG("read file[%d] [%d] send EOT\n", l_count, l_len); break; } /* ¶ÁÈ¡Îļþ³ö´í */ else { if (ENUM_INFO_SDIO == type) { PATCH_SEND_CAN_SDIO; } 
else { PATCH_SEND_CAN_UART; g_index = 1; } l_ret = -EFAIL; PS_PRINT_ERR("read file[%d] [%d]\n", l_count, l_len); break; } l_count++; } // OS_MEM_KFREE(auc_buf); auc_buf = NULL; filp_close(fp, NULL); fp = NULL; PS_PRINT_DBG("%s send finish\n", puc_file); return l_ret; } /***************************************************************************** Prototype : recv_mem Description : recv mem Input : uint8 *data int32 len int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/12 Author : kf74033 Modification : Created function *****************************************************************************/ int32 recv_mem(uint8 *data, int32 len, int32 type) { int32 l_len; int32 l_ret; PS_PRINT_DBG(" entry\n"); switch (type) { case ENUM_INFO_SDIO: #if 0 { if ((g_st_global[ENUM_INFO_SDIO].l_Recvbuf2_len != 0) && ( g_st_global[ENUM_INFO_SDIO].l_Recvbuf1_len == 0)) { l_len = MIN(len, g_st_global[ENUM_INFO_SDIO].l_Recvbuf2_len); OS_MEM_CPY(data, g_st_global[ENUM_INFO_SDIO].auc_Recvbuf2, l_len); g_st_global[ENUM_INFO_SDIO].l_Recvbuf2_len = 0; } else { OS_WAIT_EVENT_INTERRUPTIBLE(*g_st_global[ENUM_INFO_SDIO].pst_wait, (0 < g_st_global[ENUM_INFO_SDIO].l_Recvbuf1_len)); l_len = MIN(len, g_st_global[ENUM_INFO_SDIO].l_Recvbuf1_len); OS_MEM_CPY(data, g_st_global[ENUM_INFO_SDIO].auc_Recvbuf1, l_len); g_st_global[ENUM_INFO_SDIO].l_Recvbuf1_len = 0; } } #endif l_len = 0; #if 0 while(len > 32768) { l_ret = wlan_patch_recv(data + l_len, 32768); if (l_ret > 0) { len -= l_ret; l_len += l_ret; PS_PRINT_SUC("l_len=%d, len = %d\n", l_len, len); } } #endif l_ret = wlan_patch_recv(data, len); if (l_ret > 0) { len -= l_ret; l_len += l_ret; PS_PRINT_SUC("l_len=%d, len = %d\n", l_len, len); } PS_PRINT_DBG("Receive l_len=%d\n",l_len); break; case ENUM_INFO_UART: { if (( g_st_global[ENUM_INFO_UART].l_Recvbuf2_len != 0) && ( g_st_global[ENUM_INFO_UART].l_Recvbuf1_len == 0)) { l_len = MIN(len, g_st_global[ENUM_INFO_UART].l_Recvbuf2_len); OS_MEM_CPY(data, 
g_st_global[ENUM_INFO_UART].auc_Recvbuf2, l_len); #if 0 g_st_global[ENUM_INFO_UART].l_Recvbuf2_len = 0; #else g_st_global[ENUM_INFO_UART].l_Recvbuf2_len -= l_len; OS_MEM_CPY(g_st_global[ENUM_INFO_UART].auc_Recvbuf2, &g_st_global[ENUM_INFO_UART].auc_Recvbuf2[l_len], g_st_global[ENUM_INFO_UART].l_Recvbuf2_len); #endif } else { #if 1 OS_WAIT_EVENT_INTERRUPTIBLE_TIMEOUT(*g_st_global[ENUM_INFO_UART].pst_wait, (0 < g_st_global[ENUM_INFO_UART].l_Recvbuf1_len), PATCH_INTEROP_TIMEOUT); #else OS_WAIT_EVENT_INTERRUPTIBLE(*g_st_global[ENUM_INFO_UART].pst_wait, (0 < g_st_global[ENUM_INFO_UART].l_Recvbuf1_len)); #endif l_len = MIN(len, g_st_global[ENUM_INFO_UART].l_Recvbuf1_len); if (0 == l_len) { l_len = -1; PS_PRINT_ERR("Receive timeout\n"); break; } PS_PRINT_DBG("Receive l_len=%d\n",l_len); OS_MEM_CPY(data, g_st_global[ENUM_INFO_UART].auc_Recvbuf1, l_len); PS_PRINT_DBG("Receive=%x\n", g_st_global[ENUM_INFO_UART].auc_Recvbuf1[0]); #if 0 g_st_global[ENUM_INFO_UART].l_Recvbuf1_len = 0; #else /* UARTÓÐʱºò»á½«Á½°üÊý¾Ýͬʱ·¢Ë͹ýÀ´£¬¸ù¾ÝÏàÓ¦ÐèÇó¶ÁÈ¡¶ÔÓ¦³¤¶È */ g_st_global[ENUM_INFO_UART].l_Recvbuf1_len -= l_len; OS_MEM_CPY(g_st_global[ENUM_INFO_UART].auc_Recvbuf1, &g_st_global[ENUM_INFO_UART].auc_Recvbuf1[l_len], g_st_global[ENUM_INFO_UART].l_Recvbuf1_len); #endif } } break; default: l_len = -1; PS_PRINT_ERR("info type error[%d]!!!\n", type); break; } return l_len; } /***************************************************************************** Prototype : patch_readm_fileopen Description : creat and open file to save mem Input : int32 type Output : Return Value : OS_KERNEL_FILE_STRU * Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ OS_KERNEL_FILE_STRU * patch_readm_fileopen(int32 type) { OS_KERNEL_FILE_STRU *fp; struct timeval tv; struct rtc_time tm; char filename[50] = {0}; do_gettimeofday(&tv); rtc_time_to_tm(tv.tv_sec, &tm); PS_PRINT_INFO("%4d-%02d-%02d 
%02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon+1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); if (ENUM_INFO_SDIO == type) { snprintf(filename, sizeof(filename) - 1, "/data/memdump/readm_wifi%04d%02d%02d%02d%02d%02d.bin", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); } else { snprintf(filename, sizeof(filename) - 1, "/data/memdump/readm_bfg%04d%02d%02d%02d%02d%02d.bin", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); } PS_PRINT_INFO("filename = %s",filename); fp = filp_open(filename, O_RDWR | O_CREAT, 0644); return fp; } /***************************************************************************** Prototype : patch_recv_mem Description : receive memory information form device Input : int32 len int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_recv_mem(OS_KERNEL_FILE_STRU *fp, int32 len, int32 type) { uint8 *pdatabuf; uint8 buf; int32 l_ret; int32 count; mm_segment_t fs; uint8 retry = 3; int32 lenbuf = 0; if (IS_ERR(fp)) { PS_PRINT_ERR("fp is error,fp = 0x%p\n", fp); return -EFAIL; } PS_PRINT_DBG("expect recv len is [%d]\n", len); count = 0; l_ret = read_msg(&buf, 1, type); pdatabuf = OS_KMALLOC_GFP(len); if (NULL == pdatabuf) { filp_close(fp, NULL); return -EFAIL; } OS_MEM_SET(pdatabuf, 0, len); if ('a' != buf) { PS_PRINT_DBG("receive a fail, readmem anyway\n"); } //#if 0 fs = get_fs(); set_fs(KERNEL_DS); l_ret = vfs_llseek(fp, 0, SEEK_END); PS_PRINT_INFO("pos = %ld", fp->f_pos); while (len > lenbuf)//RECV_BUF_LEN) { l_ret = read_msg(pdatabuf + lenbuf, len - lenbuf, type); if (l_ret > 0) { lenbuf += l_ret; } else { retry--; lenbuf = 0; if (0 == retry) { l_ret = -EFAIL; PS_PRINT_ERR("time out\n"); break; } if (ENUM_INFO_SDIO == type) { PATCH_SEND_N_SDIO; } else { ringbuf_flush(); PATCH_SEND_N_UART; } } } if (len <= 
lenbuf) { vfs_write(fp, pdatabuf, len, &fp->f_pos); } filp_close(fp, NULL); set_fs(fs); if (ENUM_INFO_SDIO == type) { PATCH_SEND_A_SDIO; } else { ringbuf_flush(); PATCH_SEND_A_UART; } OS_MEM_KFREE(pdatabuf); return l_ret; } /***************************************************************************** Prototype : patch_int_para_send Description : down integer to device Input : int8 *name uint8 *Value int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_int_para_send(uint8 *name, uint8 *Value, int32 type) { int32 l_ret; int32 data_len; int32 Value_len; int32 i; int32 n; uint8 auc_num[INT32_STR_LEN]; uint8 data[DATA_BUF_LEN]; PS_PRINT_DBG("entry "); Value_len = OS_STR_LEN((int8 *)Value); OS_MEM_SET(auc_num, 0, INT32_STR_LEN); OS_MEM_SET(data, 0, DATA_BUF_LEN); data_len = 0; data_len = OS_STR_LEN(name); OS_MEM_CPY(data, name, data_len); data[data_len] = COMPART_KEYWORD; data_len = data_len + 1; for (i = 0,n = 0; (i <= Value_len) && (n < INT32_STR_LEN); i++) { if ((',' == Value[i]) || (i == Value_len )) { PS_PRINT_DBG("auc_num = %s, i = %d, n = %d\n", auc_num, i, n); if (0 == n) { continue; } OS_MEM_CPY((uint8 *)&data[data_len], auc_num, n); data_len = data_len + n; data[data_len] = COMPART_KEYWORD; data_len = data_len + 1; OS_MEM_SET(auc_num, 0, INT32_STR_LEN); n = 0; } else if (0x20 == Value[i]) { continue; } else { auc_num[n] = Value[i]; n++; } } PS_PRINT_DBG("data_len = %d, \n", data_len); PS_PRINT_DBG("data = %s, \n", data); // l_ret = patch_send_data(data, data_len, MSG_FORM_DRV_A, type); if (ENUM_INFO_UART == type) { ringbuf_flush(); } l_ret = send_msg(data, data_len, type); return l_ret; } /***************************************************************************** Prototype : patch_file_type Description : down parameter Input : uint8 *Key uint8 * Value Output : Return Value : int32 
Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_number_type(uint8 *Key, uint8 *Value, int32 type) { int32 l_ret = -EFAIL; int32 num; uint8 *flag; uint8 pinstate; OS_KERNEL_FILE_STRU *fp; PS_PRINT_DBG("entry"); if (!OS_MEM_CMP(Key, VER_CMD_KEYWORD, OS_STR_LEN(VER_CMD_KEYWORD))) { PS_PRINT_DBG("version \n"); return SUCC; } if (!OS_STR_CMP((int8 *)Key, PLL_CMD_KEYWORD) || !OS_STR_CMP((int8 *)Key, JUMP_CMD_KEYWORD) || !OS_STR_CMP((int8 *)Key, WMEM_CMD_KEYWORD)) { l_ret = patch_int_para_send(Key, Value, type); if (0 > l_ret) { PS_PRINT_ERR("send %s,%s fail \n", Key, Value); return l_ret; } /* G ÊÇdevice ÏàÓ¦²Ù×÷Íê³É±êÖ¾ */ PS_PRINT_DBG("recv g form device\n"); l_ret = patch_wait_g_form_dev(type); if (0 > l_ret) { PS_PRINT_ERR("recv g FAIL [%d]\n", l_ret); return -EFAIL; } } else if (!OS_STR_CMP((int8 *)Key, BRT_CMD_KEYWORD)) { /* Ð޸IJ¨ÌØÂÊ */ l_ret = patch_int_para_send(Key, Value, type); if (0 > l_ret) { PS_PRINT_ERR("send %s,%s fail \n", Key, Value); return l_ret; } PS_PRINT_DBG("change baudrate\n"); //recv_expect_result(MSG_FORM_DRV_A, type); num = patch_string_to_num(Value); /* Ôö¼Óµ÷ÓÃÐ޸IJ¨ÌØÂʺ¯Êý */ l_ret = pm_uart_set_baudrate(num); if (0 > l_ret) { PS_PRINT_ERR(" modify baudrate fail!!\n"); return -EFAIL; } ringbuf_flush(); msleep(10); PATCH_SEND_N_UART; /* G ÊÇdevice ÏàÓ¦²Ù×÷Íê³É±êÖ¾ */ PS_PRINT_DBG("recv g form device\n"); l_ret = patch_wait_g_retry_form_dev(type); if (0 > l_ret) { return -EFAIL; } } else if (!OS_STR_CMP((int8 *)Key, RMEM_CMD_KEYWORD)) { /* not enough mem to read mem */ if ((0 == g_usemalloc) && (ENUM_INFO_UART == type)) { return SUCC; } powerpin_state &= PINSTATE_MASK; pinstate = (powerpin_state & (powerpin_state >> PINSTATE_NUM)); /* disabled */ if ((PINENABLE == pinstate) || (0 == (pinstate & BFG_PINDISABLE) && (ENUM_INFO_UART == type)) || (0 == (pinstate & WLAN_PINDISABLE) && (ENUM_INFO_SDIO == 
type))) { PS_PRINT_INFO("pinstart bfg_pindisable\n"); return SUCC; } fp = patch_readm_fileopen(type); if (IS_ERR(fp)) { PS_PRINT_ERR("create file error,fp = 0x%p, errorno=%d\n", fp, PTR_ERR(fp)); return SUCC; } l_ret = patch_int_para_send(Key, Value, type); if (0 > l_ret) { PS_PRINT_ERR("send %s,%s fail \n", Key, Value); filp_close(fp, NULL); return l_ret; } PS_PRINT_DBG("recv mem form device\n"); flag = OS_STR_CHR(Value, ','); if (NULL == flag) { PS_PRINT_ERR("RECV LEN ERROR..\n"); filp_close(fp, NULL); return -EFAIL; } flag++; PS_PRINT_DBG("recv len [%s]\n", flag); while(COMPART_KEYWORD == *flag) { flag++; } num = patch_string_to_num(flag); PS_PRINT_DBG("recv len [%d]\n", num); l_ret = patch_recv_mem(fp, num, type); } return l_ret; } /***************************************************************************** Prototype : patch_quit_type Description : down quit command Input : uint8 *Key uint8 * Value Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_quit_type(int32 type) { int32 l_ret; int32 l_len; uint8 buf[8]; PS_PRINT_DBG("entry\n"); OS_MEM_SET(buf, 0, 8); OS_MEM_CPY(buf, (uint8 *)QUIT_CMD, OS_STR_LEN(QUIT_CMD)); l_len = OS_STR_LEN(QUIT_CMD); buf[l_len] = COMPART_KEYWORD; l_len++; if (ENUM_INFO_UART == type) { pm_change_patch_state(); } l_ret = send_msg(buf, l_len, type); return l_ret; } /***************************************************************************** Prototype : patch_file_type Description : down addr and file Input : uint8 *Key uint8 * Value Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_file_type(uint8 *Key, uint8 *Value, int32 type) { int32 i; int32 n; int32 l_ret; int32 l_len; uint8 
auc_addr[INT32_STR_LEN]; uint8 data[DATA_BUF_LEN]; int32 data_len; PS_PRINT_DBG("Key = %s, Value = %s\n", Key, Value); /* * ¸ù¾Ý¹Ø¼ü×ÖµÄ×îºóÒ»¸ö×Ö·û£¬È·¶¨·¢Ë͵ØÖ·Ö®ºó£¬deviceµÄ·µ»ØÖµ * ËùÒÔÅäÖÃÎļþµÄ¹Ø¼ü×Ö²»ÄÜËæÒâÐÞ¸Ä */ OS_MEM_SET(data, 0, DATA_BUF_LEN); data_len = OS_STR_LEN(Key); OS_MEM_CPY(data, Key, data_len); data[data_len] = COMPART_KEYWORD; data_len++; OS_MEM_SET(auc_addr, 0, INT32_STR_LEN); for (i = 0,n = 0; Value[i] != ',' && n < INT32_STR_LEN; i++) { if ((',' == Value[i]) || (COMPART_KEYWORD == Value[i])) { break; } else { auc_addr[n] = Value[i]; n++; } } OS_MEM_CPY((uint8 *)&data[data_len], auc_addr, n); data_len = data_len + n; data[data_len] = COMPART_KEYWORD; data_len++; PS_PRINT_DBG("data is %s\n", data); l_ret = patch_send_data(data, data_len, MSG_FORM_DRV_C, type); if (0 > l_ret) { PS_PRINT_ERR(" SEND %s addr error\n", Key); return -EFAIL; } /* ɾ³ýÍ·²¿µÄ¿Õ¸ñ */ l_len = OS_STR_LEN((int8 *)Value); for(i = i + 1; i < l_len; i++) { if (('/' == Value[i]) || ('.' == Value[i])) /* ¼æÈݾø¶Ô·¾¶ºÍÏà¶Ô·¾¶ */ { break; } } PS_PRINT_DBG("file path is %s\n", &Value[i]); l_ret = patch_down_file (&Value[i], type); if (0 > l_ret) { PS_PRINT_ERR(" SEND %s file error\n", Key); return l_ret; } /* G ÊÇ DEVICE Íê³ÉÏàÓ¦²Ù×÷±êÖ¾ */ l_ret = patch_wait_g_form_dev(type); return l_ret; } /***************************************************************************** Prototype : patch_device_respond Description : wait respond form device Input : void Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_device_respond(int32 type) { int32 l_ret; int32 i; PS_PRINT_DBG("entry"); for(i = 0; i < HOST_DEV_TIMEOUT; i++) { if (ENUM_INFO_SDIO == type) { // PATCH_SEND_N_SDIO; } else { PATCH_SEND_N_UART; } OS_MEM_SET(g_st_global[type].auc_DevVersion, 0, VERSION_LEN); msleep(1); l_ret = read_msg(g_st_global[type].auc_DevVersion, 
VERSION_LEN, type); if (0 > l_ret) { PS_PRINT_ERR("read fail![%d]\n", i); continue; } else if (!OS_MEM_CMP((int8 *)g_st_global[type].auc_DevVersion, (int8 *)g_st_global[type].auc_CfgVersion, OS_STR_LEN(g_st_global[type].auc_CfgVersion))) { if (ENUM_INFO_SDIO == type) { PATCH_SEND_A_SDIO; msleep(10); } else { PATCH_SEND_A_UART; } PS_PRINT_INFO("Device Version = [%s], CfgVersion = [%s].\n", g_st_global[type].auc_DevVersion, g_st_global[type].auc_CfgVersion); return SUCC; } else { if (ENUM_INFO_SDIO == type) { // PATCH_SEND_N_SDIO; } else { PATCH_SEND_N_UART; } PS_PRINT_INFO("Device Version = [%s], CfgVersion = [%s].\n", g_st_global[type].auc_DevVersion, g_st_global[type].auc_CfgVersion); } } PS_PRINT_ERR("read device version fail![%d]\n", i); return -EFAIL; } /***************************************************************************** Prototype : patch_del_space Description : delete space Input : uint8 *string int32 *len Output : Return Value : uint8 * Calls : Called By : History : 1.Date : 2012/10/19 Author : kf74033 Modification : Created function *****************************************************************************/ uint8 *patch_del_space(uint8 *string, int32 *len) { int i; PS_PRINT_DBG("entry"); if ((NULL == string) || (NULL == len)) { return NULL; } /* ɾ³ýβ²¿µÄ¿Õ¸ñ */ for(i = *len - 1; i >= 0; i--) { if (COMPART_KEYWORD != string[i]) { break; } string[i] = '\0'; } /* ³ö´í */ if (i < 0) { PS_PRINT_ERR(" string is Space bar\n"); return NULL; } /* ÔÚforÓï¾äÖмõÈ¥1£¬ÕâÀï¼ÓÉÏ1 */ *len = i + 1; /* ɾ³ýÍ·²¿µÄ¿Õ¸ñ */ for(i = 0; i < *len; i++) { if (' ' != string[i]) { /* ¼õÈ¥¿Õ¸ñµÄ¸öÊý */ *len = *len - i; return &string[i]; } } return NULL; } /***************************************************************************** Prototype : patch_read_cfg Description : read config file Input : uint8 *cfg_path uint8 *read_buf Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function 
*****************************************************************************/ int32 patch_read_cfg(uint8 *cfg_path, uint8 *read_buf) { OS_KERNEL_FILE_STRU *fp; int32 l_ret; PS_PRINT_DBG("entry\n"); if ((NULL == cfg_path) || (NULL == read_buf)) { PS_PRINT_ERR("para is NULL\n"); return -EFAIL; } fp = filp_open(cfg_path, O_RDONLY, 0); if (IS_ERR(fp)) { fp = NULL; PS_PRINT_ERR("filp_open %s fail\n", cfg_path); return -EFAIL; } OS_MEM_SET(read_buf, 0, READ_CFG_BUF_LEN); l_ret = kernel_read(fp, fp->f_pos, read_buf, READ_CFG_BUF_LEN); filp_close(fp, NULL); fp = NULL; return l_ret; } /***************************************************************************** Prototype : patch_parse_cmd Description : parse config command Input : uint8 *buf uint8 *cmd_name uint8 *cmd_para Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_parse_cmd(uint8 *buf, uint8 *cmd_name, uint8 *cmd_para) { int32 l_ret; int32 l_cmdlen; int32 l_paralen; uint8 *begin; uint8 *end; uint8 *link; uint8 *handle; #ifdef HISI_GET_BOARD_ID int32 l_offset = 0; #endif begin = buf; if((NULL == buf) || (NULL == cmd_name) || (NULL == cmd_para)) { PS_PRINT_ERR("buf/cmd_name/cmd_para is NULL\n"); return ERROR_TYPE_CMD; } /* ×¢ÊÍÐÐ */ if ('@' == buf[0]) { return ERROR_TYPE_CMD; } /* ´íÎóÐУ¬»òÕßÍ˳öÃüÁîÐÐ */ link = OS_STR_CHR((int8 *)begin, '='); if (NULL == link) { /* Í˳öÃüÁîÐÐ */ if (NULL != OS_STR_STR((int8 *)buf, QUIT_CMD_KEYWORD)) { return QUIT_TYPE_CMD; } return ERROR_TYPE_CMD; } /* ´íÎóÐУ¬Ã»ÓнáÊø·û */ end = OS_STR_CHR(link, ';'); if (NULL == end) { return ERROR_TYPE_CMD; } l_cmdlen = link - begin; /* ɾ³ý¹Ø¼ü×ÖµÄÁ½±ß¿Õ¸ñ */ handle = patch_del_space((uint8 *)begin, &l_cmdlen); if (NULL == handle) { return ERROR_TYPE_CMD; } /* ÅжÏÃüÁîÀàÐÍ */ if (!OS_MEM_CMP(handle, (uint8 *)FILE_TYPE_CMD_KEY, OS_STR_LEN((uint8 *)FILE_TYPE_CMD_KEY))) { handle = 
OS_STR_STR(handle, (uint8 *)FILE_TYPE_CMD_KEY) + OS_STR_LEN(FILE_TYPE_CMD_KEY); l_cmdlen = l_cmdlen - OS_STR_LEN(FILE_TYPE_CMD_KEY); l_ret = FILE_TYPE_CMD; } else if (!OS_MEM_CMP(handle, (uint8 *)NUM_TYPE_CMD_KEY, OS_STR_LEN(NUM_TYPE_CMD_KEY))) { handle = OS_STR_STR(handle, (uint8 *)NUM_TYPE_CMD_KEY) + OS_STR_LEN(NUM_TYPE_CMD_KEY); l_cmdlen = l_cmdlen - OS_STR_LEN(NUM_TYPE_CMD_KEY); l_ret = NUM_TYPE_CMD; } else { return ERROR_TYPE_CMD; } OS_MEM_CPY(cmd_name, handle, l_cmdlen); /* ɾ³ýÖµÁ½±ß¿Õ¸ñ */ begin = link + 1; l_paralen = end - begin; handle = patch_del_space((uint8 *)begin, &l_paralen); if (NULL == handle) { return ERROR_TYPE_CMD; } OS_MEM_CPY(cmd_para, handle, l_paralen); #ifdef HISI_GET_BOARD_ID if (!OS_MEM_CMP(buf, (uint8 *)FILE_TYPE_RW_KEY, OS_STR_LEN((uint8 *)FILE_TYPE_RW_KEY)) && !OS_STR_STR(buf, UART_KEY)) { // dts get rw file name failed l_paralen = strlen(g_st_boardid.rw_file_name); if (0 == l_paralen) { PS_PRINT_WARNING("Get the dts rw file name failed\n"); return l_ret; } PS_PRINT_INFO("cmd_para:{%s}\n", cmd_para); end = OS_STR_CHR((int8 *)cmd_para, ','); if (NULL == end) { return l_ret; } l_offset = (uint8 *)end - cmd_para + 1; // out of range if ((l_offset + l_paralen + 1) > PARA_LEN) { return l_ret; } snprintf(cmd_para + l_offset, l_paralen + 1, "%s", g_st_boardid.rw_file_name); PS_PRINT_INFO("cmd_para:{%s}\n", cmd_para); } #endif return l_ret; } /***************************************************************************** Prototype : patch_parse_cfg Description : parse config file Input : uint8 *buf int32 len int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ void *patch_malloc_cmd_buf(uint8 *buf, int32 type) { int32 l_len; uint8 *flag; uint8 *p_buf; /* ͳ¼ÆÃüÁî¸öÊý */ flag = buf; g_st_global[type].l_count = 0; while(NULL != flag) { /* Ò»¸öÕýÈ·µÄÃüÁîÐнáÊø·ûΪ ; */ flag = 
OS_STR_CHR(flag, CMD_LINE_SIGN); if (NULL == flag) { break; } g_st_global[type].l_count++; flag++; } PS_PRINT_DBG("l_count = %d\n", g_st_global[type].l_count); /* ÉêÇë´æ´¢ÃüÁî¿Õ¼ä */ /* ±Èʵ¼Ê¶à·ÖÒ»¸öÃüÁî¿Õ¼ä¡£ÕâÑùµÄ·ÀÖ¹ÅäÖÃÎļþ×îºóÒ»¸öÃüÁî²»ÊÇÍ˳öÃüÁî */ l_len = (g_st_global[type].l_count + 1) * sizeof(struct cmd_type_st); p_buf = OS_KMALLOC_GFP(l_len); if (NULL == p_buf) { PS_PRINT_ERR("kmalloc cmd_type_st fail\n"); return NULL; } OS_MEM_SET((void *)p_buf, 0, l_len); return p_buf; } /***************************************************************************** Prototype : patch_parse_cfg Description : parse config file Input : uint8 *buf int32 len int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_parse_cfg(uint8 *buf, int32 buf_len, int32 type) { int32 i; int32 l_len; uint8 *flag; uint8 *begin; uint8 *end; int32 cmd_type; uint8 cmd_name[CMD_LEN]; uint8 cmd_para[PARA_LEN]; g_st_global[type].pst_cmd = (struct cmd_type_st *)patch_malloc_cmd_buf(buf, type); if (NULL == g_st_global[type].pst_cmd) { PS_PRINT_ERR(" patch_malloc_cmd_buf fail\n"); return -EFAIL; } /* ½âÎöCMD BUF*/ flag = buf; /* ÉêÇëµÄ´æ´¢ÃüÁî¿Õ¼ä¹»Óã¬bufûÓжÁÈ¡Íê³É */ // l_len = buf_len - 1; l_len = buf_len; i = 0; while((i < g_st_global[type].l_count) && (flag < &buf[l_len])) { /* *»ñÈ¡ÅäÖÃÎļþÖеÄÒ»ÐÐ,ÅäÖÃÎļþ±ØÐëÊÇunix¸ñʽ. 
*ÅäÖÃÎļþÖеÄijһÐк¬ÓÐ×Ö·û @ ÔòÈÏΪ¸ÃÐÐΪעÊÍÐÐ */ begin = flag; end = OS_STR_CHR(flag, '\n'); if(NULL == end) { PS_PRINT_INFO("end is null\n"); break; } if (end == begin) /* ¸ÃÐÐÖ»ÓÐÒ»¸ö»»Ðзû */ { PS_PRINT_DBG("blank line\n"); flag = end + 1; continue; } *end = '\0'; PS_PRINT_DBG("operation string is [%s]\n", begin); OS_MEM_SET(cmd_name, 0, CMD_LEN); OS_MEM_SET(cmd_para, 0, PARA_LEN); cmd_type = patch_parse_cmd(begin,cmd_name, cmd_para); if (ERROR_TYPE_CMD != cmd_type)/* ÕýÈ·µÄÃüÁîÀàÐÍ£¬Ôö¼Ó */ { // PS_PRINT_ERR("cmd[%d]type[%d], name[%s], para[%s]\n", // i, cmd_type, cmd_name, cmd_para); g_st_global[type].pst_cmd[i].cmd_type = cmd_type; OS_MEM_CPY(g_st_global[type].pst_cmd[i].cmd_name, cmd_name, CMD_LEN); OS_MEM_CPY(g_st_global[type].pst_cmd[i].cmd_para, cmd_para, PARA_LEN); /* »ñÈ¡ÅäÖð汾ºÅ */ if (!OS_MEM_CMP(g_st_global[type].pst_cmd[i].cmd_name, VER_CMD_KEYWORD, OS_STR_LEN(VER_CMD_KEYWORD))) { OS_MEM_CPY(g_st_global[type].auc_CfgVersion, g_st_global[type].pst_cmd[i].cmd_para, //OS_STR_LEN(g_st_global[type].pst_cmd[i].cmd_para)); VERSION_LEN); PS_PRINT_DBG("g_CfgVersion = [%s],[%s], \n", g_st_global[type].auc_CfgVersion, g_st_global[type].pst_cmd[i].cmd_para); } i++; } flag = end + 1; } PS_PRINT_DBG("Read cmd OK\n"); /* Èç¹û×îºóÒ»¸öÃüÁî²»ÊÇÍ˳öÃüÁÔòÔö¼ÓÒ»¸öÍ˳öÃüÁî */ if (QUIT_TYPE_CMD != g_st_global[type].pst_cmd[i-1].cmd_type) { g_st_global[type].pst_cmd[i].cmd_type = QUIT_TYPE_CMD; i++; /* ·½±ãͳ¼ÆÃüÁî¸öÊý */ } /* ¸ù¾Ýʵ¼ÊÃüÁî¸öÊý£¬ÐÞ¸Ä×îÖÕµÄÃüÁî¸öÊý */ g_st_global[type].l_count = i; PS_PRINT_DBG("type[%d], cmd count[%d]\n", type, g_st_global[type].l_count); return SUCC; } /***************************************************************************** Prototype : patch_get_cfg Description : get patch config command Input : uint8 *cfg int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ int32 
patch_get_cfg(uint8 *cfg, int32 type) { uint8 *buf; int32 l_readlen; int32 l_ret; buf = OS_KMALLOC_GFP(READ_CFG_BUF_LEN); if (NULL == buf) { return -EFAIL; } /* ÅäÖÃÎļþ±ØÐëСÓÚ1024 */ l_readlen = patch_read_cfg(cfg, buf); if(0 > l_readlen) { PS_PRINT_ERR("read cfg error\n"); OS_MEM_KFREE(buf); buf = NULL; return -EFAIL; } l_ret = patch_parse_cfg(buf, l_readlen, type); if(0 > l_ret) { PS_PRINT_ERR("parse cfg error\n"); } OS_MEM_KFREE(buf); buf = NULL; return l_ret; } EXPORT_SYMBOL(patch_get_cfg); /***************************************************************************** Prototype : patch_download_info Description : download patch Input : int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_execute_cmd(int32 cmd_type, uint8 *cmd_name, uint8 *cmd_para,int32 type) { int32 l_ret; /* Çå¿ÕÉϴβÙ×÷ÒÅÁôÏÂÀ´µÄÊý¾Ý£¬¶ÁÈ¡½á¹ûʱÒÔ³¤¶ÈΪÅжϣ¬buf¾Í²»ÓÃÇå¿ÕÁË */ g_st_global[type].l_Recvbuf1_len = 0; g_st_global[type].l_Recvbuf2_len = 0; switch(cmd_type) { case FILE_TYPE_CMD: PS_PRINT_DBG(" command type FILE_TYPE_CMD\n"); l_ret = patch_file_type(cmd_name, cmd_para, type); break; case NUM_TYPE_CMD: PS_PRINT_DBG(" command type NUM_TYPE_CMD\n"); l_ret = patch_number_type(cmd_name, cmd_para, type); break; case QUIT_TYPE_CMD: PS_PRINT_DBG(" command type QUIT_TYPE_CMD\n"); l_ret = patch_quit_type(type); break; default: PS_PRINT_ERR("command type error[%d]\n", cmd_type); l_ret = -EFAIL; break; } return l_ret; } /***************************************************************************** Prototype : patch_download_info Description : download patch Input : int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_download_export(uint8 *keyword, 
int32 type) { int32 i; int32 l_ret; for (i = 0; i < g_st_global[type].l_count; i++) { if (!OS_STR_CMP(g_st_global[type].pst_cmd[i].cmd_name, keyword)) //OS_STR_LEN(keyword))) { break; } } if (i >= g_st_global[type].l_count) { return -EFAIL; } l_ret = patch_execute_cmd(g_st_global[type].pst_cmd[i].cmd_type, g_st_global[type].pst_cmd[i].cmd_name, g_st_global[type].pst_cmd[i].cmd_para, type); return l_ret; } EXPORT_SYMBOL(patch_download_export); /***************************************************************************** Prototype : patch_download_patch Description : download patch Input : int32 type Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/1 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_download_patch(int32 type) { int32 l_ret; int32 i; uint32 ul_alloc_len = READ_DATA_BUF_LEN; // g_debuglevel_patch =12; dma_addr_t puc_phyaddr; PS_PRINT_INFO("type:%d\n", type); if (ENUM_INFO_UART == type) { g_stringbuf.pbufstart = kmalloc(ul_alloc_len, GFP_KERNEL); if (NULL == g_stringbuf.pbufstart) { ul_alloc_len = READ_DATA_REALLOC_BUF_LEN; g_stringbuf.pbufstart = kmalloc(ul_alloc_len, GFP_KERNEL); if (NULL == g_stringbuf.pbufstart) { g_usemalloc = 0; PS_PRINT_ERR("ringbuf KMALLOC SIZE(%d) failed.\n", ul_alloc_len); g_stringbuf.pbufstart = g_st_global[type].auc_Recvbuf1; g_stringbuf.pbufend = RECV_BUF_LEN + g_stringbuf.pbufstart; return -EFAIL; } powerpin_state &= 0xFF - BFG_PINDISABLE; PS_PRINT_INFO("ringbuf kmalloc size(%d) suc.\n", ul_alloc_len); } g_stringbuf.pbufend = ul_alloc_len + g_stringbuf.pbufstart; g_usemalloc = 1; g_stringbuf.phead = g_stringbuf.pbufstart; g_stringbuf.ptail = g_stringbuf.pbufstart; } l_ret = patch_device_respond(type); if(0 > l_ret) { PS_PRINT_ERR("receiver respond form device is fail\n"); if ((ENUM_INFO_UART == type) && (1 == g_usemalloc)) { //dma_free_coherent(NULL, READ_DATA_BUF_LEN, g_stringbuf.pbufstart, puc_phyaddr); 
kfree(g_stringbuf.pbufstart); g_stringbuf.pbufstart = NULL; g_stringbuf.pbufend = NULL; g_stringbuf.phead = NULL; g_stringbuf.ptail = NULL; } return -EFAIL; } /* Ö´ÐÐÌõ¼þ:: ÃüÁîÐÐûÓжÁÍ꣬ÃüÁî²»ÊÇ´íÎóÃüÁî */ for (i = 0; i < g_st_global[type].l_count; i++) { PS_PRINT_DBG("cmd[%d]type[%d], name[%s], para[%s]\n", i, g_st_global[type].pst_cmd[i].cmd_type, g_st_global[type].pst_cmd[i].cmd_name, g_st_global[type].pst_cmd[i].cmd_para); l_ret = patch_execute_cmd(g_st_global[type].pst_cmd[i].cmd_type, g_st_global[type].pst_cmd[i].cmd_name, g_st_global[type].pst_cmd[i].cmd_para, type); if (0 > l_ret) { if ((ENUM_INFO_UART == type) && (1 == g_usemalloc)) { //dma_free_coherent(NULL, READ_DATA_BUF_LEN, g_stringbuf.pbufstart, puc_phyaddr); kfree(g_stringbuf.pbufstart); g_stringbuf.pbufstart = NULL; g_stringbuf.pbufend = NULL; g_stringbuf.phead = NULL; g_stringbuf.ptail = NULL; g_usemalloc = 0; } if (ENUM_INFO_SDIO == type) { powerpin_state &= 0xFF - WLAN_PINDISABLE; } else if (ENUM_INFO_UART == type) { powerpin_state &= 0xFF - BFG_PINDISABLE; } return l_ret; } } if (ENUM_INFO_SDIO == type) { powerpin_state &= 0xFF - WLAN_PINDISABLE; } else if (ENUM_INFO_UART == type) { powerpin_state &= 0xFF - BFG_PINDISABLE; } PS_PRINT_DBG("DOWN PATCH SUCC\n"); if ((ENUM_INFO_UART == type) && (1 == g_usemalloc)) { //dma_free_coherent(NULL, READ_DATA_BUF_LEN, g_stringbuf.pbufstart, puc_phyaddr); kfree(g_stringbuf.pbufstart); g_usemalloc = 0; } g_stringbuf.pbufstart = NULL; g_stringbuf.pbufend = NULL; g_stringbuf.phead = NULL; g_stringbuf.ptail = NULL; return SUCC; } EXPORT_SYMBOL(patch_download_patch); /***************************************************************************** Prototype : patch_init Description : patch module initialization Input : Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/14 Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_init(int32 type) #if 1 { int32 
l_ret; PS_PRINT_DBG("entry\n"); // OS_MEM_SET(g_st_global, 0, ENUM_INFO_TOTAL * sizeof(PATCH_GLOBALS_STUR)); if (ENUM_INFO_SDIO == type) { OS_MEM_CPY(g_st_global[ENUM_INFO_SDIO].auc_Cfgpath, SDIO_CFG_FILE, OS_STR_LEN(SDIO_CFG_FILE)); } else { OS_MEM_CPY(g_st_global[ENUM_INFO_UART].auc_Cfgpath, UART_CFG_FILE, OS_STR_LEN(UART_CFG_FILE)); } g_st_global[type].pst_wait = OS_KMALLOC_GFP(sizeof(OS_WAIT_QUEUE_HEAD_T_STRU)); if (NULL == g_st_global[type].pst_wait) { // patch_exit(); return -EFAIL; } OS_INIT_WAITQUEUE_HEAD(g_st_global[type].pst_wait); l_ret = patch_get_cfg(g_st_global[type].auc_Cfgpath, type); if(0 > l_ret) { // patch_exit(); PS_PRINT_ERR("get [%s] command is fail\n", g_st_global[type].auc_Cfgpath); return -EFAIL; } if(NULL == g_pucDataBuf) { g_pucDataBuf = OS_KMALLOC_GFP(READ_PATCH_BUF_LEN); if(NULL == g_pucDataBuf) { PS_PRINT_ERR("g_pucDataBuf KMALLOC failed"); g_pucDataBuf = NULL; return -EFAIL; } else { PS_PRINT_DBG("g_pucDataBuf KMALLOC succ"); } } return SUCC; } #else { int32 l_ret; int32 i; PS_PRINT_DBG("entry\n"); // OS_MEM_SET(g_st_global, 0, ENUM_INFO_TOTAL * sizeof(PATCH_GLOBALS_STUR)); OS_MEM_CPY(g_st_global[ENUM_INFO_SDIO].auc_Cfgpath, SDIO_CFG_FILE, OS_STR_LEN(SDIO_CFG_FILE)); OS_MEM_CPY(g_st_global[ENUM_INFO_UART].auc_Cfgpath, UART_CFG_FILE, OS_STR_LEN(UART_CFG_FILE)); for (i = 0; i < ENUM_INFO_TOTAL; i++) { g_st_global[i].pst_wait = OS_KMALLOC_GFP(sizeof(OS_WAIT_QUEUE_HEAD_T_STRU)); if (NULL == g_st_global[i].pst_wait) { patch_exit(); return -EFAIL; } OS_INIT_WAITQUEUE_HEAD(g_st_global[i].pst_wait); l_ret = patch_get_cfg(g_st_global[i].auc_Cfgpath, i); if(0 > l_ret) { patch_exit(); PS_PRINT_ERR("get [%s] command is fail\n", g_st_global[i].auc_Cfgpath); return -EFAIL; } } return SUCC; } #endif EXPORT_SYMBOL(patch_init); /***************************************************************************** Prototype : patch_exit Description : patch module exit Input : Output : Return Value : int32 Calls : Called By : History : 1.Date : 2012/11/14 
Author : kf74033 Modification : Created function *****************************************************************************/ int32 patch_exit(void) { int32 i; for (i = 0; i < ENUM_INFO_TOTAL; i++) { g_st_global[i].l_count = 0; if (NULL != g_st_global[i].pst_cmd) { OS_MEM_KFREE(g_st_global[i].pst_cmd); g_st_global[i].pst_cmd = NULL; } if (NULL != g_st_global[i].pst_wait) { OS_MEM_KFREE(g_st_global[i].pst_wait); g_st_global[i].pst_wait = NULL; } } if(NULL != g_pucDataBuf) { OS_MEM_KFREE(g_pucDataBuf); g_pucDataBuf = NULL; } return SUCC; } EXPORT_SYMBOL(patch_exit);
gpl-2.0
RealDigitalMediaAndroid/linux-imx6
drivers/net/wireless/rtlwifi/rtl8821as/core/rtw_xmit.c
3
122703
/****************************************************************************** * * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * ******************************************************************************/ #define _RTW_XMIT_C_ #include <drv_types.h> #if defined (PLATFORM_LINUX) && defined (PLATFORM_WINDOWS) #error "Shall be Linux or Windows, but not both!\n" #endif static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; static void _init_txservq(struct tx_servq *ptxservq) { _func_enter_; _rtw_init_listhead(&ptxservq->tx_pending); _rtw_init_queue(&ptxservq->sta_pending); ptxservq->qcnt = 0; _func_exit_; } void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv) { _func_enter_; _rtw_memset((unsigned char *)psta_xmitpriv, 0, sizeof (struct sta_xmit_priv)); _rtw_spinlock_init(&psta_xmitpriv->lock); //for(i = 0 ; i < MAX_NUMBLKS; i++) // _init_txservq(&(psta_xmitpriv->blk_q[i])); _init_txservq(&psta_xmitpriv->be_q); _init_txservq(&psta_xmitpriv->bk_q); _init_txservq(&psta_xmitpriv->vi_q); _init_txservq(&psta_xmitpriv->vo_q); _rtw_init_listhead(&psta_xmitpriv->legacy_dz); _rtw_init_listhead(&psta_xmitpriv->apsd); _func_exit_; } s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, _adapter *padapter) { int i; struct xmit_buf *pxmitbuf; 
	struct xmit_frame *pxframe;
	sint	res=_SUCCESS;

_func_enter_;

	// We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc().
	//_rtw_memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv));

	_rtw_spinlock_init(&pxmitpriv->lock);
	_rtw_spinlock_init(&pxmitpriv->lock_sctx);
	_rtw_init_sema(&pxmitpriv->xmit_sema, 0);
	_rtw_init_sema(&pxmitpriv->terminate_xmitthread_sema, 0);

	/*
	Please insert all the queue initializaiton using _rtw_init_queue below
	*/

	pxmitpriv->adapter = padapter;

	//for(i = 0 ; i < MAX_NUMBLKS; i++)
	//	_rtw_init_queue(&pxmitpriv->blk_strms[i]);

	/* one pending queue per WMM AC, plus one for bcast/mgmt traffic */
	_rtw_init_queue(&pxmitpriv->be_pending);
	_rtw_init_queue(&pxmitpriv->bk_pending);
	_rtw_init_queue(&pxmitpriv->vi_pending);
	_rtw_init_queue(&pxmitpriv->vo_pending);
	_rtw_init_queue(&pxmitpriv->bm_pending);

	//_rtw_init_queue(&pxmitpriv->legacy_dz_queue);
	//_rtw_init_queue(&pxmitpriv->apsd_queue);

	_rtw_init_queue(&pxmitpriv->free_xmit_queue);

	/*
	Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
	and initialize free_xmit_frame below.
	Please also apply  free_txobj to link_up all the xmit_frames...
	*/

	/* xmit_frame pool: over-allocate by 4 bytes so the usable base can be
	 * rounded up to a 4-byte boundary below. */
	pxmitpriv->pallocated_frame_buf = rtw_zvmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);

	if (pxmitpriv->pallocated_frame_buf  == NULL){
		pxmitpriv->pxmit_frame_buf =NULL;
		RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("alloc xmit_frame fail!\n"));
		res= _FAIL;
		goto exit;
	}
	pxmitpriv->pxmit_frame_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_frame_buf), 4);
	//pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 -
	//						((SIZE_PTR) (pxmitpriv->pallocated_frame_buf) &3);

	/* thread every frame descriptor onto the free_xmit_queue */
	pxframe = (struct xmit_frame*) pxmitpriv->pxmit_frame_buf;

	for (i = 0; i < NR_XMITFRAME; i++)
	{
		_rtw_init_listhead(&(pxframe->list));

		pxframe->padapter = padapter;
		pxframe->frame_tag = NULL_FRAMETAG;

		pxframe->pkt = NULL;

		pxframe->buf_addr = NULL;
		pxframe->pxmitbuf = NULL;

		rtw_list_insert_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue));

		pxframe++;
	}

	pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME;

	pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;

	//init xmit_buf
	_rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
	_rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);

	/* xmit_buf pool: same +4 alignment trick as the frame pool */
	pxmitpriv->pallocated_xmitbuf = rtw_zvmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);

	if (pxmitpriv->pallocated_xmitbuf  == NULL){
		RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("alloc xmit_buf fail!\n"));
		res= _FAIL;
		goto exit;
	}

	pxmitpriv->pxmitbuf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_xmitbuf), 4);
	//pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 -
	//						((SIZE_PTR) (pxmitpriv->pallocated_xmitbuf) &3);

	pxmitbuf = (struct xmit_buf*)pxmitpriv->pxmitbuf;

	for (i = 0; i < NR_XMITBUFF; i++)
	{
		_rtw_init_listhead(&pxmitbuf->list);

		pxmitbuf->priv_data = NULL;
		pxmitbuf->padapter = padapter;
		pxmitbuf->buf_tag = XMITBUF_DATA;

		/* Tx buf allocation may fail sometimes, so sleep and retry.
		*/
		if((res=rtw_os_xmit_resource_alloc(padapter, pxmitbuf,(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ), _TRUE)) == _FAIL) {
			/* one retry after a short sleep before giving up */
			rtw_msleep_os(10);
			res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf,(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ), _TRUE);
			if (res == _FAIL) {
				goto exit;
			}
		}

#if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
		/* SDIO/GSPI track head/tail/data/end cursors inside the buffer */
		pxmitbuf->phead = pxmitbuf->pbuf;
		pxmitbuf->pend = pxmitbuf->pbuf + MAX_XMITBUF_SZ;
		pxmitbuf->len = 0;
		pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
#endif

		pxmitbuf->flags = XMIT_VO_QUEUE;

		rtw_list_insert_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue));
		#ifdef DBG_XMIT_BUF
		pxmitbuf->no=i;
		#endif

		pxmitbuf++;
	}

	pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;

	/* init xframe_ext queue,  the same count as extbuf  */
	_rtw_init_queue(&pxmitpriv->free_xframe_ext_queue);

	pxmitpriv->xframe_ext_alloc_addr = rtw_zvmalloc(NR_XMIT_EXTBUFF * sizeof(struct xmit_frame) + 4);

	if (pxmitpriv->xframe_ext_alloc_addr  == NULL){
		pxmitpriv->xframe_ext = NULL;
		RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("alloc xframe_ext fail!\n"));
		res= _FAIL;
		goto exit;
	}
	pxmitpriv->xframe_ext = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->xframe_ext_alloc_addr), 4);
	pxframe = (struct xmit_frame*)pxmitpriv->xframe_ext;

	for (i = 0; i < NR_XMIT_EXTBUFF; i++)
	{
		_rtw_init_listhead(&(pxframe->list));

		pxframe->padapter = padapter;
		pxframe->frame_tag = NULL_FRAMETAG;

		pxframe->pkt = NULL;

		pxframe->buf_addr = NULL;
		pxframe->pxmitbuf = NULL;

		/* ext_tag distinguishes extension frames from the main pool */
		pxframe->ext_tag = 1;

		rtw_list_insert_tail(&(pxframe->list), &(pxmitpriv->free_xframe_ext_queue.queue));

		pxframe++;
	}
	pxmitpriv->free_xframe_ext_cnt = NR_XMIT_EXTBUFF;

	// Init xmit extension buff
	_rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);

	pxmitpriv->pallocated_xmit_extbuf = rtw_zvmalloc(NR_XMIT_EXTBUFF * sizeof(struct xmit_buf) + 4);

	if (pxmitpriv->pallocated_xmit_extbuf  == NULL){
		RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("alloc xmit_extbuf fail!\n"));
		res= _FAIL;
		goto exit;
	}

	pxmitpriv->pxmit_extbuf = (u8
*)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_xmit_extbuf), 4);

	/* management-frame (extension) buffers */
	pxmitbuf = (struct xmit_buf*)pxmitpriv->pxmit_extbuf;

	for (i = 0; i < NR_XMIT_EXTBUFF; i++)
	{
		_rtw_init_listhead(&pxmitbuf->list);

		pxmitbuf->priv_data = NULL;
		pxmitbuf->padapter = padapter;
		pxmitbuf->buf_tag = XMITBUF_MGNT;

		if((res=rtw_os_xmit_resource_alloc(padapter, pxmitbuf,MAX_XMIT_EXTBUF_SZ + XMITBUF_ALIGN_SZ, _TRUE)) == _FAIL) {
			res= _FAIL;
			goto exit;
		}

#if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
		pxmitbuf->phead = pxmitbuf->pbuf;
		pxmitbuf->pend = pxmitbuf->pbuf + MAX_XMIT_EXTBUF_SZ;
		pxmitbuf->len = 0;
		pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
#endif

		rtw_list_insert_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue));
		#ifdef DBG_XMIT_BUF_EXT
		pxmitbuf->no=i;
		#endif
		pxmitbuf++;
	}

	pxmitpriv->free_xmit_extbuf_cnt = NR_XMIT_EXTBUFF;

	/* command buffers live in a fixed array, not a free queue */
	for (i = 0; i<CMDBUF_MAX; i++) {
		pxmitbuf = &pxmitpriv->pcmd_xmitbuf[i];
		if (pxmitbuf) {
			_rtw_init_listhead(&pxmitbuf->list);

			pxmitbuf->priv_data = NULL;
			pxmitbuf->padapter = padapter;
			pxmitbuf->buf_tag = XMITBUF_CMD;

			if((res=rtw_os_xmit_resource_alloc(padapter, pxmitbuf, MAX_CMDBUF_SZ+XMITBUF_ALIGN_SZ, _TRUE)) == _FAIL) {
				res= _FAIL;
				goto exit;
			}

#if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
			pxmitbuf->phead = pxmitbuf->pbuf;
			pxmitbuf->pend = pxmitbuf->pbuf + MAX_CMDBUF_SZ;
			pxmitbuf->len = 0;
			pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
#endif
			pxmitbuf->alloc_sz = MAX_CMDBUF_SZ+XMITBUF_ALIGN_SZ;
		}
	}

	/* NOTE(review): return value of rtw_alloc_hwxmits() is not checked here
	 * — verify it cannot fail, or propagate its result. */
	rtw_alloc_hwxmits(padapter);
	rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);

	for (i = 0; i < 4; i ++)
	{
		pxmitpriv->wmm_para_seq[i] = i;
	}

#ifdef CONFIG_USB_HCI
	pxmitpriv->txirp_cnt=1;

	_rtw_init_sema(&(pxmitpriv->tx_retevt), 0);

	//per AC pending irp
	pxmitpriv->beq_cnt = 0;
	pxmitpriv->bkq_cnt = 0;
	pxmitpriv->viq_cnt = 0;
	pxmitpriv->voq_cnt = 0;
#endif

#ifdef CONFIG_XMIT_ACK
	pxmitpriv->ack_tx = _FALSE;
	_rtw_mutex_init(&pxmitpriv->ack_tx_mutex);
	rtw_sctx_init(&pxmitpriv->ack_tx_ops, 0);
#endif
	rtw_hal_init_xmit_priv(padapter);

exit:

_func_exit_;

	return res;
}

/* Release every lock/semaphore created by _rtw_init_xmit_priv(). */
void  rtw_mfree_xmit_priv_lock (struct xmit_priv *pxmitpriv);
void  rtw_mfree_xmit_priv_lock (struct xmit_priv *pxmitpriv)
{
	_rtw_spinlock_free(&pxmitpriv->lock);
	_rtw_free_sema(&pxmitpriv->xmit_sema);
	_rtw_free_sema(&pxmitpriv->terminate_xmitthread_sema);

	_rtw_spinlock_free(&pxmitpriv->be_pending.lock);
	_rtw_spinlock_free(&pxmitpriv->bk_pending.lock);
	_rtw_spinlock_free(&pxmitpriv->vi_pending.lock);
	_rtw_spinlock_free(&pxmitpriv->vo_pending.lock);
	_rtw_spinlock_free(&pxmitpriv->bm_pending.lock);

	//_rtw_spinlock_free(&pxmitpriv->legacy_dz_queue.lock);
	//_rtw_spinlock_free(&pxmitpriv->apsd_queue.lock);

	_rtw_spinlock_free(&pxmitpriv->free_xmit_queue.lock);
	_rtw_spinlock_free(&pxmitpriv->free_xmitbuf_queue.lock);
	_rtw_spinlock_free(&pxmitpriv->pending_xmitbuf_queue.lock);
}

/*
 * Tear down the adapter-wide xmit state: complete outstanding frames,
 * free OS tx resources, and release the frame/buf pools allocated in
 * _rtw_init_xmit_priv().  Continues in the next chunk.
 */
void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
{
	int i;
	_adapter *padapter = pxmitpriv->adapter;
	struct xmit_frame	*pxmitframe = (struct xmit_frame*) pxmitpriv->pxmit_frame_buf;
	struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;

_func_enter_;

	rtw_hal_free_xmit_priv(padapter);

	rtw_mfree_xmit_priv_lock(pxmitpriv);

	if(pxmitpriv->pxmit_frame_buf==NULL)
		goto out;

	for(i=0; i<NR_XMITFRAME; i++)
	{
		rtw_os_xmit_complete(padapter, pxmitframe);

		pxmitframe++;
	}

	for(i=0; i<NR_XMITBUFF; i++)
	{
		rtw_os_xmit_resource_free(padapter, pxmitbuf,(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ), _TRUE);

		pxmitbuf++;
	}

	if(pxmitpriv->pallocated_frame_buf) {
		rtw_vmfree(pxmitpriv->pallocated_frame_buf, NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
	}

	if(pxmitpriv->pallocated_xmitbuf) {
		rtw_vmfree(pxmitpriv->pallocated_xmitbuf, NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
	}

	/* free xframe_ext queue,  the same count as extbuf  */
	if ((pxmitframe = (struct xmit_frame*)pxmitpriv->xframe_ext)) {
		for (i=0; i<NR_XMIT_EXTBUFF; i++) {
			rtw_os_xmit_complete(padapter, pxmitframe);
			pxmitframe++;
		}
	}
	if (pxmitpriv->xframe_ext_alloc_addr)
		rtw_vmfree(pxmitpriv->xframe_ext_alloc_addr, NR_XMIT_EXTBUFF * sizeof(struct xmit_frame) + 4);
	_rtw_spinlock_free(&pxmitpriv->free_xframe_ext_queue.lock);

	// free xmit extension buff
	_rtw_spinlock_free(&pxmitpriv->free_xmit_extbuf_queue.lock);

	pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
	for(i=0; i<NR_XMIT_EXTBUFF; i++)
	{
		rtw_os_xmit_resource_free(padapter, pxmitbuf,(MAX_XMIT_EXTBUF_SZ + XMITBUF_ALIGN_SZ), _TRUE);

		pxmitbuf++;
	}

	if(pxmitpriv->pallocated_xmit_extbuf) {
		rtw_vmfree(pxmitpriv->pallocated_xmit_extbuf, NR_XMIT_EXTBUFF * sizeof(struct xmit_buf) + 4);
	}

	for (i=0; i<CMDBUF_MAX; i++) {
		pxmitbuf = &pxmitpriv->pcmd_xmitbuf[i];
		if(pxmitbuf!=NULL)
			rtw_os_xmit_resource_free(padapter, pxmitbuf, MAX_CMDBUF_SZ+XMITBUF_ALIGN_SZ , _TRUE);
	}

	rtw_free_hwxmits(padapter);

#ifdef CONFIG_XMIT_ACK
	_rtw_mutex_free(&pxmitpriv->ack_tx_mutex);
#endif

out:

_func_exit_;
}

/*
 * Return whether short guard interval should be used for this station,
 * picking the 20/40/80MHz SGI capability that matches psta->bw_mode.
 */
u8	query_ra_short_GI(struct sta_info *psta)
{
	u8	sgi = _FALSE, sgi_20m = _FALSE, sgi_40m = _FALSE, sgi_80m = _FALSE;

#ifdef CONFIG_80211N_HT
#ifdef CONFIG_80211AC_VHT
	if (psta->vhtpriv.vht_option) {
		sgi_80m= psta->vhtpriv.sgi_80m;
	}
	else
#endif //CONFIG_80211AC_VHT
	{
		sgi_20m = psta->htpriv.sgi_20m;
		sgi_40m = psta->htpriv.sgi_40m;
	}
#endif

	switch(psta->bw_mode){
		case CHANNEL_WIDTH_80:
			sgi = sgi_80m;
			break;
		case CHANNEL_WIDTH_40:
			sgi = sgi_40m;
			break;
		case CHANNEL_WIDTH_20:
		default:
			sgi = sgi_20m;
			break;
	}

	return sgi;
}

/*
 * Decide the virtual-carrier-sense (protection) mode for a frame:
 * NONE_VCS, RTS_CTS or CTS_TO_SELF, based on RTS threshold, ERP/HT
 * protection state and IOT workarounds.  Body continues below.
 */
static void update_attrib_vcs_info(_adapter *padapter, struct xmit_frame *pxmitframe)
{
	u32	sz;
	struct pkt_attrib	*pattrib = &pxmitframe->attrib;
	//struct sta_info	*psta = pattrib->psta;
	struct mlme_ext_priv	*pmlmeext = &(padapter->mlmeextpriv);
	struct mlme_ext_info	*pmlmeinfo = &(pmlmeext->mlmext_info);

	/*
	if(pattrib->psta)
	{
		psta = pattrib->psta;
	}
	else
	{
		DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
		psta=rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0] );
	}

	if(psta==NULL)
	{
		DBG_871X("%s, psta==NUL\n", __func__);
		return;
	}

	if(!(psta->state &_FW_LINKED))
	{
		DBG_871X("%s, 
	psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
		return;
	}
	*/

	/* protection is decided against the MPDU length (first fragment) */
	if (pattrib->nr_frags != 1)
	{
		sz = padapter->xmitpriv.frag_len;
	}
	else //no frag
	{
		sz = pattrib->last_txcmdsz;
	}

	// (1) RTS_Threshold is compared to the MPDU, not MSDU.
	// (2) If there are more than one frag in  this MSDU, only the first frag uses protection frame.
	//		Other fragments are protected by previous fragment.
	//		So we only need to check the length of first fragment.
	if(pmlmeext->cur_wireless_mode < WIRELESS_11_24N  || padapter->registrypriv.wifi_spec)
	{
		/* legacy (pre-11n) rule: RTS threshold, then per-sta ERP flags */
		if(sz > padapter->registrypriv.rts_thresh)
		{
			pattrib->vcs_mode = RTS_CTS;
		}
		else
		{
			if(pattrib->rtsen)
				pattrib->vcs_mode = RTS_CTS;
			else if(pattrib->cts2self)
				pattrib->vcs_mode = CTS_TO_SELF;
			else
				pattrib->vcs_mode = NONE_VCS;
		}
	}
	else
	{
		/* 11n+: first matching rule wins; while(_TRUE)+break acts as a
		 * decision ladder */
		while (_TRUE)
		{
#if 0 //Todo
			//check IOT action
			if(pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF)
			{
				pattrib->vcs_mode = CTS_TO_SELF;
				pattrib->rts_rate = MGN_24M;
				break;
			}
			else if(pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS|HT_IOT_ACT_PURE_N_MODE))
			{
				pattrib->vcs_mode = RTS_CTS;
				pattrib->rts_rate = MGN_24M;
				break;
			}
#endif

			//IOT action
			if((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS) && (pattrib->ampdu_en==_TRUE) &&
				(padapter->securitypriv.dot11PrivacyAlgrthm == _AES_ ))
			{
				pattrib->vcs_mode = CTS_TO_SELF;
				break;
			}

			//check ERP protection
			if(pattrib->rtsen || pattrib->cts2self)
			{
				if(pattrib->rtsen)
					pattrib->vcs_mode = RTS_CTS;
				else if(pattrib->cts2self)
					pattrib->vcs_mode = CTS_TO_SELF;

				break;
			}

			//check HT op mode
			if(pattrib->ht_en)
			{
				u8 HTOpMode = pmlmeinfo->HT_protection;
				if((pmlmeext->cur_bwmode && (HTOpMode == 2 || HTOpMode == 3)) ||
					(!pmlmeext->cur_bwmode && HTOpMode == 3) )
				{
					pattrib->vcs_mode = RTS_CTS;
					break;
				}
			}

			//check rts
			if(sz > padapter->registrypriv.rts_thresh)
			{
				pattrib->vcs_mode = RTS_CTS;
				break;
			}

			//to do list: check MIMO power save condition.
//check AMPDU aggregation for TXOP if((pattrib->ampdu_en==_TRUE) && (!IS_HARDWARE_TYPE_8812(padapter))) { pattrib->vcs_mode = RTS_CTS; break; } pattrib->vcs_mode = NONE_VCS; break; } } //for debug : force driver control vrtl_carrier_sense. if(padapter->driver_vcs_en==1) { //u8 driver_vcs_en; //Enable=1, Disable=0 driver control vrtl_carrier_sense. //u8 driver_vcs_type;//force 0:disable VCS, 1:RTS-CTS, 2:CTS-to-self when vcs_en=1. pattrib->vcs_mode = padapter->driver_vcs_type; } } static void update_attrib_phy_info(_adapter *padapter, struct pkt_attrib *pattrib, struct sta_info *psta) { struct mlme_ext_priv *mlmeext = &padapter->mlmeextpriv; pattrib->rtsen = psta->rtsen; pattrib->cts2self = psta->cts2self; pattrib->mdata = 0; pattrib->eosp = 0; pattrib->triggered=0; pattrib->ampdu_spacing = 0; //qos_en, ht_en, init rate, ,bw, ch_offset, sgi pattrib->qos_en = psta->qos_option; pattrib->raid = psta->raid; if (mlmeext->cur_bwmode < psta->bw_mode) pattrib->bwmode = mlmeext->cur_bwmode; else pattrib->bwmode = psta->bw_mode; pattrib->sgi = query_ra_short_GI(psta); pattrib->ldpc = psta->ldpc; pattrib->stbc = psta->stbc; #ifdef CONFIG_80211N_HT pattrib->ht_en = psta->htpriv.ht_option; pattrib->ch_offset = psta->htpriv.ch_offset; pattrib->ampdu_en = _FALSE; if(padapter->driver_ampdu_spacing != 0xFF) //driver control AMPDU Density for peer sta's rx pattrib->ampdu_spacing = padapter->driver_ampdu_spacing; else pattrib->ampdu_spacing = psta->htpriv.rx_ampdu_min_spacing; #endif //CONFIG_80211N_HT //if(pattrib->ht_en && psta->htpriv.ampdu_enable) //{ // if(psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority)) // pattrib->ampdu_en = _TRUE; //} pattrib->retry_ctrl = _FALSE; #ifdef CONFIG_AUTO_AP_MODE if(psta->isrc && psta->pid>0) pattrib->pctrl = _TRUE; #endif } static s32 update_attrib_sec_info(_adapter *padapter, struct pkt_attrib *pattrib, struct sta_info *psta) { sint res = _SUCCESS; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct security_priv *psecuritypriv = 
&padapter->securitypriv;
	sint bmcast = IS_MCAST(pattrib->ra);

	_rtw_memset(pattrib->dot118021x_UncstKey.skey,  0, 16);
	_rtw_memset(pattrib->dot11tkiptxmickey.skey,  0, 16);
	pattrib->mac_id = psta->mac_id;

	if (psta->ieee8021x_blocked == _TRUE)
	{
		/* 802.1X port not open yet: only EAPOL (0x888e) may pass,
		 * and it goes out unencrypted */
		RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("\n psta->ieee8021x_blocked == _TRUE \n"));

		pattrib->encrypt = 0;

		if((pattrib->ether_type != 0x888e) && (check_fwstate(pmlmepriv, WIFI_MP_STATE) == _FALSE))
		{
			RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("\npsta->ieee8021x_blocked == _TRUE,  pattrib->ether_type(%.4x) != 0x888e\n",pattrib->ether_type));
			#ifdef DBG_TX_DROP_FRAME
			DBG_871X("DBG_TX_DROP_FRAME %s psta->ieee8021x_blocked == _TRUE, pattrib->ether_type(%04x) != 0x888e\n", __FUNCTION__,pattrib->ether_type);
			#endif
			res = _FAIL;
			goto exit;
		}
	}
	else
	{
		GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, bmcast);

#ifdef CONFIG_WAPI_SUPPORT
		/* WAPI WAI handshake frames (0x88B4) are never encrypted */
		if(pattrib->ether_type == 0x88B4)
			pattrib->encrypt=_NO_PRIVACY_;
#endif

		switch(psecuritypriv->dot11AuthAlgrthm)
		{
			case dot11AuthAlgrthm_Open:
			case dot11AuthAlgrthm_Shared:
			case dot11AuthAlgrthm_Auto:
				pattrib->key_idx = (u8)psecuritypriv->dot11PrivacyKeyIndex;
				break;
			case dot11AuthAlgrthm_8021X:
				/* group key for bcast/mcast, pairwise (idx 0) otherwise */
				if(bmcast)
					pattrib->key_idx = (u8)psecuritypriv->dot118021XGrpKeyid;
				else
					pattrib->key_idx = 0;
				break;
			default:
				pattrib->key_idx = 0;
				break;
		}

		//For WPS 1.0 WEP, driver should not encrypt EAPOL Packet for WPS handshake.
		if (((pattrib->encrypt ==_WEP40_)||(pattrib->encrypt ==_WEP104_)) && (pattrib->ether_type == 0x888e))
			pattrib->encrypt=_NO_PRIVACY_;

	}

	/* derive IV/ICV sizes and the per-frame IV from the cipher */
	switch (pattrib->encrypt)
	{
		case _WEP40_:
		case _WEP104_:
			pattrib->iv_len = 4;
			pattrib->icv_len = 4;
			WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
			break;

		case _TKIP_:
			pattrib->iv_len = 8;
			pattrib->icv_len = 4;

			/* TKIP key not yet installed/usable: drop the frame */
			if(psecuritypriv->busetkipkey==_FAIL)
			{
				#ifdef DBG_TX_DROP_FRAME
				DBG_871X("DBG_TX_DROP_FRAME %s psecuritypriv->busetkipkey(%d)==_FAIL drop packet\n", __FUNCTION__, psecuritypriv->busetkipkey);
				#endif
				res =_FAIL;
				goto exit;
			}

			if(bmcast)
				TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
			else
				TKIP_IV(pattrib->iv, psta->dot11txpn, 0);

			_rtw_memcpy(pattrib->dot11tkiptxmickey.skey, psta->dot11tkiptxmickey.skey, 16);

			break;

		case _AES_:
			pattrib->iv_len = 8;
			pattrib->icv_len = 8;

			if(bmcast)
				AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
			else
				AES_IV(pattrib->iv, psta->dot11txpn, 0);

			break;

#ifdef CONFIG_WAPI_SUPPORT
		case _SMS4_:
			pattrib->iv_len = 18;
			pattrib->icv_len = 16;
			rtw_wapi_get_iv(padapter,pattrib->ra,pattrib->iv);
			break;
#endif
		default:
			pattrib->iv_len = 0;
			pattrib->icv_len = 0;
			break;
	}

	if(pattrib->encrypt>0)
		_rtw_memcpy(pattrib->dot118021x_UncstKey.skey, psta->dot118021x_UncstKey.skey, 16);

	RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
		("update_attrib: encrypt=%d  securitypriv.sw_encrypt=%d\n", pattrib->encrypt, padapter->securitypriv.sw_encrypt));

	/* fall back to software encryption when hw crypto is disabled */
	if (pattrib->encrypt &&
		((padapter->securitypriv.sw_encrypt == _TRUE) || (psecuritypriv->hw_decrypted == _FALSE)))
	{
		pattrib->bswenc = _TRUE;
		RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,
			("update_attrib: encrypt=%d securitypriv.hw_decrypted=%d bswenc=_TRUE\n", pattrib->encrypt, padapter->securitypriv.sw_encrypt));
	} else {
		pattrib->bswenc = _FALSE;
		RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("update_attrib: bswenc=_FALSE\n"));
	}

#if defined(CONFIG_CONCURRENT_MODE) && !defined(DYNAMIC_CAMID_ALLOC)
	if((pattrib->encrypt && bmcast) || (pattrib->encrypt ==_WEP40_)
|| (pattrib->encrypt ==_WEP104_))
	{
		pattrib->bswenc = _TRUE;//force using sw enc.
	}
#endif

#ifdef CONFIG_WAPI_SUPPORT
	if(pattrib->encrypt == _SMS4_)
		pattrib->bswenc = _FALSE;
#endif

#ifdef CONFIG_TDLS
	/* frames on a TDLS direct link always use AES with hw encryption */
	if(pattrib->direct_link == _TRUE)
	{
		pattrib->mac_id = pattrib->ptdls_sta->mac_id;

		if(pattrib->encrypt>0)
		{
			pattrib->encrypt= _AES_;
			pattrib->iv_len=8;
			pattrib->icv_len=8;
			pattrib->bswenc = _FALSE;
		}
	}
#endif //CONFIG_TDLS

exit:

	return res;

}

/*
 * Apply WMM admission-control: when the AC of `priority` is ACM-gated in
 * acm_mask, demote the frame to the next lower, non-gated access category
 * (VO->VI, VI->BE, BE->BK).  Returns the (possibly changed) priority.
 */
u8	qos_acm(u8 acm_mask, u8 priority)
{
	u8	change_priority = priority;

	switch (priority)
	{
		case 0:
		case 3:
			if(acm_mask & BIT(1))
				change_priority = 1;
			break;
		case 1:
		case 2:
			break;
		case 4:
		case 5:
			if(acm_mask & BIT(2))
				change_priority = 0;
			break;
		case 6:
		case 7:
			if(acm_mask & BIT(3))
				change_priority = 5;
			break;
		default:
			DBG_871X("qos_acm(): invalid pattrib->priority: %d!!!\n", priority);
			break;
	}

	return change_priority;
}

/*
 * Derive the frame's user priority from the IPv4 TOS precedence bits and
 * mark it as a QoS data frame.  Continues in the next chunk.
 */
static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
{
	struct ethhdr etherhdr;
	struct iphdr ip_hdr;
	s32 UserPriority = 0;

	_rtw_open_pktfile(ppktfile->pkt, ppktfile);
	_rtw_pktfile_read(ppktfile, (unsigned char*)&etherhdr, ETH_HLEN);

	// get UserPriority from IP hdr
	if (pattrib->ether_type == 0x0800)
	{
		_rtw_pktfile_read(ppktfile, (u8*)&ip_hdr, sizeof(ip_hdr));
//		UserPriority = (ntohs(ip_hdr.tos) >> 5) & 0x3;
		UserPriority = ip_hdr.tos >> 5;
	}
	/*
	else if (pattrib->ether_type == 0x888e)
	{
		// "When priority processing of data frames is supported,
		// a STA's SME should send EAPOL-Key frames at the highest priority."
		UserPriority = 7;
	}
	*/

	pattrib->priority = UserPriority;
	pattrib->hdrlen = WLAN_HDR_A3_QOS_LEN;
	pattrib->subtype = WIFI_QOS_DATA_TYPE;
}

#ifdef CONFIG_TDLS
/*
 * Mark pattrib->direct_link when a TDLS link to the destination exists.
 * ARP (0x0806) is excluded and always goes through the AP path.
 */
void rtw_check_tdls_established(_adapter *padapter, struct pkt_attrib *pattrib)
{
	pattrib->ptdls_sta = NULL;

	pattrib->direct_link = _FALSE;
	if((padapter->tdlsinfo.link_established == _TRUE)){
		pattrib->ptdls_sta = rtw_get_stainfo(&padapter->stapriv, pattrib->dst);
		if((pattrib->ptdls_sta!=NULL)&&
			(pattrib->ptdls_sta->tdls_sta_state & TDLS_LINKED_STATE)&&
			(pattrib->ether_type!=0x0806)){
			pattrib->direct_link = _TRUE;
			//DBG_871X("send ptk to "MAC_FMT" using direct link\n", MAC_ARG(pattrib->dst));
		}
	}
}

/*
 * Build the packet attributes for a TDLS frame: sta lookup, priority,
 * header type, then the shared security/PHY attribute helpers.
 * Returns _SUCCESS or _FAIL.  Continues in the next chunk.
 */
s32 update_tdls_attrib(_adapter *padapter, struct pkt_attrib *pattrib)
{
	struct sta_info *psta = NULL;
	struct sta_priv		*pstapriv = &padapter->stapriv;
	struct security_priv	*psecuritypriv = &padapter->securitypriv;
	struct mlme_priv	*pmlmepriv = &padapter->mlmepriv;
	struct qos_priv		*pqospriv= &pmlmepriv->qospriv;

	s32 res=_SUCCESS;

	psta = rtw_get_stainfo(pstapriv, pattrib->ra);

	if (psta == NULL)	{
		res =_FAIL;
		goto exit;
	}

	pattrib->mac_id = psta->mac_id;
	pattrib->psta = psta;
	pattrib->ack_policy = 0;
	// get ether_hdr_len
	pattrib->pkt_hdrlen = ETH_HLEN;//(pattrib->ether_type == 0x8100) ?
// (14 + 4 ): 14; //vlan tag

	// [TDLS] TODO: setup req/rsp should be AC_BK
	if (pqospriv->qos_option &&  psta->qos_option) {
		pattrib->priority = 4;	//tdls management frame should be AC_VI
		pattrib->hdrlen = WLAN_HDR_A3_QOS_LEN;
		pattrib->subtype = WIFI_QOS_DATA_TYPE;
	} else {
		pattrib->priority = 0;
		pattrib->hdrlen = WLAN_HDR_A3_LEN;
		pattrib->subtype = WIFI_DATA_TYPE;
	}

	//TODO:_lock
	if(update_attrib_sec_info(padapter, pattrib, psta) == _FAIL)
	{
		res = _FAIL;
		goto exit;
	}

	update_attrib_phy_info(padapter, pattrib, psta);

exit:

	return res;
}
#endif //CONFIG_TDLS

/*
 * Parse the outgoing ethernet packet and fill the frame attributes used
 * by the rest of the tx path: addresses, ether type, special-packet flags
 * (DHCP/ICMP/EAPOL), station lookup, security and PHY parameters, QoS.
 * Returns _SUCCESS, or _FAIL when the packet must be dropped.
 * Continues across the following chunks.
 */
static s32 update_attrib(_adapter *padapter, _pkt *pkt, struct pkt_attrib *pattrib)
{
	uint i;
	struct pkt_file pktfile;
	struct sta_info *psta = NULL;
	struct ethhdr etherhdr;

	sint bmcast;
	struct sta_priv		*pstapriv = &padapter->stapriv;
	struct security_priv	*psecuritypriv = &padapter->securitypriv;
	struct mlme_priv	*pmlmepriv = &padapter->mlmepriv;
	struct qos_priv		*pqospriv= &pmlmepriv->qospriv;
	sint res = _SUCCESS;

_func_enter_;

	DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib);

	_rtw_open_pktfile(pkt, &pktfile);
	i = _rtw_pktfile_read(&pktfile, (u8*)&etherhdr, ETH_HLEN);

	pattrib->ether_type = ntohs(etherhdr.h_proto);

	_rtw_memcpy(pattrib->dst, &etherhdr.h_dest, ETH_ALEN);
	_rtw_memcpy(pattrib->src, &etherhdr.h_source, ETH_ALEN);

	/* RA/TA depend on the operating mode (IBSS / STA / AP) */
	if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE) ||
		(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE))
	{
		_rtw_memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
		_rtw_memcpy(pattrib->ta, myid(&padapter->eeprompriv), ETH_ALEN);
		DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_adhoc);
	}
	else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
	{
		_rtw_memcpy(pattrib->ra, get_bssid(pmlmepriv), ETH_ALEN);
		_rtw_memcpy(pattrib->ta, myid(&padapter->eeprompriv), ETH_ALEN);
		DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_sta);
	}
	else if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
	{
		_rtw_memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
		_rtw_memcpy(pattrib->ta, get_bssid(pmlmepriv), ETH_ALEN);
		DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_ap);
	}
	else
		DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_unknown);

#ifdef CONFIG_TDLS
	rtw_check_tdls_established(padapter, pattrib);
#endif //CONFIG_TDLS

	pattrib->pktlen = pktfile.pkt_len;

	if (ETH_P_IP == pattrib->ether_type)
	{
		// The following is for DHCP and ARP packet, we use cck1M to tx these packets and let LPS awake some time
		// to prevent DHCP protocol fail
		u8 tmp[24];
		_rtw_pktfile_read(&pktfile, &tmp[0], 24);
		pattrib->dhcp_pkt = 0;
		if (pktfile.pkt_len > 282) {//MINIMUM_DHCP_PACKET_SIZE) {
			if (ETH_P_IP == pattrib->ether_type) {// IP header
				/* tmp[21]/tmp[23] are the UDP src/dst port low bytes
				 * at fixed offsets past the IP header */
				if (((tmp[21] == 68) && (tmp[23] == 67)) ||
					((tmp[21] == 67) && (tmp[23] == 68))) {
					// 68 : UDP BOOTP client
					// 67 : UDP BOOTP server
					RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("======================update_attrib: get DHCP Packet \n"));
					// Use low rate to send DHCP packet.
					//if(pMgntInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom)
					//{
					//	tcb_desc->DataRate = MgntQuery_TxRateExcludeCCKRates(ieee);//0xc;//ofdm 6m
					//	tcb_desc->bTxDisableRateFallBack = false;
					//}
					//else
					//	pTcb->DataRate = Adapter->MgntInfo.LowestBasicRate;
					//RTPRINT(FDM, WA_IOT, ("DHCP TranslateHeader(), pTcb->DataRate = 0x%x\n", pTcb->DataRate));
					pattrib->dhcp_pkt = 1;
					DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_dhcp);
				}
			}
		}

		//for parsing ICMP pakcets
		{
			struct iphdr *piphdr = (struct iphdr *)tmp;

			pattrib->icmp_pkt = 0;
			if(piphdr->protocol == 0x1) // protocol type in ip header 0x1 is ICMP
			{
				pattrib->icmp_pkt = 1;
				DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_icmp);
			}
		}

	}
	else if (0x888e == pattrib->ether_type)
	{
		DBG_871X_LEVEL(_drv_always_, "send eapol packet\n");
	}

	/* hold off scans so the handshake/DHCP exchange can complete */
	if ( (pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1) )
	{
		rtw_set_scan_deny(padapter, 3000);
	}

#ifdef CONFIG_LPS
	// If EAPOL , ARP , OR DHCP packet, driver must be in active mode.
#ifdef CONFIG_WAPI_SUPPORT if ( (pattrib->ether_type == 0x88B4) || (pattrib->ether_type == 0x0806) || (pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1) ) #else //!CONFIG_WAPI_SUPPORT #if 0 if ( (pattrib->ether_type == 0x0806) || (pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1) ) #else // only ICMP/DHCP packets is as SPECIAL_PACKET, and leave LPS when tx IMCP/DHCP packets. //if ((pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1) ) if (pattrib->icmp_pkt==1) { rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_LEAVE, 1); } else if(pattrib->dhcp_pkt==1) #endif #endif { DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_active); rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SPECIAL_PACKET, 1); } #endif //CONFIG_LPS bmcast = IS_MCAST(pattrib->ra); // get sta_info if (bmcast) { psta = rtw_get_bcmc_stainfo(padapter); } else { psta = rtw_get_stainfo(pstapriv, pattrib->ra); if (psta == NULL) { // if we cannot get psta => drop the pkt DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_ucast_sta); RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("\nupdate_attrib => get sta_info fail, ra:" MAC_FMT"\n", MAC_ARG(pattrib->ra))); #ifdef DBG_TX_DROP_FRAME DBG_871X("DBG_TX_DROP_FRAME %s get sta_info fail, ra:" MAC_FMT"\n", __FUNCTION__, MAC_ARG(pattrib->ra)); #endif res =_FAIL; goto exit; } else if((check_fwstate(pmlmepriv, WIFI_AP_STATE)==_TRUE)&&(!(psta->state & _FW_LINKED))) { DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_ucast_ap_link); res =_FAIL; goto exit; } } if(psta == NULL) { // if we cannot get psta => drop the pkt DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sta); RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("\nupdate_attrib => get sta_info fail, ra:" MAC_FMT "\n", MAC_ARG(pattrib->ra))); #ifdef DBG_TX_DROP_FRAME DBG_871X("DBG_TX_DROP_FRAME %s get sta_info fail, ra:" MAC_FMT"\n", __FUNCTION__, MAC_ARG(pattrib->ra)); #endif res = _FAIL; goto exit; } if(!(psta->state &_FW_LINKED)) { DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_link); 
DBG_871X("%s, psta("MAC_FMT")->state(0x%x) != _FW_LINKED\n", __func__, MAC_ARG(psta->hwaddr), psta->state); return _FAIL; } //TODO:_lock if(update_attrib_sec_info(padapter, pattrib, psta) == _FAIL) { DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sec); res = _FAIL; goto exit; } update_attrib_phy_info(padapter, pattrib, psta); //DBG_8192C("%s ==> mac_id(%d)\n",__FUNCTION__,pattrib->mac_id ); pattrib->psta = psta; //TODO:_unlock pattrib->pctrl = 0; pattrib->ack_policy = 0; // get ether_hdr_len pattrib->pkt_hdrlen = ETH_HLEN;//(pattrib->ether_type == 0x8100) ? (14 + 4 ): 14; //vlan tag pattrib->hdrlen = WLAN_HDR_A3_LEN; pattrib->subtype = WIFI_DATA_TYPE; pattrib->priority = 0; if (check_fwstate(pmlmepriv, WIFI_AP_STATE|WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE)) { if(pattrib->qos_en) set_qos(&pktfile, pattrib); } else { if(pqospriv->qos_option) { set_qos(&pktfile, pattrib); if(pmlmepriv->acm_mask != 0) { pattrib->priority = qos_acm(pmlmepriv->acm_mask, pattrib->priority); } } } //pattrib->priority = 5; //force to used VI queue, for testing rtw_set_tx_chksum_offload(pkt, pattrib); exit: _func_exit_; return res; } static s32 xmitframe_addmic(_adapter *padapter, struct xmit_frame *pxmitframe){ sint curfragnum,length; u8 *pframe, *payload,mic[8]; struct mic_data micdata; //struct sta_info *stainfo; struct qos_priv *pqospriv= &(padapter->mlmepriv.qospriv); struct pkt_attrib *pattrib = &pxmitframe->attrib; struct security_priv *psecuritypriv=&padapter->securitypriv; struct xmit_priv *pxmitpriv=&padapter->xmitpriv; u8 priority[4]={0x0,0x0,0x0,0x0}; u8 hw_hdr_offset = 0; sint bmcst = IS_MCAST(pattrib->ra); /* if(pattrib->psta) { stainfo = pattrib->psta; } else { DBG_871X("%s, call rtw_get_stainfo()\n", __func__); stainfo=rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0]); } if(stainfo==NULL) { DBG_871X("%s, psta==NUL\n", __func__); return _FAIL; } if(!(stainfo->state &_FW_LINKED)) { DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, stainfo->state); return 
_FAIL; } */ _func_enter_; #ifdef CONFIG_USB_TX_AGGREGATION hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);; #else #ifdef CONFIG_TX_EARLY_MODE hw_hdr_offset = TXDESC_OFFSET+ EARLY_MODE_INFO_SIZE; #else hw_hdr_offset = TXDESC_OFFSET; #endif #endif if(pattrib->encrypt ==_TKIP_)//if(psecuritypriv->dot11PrivacyAlgrthm==_TKIP_PRIVACY_) { //encode mic code //if(stainfo!= NULL) { u8 null_key[16]={0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0}; pframe = pxmitframe->buf_addr + hw_hdr_offset; if(bmcst) { if(_rtw_memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16)==_TRUE){ //DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey==0\n"); //rtw_msleep_os(10); return _FAIL; } //start to calculate the mic code rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey); } else { if(_rtw_memcmp(&pattrib->dot11tkiptxmickey.skey[0],null_key, 16)==_TRUE){ //DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey==0\n"); //rtw_msleep_os(10); return _FAIL; } //start to calculate the mic code rtw_secmicsetkey(&micdata, &pattrib->dot11tkiptxmickey.skey[0]); } if(pframe[1]&1){ //ToDS==1 rtw_secmicappend(&micdata, &pframe[16], 6); //DA if(pframe[1]&2) //From Ds==1 rtw_secmicappend(&micdata, &pframe[24], 6); else rtw_secmicappend(&micdata, &pframe[10], 6); } else{ //ToDS==0 rtw_secmicappend(&micdata, &pframe[4], 6); //DA if(pframe[1]&2) //From Ds==1 rtw_secmicappend(&micdata, &pframe[16], 6); else rtw_secmicappend(&micdata, &pframe[10], 6); } //if(pqospriv->qos_option==1) if(pattrib->qos_en) priority[0]=(u8)pxmitframe->attrib.priority; rtw_secmicappend(&micdata, &priority[0], 4); payload=pframe; for(curfragnum=0;curfragnum<pattrib->nr_frags;curfragnum++){ payload=(u8 *)RND4((SIZE_PTR)(payload)); RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("===curfragnum=%d, pframe= 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x,!!!\n", curfragnum,*payload, 
*(payload+1),*(payload+2),*(payload+3),*(payload+4),*(payload+5),*(payload+6),*(payload+7))); payload=payload+pattrib->hdrlen+pattrib->iv_len; RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("curfragnum=%d pattrib->hdrlen=%d pattrib->iv_len=%d",curfragnum,pattrib->hdrlen,pattrib->iv_len)); if((curfragnum+1)==pattrib->nr_frags){ length=pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-( (pattrib->bswenc) ? pattrib->icv_len : 0); rtw_secmicappend(&micdata, payload,length); payload=payload+length; } else{ length=pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-( (pattrib->bswenc) ? pattrib->icv_len : 0); rtw_secmicappend(&micdata, payload, length); payload=payload+length+pattrib->icv_len; RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("curfragnum=%d length=%d pattrib->icv_len=%d",curfragnum,length,pattrib->icv_len)); } } rtw_secgetmic(&micdata,&(mic[0])); RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("xmitframe_addmic: before add mic code!!!\n")); RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("xmitframe_addmic: pattrib->last_txcmdsz=%d!!!\n",pattrib->last_txcmdsz)); RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("xmitframe_addmic: mic[0]=0x%.2x ,mic[1]=0x%.2x ,mic[2]=0x%.2x ,mic[3]=0x%.2x \n\ mic[4]=0x%.2x ,mic[5]=0x%.2x ,mic[6]=0x%.2x ,mic[7]=0x%.2x !!!!\n", mic[0],mic[1],mic[2],mic[3],mic[4],mic[5],mic[6],mic[7])); //add mic code and add the mic code length in last_txcmdsz _rtw_memcpy(payload, &(mic[0]),8); pattrib->last_txcmdsz+=8; RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("\n ========last pkt========\n")); payload=payload-pattrib->last_txcmdsz+8; for(curfragnum=0;curfragnum<pattrib->last_txcmdsz;curfragnum=curfragnum+8) RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,(" %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x ", *(payload+curfragnum), *(payload+curfragnum+1), *(payload+curfragnum+2),*(payload+curfragnum+3), *(payload+curfragnum+4),*(payload+curfragnum+5),*(payload+curfragnum+6),*(payload+curfragnum+7))); } /* else{ 
RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("xmitframe_addmic: rtw_get_stainfo==NULL!!!\n")); } */ } _func_exit_; return _SUCCESS; } static s32 xmitframe_swencrypt(_adapter *padapter, struct xmit_frame *pxmitframe){ struct pkt_attrib *pattrib = &pxmitframe->attrib; //struct security_priv *psecuritypriv=&padapter->securitypriv; _func_enter_; //if((psecuritypriv->sw_encrypt)||(pattrib->bswenc)) if(pattrib->bswenc) { //DBG_871X("start xmitframe_swencrypt\n"); RT_TRACE(_module_rtl871x_xmit_c_,_drv_alert_,("### xmitframe_swencrypt\n")); switch(pattrib->encrypt){ case _WEP40_: case _WEP104_: rtw_wep_encrypt(padapter, (u8 *)pxmitframe); break; case _TKIP_: rtw_tkip_encrypt(padapter, (u8 *)pxmitframe); break; case _AES_: rtw_aes_encrypt(padapter, (u8 * )pxmitframe); break; #ifdef CONFIG_WAPI_SUPPORT case _SMS4_: rtw_sms4_encrypt(padapter, (u8 * )pxmitframe); #endif default: break; } } else { RT_TRACE(_module_rtl871x_xmit_c_,_drv_notice_,("### xmitframe_hwencrypt\n")); } _func_exit_; return _SUCCESS; } s32 rtw_make_wlanhdr (_adapter *padapter , u8 *hdr, struct pkt_attrib *pattrib) { u16 *qc; struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct qos_priv *pqospriv = &pmlmepriv->qospriv; u8 qos_option = _FALSE; #ifdef CONFIG_TDLS struct tdls_info *ptdlsinfo = &padapter->tdlsinfo; struct sta_priv *pstapriv = &padapter->stapriv; #endif //CONFIG_TDLS sint res = _SUCCESS; u16 *fctrl = &pwlanhdr->frame_ctl; //struct sta_info *psta; //sint bmcst = IS_MCAST(pattrib->ra); _func_enter_; /* psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra); if(pattrib->psta != psta) { DBG_871X("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta); return; } if(psta==NULL) { DBG_871X("%s, psta==NUL\n", __func__); return _FAIL; } if(!(psta->state &_FW_LINKED)) { DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state); return _FAIL; } */ _rtw_memset(hdr, 0, WLANHDR_OFFSET); 
SetFrameSubType(fctrl, pattrib->subtype); if (pattrib->subtype & WIFI_DATA_TYPE) { if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == _TRUE)) { #ifdef CONFIG_TDLS if(pattrib->direct_link == _TRUE){ //TDLS data transfer, ToDS=0, FrDs=0 _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN); _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN); _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN); } else #endif //CONFIG_TDLS { //to_ds = 1, fr_ds = 0; // 1.Data transfer to AP // 2.Arp pkt will relayed by AP SetToDs(fctrl); _rtw_memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN); _rtw_memcpy(pwlanhdr->addr2, pattrib->ta, ETH_ALEN); _rtw_memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN); } if (pqospriv->qos_option) qos_option = _TRUE; } else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE) ) { //to_ds = 0, fr_ds = 1; SetFrDs(fctrl); _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN); _rtw_memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN); _rtw_memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN); if(pattrib->qos_en) qos_option = _TRUE; } else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE) || (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE)) { _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN); _rtw_memcpy(pwlanhdr->addr2, pattrib->ta, ETH_ALEN); _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN); if(pattrib->qos_en) qos_option = _TRUE; } else { RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv))); res = _FAIL; goto exit; } if(pattrib->mdata) SetMData(fctrl); if (pattrib->encrypt) SetPrivacy(fctrl); if (qos_option) { qc = (unsigned short *)(hdr + pattrib->hdrlen - 2); if (pattrib->priority) SetPriority(qc, pattrib->priority); SetEOSP(qc, pattrib->eosp); SetAckpolicy(qc, pattrib->ack_policy); } //TODO: fill HT Control Field //Update Seq Num will be handled by f/w { struct sta_info *psta; psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra); 
if(pattrib->psta != psta) { DBG_871X("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta); return _FAIL; } if(psta==NULL) { DBG_871X("%s, psta==NUL\n", __func__); return _FAIL; } if(!(psta->state &_FW_LINKED)) { DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state); return _FAIL; } if(psta) { #ifdef CONFIG_TDLS if(pattrib->direct_link==_TRUE) { psta = pattrib->ptdls_sta; //qos_en, ht_en, init rate, ,bw, ch_offset, sgi //pattrib->qos_en = ptdls_sta->qos_option; pattrib->raid = psta->raid; #ifdef CONFIG_80211N_HT pattrib->bwmode = psta->bw_mode; pattrib->ht_en = psta->htpriv.ht_option; pattrib->ch_offset = psta->htpriv.ch_offset; pattrib->sgi= query_ra_short_GI(psta); #endif //CONFIG_80211N_HT } #endif //CONFIG_TDLS psta->sta_xmitpriv.txseq_tid[pattrib->priority]++; psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF; pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority]; SetSeqNum(hdr, pattrib->seqnum); #ifdef CONFIG_80211N_HT //check if enable ampdu if(pattrib->ht_en && psta->htpriv.ampdu_enable) { if(psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority)) pattrib->ampdu_en = _TRUE; } //re-check if enable ampdu by BA_starting_seqctrl if(pattrib->ampdu_en == _TRUE) { u16 tx_seq; tx_seq = psta->BA_starting_seqctrl[pattrib->priority & 0x0f]; //check BA_starting_seqctrl if(SN_LESS(pattrib->seqnum, tx_seq)) { //DBG_871X("tx ampdu seqnum(%d) < tx_seq(%d)\n", pattrib->seqnum, tx_seq); pattrib->ampdu_en = _FALSE;//AGG BK } else if(SN_EQUAL(pattrib->seqnum, tx_seq)) { psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq+1)&0xfff; pattrib->ampdu_en = _TRUE;//AGG EN } else { //DBG_871X("tx ampdu over run\n"); psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum+1)&0xfff; pattrib->ampdu_en = _TRUE;//AGG EN } } #endif //CONFIG_80211N_HT } } } else { } exit: _func_exit_; return res; } s32 rtw_txframes_pending(_adapter *padapter) { struct xmit_priv *pxmitpriv = &padapter->xmitpriv; return 
((_rtw_queue_empty(&pxmitpriv->be_pending) == _FALSE) || (_rtw_queue_empty(&pxmitpriv->bk_pending) == _FALSE) || (_rtw_queue_empty(&pxmitpriv->vi_pending) == _FALSE) || (_rtw_queue_empty(&pxmitpriv->vo_pending) == _FALSE)); } s32 rtw_txframes_sta_ac_pending(_adapter *padapter, struct pkt_attrib *pattrib) { struct sta_info *psta; struct tx_servq *ptxservq; int priority = pattrib->priority; /* if(pattrib->psta) { psta = pattrib->psta; } else { DBG_871X("%s, call rtw_get_stainfo()\n", __func__); psta=rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0]); } */ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra); if(pattrib->psta != psta) { DBG_871X("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta); return 0; } if(psta==NULL) { DBG_871X("%s, psta==NUL\n", __func__); return 0; } if(!(psta->state &_FW_LINKED)) { DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state); return 0; } switch(priority) { case 1: case 2: ptxservq = &(psta->sta_xmitpriv.bk_q); break; case 4: case 5: ptxservq = &(psta->sta_xmitpriv.vi_q); break; case 6: case 7: ptxservq = &(psta->sta_xmitpriv.vo_q); break; case 0: case 3: default: ptxservq = &(psta->sta_xmitpriv.be_q); break; } return ptxservq->qcnt; } #ifdef CONFIG_TDLS int rtw_build_tdls_ies(_adapter * padapter, struct xmit_frame * pxmitframe, u8 *pframe, struct tdls_txmgmt *ptxmgmt) { int res=_SUCCESS; switch(ptxmgmt->action_code){ case TDLS_SETUP_REQUEST: rtw_build_tdls_setup_req_ies(padapter, pxmitframe, pframe, ptxmgmt); break; case TDLS_SETUP_RESPONSE: rtw_build_tdls_setup_rsp_ies(padapter, pxmitframe, pframe, ptxmgmt); break; case TDLS_SETUP_CONFIRM: rtw_build_tdls_setup_cfm_ies(padapter, pxmitframe, pframe, ptxmgmt); break; case TDLS_TEARDOWN: rtw_build_tdls_teardown_ies(padapter, pxmitframe, pframe, ptxmgmt); break; case TDLS_DISCOVERY_REQUEST: rtw_build_tdls_dis_req_ies(padapter, pxmitframe, pframe, ptxmgmt); break; case TDLS_PEER_TRAFFIC_INDICATION: 
rtw_build_tdls_peer_traffic_indication_ies(padapter, pxmitframe, pframe); break; case TDLS_CHANNEL_SWITCH_REQUEST: rtw_build_tdls_ch_switch_req_ies(padapter, pxmitframe, pframe); break; case TDLS_CHANNEL_SWITCH_RESPONSE: rtw_build_tdls_ch_switch_rsp_ies(padapter, pxmitframe, pframe); break; case TDLS_PEER_TRAFFIC_RESPONSE: rtw_build_tdls_peer_traffic_rsp_ies(padapter, pxmitframe, pframe); break; #ifdef CONFIG_WFD case TUNNELED_PROBE_REQ: rtw_build_tunneled_probe_req_ies(padapter, pxmitframe, pframe); break; case TUNNELED_PROBE_RSP: rtw_build_tunneled_probe_rsp_ies(padapter, pxmitframe, pframe); break; #endif //CONFIG_WFD default: res=_FAIL; break; } return res; } s32 rtw_make_tdls_wlanhdr (_adapter *padapter , u8 *hdr, struct pkt_attrib *pattrib, struct tdls_txmgmt *ptxmgmt) { u16 *qc; struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct qos_priv *pqospriv = &pmlmepriv->qospriv; struct sta_priv *pstapriv = &padapter->stapriv; struct sta_info *psta=NULL, *ptdls_sta=NULL; u8 tdls_seq=0, baddr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; sint res = _SUCCESS; u16 *fctrl = &pwlanhdr->frame_ctl; _func_enter_; _rtw_memset(hdr, 0, WLANHDR_OFFSET); SetFrameSubType(fctrl, pattrib->subtype); switch(ptxmgmt->action_code){ case TDLS_SETUP_REQUEST: case TDLS_SETUP_RESPONSE: case TDLS_SETUP_CONFIRM: case TDLS_PEER_TRAFFIC_INDICATION: case TDLS_PEER_PSM_REQUEST: //directly to peer STA or via AP case TUNNELED_PROBE_REQ: case TUNNELED_PROBE_RSP: case TDLS_DISCOVERY_REQUEST: SetToDs(fctrl); _rtw_memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN); _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN); _rtw_memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN); break; case TDLS_CHANNEL_SWITCH_REQUEST: case TDLS_CHANNEL_SWITCH_RESPONSE: case TDLS_PEER_PSM_RESPONSE: case TDLS_PEER_TRAFFIC_RESPONSE: _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN); _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN); 
_rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN); tdls_seq=1; break; case TDLS_TEARDOWN: if(ptxmgmt->status_code == _RSON_TDLS_TEAR_UN_RSN_) { _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN); _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN); _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN); tdls_seq=1; } else { SetToDs(fctrl); _rtw_memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN); _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN); _rtw_memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN); } break; } if (pattrib->encrypt) SetPrivacy(fctrl); if(ptxmgmt->action_code == TDLS_PEER_TRAFFIC_RESPONSE) { SetPwrMgt(fctrl); } if (pqospriv->qos_option) { qc = (unsigned short *)(hdr + pattrib->hdrlen - 2); if (pattrib->priority) SetPriority(qc, pattrib->priority); SetAckpolicy(qc, pattrib->ack_policy); } psta = pattrib->psta; // 1. update seq_num per link by sta_info // 2. rewrite encrypt to _AES_, also rewrite iv_len, icv_len if(tdls_seq==1){ ptdls_sta=rtw_get_stainfo(pstapriv, pattrib->dst); if(ptdls_sta){ ptdls_sta->sta_xmitpriv.txseq_tid[pattrib->priority]++; ptdls_sta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF; pattrib->seqnum = ptdls_sta->sta_xmitpriv.txseq_tid[pattrib->priority]; SetSeqNum(hdr, pattrib->seqnum); if (pattrib->encrypt){ pattrib->encrypt= _AES_; pattrib->iv_len=8; pattrib->icv_len=8; pattrib->bswenc = _FALSE; } pattrib->mac_id = ptdls_sta->mac_id; }else{ res=_FAIL; goto exit; } }else if(psta){ psta->sta_xmitpriv.txseq_tid[pattrib->priority]++; psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF; pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority]; SetSeqNum(hdr, pattrib->seqnum); } exit: _func_exit_; return res; } s32 rtw_xmit_tdls_coalesce(_adapter * padapter, struct xmit_frame * pxmitframe, struct tdls_txmgmt *ptxmgmt) { s32 llc_sz; u8 *pframe, *mem_start; struct sta_info *psta; struct sta_priv *pstapriv = &padapter->stapriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct 
pkt_attrib *pattrib = &pxmitframe->attrib; u8 *pbuf_start; s32 bmcst = IS_MCAST(pattrib->ra); s32 res = _SUCCESS; _func_enter_; if (pattrib->psta) { psta = pattrib->psta; } else { if(bmcst) { psta = rtw_get_bcmc_stainfo(padapter); } else { psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra); } } if(psta==NULL) return _FAIL; if (pxmitframe->buf_addr == NULL) return _FAIL; pbuf_start = pxmitframe->buf_addr; mem_start = pbuf_start + TXDESC_OFFSET; if (rtw_make_tdls_wlanhdr(padapter, mem_start, pattrib, ptxmgmt) == _FAIL) { res = _FAIL; goto exit; } pframe = mem_start; pframe += pattrib->hdrlen; //adding icv, if necessary... if (pattrib->iv_len) { if (psta != NULL) { switch(pattrib->encrypt) { case _WEP40_: case _WEP104_: WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx); break; case _TKIP_: if(bmcst) TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx); else TKIP_IV(pattrib->iv, psta->dot11txpn, 0); break; case _AES_: if(bmcst) AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx); else AES_IV(pattrib->iv, psta->dot11txpn, 0); break; } } _rtw_memcpy(pframe, pattrib->iv, pattrib->iv_len); pframe += pattrib->iv_len; } llc_sz = rtw_put_snap(pframe, pattrib->ether_type); pframe += llc_sz; //pattrib->pktlen will be counted in rtw_build_tdls_ies pattrib->pktlen = 0; rtw_build_tdls_ies(padapter, pxmitframe, pframe, ptxmgmt); if ((pattrib->icv_len >0 )&& (pattrib->bswenc)) { pframe += pattrib->pktlen; _rtw_memcpy(pframe, pattrib->icv, pattrib->icv_len); pframe += pattrib->icv_len; } pattrib->nr_frags = 1; pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + llc_sz + ((pattrib->bswenc) ? 
pattrib->icv_len : 0) + pattrib->pktlen; if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) { goto exit; } xmitframe_swencrypt(padapter, pxmitframe); update_attrib_vcs_info(padapter, pxmitframe); exit: _func_exit_; return res; } #endif //CONFIG_TDLS /* * Calculate wlan 802.11 packet MAX size from pkt_attrib * This function doesn't consider fragment case */ u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib) { u32 len = 0; len = pattrib->hdrlen + pattrib->iv_len; // WLAN Header and IV len += SNAP_SIZE + sizeof(u16); // LLC len += pattrib->pktlen; if (pattrib->encrypt == _TKIP_) len += 8; // MIC len += ((pattrib->bswenc) ? pattrib->icv_len : 0); // ICV return len; } /* This sub-routine will perform all the following: 1. remove 802.3 header. 2. create wlan_header, based on the info in pxmitframe 3. append sta's iv/ext-iv 4. append LLC 5. move frag chunk from pframe to pxmitframe->mem 6. apply sw-encrypt, if necessary. */ s32 rtw_xmitframe_coalesce(_adapter *padapter, _pkt *pkt, struct xmit_frame *pxmitframe) { struct pkt_file pktfile; s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz; SIZE_PTR addr; u8 *pframe, *mem_start; u8 hw_hdr_offset; //struct sta_info *psta; //struct sta_priv *pstapriv = &padapter->stapriv; //struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct pkt_attrib *pattrib = &pxmitframe->attrib; u8 *pbuf_start; s32 bmcst = IS_MCAST(pattrib->ra); s32 res = _SUCCESS; _func_enter_; /* if (pattrib->psta) { psta = pattrib->psta; } else { DBG_871X("%s, call rtw_get_stainfo()\n", __func__); psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra); } if(psta==NULL) { DBG_871X("%s, psta==NUL\n", __func__); return _FAIL; } if(!(psta->state &_FW_LINKED)) { DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state); return _FAIL; } */ if (pxmitframe->buf_addr == NULL){ DBG_8192C("==> %s buf_addr==NULL \n",__FUNCTION__); return _FAIL; } pbuf_start = pxmitframe->buf_addr; #ifdef 
CONFIG_USB_TX_AGGREGATION hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ); #else #ifdef CONFIG_TX_EARLY_MODE //for SDIO && Tx Agg hw_hdr_offset = TXDESC_OFFSET + EARLY_MODE_INFO_SIZE; #else hw_hdr_offset = TXDESC_OFFSET; #endif #endif mem_start = pbuf_start + hw_hdr_offset; if (rtw_make_wlanhdr(padapter, mem_start, pattrib) == _FAIL) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("rtw_xmitframe_coalesce: rtw_make_wlanhdr fail; drop pkt\n")); DBG_8192C("rtw_xmitframe_coalesce: rtw_make_wlanhdr fail; drop pkt\n"); res = _FAIL; goto exit; } _rtw_open_pktfile(pkt, &pktfile); _rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen); frg_inx = 0; frg_len = pxmitpriv->frag_len - 4;//2346-4 = 2342 while (1) { llc_sz = 0; mpdu_len = frg_len; pframe = mem_start; SetMFrag(mem_start); pframe += pattrib->hdrlen; mpdu_len -= pattrib->hdrlen; //adding icv, if necessary... if (pattrib->iv_len) { /* //if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) // psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv)); //else // psta = rtw_get_stainfo(pstapriv, pattrib->ra); if (psta != NULL) { switch(pattrib->encrypt) { case _WEP40_: case _WEP104_: WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx); break; case _TKIP_: if(bmcst) TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx); else TKIP_IV(pattrib->iv, psta->dot11txpn, 0); break; case _AES_: if(bmcst) AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx); else AES_IV(pattrib->iv, psta->dot11txpn, 0); break; #ifdef CONFIG_WAPI_SUPPORT case _SMS4_: rtw_wapi_get_iv(padapter,pattrib->ra,pattrib->iv); break; #endif } } */ _rtw_memcpy(pframe, pattrib->iv, pattrib->iv_len); RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_, ("rtw_xmitframe_coalesce: keyid=%d pattrib->iv[3]=%.2x pframe=%.2x %.2x %.2x %.2x\n", padapter->securitypriv.dot11PrivacyKeyIndex, pattrib->iv[3], *pframe, *(pframe+1), *(pframe+2), *(pframe+3))); pframe += pattrib->iv_len; mpdu_len -= pattrib->iv_len; } if (frg_inx == 0) { llc_sz = 
rtw_put_snap(pframe, pattrib->ether_type); pframe += llc_sz; mpdu_len -= llc_sz; } if ((pattrib->icv_len >0) && (pattrib->bswenc)) { mpdu_len -= pattrib->icv_len; } if (bmcst) { // don't do fragment to broadcat/multicast packets mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen); } else { mem_sz = _rtw_pktfile_read(&pktfile, pframe, mpdu_len); } pframe += mem_sz; if ((pattrib->icv_len >0 )&& (pattrib->bswenc)) { _rtw_memcpy(pframe, pattrib->icv, pattrib->icv_len); pframe += pattrib->icv_len; } frg_inx++; if (bmcst || (rtw_endofpktfile(&pktfile) == _TRUE)) { pattrib->nr_frags = frg_inx; pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags==1)? llc_sz:0) + ((pattrib->bswenc) ? pattrib->icv_len : 0) + mem_sz; ClearMFrag(mem_start); break; } else { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: There're still something in packet!\n", __FUNCTION__)); } addr = (SIZE_PTR)(pframe); mem_start = (unsigned char *)RND4(addr) + hw_hdr_offset; _rtw_memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen); } if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic(padapter, pxmitframe)==_FAIL\n")); DBG_8192C("xmitframe_addmic(padapter, pxmitframe)==_FAIL\n"); res = _FAIL; goto exit; } xmitframe_swencrypt(padapter, pxmitframe); if(bmcst == _FALSE) update_attrib_vcs_info(padapter, pxmitframe); else pattrib->vcs_mode = NONE_VCS; exit: _func_exit_; return res; } #ifdef CONFIG_IEEE80211W //broadcast or multicast management pkt use BIP, unicast management pkt use CCMP encryption s32 rtw_mgmt_xmitframe_coalesce(_adapter *padapter, _pkt *pkt, struct xmit_frame *pxmitframe) { struct pkt_file pktfile; s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz; SIZE_PTR addr; u8 *pframe, *mem_start = NULL, *tmp_buf=NULL; u8 hw_hdr_offset, subtype ; struct sta_info *psta = NULL; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct pkt_attrib *pattrib = &pxmitframe->attrib; u8 *pbuf_start; 
s32 bmcst = IS_MCAST(pattrib->ra); s32 res = _FAIL; u8 *BIP_AAD=NULL; u8 *MGMT_body=NULL; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct rtw_ieee80211_hdr *pwlanhdr; u8 MME[_MME_IE_LENGTH_]; _irqL irqL; u32 ori_len; mem_start = pframe = (u8 *)(pxmitframe->buf_addr) + TXDESC_OFFSET; pwlanhdr = (struct rtw_ieee80211_hdr *)pframe; _func_enter_; ori_len = BIP_AAD_SIZE+pattrib->pktlen; tmp_buf = BIP_AAD = rtw_zmalloc(ori_len); subtype = GetFrameSubType(pframe); //bit(7)~bit(2) if(BIP_AAD == NULL) return _FAIL; _enter_critical_bh(&padapter->security_key_mutex, &irqL); //only support station mode if(!check_fwstate(pmlmepriv, WIFI_STATION_STATE) || !check_fwstate(pmlmepriv, _FW_LINKED)) goto xmitframe_coalesce_success; //IGTK key is not install, it may not support 802.11w if(padapter->securitypriv.binstallBIPkey != _TRUE) { DBG_871X("no instll BIP key\n"); goto xmitframe_coalesce_success; } //station mode doesn't need TX BIP, just ready the code if(bmcst) { int frame_body_len; u8 mic[16]; _rtw_memset(MME, 0, _MME_IE_LENGTH_); //other types doesn't need the BIP if(GetFrameSubType(pframe) != WIFI_DEAUTH && GetFrameSubType(pframe) != WIFI_DISASSOC) goto xmitframe_coalesce_fail; MGMT_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr); pframe += pattrib->pktlen; //octent 0 and 1 is key index ,BIP keyid is 4 or 5, LSB only need octent 0 MME[0]=padapter->securitypriv.dot11wBIPKeyid; //copy packet number _rtw_memcpy(&MME[2], &pmlmeext->mgnt_80211w_IPN, 6); //increase the packet number pmlmeext->mgnt_80211w_IPN++; //add MME IE with MIC all zero, MME string doesn't include element id and length pframe = rtw_set_ie(pframe, _MME_IE_ , 16 , MME, &(pattrib->pktlen)); pattrib->last_txcmdsz = pattrib->pktlen; // total frame length - header length frame_body_len = pattrib->pktlen - sizeof(struct rtw_ieee80211_hdr_3addr); //conscruct AAD, copy frame control field _rtw_memcpy(BIP_AAD, &pwlanhdr->frame_ctl, 2); 
ClearRetry(BIP_AAD); ClearPwrMgt(BIP_AAD); ClearMData(BIP_AAD); //conscruct AAD, copy address 1 to address 3 _rtw_memcpy(BIP_AAD+2, pwlanhdr->addr1, 18); //copy management fram body _rtw_memcpy(BIP_AAD+BIP_AAD_SIZE, MGMT_body, frame_body_len); /*//dump total packet include MME with zero MIC { int i; printk("Total packet: "); for(i=0; i < BIP_AAD_SIZE+frame_body_len; i++) printk(" %02x ", BIP_AAD[i]); printk("\n"); }*/ //calculate mic if(omac1_aes_128(padapter->securitypriv.dot11wBIPKey[padapter->securitypriv.dot11wBIPKeyid].skey , BIP_AAD, BIP_AAD_SIZE+frame_body_len, mic)) goto xmitframe_coalesce_fail; /*//dump calculated mic result { int i; printk("Calculated mic result: "); for(i=0; i<16; i++) printk(" %02x ", mic[i]); printk("\n"); }*/ //copy right BIP mic value, total is 128bits, we use the 0~63 bits _rtw_memcpy(pframe-8, mic, 8); /*/dump all packet after mic ok { int pp; printk("pattrib->pktlen = %d \n", pattrib->pktlen); for(pp=0;pp< pattrib->pktlen; pp++) printk(" %02x ", mem_start[pp]); printk("\n"); }*/ } else //unicast mgmt frame TX { //start to encrypt mgmt frame if(subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC || subtype == WIFI_REASSOCREQ || subtype == WIFI_ACTION) { if (pattrib->psta) psta = pattrib->psta; else { psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra); } if(psta==NULL) { DBG_871X("%s, psta==NUL\n", __func__); goto xmitframe_coalesce_fail; } if(!(psta->state & _FW_LINKED) || pxmitframe->buf_addr==NULL) { DBG_871X("%s, not _FW_LINKED or addr null\n", __func__); goto xmitframe_coalesce_fail; } //DBG_871X("%s, action frame category=%d \n", __func__, pframe[WLAN_HDR_A3_LEN]); //according 802.11-2012 standard, these five types are not robust types if(subtype == WIFI_ACTION && (pframe[WLAN_HDR_A3_LEN] == RTW_WLAN_CATEGORY_PUBLIC || pframe[WLAN_HDR_A3_LEN] == RTW_WLAN_CATEGORY_HT || pframe[WLAN_HDR_A3_LEN] == RTW_WLAN_CATEGORY_UNPROTECTED_WNM || pframe[WLAN_HDR_A3_LEN] == RTW_WLAN_CATEGORY_SELF_PROTECTED || pframe[WLAN_HDR_A3_LEN] == 
RTW_WLAN_CATEGORY_P2P)) goto xmitframe_coalesce_fail; //before encrypt dump the management packet content /*{ int i; printk("Management pkt: "); for(i=0; i<pattrib->pktlen; i++) printk(" %02x ", pframe[i]); printk("=======\n"); }*/ if(pattrib->encrypt>0) _rtw_memcpy(pattrib->dot118021x_UncstKey.skey, psta->dot118021x_UncstKey.skey, 16); //bakeup original management packet _rtw_memcpy(tmp_buf, pframe, pattrib->pktlen); //move to data portion pframe += pattrib->hdrlen; //802.11w unicast management packet must be _AES_ pattrib->iv_len = 8; //it's MIC of AES pattrib->icv_len = 8; switch(pattrib->encrypt) { case _AES_: //set AES IV header AES_IV(pattrib->iv, psta->dot11wtxpn, 0); break; default: goto xmitframe_coalesce_fail; } //insert iv header into management frame _rtw_memcpy(pframe, pattrib->iv, pattrib->iv_len); pframe += pattrib->iv_len; //copy mgmt data portion after CCMP header _rtw_memcpy(pframe, tmp_buf+pattrib->hdrlen, pattrib->pktlen-pattrib->hdrlen); //move pframe to end of mgmt pkt pframe += pattrib->pktlen-pattrib->hdrlen; //add 8 bytes CCMP IV header to length pattrib->pktlen += pattrib->iv_len; /*//dump management packet include AES IV header { int i; printk("Management pkt + IV: "); //for(i=0; i<pattrib->pktlen; i++) //printk(" %02x ", mem_start[i]); printk("@@@@@@@@@@@@@\n"); }*/ if ((pattrib->icv_len >0 )&& (pattrib->bswenc)) { _rtw_memcpy(pframe, pattrib->icv, pattrib->icv_len); pframe += pattrib->icv_len; } //add 8 bytes MIC pattrib->pktlen += pattrib->icv_len; //set final tx command size pattrib->last_txcmdsz = pattrib->pktlen; //set protected bit must be beofre SW encrypt SetPrivacy(mem_start); /*//dump management packet include AES header { int i; printk("prepare to enc Management pkt + IV: "); for(i=0; i<pattrib->pktlen; i++) printk(" %02x ", mem_start[i]); printk("@@@@@@@@@@@@@\n"); }*/ //software encrypt xmitframe_swencrypt(padapter, pxmitframe); } } xmitframe_coalesce_success: _exit_critical_bh(&padapter->security_key_mutex, &irqL); 
rtw_mfree(BIP_AAD, ori_len);	/* success path: release temporary BIP AAD buffer */
_func_exit_;
	return _SUCCESS;

xmitframe_coalesce_fail:
	/* error path: key mutex is still held here; release it before freeing */
	_exit_critical_bh(&padapter->security_key_mutex, &irqL);
	rtw_mfree(BIP_AAD, ori_len);
	_func_exit_;
	return _FAIL;
}
#endif //CONFIG_IEEE80211W

/* Logical Link Control(LLC) SubNetwork Attachment Point(SNAP) header
 * IEEE LLC/SNAP header contains 8 octets
 * First 3 octets comprise the LLC portion
 * SNAP portion, 5 octets, is divided into two fields:
 *	Organizationally Unique Identifier(OUI), 3 octets,
 *	type, defined by that organization, 2 octets.
 */
/* Write an LLC/SNAP header for ether type @h_proto into @data.
 * Returns the number of bytes written (SNAP_SIZE + 2-byte type). */
s32 rtw_put_snap(u8 *data, u16 h_proto)
{
	struct ieee80211_snap_hdr *snap;
	u8 *oui;

_func_enter_;

	snap = (struct ieee80211_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

	/* 0x8137 (Novell IPX) and 0x80f3 (AppleTalk AARP) use the 802.1H
	 * bridge-tunnel OUI; everything else uses the RFC 1042 OUI. */
	if (h_proto == 0x8137 || h_proto == 0x80f3)
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;

	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	/* ether type immediately follows the 8-octet header, network order */
	*(u16 *)(data + SNAP_SIZE) = htons(h_proto);

_func_exit_;

	return SNAP_SIZE + sizeof(u16);
}

/* Derive the protection (virtual carrier sense) mode from the ERP IE of
 * the current network, honouring the registry's DISABLE/ENABLE/AUTO setting. */
void rtw_update_protection(_adapter *padapter, u8 *ie, uint ie_len)
{
	uint	protection;
	u8	*perp;
	sint	 erp_len;
	struct	xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct	registry_priv *pregistrypriv = &padapter->registrypriv;

_func_enter_;

	switch(pxmitpriv->vcs_setting)
	{
		case DISABLE_VCS:
			pxmitpriv->vcs = NONE_VCS;
			break;

		case ENABLE_VCS:
			/* keep whatever pxmitpriv->vcs already holds */
			break;

		case AUTO_VCS:
		default:
			perp = rtw_get_ie(ie, _ERPINFO_IE_, &erp_len, ie_len);
			if(perp == NULL)
			{
				pxmitpriv->vcs = NONE_VCS;
			}
			else
			{
				/* ERP info octet, BIT(1) = "Use_Protection" */
				protection = (*(perp + 2)) & BIT(1);
				if (protection)
				{
					if(pregistrypriv->vcs_type == RTS_CTS)
						pxmitpriv->vcs = RTS_CTS;
					else
						pxmitpriv->vcs = CTS_TO_SELF;
				}
				else
					pxmitpriv->vcs = NONE_VCS;
			}

			break;
	}

_func_exit_;
}

/* Accumulate per-adapter and per-station TX counters after a frame
 * (or aggregate of frames) of @sz bytes has been handed to hardware. */
void rtw_count_tx_stats(PADAPTER padapter, struct xmit_frame *pxmitframe, int sz)
{
	struct sta_info *psta = NULL;
	struct stainfo_stats *pstats = NULL;
	struct xmit_priv	*pxmitpriv = &padapter->xmitpriv;
	struct mlme_priv	*pmlmepriv = &padapter->mlmepriv;
	u8	pkt_num = 1;

	if ((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG)
{ #if defined(CONFIG_USB_TX_AGGREGATION) || defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI) pkt_num = pxmitframe->agg_num; #endif pmlmepriv->LinkDetectInfo.NumTxOkInPeriod += pkt_num; pxmitpriv->tx_pkts += pkt_num; pxmitpriv->tx_bytes += sz; psta = pxmitframe->attrib.psta; if (psta) { pstats = &psta->sta_stats; pstats->tx_pkts += pkt_num; pstats->tx_bytes += sz; #ifdef CONFIG_TDLS if(pxmitframe->attrib.ptdls_sta != NULL) { pstats = &(pxmitframe->attrib.ptdls_sta->sta_stats); pstats->tx_pkts += pkt_num; pstats->tx_bytes += sz; } #endif //CONFIG_TDLS } #ifdef CONFIG_CHECK_LEAVE_LPS //traffic_check_for_leave_lps(padapter, _TRUE); #endif //CONFIG_LPS } } static struct xmit_buf *__rtw_alloc_cmd_xmitbuf(struct xmit_priv *pxmitpriv, enum cmdbuf_type buf_type) { struct xmit_buf *pxmitbuf = NULL; _func_enter_; pxmitbuf = &pxmitpriv->pcmd_xmitbuf[buf_type]; if (pxmitbuf != NULL) { pxmitbuf->priv_data = NULL; #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI) pxmitbuf->len = 0; pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead; pxmitbuf->agg_num = 0; pxmitbuf->pg_num = 0; #endif #ifdef CONFIG_PCI_HCI pxmitbuf->len = 0; pxmitbuf->desc = NULL; #endif if (pxmitbuf->sctx) { DBG_871X("%s pxmitbuf->sctx is not NULL\n", __func__); rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC); } } else { DBG_871X("%s fail, no xmitbuf available !!!\n", __func__); } exit: _func_exit_; return pxmitbuf; } struct xmit_frame *__rtw_alloc_cmdxmitframe(struct xmit_priv *pxmitpriv, enum cmdbuf_type buf_type) { struct xmit_frame *pcmdframe; struct xmit_buf *pxmitbuf; if ((pcmdframe = rtw_alloc_xmitframe(pxmitpriv)) == NULL) { DBG_871X("%s, alloc xmitframe fail\n", __FUNCTION__); return NULL; } if ((pxmitbuf = __rtw_alloc_cmd_xmitbuf(pxmitpriv, buf_type)) == NULL) { DBG_871X("%s, alloc xmitbuf fail\n", __FUNCTION__); rtw_free_xmitframe(pxmitpriv, pcmdframe); return NULL; } pcmdframe->frame_tag = MGNT_FRAMETAG; pcmdframe->pxmitbuf = pxmitbuf; pcmdframe->buf_addr = pxmitbuf->pbuf; 
pxmitbuf->priv_data = pcmdframe;	/* back-link so the buf can find its frame */

	return pcmdframe;

}

/* Take one extension xmit_buf from the free_xmit_extbuf_queue.
 * Returns NULL when the pool is exhausted.  Reinitialises per-HCI
 * bookkeeping fields before handing the buffer out. */
struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
{
	_irqL irqL;
	struct xmit_buf *pxmitbuf =  NULL;
	_list *plist, *phead;
	_queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;

_func_enter_;

	_enter_critical(&pfree_queue->lock, &irqL);

	if(_rtw_queue_empty(pfree_queue) == _TRUE) {
		pxmitbuf = NULL;
	} else {

		phead = get_list_head(pfree_queue);

		plist = get_next(phead);

		pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);

		rtw_list_delete(&(pxmitbuf->list));
	}

	if (pxmitbuf !=  NULL)
	{
		pxmitpriv->free_xmit_extbuf_cnt--;
		#ifdef DBG_XMIT_BUF_EXT
		DBG_871X("DBG_XMIT_BUF_EXT ALLOC no=%d, free_xmit_extbuf_cnt=%d\n",pxmitbuf->no, pxmitpriv->free_xmit_extbuf_cnt);
		#endif

		pxmitbuf->priv_data = NULL;

#if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
		pxmitbuf->len = 0;
		/* reset fill pointers to the buffer head for a fresh aggregate */
		pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
		pxmitbuf->agg_num = 1;
#endif
#ifdef CONFIG_PCI_HCI
		pxmitbuf->len = 0;
		pxmitbuf->desc = NULL;
#endif

		/* a lingering submit context means the previous user never
		 * completed; report it rather than silently dropping it */
		if (pxmitbuf->sctx) {
			DBG_871X("%s pxmitbuf->sctx is not NULL\n", __func__);
			rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
		}

	}

	_exit_critical(&pfree_queue->lock, &irqL);

_func_exit_;

	return pxmitbuf;
}

/* Return an extension xmit_buf to the free_xmit_extbuf_queue.
 * Returns _FAIL only for a NULL argument. */
s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
{
	_irqL irqL;
	_queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;

_func_enter_;

	if(pxmitbuf==NULL)
	{
		return _FAIL;
	}

	_enter_critical(&pfree_queue->lock, &irqL);

	rtw_list_delete(&pxmitbuf->list);

	rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(pfree_queue));
	pxmitpriv->free_xmit_extbuf_cnt++;
	#ifdef DBG_XMIT_BUF_EXT
	DBG_871X("DBG_XMIT_BUF_EXT FREE no=%d, free_xmit_extbuf_cnt=%d\n",pxmitbuf->no ,pxmitpriv->free_xmit_extbuf_cnt);
	#endif

	_exit_critical(&pfree_queue->lock, &irqL);

_func_exit_;

	return _SUCCESS;
}

/* Take one regular xmit_buf from the free_xmitbuf_queue (NULL when empty). */
struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
{
	_irqL irqL;
	struct xmit_buf *pxmitbuf =  NULL;
	_list *plist, *phead;
	_queue *pfree_xmitbuf_queue =
&pxmitpriv->free_xmitbuf_queue; _func_enter_; //DBG_871X("+rtw_alloc_xmitbuf\n"); _enter_critical(&pfree_xmitbuf_queue->lock, &irqL); if(_rtw_queue_empty(pfree_xmitbuf_queue) == _TRUE) { pxmitbuf = NULL; } else { phead = get_list_head(pfree_xmitbuf_queue); plist = get_next(phead); pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list); rtw_list_delete(&(pxmitbuf->list)); } if (pxmitbuf != NULL) { pxmitpriv->free_xmitbuf_cnt--; #ifdef DBG_XMIT_BUF DBG_871X("DBG_XMIT_BUF ALLOC no=%d, free_xmitbuf_cnt=%d\n",pxmitbuf->no, pxmitpriv->free_xmitbuf_cnt); #endif //DBG_871X("alloc, free_xmitbuf_cnt=%d\n", pxmitpriv->free_xmitbuf_cnt); pxmitbuf->priv_data = NULL; #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI) pxmitbuf->len = 0; pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead; pxmitbuf->agg_num = 0; pxmitbuf->pg_num = 0; #endif #ifdef CONFIG_PCI_HCI pxmitbuf->len = 0; pxmitbuf->desc = NULL; #endif if (pxmitbuf->sctx) { DBG_871X("%s pxmitbuf->sctx is not NULL\n", __func__); rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC); } } #ifdef DBG_XMIT_BUF else { DBG_871X("DBG_XMIT_BUF rtw_alloc_xmitbuf return NULL\n"); } #endif _exit_critical(&pfree_xmitbuf_queue->lock, &irqL); _func_exit_; return pxmitbuf; } s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf) { _irqL irqL; _queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue; _func_enter_; //DBG_871X("+rtw_free_xmitbuf\n"); if(pxmitbuf==NULL) { return _FAIL; } if (pxmitbuf->sctx) { DBG_871X("%s pxmitbuf->sctx is not NULL\n", __func__); rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE); } if(pxmitbuf->buf_tag == XMITBUF_CMD) { } else if(pxmitbuf->buf_tag == XMITBUF_MGNT) { rtw_free_xmitbuf_ext(pxmitpriv, pxmitbuf); } else { _enter_critical(&pfree_xmitbuf_queue->lock, &irqL); rtw_list_delete(&pxmitbuf->list); rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(pfree_xmitbuf_queue)); pxmitpriv->free_xmitbuf_cnt++; //DBG_871X("FREE, free_xmitbuf_cnt=%d\n", 
pxmitpriv->free_xmitbuf_cnt); #ifdef DBG_XMIT_BUF DBG_871X("DBG_XMIT_BUF FREE no=%d, free_xmitbuf_cnt=%d\n",pxmitbuf->no ,pxmitpriv->free_xmitbuf_cnt); #endif _exit_critical(&pfree_xmitbuf_queue->lock, &irqL); } _func_exit_; return _SUCCESS; } void rtw_init_xmitframe(struct xmit_frame *pxframe) { if (pxframe != NULL)//default value setting { pxframe->buf_addr = NULL; pxframe->pxmitbuf = NULL; _rtw_memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib)); //pxframe->attrib.psta = NULL; pxframe->frame_tag = DATA_FRAMETAG; #ifdef CONFIG_USB_HCI pxframe->pkt = NULL; #ifdef USB_PACKET_OFFSET_SZ pxframe->pkt_offset = (PACKET_OFFSET_SZ/8); #else pxframe->pkt_offset = 1;//default use pkt_offset to fill tx desc #endif #ifdef CONFIG_USB_TX_AGGREGATION pxframe->agg_num = 1; #endif #endif //#ifdef CONFIG_USB_HCI #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI) pxframe->pg_num = 1; pxframe->agg_num = 1; #endif #ifdef CONFIG_XMIT_ACK pxframe->ack_report = 0; #endif } } /* Calling context: 1. OS_TXENTRY 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack) If we turn on USE_RXTHREAD, then, no need for critical section. Otherwise, we must use _enter/_exit critical to protect free_xmit_queue... Must be very very cautious... 
*/ struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)//(_queue *pfree_xmit_queue) { /* Please remember to use all the osdep_service api, and lock/unlock or _enter/_exit critical to protect pfree_xmit_queue */ _irqL irqL; struct xmit_frame *pxframe = NULL; _list *plist, *phead; _queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue; _func_enter_; _enter_critical_bh(&pfree_xmit_queue->lock, &irqL); if (_rtw_queue_empty(pfree_xmit_queue) == _TRUE) { RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("rtw_alloc_xmitframe:%d\n", pxmitpriv->free_xmitframe_cnt)); pxframe = NULL; } else { phead = get_list_head(pfree_xmit_queue); plist = get_next(phead); pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list); rtw_list_delete(&(pxframe->list)); pxmitpriv->free_xmitframe_cnt--; RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe():free_xmitframe_cnt=%d\n", pxmitpriv->free_xmitframe_cnt)); } _exit_critical_bh(&pfree_xmit_queue->lock, &irqL); rtw_init_xmitframe(pxframe); _func_exit_; return pxframe; } struct xmit_frame *rtw_alloc_xmitframe_ext(struct xmit_priv *pxmitpriv) { _irqL irqL; struct xmit_frame *pxframe = NULL; _list *plist, *phead; _queue *queue = &pxmitpriv->free_xframe_ext_queue; _func_enter_; _enter_critical_bh(&queue->lock, &irqL); if (_rtw_queue_empty(queue) == _TRUE) { RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("rtw_alloc_xmitframe_ext:%d\n", pxmitpriv->free_xframe_ext_cnt)); pxframe = NULL; } else { phead = get_list_head(queue); plist = get_next(phead); pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list); rtw_list_delete(&(pxframe->list)); pxmitpriv->free_xframe_ext_cnt--; RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe_ext():free_xmitframe_cnt=%d\n", pxmitpriv->free_xframe_ext_cnt)); } _exit_critical_bh(&queue->lock, &irqL); rtw_init_xmitframe(pxframe); _func_exit_; return pxframe; } struct xmit_frame *rtw_alloc_xmitframe_once(struct xmit_priv *pxmitpriv) { struct xmit_frame *pxframe = NULL; u8 
*alloc_addr; alloc_addr = rtw_zmalloc(sizeof(struct xmit_frame) + 4); if (alloc_addr == NULL) goto exit; pxframe = (struct xmit_frame *)N_BYTE_ALIGMENT((SIZE_PTR)(alloc_addr), 4); pxframe->alloc_addr = alloc_addr; pxframe->padapter = pxmitpriv->adapter; pxframe->frame_tag = NULL_FRAMETAG; pxframe->pkt = NULL; pxframe->buf_addr = NULL; pxframe->pxmitbuf = NULL; rtw_init_xmitframe(pxframe); DBG_871X("################## %s ##################\n", __func__); exit: return pxframe; } s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe) { _irqL irqL; _queue *queue = NULL; _adapter *padapter = pxmitpriv->adapter; _pkt *pndis_pkt = NULL; _func_enter_; if (pxmitframe == NULL) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("======rtw_free_xmitframe():pxmitframe==NULL!!!!!!!!!!\n")); goto exit; } if (pxmitframe->pkt){ pndis_pkt = pxmitframe->pkt; pxmitframe->pkt = NULL; } if (pxmitframe->alloc_addr) { DBG_871X("################## %s with alloc_addr ##################\n", __func__); rtw_mfree(pxmitframe->alloc_addr, sizeof(struct xmit_frame) + 4); goto check_pkt_complete; } if (pxmitframe->ext_tag == 0) queue = &pxmitpriv->free_xmit_queue; else if(pxmitframe->ext_tag == 1) queue = &pxmitpriv->free_xframe_ext_queue; else {} _enter_critical_bh(&queue->lock, &irqL); rtw_list_delete(&pxmitframe->list); rtw_list_insert_tail(&pxmitframe->list, get_list_head(queue)); if (pxmitframe->ext_tag == 0) { pxmitpriv->free_xmitframe_cnt++; RT_TRACE(_module_rtl871x_xmit_c_, _drv_debug_, ("rtw_free_xmitframe():free_xmitframe_cnt=%d\n", pxmitpriv->free_xmitframe_cnt)); } else if(pxmitframe->ext_tag == 1) { pxmitpriv->free_xframe_ext_cnt++; RT_TRACE(_module_rtl871x_xmit_c_, _drv_debug_, ("rtw_free_xmitframe():free_xframe_ext_cnt=%d\n", pxmitpriv->free_xframe_ext_cnt)); } else { } _exit_critical_bh(&queue->lock, &irqL); check_pkt_complete: if(pndis_pkt) rtw_os_pkt_complete(padapter, pndis_pkt); exit: _func_exit_; return _SUCCESS; } void rtw_free_xmitframe_queue(struct 
xmit_priv *pxmitpriv, _queue *pframequeue) { _irqL irqL; _list *plist, *phead; struct xmit_frame *pxmitframe; _func_enter_; _enter_critical_bh(&(pframequeue->lock), &irqL); phead = get_list_head(pframequeue); plist = get_next(phead); while (rtw_end_of_queue_search(phead, plist) == _FALSE) { pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list); plist = get_next(plist); rtw_free_xmitframe(pxmitpriv,pxmitframe); } _exit_critical_bh(&(pframequeue->lock), &irqL); _func_exit_; } s32 rtw_xmitframe_enqueue(_adapter *padapter, struct xmit_frame *pxmitframe) { DBG_COUNTER(padapter->tx_logs.core_tx_enqueue); if (rtw_xmit_classifier(padapter, pxmitframe) == _FAIL) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("rtw_xmitframe_enqueue: drop xmit pkt for classifier fail\n")); // pxmitframe->pkt = NULL; return _FAIL; } return _SUCCESS; } static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit, struct tx_servq *ptxservq, _queue *pframe_queue) { _list *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe=NULL; xmitframe_phead = get_list_head(pframe_queue); xmitframe_plist = get_next(xmitframe_phead); while ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == _FALSE) { pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list); xmitframe_plist = get_next(xmitframe_plist); /*#ifdef RTK_DMP_PLATFORM #ifdef CONFIG_USB_TX_AGGREGATION if((ptxservq->qcnt>0) && (ptxservq->qcnt<=2)) { pxmitframe = NULL; tasklet_schedule(&pxmitpriv->xmit_tasklet); break; } #endif #endif*/ rtw_list_delete(&pxmitframe->list); ptxservq->qcnt--; //rtw_list_insert_tail(&pxmitframe->list, &phwxmit->pending); //ptxservq->qcnt--; break; pxmitframe = NULL; } return pxmitframe; } struct xmit_frame* rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i, sint entry) { _irqL irqL0; _list *sta_plist, *sta_phead; struct hw_xmit *phwxmit; struct tx_servq *ptxservq = NULL; _queue *pframe_queue = NULL; struct xmit_frame 
*pxmitframe = NULL; _adapter *padapter = pxmitpriv->adapter; struct registry_priv *pregpriv = &padapter->registrypriv; int i, inx[4]; #ifdef CONFIG_USB_HCI // int j, tmp, acirp_cnt[4]; #endif _func_enter_; inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3; if(pregpriv->wifi_spec==1) { int j, tmp, acirp_cnt[4]; #if 0 if(flags<XMIT_QUEUE_ENTRY) { //priority exchange according to the completed xmitbuf flags. inx[flags] = 0; inx[0] = flags; } #endif #if defined(CONFIG_USB_HCI) || defined(CONFIG_SDIO_HCI) || defined(CONFIG_PCI_HCI) for(j=0; j<4; j++) inx[j] = pxmitpriv->wmm_para_seq[j]; #endif } _enter_critical_bh(&pxmitpriv->lock, &irqL0); for(i = 0; i < entry; i++) { phwxmit = phwxmit_i + inx[i]; //_enter_critical_ex(&phwxmit->sta_queue->lock, &irqL0); sta_phead = get_list_head(phwxmit->sta_queue); sta_plist = get_next(sta_phead); while ((rtw_end_of_queue_search(sta_phead, sta_plist)) == _FALSE) { ptxservq= LIST_CONTAINOR(sta_plist, struct tx_servq, tx_pending); pframe_queue = &ptxservq->sta_pending; pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, ptxservq, pframe_queue); if(pxmitframe) { phwxmit->accnt--; //Remove sta node when there is no pending packets. 
if(_rtw_queue_empty(pframe_queue)) //must be done after get_next and before break rtw_list_delete(&ptxservq->tx_pending); //_exit_critical_ex(&phwxmit->sta_queue->lock, &irqL0); goto exit; } sta_plist = get_next(sta_plist); } //_exit_critical_ex(&phwxmit->sta_queue->lock, &irqL0); } exit: _exit_critical_bh(&pxmitpriv->lock, &irqL0); _func_exit_; return pxmitframe; } #if 1 struct tx_servq *rtw_get_sta_pending(_adapter *padapter, struct sta_info *psta, sint up, u8 *ac) { struct tx_servq *ptxservq=NULL; _func_enter_; switch (up) { case 1: case 2: ptxservq = &(psta->sta_xmitpriv.bk_q); *(ac) = 3; RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("rtw_get_sta_pending : BK \n")); break; case 4: case 5: ptxservq = &(psta->sta_xmitpriv.vi_q); *(ac) = 1; RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("rtw_get_sta_pending : VI\n")); break; case 6: case 7: ptxservq = &(psta->sta_xmitpriv.vo_q); *(ac) = 0; RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("rtw_get_sta_pending : VO \n")); break; case 0: case 3: default: ptxservq = &(psta->sta_xmitpriv.be_q); *(ac) = 2; RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("rtw_get_sta_pending : BE \n")); break; } _func_exit_; return ptxservq; } #else __inline static struct tx_servq *rtw_get_sta_pending (_adapter *padapter, _queue **ppstapending, struct sta_info *psta, sint up) { struct tx_servq *ptxservq; struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits; _func_enter_; #ifdef CONFIG_RTL8711 if(IS_MCAST(psta->hwaddr)) { ptxservq = &(psta->sta_xmitpriv.be_q); // we will use be_q to queue bc/mc frames in BCMC_stainfo *ppstapending = &padapter->xmitpriv.bm_pending; } else #endif { switch (up) { case 1: case 2: ptxservq = &(psta->sta_xmitpriv.bk_q); *ppstapending = &padapter->xmitpriv.bk_pending; (phwxmits+3)->accnt++; RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("rtw_get_sta_pending : BK \n")); break; case 4: case 5: ptxservq = &(psta->sta_xmitpriv.vi_q); *ppstapending = &padapter->xmitpriv.vi_pending; (phwxmits+1)->accnt++; 
RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("rtw_get_sta_pending : VI\n")); break; case 6: case 7: ptxservq = &(psta->sta_xmitpriv.vo_q); *ppstapending = &padapter->xmitpriv.vo_pending; (phwxmits+0)->accnt++; RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("rtw_get_sta_pending : VO \n")); break; case 0: case 3: default: ptxservq = &(psta->sta_xmitpriv.be_q); *ppstapending = &padapter->xmitpriv.be_pending; (phwxmits+2)->accnt++; RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("rtw_get_sta_pending : BE \n")); break; } } _func_exit_; return ptxservq; } #endif /* * Will enqueue pxmitframe to the proper queue, * and indicate it to xx_pending list..... */ s32 rtw_xmit_classifier(_adapter *padapter, struct xmit_frame *pxmitframe) { //_irqL irqL0; u8 ac_index; struct sta_info *psta; struct tx_servq *ptxservq; struct pkt_attrib *pattrib = &pxmitframe->attrib; struct sta_priv *pstapriv = &padapter->stapriv; struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits; sint res = _SUCCESS; _func_enter_; DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class); /* if (pattrib->psta) { psta = pattrib->psta; } else { DBG_871X("%s, call rtw_get_stainfo()\n", __func__); psta = rtw_get_stainfo(pstapriv, pattrib->ra); } */ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra); if(pattrib->psta != psta) { DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_sta); DBG_871X("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta); return _FAIL; } if (psta == NULL) { DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_nosta); res = _FAIL; DBG_8192C("rtw_xmit_classifier: psta == NULL\n"); RT_TRACE(_module_rtl871x_xmit_c_,_drv_err_,("rtw_xmit_classifier: psta == NULL\n")); goto exit; } if(!(psta->state &_FW_LINKED)) { DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_fwlink); DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state); return _FAIL; } ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index)); 
//_enter_critical(&pstapending->lock, &irqL0); if (rtw_is_list_empty(&ptxservq->tx_pending)) { rtw_list_insert_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue)); } //_enter_critical(&ptxservq->sta_pending.lock, &irqL1); rtw_list_insert_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending)); ptxservq->qcnt++; phwxmits[ac_index].accnt++; //_exit_critical(&ptxservq->sta_pending.lock, &irqL1); //_exit_critical(&pstapending->lock, &irqL0); exit: _func_exit_; return res; } void rtw_alloc_hwxmits(_adapter *padapter) { struct hw_xmit *hwxmits; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; pxmitpriv->hwxmit_entry = HWXMIT_ENTRY; pxmitpriv->hwxmits = NULL; pxmitpriv->hwxmits = (struct hw_xmit *)rtw_zmalloc(sizeof (struct hw_xmit) * pxmitpriv->hwxmit_entry); if(pxmitpriv->hwxmits == NULL) { DBG_871X("alloc hwxmits fail!...\n"); return; } hwxmits = pxmitpriv->hwxmits; if(pxmitpriv->hwxmit_entry == 5) { //pxmitpriv->bmc_txqueue.head = 0; //hwxmits[0] .phwtxqueue = &pxmitpriv->bmc_txqueue; hwxmits[0] .sta_queue = &pxmitpriv->bm_pending; //pxmitpriv->vo_txqueue.head = 0; //hwxmits[1] .phwtxqueue = &pxmitpriv->vo_txqueue; hwxmits[1] .sta_queue = &pxmitpriv->vo_pending; //pxmitpriv->vi_txqueue.head = 0; //hwxmits[2] .phwtxqueue = &pxmitpriv->vi_txqueue; hwxmits[2] .sta_queue = &pxmitpriv->vi_pending; //pxmitpriv->bk_txqueue.head = 0; //hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; //pxmitpriv->be_txqueue.head = 0; //hwxmits[4] .phwtxqueue = &pxmitpriv->be_txqueue; hwxmits[4] .sta_queue = &pxmitpriv->be_pending; } else if(pxmitpriv->hwxmit_entry == 4) { //pxmitpriv->vo_txqueue.head = 0; //hwxmits[0] .phwtxqueue = &pxmitpriv->vo_txqueue; hwxmits[0] .sta_queue = &pxmitpriv->vo_pending; //pxmitpriv->vi_txqueue.head = 0; //hwxmits[1] .phwtxqueue = &pxmitpriv->vi_txqueue; hwxmits[1] .sta_queue = &pxmitpriv->vi_pending; //pxmitpriv->be_txqueue.head = 0; //hwxmits[2] .phwtxqueue = &pxmitpriv->be_txqueue; 
hwxmits[2] .sta_queue = &pxmitpriv->be_pending; //pxmitpriv->bk_txqueue.head = 0; //hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; } else { } } void rtw_free_hwxmits(_adapter *padapter) { struct hw_xmit *hwxmits; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; hwxmits = pxmitpriv->hwxmits; if(hwxmits) rtw_mfree((u8 *)hwxmits, (sizeof (struct hw_xmit) * pxmitpriv->hwxmit_entry)); } void rtw_init_hwxmits(struct hw_xmit *phwxmit, sint entry) { sint i; _func_enter_; for(i = 0; i < entry; i++, phwxmit++) { //_rtw_spinlock_init(&phwxmit->xmit_lock); //_rtw_init_listhead(&phwxmit->pending); //phwxmit->txcmdcnt = 0; phwxmit->accnt = 0; } _func_exit_; } #ifdef CONFIG_BR_EXT int rtw_br_client_tx(_adapter *padapter, struct sk_buff **pskb) { struct sk_buff *skb = *pskb; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; _irqL irqL; //if(check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE) { void dhcp_flag_bcast(_adapter *priv, struct sk_buff *skb); int res, is_vlan_tag=0, i, do_nat25=1; unsigned short vlan_hdr=0; void *br_port = NULL; //mac_clone_handle_frame(priv, skb); #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) br_port = padapter->pnetdev->br_port; #else // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) rcu_read_lock(); br_port = rcu_dereference(padapter->pnetdev->rx_handler_data); rcu_read_unlock(); #endif // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) _enter_critical_bh(&padapter->br_ext_lock, &irqL); if ( !(skb->data[0] & 1) && br_port && memcmp(skb->data+MACADDRLEN, padapter->br_mac, MACADDRLEN) && *((unsigned short *)(skb->data+MACADDRLEN*2)) != __constant_htons(ETH_P_8021Q) && *((unsigned short *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_IP) && !memcmp(padapter->scdb_mac, skb->data+MACADDRLEN, MACADDRLEN) && padapter->scdb_entry) { memcpy(skb->data+MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN); padapter->scdb_entry->ageing_timer = jiffies; 
_exit_critical_bh(&padapter->br_ext_lock, &irqL); } else //if (!priv->pmib->ethBrExtInfo.nat25_disable) { // if (priv->dev->br_port && // !memcmp(skb->data+MACADDRLEN, priv->br_mac, MACADDRLEN)) { #if 1 if (*((unsigned short *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_8021Q)) { is_vlan_tag = 1; vlan_hdr = *((unsigned short *)(skb->data+MACADDRLEN*2+2)); for (i=0; i<6; i++) *((unsigned short *)(skb->data+MACADDRLEN*2+2-i*2)) = *((unsigned short *)(skb->data+MACADDRLEN*2-2-i*2)); skb_pull(skb, 4); } //if SA == br_mac && skb== IP => copy SIP to br_ip ?? why if (!memcmp(skb->data+MACADDRLEN, padapter->br_mac, MACADDRLEN) && (*((unsigned short *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_IP))) memcpy(padapter->br_ip, skb->data+WLAN_ETHHDR_LEN+12, 4); if (*((unsigned short *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_IP)) { if (memcmp(padapter->scdb_mac, skb->data+MACADDRLEN, MACADDRLEN)) { void *scdb_findEntry(_adapter *priv, unsigned char *macAddr, unsigned char *ipAddr); if ((padapter->scdb_entry = (struct nat25_network_db_entry *)scdb_findEntry(padapter, skb->data+MACADDRLEN, skb->data+WLAN_ETHHDR_LEN+12)) != NULL) { memcpy(padapter->scdb_mac, skb->data+MACADDRLEN, MACADDRLEN); memcpy(padapter->scdb_ip, skb->data+WLAN_ETHHDR_LEN+12, 4); padapter->scdb_entry->ageing_timer = jiffies; do_nat25 = 0; } } else { if (padapter->scdb_entry) { padapter->scdb_entry->ageing_timer = jiffies; do_nat25 = 0; } else { memset(padapter->scdb_mac, 0, MACADDRLEN); memset(padapter->scdb_ip, 0, 4); } } } _exit_critical_bh(&padapter->br_ext_lock, &irqL); #endif // 1 if (do_nat25) { int nat25_db_handle(_adapter *priv, struct sk_buff *skb, int method); if (nat25_db_handle(padapter, skb, NAT25_CHECK) == 0) { struct sk_buff *newskb; if (is_vlan_tag) { skb_push(skb, 4); for (i=0; i<6; i++) *((unsigned short *)(skb->data+i*2)) = *((unsigned short *)(skb->data+4+i*2)); *((unsigned short *)(skb->data+MACADDRLEN*2)) = __constant_htons(ETH_P_8021Q); *((unsigned short 
*)(skb->data+MACADDRLEN*2+2)) = vlan_hdr; } newskb = rtw_skb_copy(skb); if (newskb == NULL) { //priv->ext_stats.tx_drops++; DEBUG_ERR("TX DROP: rtw_skb_copy fail!\n"); //goto stop_proc; return -1; } rtw_skb_free(skb); *pskb = skb = newskb; if (is_vlan_tag) { vlan_hdr = *((unsigned short *)(skb->data+MACADDRLEN*2+2)); for (i=0; i<6; i++) *((unsigned short *)(skb->data+MACADDRLEN*2+2-i*2)) = *((unsigned short *)(skb->data+MACADDRLEN*2-2-i*2)); skb_pull(skb, 4); } } if (skb_is_nonlinear(skb)) DEBUG_ERR("%s(): skb_is_nonlinear!!\n", __FUNCTION__); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) res = skb_linearize(skb, GFP_ATOMIC); #else // (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) res = skb_linearize(skb); #endif // (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) if (res < 0) { DEBUG_ERR("TX DROP: skb_linearize fail!\n"); //goto free_and_stop; return -1; } res = nat25_db_handle(padapter, skb, NAT25_INSERT); if (res < 0) { if (res == -2) { //priv->ext_stats.tx_drops++; DEBUG_ERR("TX DROP: nat25_db_handle fail!\n"); //goto free_and_stop; return -1; } // we just print warning message and let it go //DEBUG_WARN("%s()-%d: nat25_db_handle INSERT Warning!\n", __FUNCTION__, __LINE__); //return -1; // return -1 will cause system crash on 2011/08/30! 
return 0; } } memcpy(skb->data+MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN); dhcp_flag_bcast(padapter, skb); if (is_vlan_tag) { skb_push(skb, 4); for (i=0; i<6; i++) *((unsigned short *)(skb->data+i*2)) = *((unsigned short *)(skb->data+4+i*2)); *((unsigned short *)(skb->data+MACADDRLEN*2)) = __constant_htons(ETH_P_8021Q); *((unsigned short *)(skb->data+MACADDRLEN*2+2)) = vlan_hdr; } } #if 0 else{ if (*((unsigned short *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_8021Q)) { is_vlan_tag = 1; } if(is_vlan_tag){ if(ICMPV6_MCAST_MAC(skb->data) && ICMPV6_PROTO1A_VALN(skb->data)){ memcpy(skb->data+MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN); } }else { if(ICMPV6_MCAST_MAC(skb->data) && ICMPV6_PROTO1A(skb->data)){ memcpy(skb->data+MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN); } } } #endif // 0 // check if SA is equal to our MAC if (memcmp(skb->data+MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN)) { //priv->ext_stats.tx_drops++; DEBUG_ERR("TX DROP: untransformed frame SA:%02X%02X%02X%02X%02X%02X!\n", skb->data[6],skb->data[7],skb->data[8],skb->data[9],skb->data[10],skb->data[11]); //goto free_and_stop; return -1; } } return 0; } #endif // CONFIG_BR_EXT u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe) { u32 addr; struct pkt_attrib *pattrib = &pxmitframe->attrib; switch(pattrib->qsel) { case 0: case 3: addr = BE_QUEUE_INX; break; case 1: case 2: addr = BK_QUEUE_INX; break; case 4: case 5: addr = VI_QUEUE_INX; break; case 6: case 7: addr = VO_QUEUE_INX; break; case 0x10: addr = BCN_QUEUE_INX; break; case 0x11://BC/MC in PS (HIQ) addr = HIGH_QUEUE_INX; break; case 0x12: default: addr = MGT_QUEUE_INX; break; } return addr; } static void do_queue_select(_adapter *padapter, struct pkt_attrib *pattrib) { u8 qsel; qsel = pattrib->priority; RT_TRACE(_module_rtl871x_xmit_c_,_drv_info_,("### do_queue_select priority=%d ,qsel = %d\n",pattrib->priority ,qsel)); #ifdef CONFIG_CONCURRENT_MODE // if (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) == _TRUE) // 
qsel = 7;// #endif pattrib->qsel = qsel; } /* * The main transmit(tx) entry * * Return * 1 enqueue * 0 success, hardware will handle this xmit frame(packet) * <0 fail */ s32 rtw_xmit(_adapter *padapter, _pkt **ppkt) { static u32 start = 0; static u32 drop_cnt = 0; #ifdef CONFIG_AP_MODE _irqL irqL0; #endif struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct xmit_frame *pxmitframe = NULL; #ifdef CONFIG_BR_EXT struct mlme_priv *pmlmepriv = &padapter->mlmepriv; void *br_port = NULL; #endif // CONFIG_BR_EXT s32 res; DBG_COUNTER(padapter->tx_logs.core_tx); if (start == 0) start = rtw_get_current_time(); pxmitframe = rtw_alloc_xmitframe(pxmitpriv); if (rtw_get_passing_time_ms(start) > 2000) { if (drop_cnt) DBG_871X("DBG_TX_DROP_FRAME %s no more pxmitframe, drop_cnt:%u\n", __FUNCTION__, drop_cnt); start = rtw_get_current_time(); drop_cnt = 0; } if (pxmitframe == NULL) { drop_cnt ++; RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: no more pxmitframe\n")); DBG_COUNTER(padapter->tx_logs.core_tx_err_pxmitframe); return -1; } #ifdef CONFIG_BR_EXT #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) br_port = padapter->pnetdev->br_port; #else // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) rcu_read_lock(); br_port = rcu_dereference(padapter->pnetdev->rx_handler_data); rcu_read_unlock(); #endif // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) if( br_port && check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE) { res = rtw_br_client_tx(padapter, ppkt); if (res == -1) { rtw_free_xmitframe(pxmitpriv, pxmitframe); DBG_COUNTER(padapter->tx_logs.core_tx_err_brtx); return -1; } } #endif // CONFIG_BR_EXT res = update_attrib(padapter, *ppkt, &pxmitframe->attrib); #ifdef CONFIG_WAPI_SUPPORT if(pxmitframe->attrib.ether_type != 0x88B4) { if(rtw_wapi_drop_for_key_absent(padapter, pxmitframe->attrib.ra)) { WAPI_TRACE(WAPI_RX,"drop for key absend when tx \n"); res = _FAIL; } } #endif if (res == _FAIL) { RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: 
update attrib fail\n")); #ifdef DBG_TX_DROP_FRAME DBG_871X("DBG_TX_DROP_FRAME %s update attrib fail\n", __FUNCTION__); #endif rtw_free_xmitframe(pxmitpriv, pxmitframe); return -1; } pxmitframe->pkt = *ppkt; rtw_led_control(padapter, LED_CTL_TX); do_queue_select(padapter, &pxmitframe->attrib); #ifdef CONFIG_AP_MODE _enter_critical_bh(&pxmitpriv->lock, &irqL0); if(xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe) == _TRUE) { _exit_critical_bh(&pxmitpriv->lock, &irqL0); DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue); return 1; } _exit_critical_bh(&pxmitpriv->lock, &irqL0); #endif //pre_xmitframe if (rtw_hal_xmit(padapter, pxmitframe) == _FALSE) return 1; return 0; } #ifdef CONFIG_TDLS sint xmitframe_enqueue_for_tdls_sleeping_sta(_adapter *padapter, struct xmit_frame *pxmitframe) { sint ret=_FALSE; _irqL irqL; struct sta_info *ptdls_sta=NULL; struct sta_priv *pstapriv = &padapter->stapriv; struct pkt_attrib *pattrib = &pxmitframe->attrib; struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv); int i; ptdls_sta=rtw_get_stainfo(pstapriv, pattrib->dst); if(ptdls_sta==NULL){ return ret; }else if(ptdls_sta->tdls_sta_state&TDLS_LINKED_STATE){ if(pattrib->triggered==1) { ret = _TRUE; return ret; } _enter_critical_bh(&ptdls_sta->sleep_q.lock, &irqL); if(ptdls_sta->state&WIFI_SLEEP_STATE) { rtw_list_delete(&pxmitframe->list); //_enter_critical_bh(&psta->sleep_q.lock, &irqL); rtw_list_insert_tail(&pxmitframe->list, get_list_head(&ptdls_sta->sleep_q)); ptdls_sta->sleepq_len++; ptdls_sta->sleepq_ac_len++; //indicate 4-AC queue bit in TDLS peer traffic indication switch(pattrib->priority) { case 1: case 2: ptdls_sta->uapsd_bk |= BIT(1); break; case 4: case 5: ptdls_sta->uapsd_vi |= BIT(1); break; case 6: case 7: ptdls_sta->uapsd_vo |= BIT(1); break; case 0: case 3: default: ptdls_sta->uapsd_be |= BIT(1); break; } if(ptdls_sta->sleepq_len==1) { //transmit TDLS PTI via AP rtw_tdls_cmd(padapter, ptdls_sta->hwaddr, TDLS_SD_PTI); } ret = _TRUE; } 
_exit_critical_bh(&ptdls_sta->sleep_q.lock, &irqL); } return ret; } #endif //CONFIG_TDLS #define RTW_HIQ_FILTER_ALLOW_ALL 0 #define RTW_HIQ_FILTER_ALLOW_SPECIAL 1 #define RTW_HIQ_FILTER_DENY_ALL 2 inline bool xmitframe_hiq_filter(struct xmit_frame *xmitframe) { bool allow = _FALSE; _adapter *adapter = xmitframe->padapter; struct registry_priv *registry = &adapter->registrypriv; if (adapter->interface_type != RTW_PCIE) { if (registry->hiq_filter == RTW_HIQ_FILTER_ALLOW_SPECIAL) { struct pkt_attrib *attrib = &xmitframe->attrib; if (attrib->ether_type == 0x0806 || attrib->ether_type == 0x888e #ifdef CONFIG_WAPI_SUPPORT || attrib->ether_type == 0x88B4 #endif || attrib->dhcp_pkt ) { if (0) DBG_871X(FUNC_ADPT_FMT" ether_type:0x%04x%s\n", FUNC_ADPT_ARG(xmitframe->padapter) , attrib->ether_type, attrib->dhcp_pkt?" DHCP":""); allow = _TRUE; } } else if (registry->hiq_filter == RTW_HIQ_FILTER_ALLOW_ALL) { allow = _TRUE; } else if (registry->hiq_filter == RTW_HIQ_FILTER_DENY_ALL) { } else { rtw_warn_on(1); } } return allow; } #if defined(CONFIG_AP_MODE) || defined(CONFIG_TDLS) sint xmitframe_enqueue_for_sleeping_sta(_adapter *padapter, struct xmit_frame *pxmitframe) { _irqL irqL; sint ret=_FALSE; struct sta_info *psta=NULL; struct sta_priv *pstapriv = &padapter->stapriv; struct pkt_attrib *pattrib = &pxmitframe->attrib; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; sint bmcst = IS_MCAST(pattrib->ra); bool update_tim = _FALSE; #ifdef CONFIG_TDLS if( padapter->tdlsinfo.link_established == _TRUE ) { ret = xmitframe_enqueue_for_tdls_sleeping_sta(padapter, pxmitframe); } #endif //CONFIG_TDLS if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == _FALSE) { DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_fwstate); return ret; } /* if(pattrib->psta) { psta = pattrib->psta; } else { DBG_871X("%s, call rtw_get_stainfo()\n", __func__); psta=rtw_get_stainfo(pstapriv, pattrib->ra); } */ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra); if(pattrib->psta != psta) { 
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_sta); DBG_871X("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta); return _FALSE; } if(psta==NULL) { DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_nosta); DBG_871X("%s, psta==NUL\n", __func__); return _FALSE; } if(!(psta->state &_FW_LINKED)) { DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_link); DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state); return _FALSE; } if(pattrib->triggered==1) { DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_trigger); //DBG_871X("directly xmit pspoll_triggered packet\n"); //pattrib->triggered=0; if (bmcst && xmitframe_hiq_filter(pxmitframe) == _TRUE) pattrib->qsel = QSLT_HIGH;//HIQ return ret; } if(bmcst) { _enter_critical_bh(&psta->sleep_q.lock, &irqL); if(pstapriv->sta_dz_bitmap)//if anyone sta is in ps mode { //pattrib->qsel = QSLT_HIGH;//HIQ rtw_list_delete(&pxmitframe->list); //_enter_critical_bh(&psta->sleep_q.lock, &irqL); rtw_list_insert_tail(&pxmitframe->list, get_list_head(&psta->sleep_q)); psta->sleepq_len++; if (!(pstapriv->tim_bitmap & BIT(0))) update_tim = _TRUE; pstapriv->tim_bitmap |= BIT(0);// pstapriv->sta_dz_bitmap |= BIT(0); //DBG_871X("enqueue, sq_len=%d, tim=%x\n", psta->sleepq_len, pstapriv->tim_bitmap); if (update_tim == _TRUE) { if (is_broadcast_mac_addr(pattrib->ra)) _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, "buffer BC"); else _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, "buffer MC"); } else { chk_bmc_sleepq_cmd(padapter); } //_exit_critical_bh(&psta->sleep_q.lock, &irqL); ret = _TRUE; DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_mcast); } _exit_critical_bh(&psta->sleep_q.lock, &irqL); return ret; } _enter_critical_bh(&psta->sleep_q.lock, &irqL); if(psta->state&WIFI_SLEEP_STATE) { u8 wmmps_ac=0; if(pstapriv->sta_dz_bitmap&BIT(psta->aid)) { rtw_list_delete(&pxmitframe->list); //_enter_critical_bh(&psta->sleep_q.lock, &irqL); rtw_list_insert_tail(&pxmitframe->list, 
get_list_head(&psta->sleep_q)); psta->sleepq_len++; switch(pattrib->priority) { case 1: case 2: wmmps_ac = psta->uapsd_bk&BIT(0); break; case 4: case 5: wmmps_ac = psta->uapsd_vi&BIT(0); break; case 6: case 7: wmmps_ac = psta->uapsd_vo&BIT(0); break; case 0: case 3: default: wmmps_ac = psta->uapsd_be&BIT(0); break; } if(wmmps_ac) psta->sleepq_ac_len++; if(((psta->has_legacy_ac) && (!wmmps_ac)) ||((!psta->has_legacy_ac)&&(wmmps_ac))) { if (!(pstapriv->tim_bitmap & BIT(psta->aid))) update_tim = _TRUE; pstapriv->tim_bitmap |= BIT(psta->aid); //DBG_871X("enqueue, sq_len=%d, tim=%x\n", psta->sleepq_len, pstapriv->tim_bitmap); if(update_tim == _TRUE) { //DBG_871X("sleepq_len==1, update BCNTIM\n"); //upate BCN for TIM IE _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, "buffer UC"); } } //_exit_critical_bh(&psta->sleep_q.lock, &irqL); //if(psta->sleepq_len > (NR_XMITFRAME>>3)) //{ // wakeup_sta_to_xmit(padapter, psta); //} ret = _TRUE; DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_ucast); } } _exit_critical_bh(&psta->sleep_q.lock, &irqL); return ret; } static void dequeue_xmitframes_to_sleeping_queue(_adapter *padapter, struct sta_info *psta, _queue *pframequeue) { sint ret; _list *plist, *phead; u8 ac_index; struct tx_servq *ptxservq; struct pkt_attrib *pattrib; struct xmit_frame *pxmitframe; struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits; phead = get_list_head(pframequeue); plist = get_next(phead); while (rtw_end_of_queue_search(phead, plist) == _FALSE) { pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list); plist = get_next(plist); pattrib = &pxmitframe->attrib; pattrib->triggered = 0; ret = xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe); if(_TRUE == ret) { ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index)); ptxservq->qcnt--; phwxmits[ac_index].accnt--; } else { //DBG_871X("xmitframe_enqueue_for_sleeping_sta return _FALSE\n"); } } } void stop_sta_xmit(_adapter *padapter, struct sta_info *psta) { _irqL irqL0; 
struct sta_info *psta_bmc; struct sta_xmit_priv *pstaxmitpriv; struct sta_priv *pstapriv = &padapter->stapriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; pstaxmitpriv = &psta->sta_xmitpriv; //for BC/MC Frames psta_bmc = rtw_get_bcmc_stainfo(padapter); _enter_critical_bh(&pxmitpriv->lock, &irqL0); psta->state |= WIFI_SLEEP_STATE; #ifdef CONFIG_TDLS if( !(psta->tdls_sta_state & TDLS_LINKED_STATE) ) #endif //CONFIG_TDLS pstapriv->sta_dz_bitmap |= BIT(psta->aid); dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending); rtw_list_delete(&(pstaxmitpriv->vo_q.tx_pending)); dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending); rtw_list_delete(&(pstaxmitpriv->vi_q.tx_pending)); dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->be_q.sta_pending); rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending)); dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->bk_q.sta_pending); rtw_list_delete(&(pstaxmitpriv->bk_q.tx_pending)); #ifdef CONFIG_TDLS if( !(psta->tdls_sta_state & TDLS_LINKED_STATE) ) { if( psta_bmc != NULL ) { #endif //CONFIG_TDLS //for BC/MC Frames pstaxmitpriv = &psta_bmc->sta_xmitpriv; dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending); rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending)); #ifdef CONFIG_TDLS } } #endif //CONFIG_TDLS _exit_critical_bh(&pxmitpriv->lock, &irqL0); } void wakeup_sta_to_xmit(_adapter *padapter, struct sta_info *psta) { _irqL irqL; u8 update_mask=0, wmmps_ac=0; struct sta_info *psta_bmc; _list *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe=NULL; struct sta_priv *pstapriv = &padapter->stapriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; psta_bmc = rtw_get_bcmc_stainfo(padapter); //_enter_critical_bh(&psta->sleep_q.lock, &irqL); _enter_critical_bh(&pxmitpriv->lock, &irqL); xmitframe_phead = get_list_head(&psta->sleep_q); xmitframe_plist = get_next(xmitframe_phead); while 
((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == _FALSE) { pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list); xmitframe_plist = get_next(xmitframe_plist); rtw_list_delete(&pxmitframe->list); switch(pxmitframe->attrib.priority) { case 1: case 2: wmmps_ac = psta->uapsd_bk&BIT(1); break; case 4: case 5: wmmps_ac = psta->uapsd_vi&BIT(1); break; case 6: case 7: wmmps_ac = psta->uapsd_vo&BIT(1); break; case 0: case 3: default: wmmps_ac = psta->uapsd_be&BIT(1); break; } psta->sleepq_len--; if(psta->sleepq_len>0) pxmitframe->attrib.mdata = 1; else pxmitframe->attrib.mdata = 0; if(wmmps_ac) { psta->sleepq_ac_len--; if(psta->sleepq_ac_len>0) { pxmitframe->attrib.mdata = 1; pxmitframe->attrib.eosp = 0; } else { pxmitframe->attrib.mdata = 0; pxmitframe->attrib.eosp = 1; } } pxmitframe->attrib.triggered = 1; /* _exit_critical_bh(&psta->sleep_q.lock, &irqL); if(rtw_hal_xmit(padapter, pxmitframe) == _TRUE) { rtw_os_xmit_complete(padapter, pxmitframe); } _enter_critical_bh(&psta->sleep_q.lock, &irqL); */ rtw_hal_xmitframe_enqueue(padapter, pxmitframe); } if(psta->sleepq_len==0) { #ifdef CONFIG_TDLS if( psta->tdls_sta_state & TDLS_LINKED_STATE ) { if(psta->state&WIFI_SLEEP_STATE) psta->state ^= WIFI_SLEEP_STATE; _exit_critical_bh(&pxmitpriv->lock, &irqL); return; } #endif //CONFIG_TDLS if (pstapriv->tim_bitmap & BIT(psta->aid)) { //DBG_871X("wakeup to xmit, qlen==0, update_BCNTIM, tim=%x\n", pstapriv->tim_bitmap); //upate BCN for TIM IE //update_BCNTIM(padapter); update_mask = BIT(0); } pstapriv->tim_bitmap &= ~BIT(psta->aid); if(psta->state&WIFI_SLEEP_STATE) psta->state ^= WIFI_SLEEP_STATE; if(psta->state & WIFI_STA_ALIVE_CHK_STATE) { DBG_871X("%s alive check\n", __func__); psta->expire_to = pstapriv->expire_to; psta->state ^= WIFI_STA_ALIVE_CHK_STATE; } pstapriv->sta_dz_bitmap &= ~BIT(psta->aid); } //for BC/MC Frames if(!psta_bmc) goto _exit; if((pstapriv->sta_dz_bitmap&0xfffe) == 0x0)//no any sta in ps mode { xmitframe_phead = 
get_list_head(&psta_bmc->sleep_q); xmitframe_plist = get_next(xmitframe_phead); while ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == _FALSE) { pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list); xmitframe_plist = get_next(xmitframe_plist); rtw_list_delete(&pxmitframe->list); psta_bmc->sleepq_len--; if(psta_bmc->sleepq_len>0) pxmitframe->attrib.mdata = 1; else pxmitframe->attrib.mdata = 0; pxmitframe->attrib.triggered = 1; /* _exit_critical_bh(&psta_bmc->sleep_q.lock, &irqL); if(rtw_hal_xmit(padapter, pxmitframe) == _TRUE) { rtw_os_xmit_complete(padapter, pxmitframe); } _enter_critical_bh(&psta_bmc->sleep_q.lock, &irqL); */ rtw_hal_xmitframe_enqueue(padapter, pxmitframe); } if(psta_bmc->sleepq_len==0) { if (pstapriv->tim_bitmap & BIT(0)) { //DBG_871X("wakeup to xmit, qlen==0, update_BCNTIM, tim=%x\n", pstapriv->tim_bitmap); //upate BCN for TIM IE //update_BCNTIM(padapter); update_mask |= BIT(1); } pstapriv->tim_bitmap &= ~BIT(0); pstapriv->sta_dz_bitmap &= ~BIT(0); } } _exit: //_exit_critical_bh(&psta_bmc->sleep_q.lock, &irqL); _exit_critical_bh(&pxmitpriv->lock, &irqL); if(update_mask) { //update_BCNTIM(padapter); if ((update_mask & (BIT(0)|BIT(1))) == (BIT(0)|BIT(1))) _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, "clear UC&BMC"); else if ((update_mask & BIT(1)) == BIT(1)) _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, "clear BMC"); else _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, "clear UC"); } } void xmit_delivery_enabled_frames(_adapter *padapter, struct sta_info *psta) { _irqL irqL; u8 wmmps_ac=0; _list *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe=NULL; struct sta_priv *pstapriv = &padapter->stapriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; //_enter_critical_bh(&psta->sleep_q.lock, &irqL); _enter_critical_bh(&pxmitpriv->lock, &irqL); xmitframe_phead = get_list_head(&psta->sleep_q); xmitframe_plist = get_next(xmitframe_phead); while ((rtw_end_of_queue_search(xmitframe_phead, 
xmitframe_plist)) == _FALSE) { pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list); xmitframe_plist = get_next(xmitframe_plist); switch(pxmitframe->attrib.priority) { case 1: case 2: wmmps_ac = psta->uapsd_bk&BIT(1); break; case 4: case 5: wmmps_ac = psta->uapsd_vi&BIT(1); break; case 6: case 7: wmmps_ac = psta->uapsd_vo&BIT(1); break; case 0: case 3: default: wmmps_ac = psta->uapsd_be&BIT(1); break; } if(!wmmps_ac) continue; rtw_list_delete(&pxmitframe->list); psta->sleepq_len--; psta->sleepq_ac_len--; if(psta->sleepq_ac_len>0) { pxmitframe->attrib.mdata = 1; pxmitframe->attrib.eosp = 0; } else { pxmitframe->attrib.mdata = 0; pxmitframe->attrib.eosp = 1; } pxmitframe->attrib.triggered = 1; rtw_hal_xmitframe_enqueue(padapter, pxmitframe); if((psta->sleepq_ac_len==0) && (!psta->has_legacy_ac) && (wmmps_ac)) { #ifdef CONFIG_TDLS if(psta->tdls_sta_state & TDLS_LINKED_STATE ) { //_exit_critical_bh(&psta->sleep_q.lock, &irqL); goto exit; } #endif //CONFIG_TDLS pstapriv->tim_bitmap &= ~BIT(psta->aid); //DBG_871X("wakeup to xmit, qlen==0, update_BCNTIM, tim=%x\n", pstapriv->tim_bitmap); //upate BCN for TIM IE //update_BCNTIM(padapter); update_beacon(padapter, _TIM_IE_, NULL, _TRUE); //update_mask = BIT(0); } } exit: //_exit_critical_bh(&psta->sleep_q.lock, &irqL); _exit_critical_bh(&pxmitpriv->lock, &irqL); return; } #endif #ifdef CONFIG_XMIT_THREAD_MODE void enqueue_pending_xmitbuf( struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf) { _irqL irql; _queue *pqueue; _adapter *pri_adapter = pxmitpriv->adapter; pqueue = &pxmitpriv->pending_xmitbuf_queue; _enter_critical_bh(&pqueue->lock, &irql); rtw_list_delete(&pxmitbuf->list); rtw_list_insert_tail(&pxmitbuf->list, get_list_head(pqueue)); _exit_critical_bh(&pqueue->lock, &irql); #if defined(CONFIG_SDIO_HCI) && defined(CONFIG_CONCURRENT_MODE) if (pri_adapter->adapter_type > PRIMARY_ADAPTER) pri_adapter = pri_adapter->pbuddy_adapter; #endif //SDIO_HCI + CONCURRENT 
_rtw_up_sema(&(pri_adapter->xmitpriv.xmit_sema)); } void enqueue_pending_xmitbuf_to_head( struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf) { _irqL irql; _queue *pqueue; _adapter *pri_adapter = pxmitpriv->adapter; pqueue = &pxmitpriv->pending_xmitbuf_queue; _enter_critical_bh(&pqueue->lock, &irql); rtw_list_delete(&pxmitbuf->list); rtw_list_insert_head(&pxmitbuf->list, get_list_head(pqueue)); _exit_critical_bh(&pqueue->lock, &irql); } struct xmit_buf* dequeue_pending_xmitbuf( struct xmit_priv *pxmitpriv) { _irqL irql; struct xmit_buf *pxmitbuf; _queue *pqueue; pxmitbuf = NULL; pqueue = &pxmitpriv->pending_xmitbuf_queue; _enter_critical_bh(&pqueue->lock, &irql); if (_rtw_queue_empty(pqueue) == _FALSE) { _list *plist, *phead; phead = get_list_head(pqueue); plist = get_next(phead); pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list); rtw_list_delete(&pxmitbuf->list); } _exit_critical_bh(&pqueue->lock, &irql); return pxmitbuf; } struct xmit_buf* dequeue_pending_xmitbuf_under_survey( struct xmit_priv *pxmitpriv) { _irqL irql; struct xmit_buf *pxmitbuf; #ifdef CONFIG_USB_HCI struct xmit_frame *pxmitframe; #endif _queue *pqueue; pxmitbuf = NULL; pqueue = &pxmitpriv->pending_xmitbuf_queue; _enter_critical_bh(&pqueue->lock, &irql); if (_rtw_queue_empty(pqueue) == _FALSE) { _list *plist, *phead; u8 type; phead = get_list_head(pqueue); plist = phead; do { plist = get_next(plist); if (plist == phead) break; pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list); #ifdef CONFIG_USB_HCI pxmitframe = (struct xmit_frame*)pxmitbuf->priv_data; if(pxmitframe) { type = GetFrameSubType(pxmitbuf->pbuf + TXDESC_SIZE + pxmitframe->pkt_offset * PACKET_OFFSET_SZ); } else { DBG_871X("%s, !!!ERROR!!! 
For USB, TODO ITEM \n", __FUNCTION__); } #else type = GetFrameSubType(pxmitbuf->pbuf + TXDESC_OFFSET); #endif if ((type == WIFI_PROBEREQ) || (type == WIFI_DATA_NULL) || (type == WIFI_QOS_DATA_NULL)) { rtw_list_delete(&pxmitbuf->list); break; } pxmitbuf = NULL; } while (1); } _exit_critical_bh(&pqueue->lock, &irql); return pxmitbuf; } sint check_pending_xmitbuf( struct xmit_priv *pxmitpriv) { _irqL irql; _queue *pqueue; sint ret = _FALSE; pqueue = &pxmitpriv->pending_xmitbuf_queue; _enter_critical_bh(&pqueue->lock, &irql); if(_rtw_queue_empty(pqueue) == _FALSE) ret = _TRUE; _exit_critical_bh(&pqueue->lock, &irql); return ret; } thread_return rtw_xmit_thread(thread_context context) { s32 err; PADAPTER padapter; err = _SUCCESS; padapter = (PADAPTER)context; thread_enter("RTW_XMIT_THREAD"); do { err = rtw_hal_xmit_thread_handler(padapter); flush_signals_thread(); } while (_SUCCESS == err); _rtw_up_sema(&padapter->xmitpriv.terminate_xmitthread_sema); thread_exit(); } #endif void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms) { sctx->timeout_ms = timeout_ms; sctx->submit_time= rtw_get_current_time(); #ifdef PLATFORM_LINUX /* TODO: add condition wating interface for other os */ init_completion(&sctx->done); #endif sctx->status = RTW_SCTX_SUBMITTED; } int rtw_sctx_wait(struct submit_ctx *sctx, const char *msg) { int ret = _FAIL; unsigned long expire; int status = 0; #ifdef PLATFORM_LINUX expire= sctx->timeout_ms ? msecs_to_jiffies(sctx->timeout_ms) : MAX_SCHEDULE_TIMEOUT; if (!wait_for_completion_timeout(&sctx->done, expire)) { /* timeout, do something?? 
*/ status = RTW_SCTX_DONE_TIMEOUT; DBG_871X("%s timeout: %s\n", __func__, msg); } else { status = sctx->status; } #endif if (status == RTW_SCTX_DONE_SUCCESS) { ret = _SUCCESS; } return ret; } bool rtw_sctx_chk_waring_status(int status) { switch(status) { case RTW_SCTX_DONE_UNKNOWN: case RTW_SCTX_DONE_BUF_ALLOC: case RTW_SCTX_DONE_BUF_FREE: case RTW_SCTX_DONE_DRV_STOP: case RTW_SCTX_DONE_DEV_REMOVE: return _TRUE; default: return _FALSE; } } void rtw_sctx_done_err(struct submit_ctx **sctx, int status) { if (*sctx) { if (rtw_sctx_chk_waring_status(status)) DBG_871X("%s status:%d\n", __func__, status); (*sctx)->status = status; #ifdef PLATFORM_LINUX complete(&((*sctx)->done)); #endif *sctx = NULL; } } void rtw_sctx_done(struct submit_ctx **sctx) { rtw_sctx_done_err(sctx, RTW_SCTX_DONE_SUCCESS); } #ifdef CONFIG_XMIT_ACK #ifdef CONFIG_XMIT_ACK_POLLING s32 c2h_evt_hdl(_adapter *adapter, u8 *c2h_evt, c2h_id_filter filter); /** * rtw_ack_tx_polling - * @pxmitpriv: xmit_priv to address ack_tx_ops * @timeout_ms: timeout msec * * Init ack_tx_ops and then do c2h_evt_hdl() and polling ack_tx_ops repeatedly * till tx report or timeout * Returns: _SUCCESS if TX report ok, _FAIL for others */ int rtw_ack_tx_polling(struct xmit_priv *pxmitpriv, u32 timeout_ms) { int ret = _FAIL; struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops; _adapter *adapter = container_of(pxmitpriv, _adapter, xmitpriv); pack_tx_ops->submit_time = rtw_get_current_time(); pack_tx_ops->timeout_ms = timeout_ms; pack_tx_ops->status = RTW_SCTX_SUBMITTED; do { c2h_evt_hdl(adapter, NULL, rtw_hal_c2h_id_filter_ccx(adapter)); if (pack_tx_ops->status != RTW_SCTX_SUBMITTED) break; if (adapter->bDriverStopped) { pack_tx_ops->status = RTW_SCTX_DONE_DRV_STOP; break; } if (adapter->bSurpriseRemoved) { pack_tx_ops->status = RTW_SCTX_DONE_DEV_REMOVE; break; } rtw_msleep_os(10); } while (rtw_get_passing_time_ms(pack_tx_ops->submit_time) < timeout_ms); if (pack_tx_ops->status == RTW_SCTX_SUBMITTED) { pack_tx_ops->status = 
RTW_SCTX_DONE_TIMEOUT; DBG_871X("%s timeout\n", __func__); } if (pack_tx_ops->status == RTW_SCTX_DONE_SUCCESS) ret = _SUCCESS; return ret; } #endif int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms) { #ifdef CONFIG_XMIT_ACK_POLLING return rtw_ack_tx_polling(pxmitpriv, timeout_ms); #else struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops; pack_tx_ops->submit_time = rtw_get_current_time(); pack_tx_ops->timeout_ms = timeout_ms; pack_tx_ops->status = RTW_SCTX_SUBMITTED; return rtw_sctx_wait(pack_tx_ops, __func__); #endif } void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status) { struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops; if (pxmitpriv->ack_tx) { rtw_sctx_done_err(&pack_tx_ops, status); } else { DBG_871X("%s ack_tx not set\n", __func__); } } #endif //CONFIG_XMIT_ACK
gpl-2.0
MoKee/android_kernel_zte_msm8994
drivers/mmc/core/bus.c
3
11018
/* * linux/drivers/mmc/core/bus.c * * Copyright (C) 2003 Russell King, All Rights Reserved. * Copyright (C) 2007 Pierre Ossman * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * MMC card bus driver model */ #include <linux/export.h> #include <linux/device.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/pm_runtime.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include "core.h" #include "sdio_cis.h" #include "bus.h" #define to_mmc_driver(d) container_of(d, struct mmc_driver, drv) #define RUNTIME_SUSPEND_DELAY_MS 10000 static ssize_t mmc_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_card *card = mmc_dev_to_card(dev); switch (card->type) { case MMC_TYPE_MMC: return sprintf(buf, "MMC\n"); case MMC_TYPE_SD: return sprintf(buf, "SD\n"); case MMC_TYPE_SDIO: return sprintf(buf, "SDIO\n"); case MMC_TYPE_SD_COMBO: return sprintf(buf, "SDcombo\n"); default: return -EFAULT; } } static struct device_attribute mmc_dev_attrs[] = { __ATTR(type, S_IRUGO, mmc_type_show, NULL), __ATTR_NULL, }; /* * This currently matches any MMC driver to any MMC card - drivers * themselves make the decision whether to drive this card in their * probe method. 
*/ static int mmc_bus_match(struct device *dev, struct device_driver *drv) { return 1; } static int mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env) { struct mmc_card *card = mmc_dev_to_card(dev); const char *type; int retval = 0; switch (card->type) { case MMC_TYPE_MMC: type = "MMC"; break; case MMC_TYPE_SD: type = "SD"; break; case MMC_TYPE_SDIO: type = "SDIO"; break; case MMC_TYPE_SD_COMBO: type = "SDcombo"; break; default: type = NULL; } if (type) { retval = add_uevent_var(env, "MMC_TYPE=%s", type); if (retval) return retval; } retval = add_uevent_var(env, "MMC_NAME=%s", mmc_card_name(card)); if (retval) return retval; /* * Request the mmc_block device. Note: that this is a direct request * for the module it carries no information as to what is inserted. */ retval = add_uevent_var(env, "MODALIAS=mmc:block"); return retval; } static int mmc_bus_probe(struct device *dev) { struct mmc_driver *drv = to_mmc_driver(dev->driver); struct mmc_card *card = mmc_dev_to_card(dev); return drv->probe(card); } static int mmc_bus_remove(struct device *dev) { struct mmc_driver *drv = to_mmc_driver(dev->driver); struct mmc_card *card = mmc_dev_to_card(dev); drv->remove(card); return 0; } static void mmc_bus_shutdown(struct device *dev) { struct mmc_driver *drv = to_mmc_driver(dev->driver); struct mmc_card *card = mmc_dev_to_card(dev); if (!drv) { pr_debug("%s: %s: drv is NULL\n", dev_name(dev), __func__); return; } if (!card) { pr_debug("%s: %s: card is NULL\n", dev_name(dev), __func__); return; } if (drv->shutdown) drv->shutdown(card); } #ifdef CONFIG_PM_SLEEP static int mmc_bus_suspend(struct device *dev) { struct mmc_driver *drv = to_mmc_driver(dev->driver); struct mmc_card *card = mmc_dev_to_card(dev); int ret = 0; if (dev->driver && drv->suspend) ret = drv->suspend(card); return ret; } static int mmc_bus_resume(struct device *dev) { struct mmc_driver *drv = to_mmc_driver(dev->driver); struct mmc_card *card = mmc_dev_to_card(dev); int ret = 0; if (dev->driver 
&& drv->resume) ret = drv->resume(card); return ret; } #endif #ifdef CONFIG_PM_RUNTIME static int mmc_runtime_suspend(struct device *dev) { struct mmc_card *card = mmc_dev_to_card(dev); if (mmc_use_core_runtime_pm(card->host)) { /* * If idle time bkops is running on the card, let's not get * into suspend. */ if (mmc_card_doing_bkops(card) && mmc_card_is_prog_state(card)) return -EBUSY; else return 0; } else { return mmc_power_save_host(card->host); } } static int mmc_runtime_resume(struct device *dev) { struct mmc_card *card = mmc_dev_to_card(dev); if (mmc_use_core_runtime_pm(card->host)) return 0; else return mmc_power_restore_host(card->host); } static int mmc_runtime_idle(struct device *dev) { struct mmc_card *card = mmc_dev_to_card(dev); struct mmc_host *host = card->host; int ret = 0; if (mmc_use_core_runtime_pm(card->host)) { ret = pm_schedule_suspend(dev, card->idle_timeout); if ((ret < 0) && (dev->power.runtime_error || dev->power.disable_depth > 0)) { pr_err("%s: %s: %s: pm_schedule_suspend failed: err: %d\n", mmc_hostname(host), __func__, dev_name(dev), ret); return ret; } } return ret; } #endif /* !CONFIG_PM_RUNTIME */ static const struct dev_pm_ops mmc_bus_pm_ops = { SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, mmc_runtime_idle) SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume) }; static ssize_t show_rpm_delay(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_card *card = mmc_dev_to_card(dev); if (!card) { pr_err("%s: %s: card is NULL\n", dev_name(dev), __func__); return -EINVAL; } return snprintf(buf, PAGE_SIZE, "%u\n", card->idle_timeout); } static ssize_t store_rpm_delay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mmc_card *card = mmc_dev_to_card(dev); unsigned int delay; if (!card) { pr_err("%s: %s: card is NULL\n", dev_name(dev), __func__); return -EINVAL; } if (!kstrtou32(buf, 0, &delay)) { if (delay < 2000) { pr_err("%s: %s: less than 2 sec delay is 
unsupported\n", mmc_hostname(card->host), __func__); return -EINVAL; } card->idle_timeout = delay; } return count; } static struct bus_type mmc_bus_type = { .name = "mmc", .dev_attrs = mmc_dev_attrs, .match = mmc_bus_match, .uevent = mmc_bus_uevent, .probe = mmc_bus_probe, .remove = mmc_bus_remove, .shutdown = mmc_bus_shutdown, .pm = &mmc_bus_pm_ops, }; int mmc_register_bus(void) { return bus_register(&mmc_bus_type); } void mmc_unregister_bus(void) { bus_unregister(&mmc_bus_type); } /** * mmc_register_driver - register a media driver * @drv: MMC media driver */ int mmc_register_driver(struct mmc_driver *drv) { drv->drv.bus = &mmc_bus_type; return driver_register(&drv->drv); } EXPORT_SYMBOL(mmc_register_driver); /** * mmc_unregister_driver - unregister a media driver * @drv: MMC media driver */ void mmc_unregister_driver(struct mmc_driver *drv) { drv->drv.bus = &mmc_bus_type; driver_unregister(&drv->drv); } EXPORT_SYMBOL(mmc_unregister_driver); static void mmc_release_card(struct device *dev) { struct mmc_card *card = mmc_dev_to_card(dev); sdio_free_common_cis(card); kfree(card->info); kfree(card); } /* * Allocate and initialise a new MMC card structure. */ struct mmc_card *mmc_alloc_card(struct mmc_host *host, struct device_type *type) { struct mmc_card *card; card = kzalloc(sizeof(struct mmc_card), GFP_KERNEL); if (!card) return ERR_PTR(-ENOMEM); card->host = host; device_initialize(&card->dev); card->dev.parent = mmc_classdev(host); card->dev.bus = &mmc_bus_type; card->dev.release = mmc_release_card; card->dev.type = type; spin_lock_init(&card->bkops_info.bkops_stats.lock); spin_lock_init(&card->wr_pack_stats.lock); return card; } /* * Register a new MMC card with the driver model. 
*/ int mmc_add_card(struct mmc_card *card) { int ret; const char *type; const char *uhs_bus_speed_mode = ""; static const char *const uhs_speeds[] = { [UHS_SDR12_BUS_SPEED] = "SDR12 ", [UHS_SDR25_BUS_SPEED] = "SDR25 ", [UHS_SDR50_BUS_SPEED] = "SDR50 ", [UHS_SDR104_BUS_SPEED] = "SDR104 ", [UHS_DDR50_BUS_SPEED] = "DDR50 ", }; dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca); switch (card->type) { case MMC_TYPE_MMC: type = "MMC"; break; case MMC_TYPE_SD: type = "SD"; if (mmc_card_blockaddr(card)) { if (mmc_card_ext_capacity(card)) type = "SDXC"; else type = "SDHC"; } break; case MMC_TYPE_SDIO: type = "SDIO"; break; case MMC_TYPE_SD_COMBO: type = "SD-combo"; if (mmc_card_blockaddr(card)) type = "SDHC-combo"; break; default: type = "?"; break; } if (mmc_sd_card_uhs(card) && (card->sd_bus_speed < ARRAY_SIZE(uhs_speeds))) uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed]; if (mmc_host_is_spi(card->host)) { pr_info("%s: new %s%s%s card on SPI\n", mmc_hostname(card->host), mmc_card_highspeed(card) ? "high speed " : "", mmc_card_ddr_mode(card) ? "DDR " : "", type); } else { pr_info("%s: new %s%s%s%s%s%s card at address %04x\n", mmc_hostname(card->host), mmc_card_uhs(card) ? "ultra high speed " : (mmc_card_highspeed(card) ? "high speed " : ""), (mmc_card_hs400(card) ? "HS400 " : ""), (mmc_card_hs200(card) ? "HS200 " : ""), mmc_card_ddr_mode(card) ? 
"DDR " : "", uhs_bus_speed_mode, type, card->rca); } #ifdef CONFIG_DEBUG_FS mmc_add_card_debugfs(card); #endif mmc_init_context_info(card->host); ret = pm_runtime_set_active(&card->dev); if (ret) pr_err("%s: %s: failed setting runtime active: ret: %d\n", mmc_hostname(card->host), __func__, ret); else if (!mmc_card_sdio(card) && mmc_use_core_runtime_pm(card->host)) pm_runtime_enable(&card->dev); if (mmc_card_sdio(card)) { ret = device_init_wakeup(&card->dev, true); if (ret) pr_err("%s: %s: failed to init wakeup: %d\n", mmc_hostname(card->host), __func__, ret); } ret = device_add(&card->dev); if (ret) return ret; device_enable_async_suspend(&card->dev); if (mmc_use_core_runtime_pm(card->host) && !mmc_card_sdio(card)) { card->rpm_attrib.show = show_rpm_delay; card->rpm_attrib.store = store_rpm_delay; sysfs_attr_init(&card->rpm_attrib.attr); card->rpm_attrib.attr.name = "runtime_pm_timeout"; card->rpm_attrib.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(&card->dev, &card->rpm_attrib); if (ret) pr_err("%s: %s: creating runtime pm sysfs entry: failed: %d\n", mmc_hostname(card->host), __func__, ret); /* Default timeout is 10 seconds */ card->idle_timeout = RUNTIME_SUSPEND_DELAY_MS; /* ZTEMT: Adjust sdcard suspend/resume timeout */ if (card->type == MMC_TYPE_SD) { card->idle_timeout = 10000000; } } mmc_card_set_present(card); return 0; } /* * Unregister a new MMC card with the driver model, and * (eventually) free it. */ void mmc_remove_card(struct mmc_card *card) { #ifdef CONFIG_DEBUG_FS mmc_remove_card_debugfs(card); #endif if (mmc_card_present(card)) { if (mmc_host_is_spi(card->host)) { pr_info("%s: SPI card removed\n", mmc_hostname(card->host)); } else { pr_info("%s: card %04x removed\n", mmc_hostname(card->host), card->rca); } device_del(&card->dev); } kfree(card->wr_pack_stats.packing_events); kfree(card->cached_ext_csd); put_device(&card->dev); }
gpl-2.0
mattstock/binutils-bexkat1
gdb/vax-tdep.c
3
14987
/* Target-dependent code for the VAX.

   Copyright (C) 1986-2021 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "arch-utils.h"
#include "dis-asm.h"
#include "frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "gdbcore.h"
#include "gdbtypes.h"
#include "osabi.h"
#include "regcache.h"
#include "regset.h"
#include "trad-frame.h"
#include "value.h"

#include "vax-tdep.h"

/* Return the name of register REGNUM.  */

static const char *
vax_register_name (struct gdbarch *gdbarch, int regnum)
{
  /* 17 entries: r0-r11, then the four special GPRs (AP, FP, SP, PC)
     and the processor status word.  Presumably matches VAX_NUM_REGS
     from vax-tdep.h — confirm if that header changes.  */
  static const char *register_names[] =
  {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "ap", "fp", "sp", "pc", "ps",
  };

  if (regnum >= 0 && regnum < ARRAY_SIZE (register_names))
    return register_names[regnum];

  return NULL;
}

/* Return the GDB type object for the "standard" data type of data in
   register REGNUM.  All VAX registers are treated as plain ints here.  */

static struct type *
vax_register_type (struct gdbarch *gdbarch, int regnum)
{
  return builtin_type (gdbarch)->builtin_int;
}

/* Core file support.  */

/* Supply register REGNUM from the buffer specified by GREGS and LEN
   in the general-purpose register set REGSET to register cache
   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */

static void
vax_supply_gregset (const struct regset *regset, struct regcache *regcache,
		    int regnum, const void *gregs, size_t len)
{
  const gdb_byte *regs = (const gdb_byte *) gregs;
  int i;

  /* GREGS is laid out as VAX_NUM_REGS consecutive 4-byte slots, one
     per register, in register-number order.  */
  for (i = 0; i < VAX_NUM_REGS; i++)
    {
      if (regnum == i || regnum == -1)
	regcache->raw_supply (i, regs + i * 4);
    }
}

/* VAX register set.  */

static const struct regset vax_gregset =
{
  NULL,
  vax_supply_gregset
};

/* Iterate over core file register note sections.  */

static void
vax_iterate_over_regset_sections (struct gdbarch *gdbarch,
				  iterate_over_regset_sections_cb *cb,
				  void *cb_data,
				  const struct regcache *regcache)
{
  cb (".reg", VAX_NUM_REGS * 4, VAX_NUM_REGS * 4, &vax_gregset, NULL,
      cb_data);
}

/* The VAX UNIX calling convention uses R1 to pass a structure return
   value address instead of passing it as a first (hidden) argument as
   the VMS calling convention suggests.  */

/* Write the NARGS arguments in ARGS onto the stack below SP and point
   the argument pointer (AP) at the resulting argument list.  Returns
   the new (lower) stack pointer.  */

static CORE_ADDR
vax_store_arguments (struct regcache *regcache, int nargs,
		     struct value **args, CORE_ADDR sp)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[4];
  int count = 0;
  int i;

  /* We create an argument list on the stack, and make the argument
     pointer to it.  */

  /* Push arguments in reverse order.  Each argument is rounded up to
     a whole number of 4-byte longwords; COUNT accumulates the total
     longword count for the argument-count longword below.  */
  for (i = nargs - 1; i >= 0; i--)
    {
      int len = TYPE_LENGTH (value_enclosing_type (args[i]));

      sp -= (len + 3) & ~3;
      count += (len + 3) / 4;
      write_memory (sp, value_contents_all (args[i]), len);
    }

  /* Push argument count.  */
  sp -= 4;
  store_unsigned_integer (buf, 4, byte_order, count);
  write_memory (sp, buf, 4);

  /* Update the argument pointer.  */
  store_unsigned_integer (buf, 4, byte_order, sp);
  regcache->cooked_write (VAX_AP_REGNUM, buf);

  return sp;
}

/* Push a dummy call frame for calling a function in the inferior.
   Builds the argument list via vax_store_arguments and then lays out
   a fake CALLS-style frame (PC, FP, AP slot, mask, condition handler)
   on the stack.  Returns the fake frame pointer used to identify the
   dummy frame.  */

static CORE_ADDR
vax_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
		     struct value **args, CORE_ADDR sp,
		     function_call_return_method return_method,
		     CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR fp = sp;
  gdb_byte buf[4];

  /* Set up the function arguments.  */
  sp = vax_store_arguments (regcache, nargs, args, sp);

  /* Store return value address.  */
  if (return_method == return_method_struct)
    regcache_cooked_write_unsigned (regcache, VAX_R1_REGNUM, struct_addr);

  /* Store return address in the PC slot.  */
  sp -= 4;
  store_unsigned_integer (buf, 4, byte_order, bp_addr);
  write_memory (sp, buf, 4);

  /* Store the (fake) frame pointer in the FP slot.  */
  sp -= 4;
  store_unsigned_integer (buf, 4, byte_order, fp);
  write_memory (sp, buf, 4);

  /* Skip the AP slot.  */
  sp -= 4;

  /* Store register save mask and control bits.  Zero here: no
     registers saved, no stack-argument-list flag.  */
  sp -= 4;
  store_unsigned_integer (buf, 4, byte_order, 0);
  write_memory (sp, buf, 4);

  /* Store condition handler.  */
  sp -= 4;
  store_unsigned_integer (buf, 4, byte_order, 0);
  write_memory (sp, buf, 4);

  /* Update the stack pointer and frame pointer.  */
  store_unsigned_integer (buf, 4, byte_order, sp);
  regcache->cooked_write (VAX_SP_REGNUM, buf);
  regcache->cooked_write (VAX_FP_REGNUM, buf);

  /* Return the saved (fake) frame pointer.  */
  return fp;
}

/* Identify a dummy frame by its frame pointer and resume PC.  */

static struct frame_id
vax_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR fp;

  fp = get_frame_register_unsigned (this_frame, VAX_FP_REGNUM);
  return frame_id_build (fp, get_frame_pc (this_frame));
}

/* Implement the "return_value" gdbarch method: fetch or store a
   function's return value.  Scalars live in R0 (and R1 when wider
   than 4 bytes); aggregates are returned via an address in R0.  */

static enum return_value_convention
vax_return_value (struct gdbarch *gdbarch, struct value *function,
		  struct type *type, struct regcache *regcache,
		  gdb_byte *readbuf, const gdb_byte *writebuf)
{
  int len = TYPE_LENGTH (type);
  gdb_byte buf[8];

  if (type->code () == TYPE_CODE_STRUCT
      || type->code () == TYPE_CODE_UNION
      || type->code () == TYPE_CODE_ARRAY)
    {
      /* The default on VAX is to return structures in static memory.
	 Consequently a function must return the address where we can
	 find the return value.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, VAX_R0_REGNUM, &addr);
	  read_memory (addr, readbuf, len);
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  if (readbuf)
    {
      /* Read the contents of R0 and (if necessary) R1.  */
      regcache->cooked_read (VAX_R0_REGNUM, buf);
      if (len > 4)
	regcache->cooked_read (VAX_R1_REGNUM, buf + 4);
      memcpy (readbuf, buf, len);
    }
  if (writebuf)
    {
      /* Read the contents to R0 and (if necessary) R1.  */
      memcpy (buf, writebuf, len);
      regcache->cooked_write (VAX_R0_REGNUM, buf);
      if (len > 4)
	regcache->cooked_write (VAX_R1_REGNUM, buf + 4);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}

/* Use the program counter to determine the contents and size of a
   breakpoint instruction.  Return a pointer to a string of bytes that
   encode a breakpoint instruction, store the length of the string in
   *LEN and optionally adjust *PC to point to the correct memory
   location for inserting the breakpoint.  */

/* Single-byte BPT instruction (opcode 3).  */
constexpr gdb_byte vax_break_insn[] = { 3 };

typedef BP_MANIPULATION (vax_break_insn) vax_breakpoint;

/* Advance PC across any function entry prologue instructions
   to reach some "real" code.  */

static CORE_ADDR
vax_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte op = read_memory_unsigned_integer (pc, 1, byte_order);

  /* Pattern-match the common compiler-generated prologue forms by
     opcode byte; the 0x5E operand byte checks below presumably match
     the SP register specifier — confirm against the VAX opcode map.  */
  if (op == 0x11)
    pc += 2;			/* skip brb */
  if (op == 0x31)
    pc += 3;			/* skip brw */
  if (op == 0xC2
      && read_memory_unsigned_integer (pc + 2, 1, byte_order) == 0x5E)
    pc += 3;			/* skip subl2 */
  if (op == 0x9E
      && read_memory_unsigned_integer (pc + 1, 1, byte_order) == 0xAE
      && read_memory_unsigned_integer (pc + 3, 1, byte_order) == 0x5E)
    pc += 4;			/* skip movab */
  if (op == 0x9E
      && read_memory_unsigned_integer (pc + 1, 1, byte_order) == 0xCE
      && read_memory_unsigned_integer (pc + 4, 1, byte_order) == 0x5E)
    pc += 5;			/* skip movab */
  if (op == 0x9E
      && read_memory_unsigned_integer (pc + 1, 1, byte_order) == 0xEE
      && read_memory_unsigned_integer (pc + 6, 1, byte_order) == 0x5E)
    pc += 7;			/* skip movab */

  return pc;
}

/* Unwinding the stack is relatively easy since the VAX has a
   dedicated frame pointer, and frames are set up automatically as the
   result of a function call.  Most of the relevant information can be
   inferred from the documentation of the Procedure Call Instructions
   in the VAX MACRO and Instruction Set Reference Manual.  */

struct vax_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;

  /* Table of saved registers.  */
  trad_frame_saved_reg *saved_regs;
};

/* Build (or return the cached) unwind information for THIS_FRAME.
   Decodes the call frame that CALLS/CALLG pushed at the frame
   pointer: condition handler at FP+0, mask/PSW longword at FP+4,
   saved AP at FP+8, saved FP at FP+12, saved PC at FP+16, then the
   registers named in the save mask from FP+20 upward.  */

static struct vax_frame_cache *
vax_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct vax_frame_cache *cache;
  CORE_ADDR addr;
  ULONGEST mask;
  int regnum;

  if (*this_cache)
    return (struct vax_frame_cache *) *this_cache;

  /* Allocate a new cache.  */
  cache = FRAME_OBSTACK_ZALLOC (struct vax_frame_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  /* The frame pointer is used as the base for the frame.  */
  cache->base = get_frame_register_unsigned (this_frame, VAX_FP_REGNUM);
  if (cache->base == 0)
    return cache;

  /* The register save mask and control bits determine the layout of
     the stack frame.  The mask occupies the upper half of the
     longword at FP+4.  */
  mask = get_frame_memory_unsigned (this_frame, cache->base + 4, 4) >> 16;

  /* These are always saved.  */
  cache->saved_regs[VAX_PC_REGNUM].set_addr (cache->base + 16);
  cache->saved_regs[VAX_FP_REGNUM].set_addr (cache->base + 12);
  cache->saved_regs[VAX_AP_REGNUM].set_addr (cache->base + 8);
  cache->saved_regs[VAX_PS_REGNUM].set_addr (cache->base + 4);

  /* Scan the register save mask and record the location of the saved
     registers.  */
  addr = cache->base + 20;
  for (regnum = 0; regnum < VAX_AP_REGNUM; regnum++)
    {
      if (mask & (1 << regnum))
	{
	  cache->saved_regs[regnum].set_addr (addr);
	  addr += 4;
	}
    }

  /* The CALLS/CALLG flag determines whether this frame has a General
     Argument List or a Stack Argument List.  */
  if (mask & (1 << 13))
    {
      ULONGEST numarg;

      /* This is a procedure with Stack Argument List.  Adjust the
	 stack address for the arguments that were pushed onto the
	 stack.  The return instruction will automatically pop the
	 arguments from the stack.  */
      numarg = get_frame_memory_unsigned (this_frame, addr, 1);
      addr += 4 + numarg * 4;
    }

  /* Bits 1:0 of the stack pointer were saved in the control bits.  */
  cache->saved_regs[VAX_SP_REGNUM].set_value (addr + (mask >> 14));

  return cache;
}

/* Implement the "this_id" frame-unwind method.  */

static void
vax_frame_this_id (struct frame_info *this_frame, void **this_cache,
		   struct frame_id *this_id)
{
  struct vax_frame_cache *cache = vax_frame_cache (this_frame, this_cache);

  /* This marks the outermost frame.  */
  if (cache->base == 0)
    return;

  (*this_id) = frame_id_build (cache->base, get_frame_func (this_frame));
}

/* Implement the "prev_register" frame-unwind method.  */

static struct value *
vax_frame_prev_register (struct frame_info *this_frame,
			 void **this_cache, int regnum)
{
  struct vax_frame_cache *cache = vax_frame_cache (this_frame, this_cache);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs, regnum);
}

static const struct frame_unwind vax_frame_unwind =
{
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  vax_frame_this_id,
  vax_frame_prev_register,
  NULL,
  default_frame_sniffer
};

/* Frame base: the frame pointer itself.  */

static CORE_ADDR
vax_frame_base_address (struct frame_info *this_frame, void **this_cache)
{
  struct vax_frame_cache *cache = vax_frame_cache (this_frame, this_cache);

  return cache->base;
}

/* Arguments are addressed relative to the argument pointer (AP).  */

static CORE_ADDR
vax_frame_args_address (struct frame_info *this_frame, void **this_cache)
{
  return get_frame_register_unsigned (this_frame, VAX_AP_REGNUM);
}

static const struct frame_base vax_frame_base =
{
  &vax_frame_unwind,
  vax_frame_base_address,
  vax_frame_base_address,
  vax_frame_args_address
};

/* Return number of arguments for FRAME.  The count is the first
   longword of the argument list that AP points at.  */

static int
vax_frame_num_args (struct frame_info *frame)
{
  CORE_ADDR args;

  /* Assume that the argument pointer for the outermost frame is
     hosed, as is the case on NetBSD/vax ELF.  */
  if (get_frame_base_address (frame) == 0)
    return 0;

  args = get_frame_register_unsigned (frame, VAX_AP_REGNUM);
  return get_frame_memory_unsigned (frame, args, 1);
}

/* Initialize the current architecture based on INFO.  If possible, re-use an
   architecture from ARCHES, which is a list of architectures already created
   during this debugging session.

   Called e.g. at program startup, when reading a core file, and when reading
   a binary file.  */

static struct gdbarch *
vax_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch *gdbarch;

  /* If there is already a candidate, use it.  */
  arches = gdbarch_list_lookup_by_info (arches, &info);
  if (arches != NULL)
    return arches->gdbarch;

  gdbarch = gdbarch_alloc (&info, NULL);

  /* VAX-native floating-point formats (F and D), not IEEE.  */
  set_gdbarch_float_format (gdbarch, floatformats_vax_f);
  set_gdbarch_double_format (gdbarch, floatformats_vax_d);
  set_gdbarch_long_double_format (gdbarch, floatformats_vax_d);
  set_gdbarch_long_double_bit (gdbarch, 64);

  /* Register info */
  set_gdbarch_num_regs (gdbarch, VAX_NUM_REGS);
  set_gdbarch_register_name (gdbarch, vax_register_name);
  set_gdbarch_register_type (gdbarch, vax_register_type);
  set_gdbarch_sp_regnum (gdbarch, VAX_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, VAX_PC_REGNUM);
  set_gdbarch_ps_regnum (gdbarch, VAX_PS_REGNUM);

  set_gdbarch_iterate_over_regset_sections
    (gdbarch, vax_iterate_over_regset_sections);

  /* Frame and stack info */
  set_gdbarch_skip_prologue (gdbarch, vax_skip_prologue);
  set_gdbarch_frame_num_args (gdbarch, vax_frame_num_args);
  set_gdbarch_frame_args_skip (gdbarch, 4);

  /* Stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Return value info */
  set_gdbarch_return_value (gdbarch, vax_return_value);

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, vax_push_dummy_call);
  set_gdbarch_dummy_id (gdbarch, vax_dummy_id);

  /* Breakpoint info */
  set_gdbarch_breakpoint_kind_from_pc (gdbarch, vax_breakpoint::kind_from_pc);
  set_gdbarch_sw_breakpoint_from_kind (gdbarch, vax_breakpoint::bp_from_kind);

  /* Misc info */
  set_gdbarch_deprecated_function_start_offset (gdbarch, 2);
  set_gdbarch_believe_pcc_promotion (gdbarch, 1);

  frame_base_set_default (gdbarch, &vax_frame_base);

  /* Hook in ABI-specific overrides, if they have been registered.  */
  gdbarch_init_osabi (info, gdbarch);

  frame_unwind_append_unwinder (gdbarch, &vax_frame_unwind);

  return (gdbarch);
}

void _initialize_vax_tdep ();
void
_initialize_vax_tdep ()
{
  gdbarch_register (bfd_arch_vax, vax_gdbarch_init, NULL);
}
gpl-2.0
me-oss/me-linux
drivers/net/pcmcia/axnet_cs.c
3
56696
/*======================================================================

    A PCMCIA ethernet driver for Asix AX88190-based cards

    The Asix AX88190 is a NS8390-derived chipset with a few nasty
    idiosyncracies that make it very inconvenient to support with a
    standard 8390 driver.  This driver is based on pcnet_cs, with the
    tweaked 8390 code grafted on the end.  Much of what I did was to
    clean up and update a similar driver supplied by Asix, which was
    adapted by William Lee, william@asix.com.tw.

    Copyright (C) 2001 David A. Hinds -- dahinds@users.sourceforge.net

    axnet_cs.c 1.28 2002/06/29 06:27:37

    The network driver code is based on Donald Becker's NE2000 code:

    Written 1992,1993 by Donald Becker.
    Copyright 1993 United States Government as represented by the
    Director, National Security Agency.  This software may be used and
    distributed according to the terms of the GNU General Public License,
    incorporated herein by reference.
    Donald Becker may be reached at becker@scyld.com

======================================================================*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/crc32.h>
#include "../8390.h"

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>

#include <asm/io.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

/* Register offsets relative to the card's I/O base.  */
#define AXNET_CMD	0x00
#define AXNET_DATAPORT	0x10	/* NatSemi-defined port window offset. */
#define AXNET_RESET	0x1f	/* Issue a read to reset, a write to clear. */
#define AXNET_MII_EEP	0x14	/* Offset of MII access port */
#define AXNET_TEST	0x15	/* Offset of TEST Register port */
#define AXNET_GPIO	0x17	/* Offset of General Purpose Register Port */

#define AXNET_START_PG	0x40	/* First page of TX buffer */
#define AXNET_STOP_PG	0x80	/* Last page +1 of RX ring */

#define AXNET_RDC_TIMEOUT 0x02	/* Max wait in jiffies for Tx RDC */

/* Chip-variant flags kept in axnet_dev_t.flags.  */
#define IS_AX88190	0x0001
#define IS_AX88790	0x0002

/*====================================================================*/

/* Module parameters */

MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver");
MODULE_LICENSE("GPL");

#ifdef PCMCIA_DEBUG
#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)

INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
static char *version =
"axnet_cs.c 1.28 2002/06/29 06:27:37 (David Hinds)";
#else
#define DEBUG(n, args...)
#endif

/*====================================================================*/

static int axnet_config(struct pcmcia_device *link);
static void axnet_release(struct pcmcia_device *link);
static int axnet_open(struct net_device *dev);
static int axnet_close(struct net_device *dev);
static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static irqreturn_t ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs);
static void ei_watchdog(u_long arg);
static void axnet_reset_8390(struct net_device *dev);

static int mdio_read(kio_addr_t addr, int phy_id, int loc);
static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value);

static void get_8390_hdr(struct net_device *,
			 struct e8390_pkt_hdr *, int);
static void block_input(struct net_device *dev, int count,
			struct sk_buff *skb, int ring_offset);
static void block_output(struct net_device *dev, int count,
			 const u_char *buf, const int start_page);

static void axnet_detach(struct pcmcia_device *p_dev);

static void axdev_setup(struct net_device *dev);
static void AX88190_init(struct net_device *dev, int startp);
static int ax_open(struct net_device *dev);
static int ax_close(struct net_device *dev);
static irqreturn_t ax_interrupt(int irq, void *dev_id, struct pt_regs *regs);

/*====================================================================*/

/* Per-card private state, stored after the struct ei_device in the
   netdev private area (see PRIV below).  */
typedef struct axnet_dev_t {
	struct pcmcia_device	*p_dev;
    dev_node_t node;
    caddr_t	base;
    struct timer_list	watchdog;
    int	stale, fast_poll;
    u_short	link_status;
    u_char	duplex_flag;
    int	phy_id;
    int	flags;
} axnet_dev_t;

static inline axnet_dev_t *PRIV(struct net_device *dev)
{
	/* axnet_dev_t lives immediately after the ei_device block that
	   alloc_netdev() reserved (see axnet_probe).  */
	void *p = (char *)netdev_priv(dev) + sizeof(struct ei_device);
	return p;
}

/*======================================================================

    axnet_attach() creates an "instance" of the driver, allocating
    local data structures for one device.  The device is registered
    with Card Services.

======================================================================*/

static int axnet_probe(struct pcmcia_device *link)
{
    axnet_dev_t *info;
    struct net_device *dev;

    DEBUG(0, "axnet_attach()\n");

    dev = alloc_netdev(sizeof(struct ei_device) + sizeof(axnet_dev_t),
			"eth%d", axdev_setup);

    if (!dev)
	return -ENOMEM;

    info = PRIV(dev);
    info->p_dev = link;
    link->priv = dev;
    link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
    link->irq.IRQInfo1 = IRQ_LEVEL_ID;
    link->conf.Attributes = CONF_ENABLE_IRQ;
    link->conf.IntType = INT_MEMORY_AND_IO;

    dev->open = &axnet_open;
    dev->stop = &axnet_close;
    dev->do_ioctl = &axnet_ioctl;
    SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);

    return axnet_config(link);
} /* axnet_attach */

/*======================================================================

    This deletes a driver "instance".  The device is de-registered
    with Card Services.  If it has been released, all local data
    structures are freed.  Otherwise, the structures will be freed
    when the device is released.

======================================================================*/

static void axnet_detach(struct pcmcia_device *link)
{
    struct net_device *dev = link->priv;

    DEBUG(0, "axnet_detach(0x%p)\n", link);

    if (link->dev_node)
	unregister_netdev(dev);

    axnet_release(link);

    free_netdev(dev);
} /* axnet_detach */

/*======================================================================

    This probes for a card's hardware address by reading the PROM.

======================================================================*/

static int get_prom(struct pcmcia_device *link)
{
    struct net_device *dev = link->priv;
    kio_addr_t ioaddr = dev->base_addr;
    int i, j;

    /* This is based on drivers/net/ne.c */
    struct {
	u_char value, offset;
    } program_seq[] = {
	{E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
	{0x01,	EN0_DCFG},	/* Set word-wide access. */
	{0x00,	EN0_RCNTLO},	/* Clear the count regs. */
	{0x00,	EN0_RCNTHI},
	{0x00,	EN0_IMR},	/* Mask completion irq. */
	{0xFF,	EN0_ISR},
	{E8390_RXOFF|0x40, EN0_RXCR},	/* 0x60  Set to monitor */
	{E8390_TXOFF, EN0_TXCR},	/* 0x02  and loopback mode. */
	{0x10,	EN0_RCNTLO},
	{0x00,	EN0_RCNTHI},
	{0x00,	EN0_RSARLO},	/* DMA starting at 0x0400. */
	{0x04,	EN0_RSARHI},
	{E8390_RREAD+E8390_START, E8390_CMD},
    };

    /* Not much of a test, but the alternatives are messy */
    if (link->conf.ConfigBase != 0x03c0)
	return 0;

    axnet_reset_8390(dev);
    mdelay(10);

    for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
	outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);

    /* The MAC address comes out of the remote-DMA data port a word at
       a time, low byte first.  */
    for (i = 0; i < 6; i += 2) {
	j = inw(ioaddr + AXNET_DATAPORT);
	dev->dev_addr[i] = j & 0xff;
	dev->dev_addr[i+1] = j >> 8;
    }
    return 1;
} /* get_prom */

/*======================================================================

    axnet_config() is scheduled to run after a CARD_INSERTION event
    is received, to configure the PCMCIA socket, and to make the
    ethernet device available to the system.

======================================================================*/

#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)

static int try_io_port(struct pcmcia_device *link)
{
    int j, ret;
    if (link->io.NumPorts1 == 32) {
	link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
	if (link->io.NumPorts2 > 0) {
	    /* for master/slave multifunction cards */
	    link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
	    link->irq.Attributes = 
		IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
	}
    } else {
	/* This should be two 16-port windows */
	link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
	link->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
    }
    if (link->io.BasePort1 == 0) {
	/* No preferred base: probe candidate bases in the 0x300 range. */
	link->io.IOAddrLines = 16;
	for (j = 0; j < 0x400; j += 0x20) {
	    link->io.BasePort1 = j ^ 0x300;
	    link->io.BasePort2 = (j ^ 0x300) + 0x10;
	    ret = pcmcia_request_io(link, &link->io);
	    if (ret == CS_SUCCESS) return ret;
	}
	return ret;
    } else {
	return pcmcia_request_io(link, &link->io);
    }
}

static int axnet_config(struct pcmcia_device *link)
{
    struct net_device *dev = link->priv;
    axnet_dev_t *info = PRIV(dev);
    tuple_t tuple;
    cisparse_t parse;
    int i, j, last_ret, last_fn;
    u_short buf[64];

    DEBUG(0, "axnet_config(0x%p)\n", link);

    tuple.Attributes = 0;
    tuple.TupleData = (cisdata_t *)buf;
    tuple.TupleDataMax = sizeof(buf);
    tuple.TupleOffset = 0;
    tuple.DesiredTuple = CISTPL_CONFIG;
    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
    link->conf.ConfigBase = parse.config.base;
    /* don't trust the CIS on this; Linksys got it wrong */
    link->conf.Present = 0x63;

    /* Walk the CIS config-table entries looking for a usable I/O window. */
    tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
    tuple.Attributes = 0;
    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
    while (last_ret == CS_SUCCESS) {
	cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
	cistpl_io_t *io = &(parse.cftable_entry.io);
	
	if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
		pcmcia_parse_tuple(link, &tuple, &parse) != 0 ||
		cfg->index == 0 || cfg->io.nwin == 0)
	    goto next_entry;
	
	link->conf.ConfigIndex = 0x05;
	/* For multifunction cards, by convention, we configure the
	   network function with window 0, and serial with window 1 */
	if (io->nwin > 1) {
	    i = (io->win[1].len > io->win[0].len);
	    link->io.BasePort2 = io->win[1-i].base;
	    link->io.NumPorts2 = io->win[1-i].len;
	} else {
	    i = link->io.NumPorts2 = 0;
	}
	link->io.BasePort1 = io->win[i].base;
	link->io.NumPorts1 = io->win[i].len;
	link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
	if (link->io.NumPorts1 + link->io.NumPorts2 >= 32) {
	    last_ret = try_io_port(link);
	    if (last_ret == CS_SUCCESS) break;
	}
    next_entry:
	last_ret = pcmcia_get_next_tuple(link, &tuple);
    }
    if (last_ret != CS_SUCCESS) {
	cs_error(link, RequestIO, last_ret);
	goto failed;
    }

    CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
    
    if (link->io.NumPorts2 == 8) {
	link->conf.Attributes |= CONF_ENABLE_SPKR;
	link->conf.Status = CCSR_AUDIO_ENA;
    }
    
    CS_CHECK(RequestConfiguration,
	     pcmcia_request_configuration(link, &link->conf));
    dev->irq = link->irq.AssignedIRQ;
    dev->base_addr = link->io.BasePort1;

    if (!get_prom(link)) {
	printk(KERN_NOTICE "axnet_cs: this is not an AX88190 card!\n");
	printk(KERN_NOTICE "axnet_cs: use pcnet_cs instead.\n");
	goto failed;
    }

    ei_status.name = "AX88190";
    ei_status.word16 = 1;
    ei_status.tx_start_page = AXNET_START_PG;
    ei_status.rx_start_page = AXNET_START_PG + TX_PAGES;
    ei_status.stop_page = AXNET_STOP_PG;
    ei_status.reset_8390 = &axnet_reset_8390;
    ei_status.get_8390_hdr = &get_8390_hdr;
    ei_status.block_input = &block_input;
    ei_status.block_output = &block_output;

    /* The AX88790 has a readable TEST register; the AX88190 does not
       — presumably that is what this probe distinguishes.  */
    if (inb(dev->base_addr + AXNET_TEST) != 0)
	info->flags |= IS_AX88790;
    else
	info->flags |= IS_AX88190;

    if (info->flags & IS_AX88790)
	outb(0x10, dev->base_addr + AXNET_GPIO);  /* select Internal PHY */

    /* Scan the MII bus for a responding PHY.  */
    for (i = 0; i < 32; i++) {
	j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
	if ((j != 0) && (j != 0xffff)) break;
    }

    /* Maybe PHY is in power down mode. (PPD_SET = 1) 
       Bit 2 of CCSR is active low. */ 
    if (i == 32) {
	conf_reg_t reg = { 0, CS_WRITE, CISREG_CCSR, 0x04 };
 	pcmcia_access_configuration_register(link, &reg);
	for (i = 0; i < 32; i++) {
	    j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
	    if ((j != 0) && (j != 0xffff)) break;
	}
    }

    info->phy_id = (i < 32) ? i : -1;
    link->dev_node = &info->node;
    SET_NETDEV_DEV(dev, &handle_to_dev(link));

    if (register_netdev(dev) != 0) {
	printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
	link->dev_node = NULL;
	goto failed;
    }

    strcpy(info->node.dev_name, dev->name);

    printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, hw_addr ",
	   dev->name, ((info->flags & IS_AX88790) ? 7 : 1),
	   dev->base_addr, dev->irq);
    for (i = 0; i < 6; i++)
	printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
    if (info->phy_id != -1) {
	DEBUG(0, "  MII transceiver at index %d, status %x.\n", info->phy_id, j);
    } else {
	printk(KERN_NOTICE "  No MII transceivers found!\n");
    }
    return 0;

cs_failed:
    cs_error(link, last_fn, last_ret);
failed:
    axnet_release(link);
    return -ENODEV;
} /* axnet_config */

/*======================================================================

    After a card is removed, axnet_release() will unregister the net
    device, and release the PCMCIA configuration.  If the device is
    still open, this will be postponed until it is closed.

======================================================================*/

static void axnet_release(struct pcmcia_device *link)
{
	pcmcia_disable_device(link);
}

static int axnet_suspend(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	if (link->open)
		netif_device_detach(dev);

	return 0;
}

static int axnet_resume(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	if (link->open) {
		axnet_reset_8390(dev);
		AX88190_init(dev, 1);
		netif_device_attach(dev);
	}

	return 0;
}

/*======================================================================

    MII interface support

======================================================================*/

/* Bit-banged MDIO protocol over the AXNET_MII_EEP port.  */
#define MDIO_SHIFT_CLK		0x01
#define MDIO_DATA_WRITE0	0x00
#define MDIO_DATA_WRITE1	0x08
#define MDIO_DATA_READ		0x04
#define MDIO_MASK		0x0f
#define MDIO_ENB_IN		0x02

static void mdio_sync(kio_addr_t addr)
{
    int bits;
    /* 32 clocked 1s to synchronize the PHY's MDIO state machine.  */
    for (bits = 0; bits < 32; bits++) {
	outb_p(MDIO_DATA_WRITE1, addr);
	outb_p(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
    }
}

static int mdio_read(kio_addr_t addr, int phy_id, int loc)
{
    u_int cmd = (0xf6<<10)|(phy_id<<5)|loc;
    int i, retval = 0;

    mdio_sync(addr);
    for (i = 14; i >= 0; i--) {
	int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
	outb_p(dat, addr);
	outb_p(dat | MDIO_SHIFT_CLK, addr);
    }
    for (i = 19; i > 0; i--) {
	outb_p(MDIO_ENB_IN, addr);
	retval = (retval << 1) | ((inb_p(addr) & MDIO_DATA_READ) != 0);
	outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
    }
    return (retval>>1) & 0xffff;
}

static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
{
    u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
    int i;

    mdio_sync(addr);
    for (i = 31; i >= 0; i--) {
	int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
	outb_p(dat, addr);
	outb_p(dat | MDIO_SHIFT_CLK, addr);
    }
    for (i = 1; i >= 0; i--) {
	outb_p(MDIO_ENB_IN, addr);
	outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
    }
}

/*====================================================================*/

static int axnet_open(struct net_device *dev)
{
    axnet_dev_t *info = PRIV(dev);
    struct pcmcia_device *link = info->p_dev;
    
    DEBUG(2, "axnet_open('%s')\n", dev->name);

    if (!pcmcia_dev_present(link))
	return -ENODEV;

    link->open++;

    request_irq(dev->irq, ei_irq_wrapper, SA_SHIRQ, "axnet_cs", dev);

    info->link_status = 0x00;
    init_timer(&info->watchdog);
    info->watchdog.function = &ei_watchdog;
    info->watchdog.data = (u_long)dev;
    info->watchdog.expires = jiffies + HZ;
    add_timer(&info->watchdog);

    return ax_open(dev);
} /* axnet_open */

/*====================================================================*/

static int axnet_close(struct net_device *dev)
{
    axnet_dev_t *info = PRIV(dev);
    struct pcmcia_device *link = info->p_dev;

    DEBUG(2, "axnet_close('%s')\n", dev->name);

    ax_close(dev);
    free_irq(dev->irq, dev);
    
    link->open--;
    netif_stop_queue(dev);
    del_timer_sync(&info->watchdog);

    return 0;
} /* axnet_close */

/*======================================================================

    Hard reset the card.  This used to pause for the same period that
    a 8390 reset command required, but that shouldn't be necessary.

======================================================================*/

static void axnet_reset_8390(struct net_device *dev)
{
    kio_addr_t nic_base = dev->base_addr;
    int i;

    ei_status.txing = ei_status.dmaing = 0;

    outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD);

    outb(inb(nic_base + AXNET_RESET), nic_base + AXNET_RESET);

    /* Poll up to ~10 ms for the reset-complete interrupt status bit.  */
    for (i = 0; i < 100; i++) {
	if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0)
	    break;
	udelay(100);
    }
    outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
    
    if (i == 100)
	printk(KERN_ERR "%s: axnet_reset_8390() did not complete.\n",
	       dev->name);
    
} /* axnet_reset_8390 */

/*====================================================================*/

static irqreturn_t ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = dev_id;
    PRIV(dev)->stale = 0;
    return ax_interrupt(irq, dev_id, regs);
}

static void ei_watchdog(u_long arg)
{
    struct net_device *dev = (struct net_device *)(arg);
    axnet_dev_t *info = PRIV(dev);
    kio_addr_t nic_base = dev->base_addr;
    kio_addr_t mii_addr = nic_base + AXNET_MII_EEP;
    u_short link;

    if (!netif_device_present(dev)) goto reschedule;

    /* Check for pending interrupt with expired latency timer: with
       this, we can limp along even if the interrupt is blocked */
    if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
	if (!info->fast_poll)
	    printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
	ei_irq_wrapper(dev->irq, dev, NULL);
	info->fast_poll = HZ;
    }
    if (info->fast_poll) {
	info->fast_poll--;
	info->watchdog.expires = jiffies + 1;
	add_timer(&info->watchdog);
	return;
    }

    if (info->phy_id < 0)
	goto reschedule;

    /* Poll MII BMSR; link-status bit is 0x0004.  */
    link = mdio_read(mii_addr, info->phy_id, 1);
    if (!link || (link == 0xffff)) {
	printk(KERN_INFO "%s: MII is missing!\n", dev->name);
	info->phy_id = -1;
	goto reschedule;
    }

    link &= 0x0004;
    if (link != info->link_status) {
	u_short p = mdio_read(mii_addr, info->phy_id, 5);
	printk(KERN_INFO "%s: %s link beat\n", dev->name,
	       (link) ? "found" : "lost");
	if (link) {
	    info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00;
	    if (p)
		printk(KERN_INFO "%s: autonegotiation complete: "
		       "%sbaseT-%cD selected\n", dev->name,
		       ((p & 0x0180) ? "100" : "10"),
		       ((p & 0x0140) ? 'F' : 'H'));
	    else
		printk(KERN_INFO "%s: link partner did not autonegotiate\n",
		       dev->name);
	    AX88190_init(dev, 1);
	}
	info->link_status = link;
    }

reschedule:
    info->watchdog.expires = jiffies + HZ;
    add_timer(&info->watchdog);
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strcpy(info->driver, "axnet_cs");
}

static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
};

/*====================================================================*/

static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    axnet_dev_t *info = PRIV(dev);
    u16 *data = (u16 *)&rq->ifr_ifru;
    kio_addr_t mii_addr = dev->base_addr + AXNET_MII_EEP;
    switch (cmd) {
    case SIOCGMIIPHY:
	data[0] = info->phy_id;
	/* fall through */
    case SIOCGMIIREG:		/* Read MII PHY register. */
	data[3] = mdio_read(mii_addr, data[0], data[1] & 0x1f);
	return 0;
    case SIOCSMIIREG:		/* Write MII PHY register. */
	if (!capable(CAP_NET_ADMIN))
	    return -EPERM;
	mdio_write(mii_addr, data[0], data[1] & 0x1f, data[2]);
	return 0;
    }
    return -EOPNOTSUPP;
}

/*====================================================================*/

static void get_8390_hdr(struct net_device *dev,
			 struct e8390_pkt_hdr *hdr,
			 int ring_page)
{
    kio_addr_t nic_base = dev->base_addr;

    outb_p(0, nic_base + EN0_RSARLO);		/* On page boundary */
    outb_p(ring_page, nic_base + EN0_RSARHI);
    outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);

    insw(nic_base + AXNET_DATAPORT, hdr,
	    sizeof(struct e8390_pkt_hdr)>>1);
    /* Fix for big endian systems */
    hdr->count = le16_to_cpu(hdr->count);

}

/*====================================================================*/

static void block_input(struct net_device *dev, int count,
			struct sk_buff *skb, int ring_offset)
{
    kio_addr_t nic_base = dev->base_addr;
    int xfer_count = count;
    char *buf = skb->data;

#ifdef PCMCIA_DEBUG
    if ((ei_debug > 4) && (count != 4))
	printk(KERN_DEBUG "%s: [bi=%d]\n", dev->name, count+4);
#endif
    outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
    outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
    outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);

    insw(nic_base + AXNET_DATAPORT,buf,count>>1);
    if (count & 0x01)
	buf[count-1] = inb(nic_base + AXNET_DATAPORT), xfer_count++;

}

/*====================================================================*/

static void block_output(struct net_device *dev, int count,
			 const u_char *buf, const int start_page)
{
    kio_addr_t nic_base = dev->base_addr;

#ifdef PCMCIA_DEBUG
    if (ei_debug > 4)
	printk(KERN_DEBUG "%s: [bo=%d]\n", dev->name, count);
#endif

    /* Round the count up for word writes.  Do we need to do this?
       What effect will an odd byte count have on the 8390?
       I should check someday. */
    if (count & 0x01)
	count++;

    outb_p(0x00, nic_base + EN0_RSARLO);
    outb_p(start_page, nic_base + EN0_RSARHI);
    outb_p(E8390_RWRITE+E8390_START, nic_base + AXNET_CMD);
    outsw(nic_base + AXNET_DATAPORT, buf, count>>1);
}

static struct pcmcia_device_id axnet_ids[] = {
	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081),
	PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301),
	PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0301),
	PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0303),
	PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309),
	PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106),
	PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab),
	PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc),
	PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef),
	PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef),
	PCMCIA_DEVICE_PROD_ID12("Billionton", "LNA-100B", 0x552ab682, 0xbc3b87e1),
	PCMCIA_DEVICE_PROD_ID12("CHEETAH ETHERCARD", "EN2228", 0x00fa7bc8, 0x00e990cc),
	PCMCIA_DEVICE_PROD_ID12("CNet", "CNF301", 0xbc477dde, 0x78c5f40b),
	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXD", 0x5261440f, 0x436768c5),
	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEtherII PCC-TXD", 0x5261440f, 0x730df72e),
	PCMCIA_DEVICE_PROD_ID12("Dynalink", "L100C16", 0x55632fd5, 0x66bc2a90),
	PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8),
	PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609),
	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04),
	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116),
	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058),
	PCMCIA_DEVICE_PROD_ID14("Network Everywhere", "AX88190", 0x820a67b6, 0xab9be5ef),
	/* this is not specific enough */
	/* PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202), */
	PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, axnet_ids);

static struct pcmcia_driver axnet_cs_driver = {
	.owner		= THIS_MODULE,
	.drv		= {
		.name	= "axnet_cs",
	},
	.probe		= axnet_probe,
	.remove		= axnet_detach,
	.id_table       = axnet_ids,
	.suspend	= axnet_suspend,
	.resume		= axnet_resume,
};

static int __init init_axnet_cs(void)
{
	return pcmcia_register_driver(&axnet_cs_driver);
}

static void __exit exit_axnet_cs(void)
{
	pcmcia_unregister_driver(&axnet_cs_driver);
}

module_init(init_axnet_cs);
module_exit(exit_axnet_cs);

/*====================================================================*/

/* 8390.c: A general NS8390 ethernet driver core for linux. */
/*
	Written 1992-94 by Donald Becker.
  
	Copyright 1993 United States Government as represented by the
	Director, National Security Agency.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

  This is the chip-specific code for many 8390-based ethernet adaptors.
  This is not a complete driver, it must be combined with board-specific
  code such as ne.c, wd.c, 3c503.c, etc.

  Seeing how at least eight drivers use this code, (not counting the
  PCMCIA ones either) it is easy to break some card by what seems like
  a simple innocent change.
Please contact me or Donald if you think you have found something that needs changing. -- PG Changelog: Paul Gortmaker : remove set_bit lock, other cleanups. Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to ei_block_input() for eth_io_copy_and_sum(). Paul Gortmaker : exchange static int ei_pingpong for a #define, also add better Tx error handling. Paul Gortmaker : rewrite Rx overrun handling as per NS specs. Alexey Kuznetsov : use the 8390's six bit hash multicast filter. Paul Gortmaker : tweak ANK's above multicast changes a bit. Paul Gortmaker : update packet statistics for v2.1.x Alan Cox : support arbitary stupid port mappings on the 68K Macintosh. Support >16bit I/O spaces Paul Gortmaker : add kmod support for auto-loading of the 8390 module by all drivers that require it. Alan Cox : Spinlocking work, added 'BUG_83C690' Paul Gortmaker : Separate out Tx timeout code from Tx path. Sources: The National Semiconductor LAN Databook, and the 3Com 3c503 databook. */ static const char *version_8390 = "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@scyld.com)\n"; #include <linux/bitops.h> #include <asm/irq.h> #include <linux/fcntl.h> #include <linux/in.h> #include <linux/interrupt.h> #include <linux/etherdevice.h> #define BUG_83C690 /* These are the operational function interfaces to board-specific routines. void reset_8390(struct net_device *dev) Resets the board associated with DEV, including a hardware reset of the 8390. This is only called when there is a transmit timeout, and it is always followed by 8390_init(). void block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page) Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The "page" value uses the 8390's 256-byte pages. void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page) Read the 4 byte, page aligned 8390 header. *If* there is a subsequent read, it will be of the rest of the packet. 
void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) Read COUNT bytes from the packet buffer into the skb data area. Start reading from RING_OFFSET, the address as the 8390 sees it. This will always follow the read of the 8390 header. */ #define ei_reset_8390 (ei_local->reset_8390) #define ei_block_output (ei_local->block_output) #define ei_block_input (ei_local->block_input) #define ei_get_8390_hdr (ei_local->get_8390_hdr) /* use 0 for production, 1 for verification, >2 for debug */ #ifndef ei_debug int ei_debug = 1; #endif /* Index to functions. */ static void ei_tx_intr(struct net_device *dev); static void ei_tx_err(struct net_device *dev); static void ei_tx_timeout(struct net_device *dev); static void ei_receive(struct net_device *dev); static void ei_rx_overrun(struct net_device *dev); /* Routines generic to NS8390-based boards. */ static void NS8390_trigger_send(struct net_device *dev, unsigned int length, int start_page); static void set_multicast_list(struct net_device *dev); static void do_set_multicast_list(struct net_device *dev); /* * SMP and the 8390 setup. * * The 8390 isnt exactly designed to be multithreaded on RX/TX. There is * a page register that controls bank and packet buffer access. We guard * this with ei_local->page_lock. Nobody should assume or set the page other * than zero when the lock is not held. Lock holders must restore page 0 * before unlocking. Even pure readers must take the lock to protect in * page 0. * * To make life difficult the chip can also be very slow. We therefore can't * just use spinlocks. For the longer lockups we disable the irq the device * sits on and hold the lock. We must hold the lock because there is a dual * processor case other than interrupts (get stats/set multicast list in * parallel with each other and transmit). * * Note: in theory we can just disable the irq on the card _but_ there is * a latency on SMP irq delivery. 
So we can easily go "disable irq" "sync irqs"
 * enter lock, take the queued irq. So we waddle instead of flying.
 *
 * Finally by special arrangement for the purpose of being generally
 * annoying the transmit function is called bh atomic. That places
 * restrictions on the user context callers as disable_irq won't save
 * them.
 */

/**
 * ax_open - Open/initialize the board.
 * @dev: network device to initialize
 *
 * This routine goes all-out, setting everything
 * up anew at each open, even though many of these registers should only
 * need to be set once at boot.  Runs AX88190_init() under the page lock
 * and starts the transmit queue.  Returns 0 (cannot fail).
 */
static int ax_open(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);

#ifdef HAVE_TX_TIMEOUT
	/* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout
	   wrapper that does e.g. media check & then calls ei_tx_timeout. */
	if (dev->tx_timeout == NULL)
		dev->tx_timeout = ei_tx_timeout;
	if (dev->watchdog_timeo <= 0)
		dev->watchdog_timeo = TX_TIMEOUT;
#endif

	/*
	 * Grab the page lock so we own the register set, then call
	 * the init function.
	 */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	AX88190_init(dev, 1);
	/* Set the flag before we drop the lock, That way the IRQ arrives
	   after its set and we get no silly warnings */
	netif_start_queue(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	/* Allow the interrupt handler to run from here on. */
	ei_local->irqlock = 0;
	return 0;
}

/* Convenience accessor for the 8390 page lock in @dev's private state. */
#define dev_lock(dev) (((struct ei_device *)netdev_priv(dev))->page_lock)

/**
 * ax_close - shut down network device
 * @dev: network device to close
 *
 * Opposite of ax_open(). Only used when "ifconfig <devname> down" is done.
 */
int ax_close(struct net_device *dev)
{
	unsigned long flags;

	/*
	 * Hold the page lock during close
	 */
	spin_lock_irqsave(&dev_lock(dev), flags);
	AX88190_init(dev, 0);	/* startp == 0: leave the chip stopped */
	spin_unlock_irqrestore(&dev_lock(dev), flags);
	netif_stop_queue(dev);
	return 0;
}

/**
 * ei_tx_timeout - handle transmit time out condition
 * @dev: network device which has apparently fallen asleep
 *
 * Called by kernel when device never acknowledges a transmit has
 * completed (or failed) - i.e. never posted a Tx related interrupt.
 * Logs the TSR/ISR state, resets the 8390 and restarts the queue.
 */
void ei_tx_timeout(struct net_device *dev)
{
	long e8390_base = dev->base_addr;
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
	int txsr, isr, tickssofar = jiffies - dev->trans_start;
	unsigned long flags;

	ei_local->stat.tx_errors++;

	/* Sample status registers under the page lock only. */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	txsr = inb(e8390_base+EN0_TSR);
	isr = inb(e8390_base+EN0_ISR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
		dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
		(isr) ? "lost interrupt?" : "cable problem?",
		txsr, isr, tickssofar);

	if (!isr && !ei_local->stat.tx_packets)
	{
		/* The 8390 probably hasn't gotten on the cable yet. */
		ei_local->interface_num ^= 1;	/* Try a different xcvr. */
	}

	/* Ugly but a reset can be slow, yet must be protected */
	disable_irq_nosync(dev->irq);
	spin_lock(&ei_local->page_lock);

	/* Try to restart the card.  Perhaps the user has fixed something. */
	ei_reset_8390(dev);
	AX88190_init(dev, 1);

	spin_unlock(&ei_local->page_lock);
	enable_irq(dev->irq);
	netif_wake_queue(dev);
}

/**
 * ei_start_xmit - begin packet transmission
 * @skb: packet to be sent
 * @dev: network device to which packet is sent
 *
 * Sends a packet to an 8390 network device.
*/ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) { long e8390_base = dev->base_addr; struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); int length, send_length, output_page; unsigned long flags; u8 packet[ETH_ZLEN]; netif_stop_queue(dev); length = skb->len; /* Mask interrupts from the ethercard. SMP: We have to grab the lock here otherwise the IRQ handler on another CPU can flip window and race the IRQ mask set. We end up trashing the mcast filter not disabling irqs if we don't lock */ spin_lock_irqsave(&ei_local->page_lock, flags); outb_p(0x00, e8390_base + EN0_IMR); spin_unlock_irqrestore(&ei_local->page_lock, flags); /* * Slow phase with lock held. */ disable_irq_nosync(dev->irq); spin_lock(&ei_local->page_lock); ei_local->irqlock = 1; send_length = ETH_ZLEN < length ? length : ETH_ZLEN; /* * We have two Tx slots available for use. Find the first free * slot, and then perform some sanity checks. With two Tx bufs, * you get very close to transmitting back-to-back packets. With * only one Tx buf, the transmitter sits idle while you reload the * card, leaving a substantial gap between each transmitted packet. */ if (ei_local->tx1 == 0) { output_page = ei_local->tx_start_page; ei_local->tx1 = send_length; if (ei_debug && ei_local->tx2 > 0) printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n", dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing); } else if (ei_local->tx2 == 0) { output_page = ei_local->tx_start_page + TX_PAGES/2; ei_local->tx2 = send_length; if (ei_debug && ei_local->tx1 > 0) printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n", dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing); } else { /* We should never get here. */ if (ei_debug) printk(KERN_DEBUG "%s: No Tx buffers free! 
tx1=%d tx2=%d last=%d\n", dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx); ei_local->irqlock = 0; netif_stop_queue(dev); outb_p(ENISR_ALL, e8390_base + EN0_IMR); spin_unlock(&ei_local->page_lock); enable_irq(dev->irq); ei_local->stat.tx_errors++; return 1; } /* * Okay, now upload the packet and trigger a send if the transmitter * isn't already sending. If it is busy, the interrupt handler will * trigger the send later, upon receiving a Tx done interrupt. */ if (length == skb->len) ei_block_output(dev, length, skb->data, output_page); else { memset(packet, 0, ETH_ZLEN); memcpy(packet, skb->data, skb->len); ei_block_output(dev, length, packet, output_page); } if (! ei_local->txing) { ei_local->txing = 1; NS8390_trigger_send(dev, send_length, output_page); dev->trans_start = jiffies; if (output_page == ei_local->tx_start_page) { ei_local->tx1 = -1; ei_local->lasttx = -1; } else { ei_local->tx2 = -1; ei_local->lasttx = -2; } } else ei_local->txqueue++; if (ei_local->tx1 && ei_local->tx2) netif_stop_queue(dev); else netif_start_queue(dev); /* Turn 8390 interrupts back on. */ ei_local->irqlock = 0; outb_p(ENISR_ALL, e8390_base + EN0_IMR); spin_unlock(&ei_local->page_lock); enable_irq(dev->irq); dev_kfree_skb (skb); ei_local->stat.tx_bytes += send_length; return 0; } /** * ax_interrupt - handle the interrupts from an 8390 * @irq: interrupt number * @dev_id: a pointer to the net_device * @regs: unused * * Handle the ether interface interrupts. We pull packets from * the 8390 via the card specific functions and fire them at the networking * stack. We also handle transmit completions and wake the transmit path if * necessary. We also update the counters and do other housekeeping as * needed. 
*/ static irqreturn_t ax_interrupt(int irq, void *dev_id, struct pt_regs * regs) { struct net_device *dev = dev_id; long e8390_base; int interrupts, nr_serviced = 0, i; struct ei_device *ei_local; int handled = 0; if (dev == NULL) { printk ("net_interrupt(): irq %d for unknown device.\n", irq); return IRQ_NONE; } e8390_base = dev->base_addr; ei_local = (struct ei_device *) netdev_priv(dev); /* * Protect the irq test too. */ spin_lock(&ei_local->page_lock); if (ei_local->irqlock) { #if 1 /* This might just be an interrupt for a PCI device sharing this line */ /* The "irqlock" check is only for testing. */ printk(ei_local->irqlock ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n" : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n", dev->name, inb_p(e8390_base + EN0_ISR), inb_p(e8390_base + EN0_IMR)); #endif spin_unlock(&ei_local->page_lock); return IRQ_NONE; } if (ei_debug > 3) printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name, inb_p(e8390_base + EN0_ISR)); outb_p(0x00, e8390_base + EN0_ISR); ei_local->irqlock = 1; /* !!Assumption!! -- we stay in page 0. Don't break this. */ while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0 && ++nr_serviced < MAX_SERVICE) { if (!netif_running(dev) || (interrupts == 0xff)) { if (ei_debug > 1) printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name); outb_p(interrupts, e8390_base + EN0_ISR); interrupts = 0; break; } handled = 1; /* AX88190 bug fix. */ outb_p(interrupts, e8390_base + EN0_ISR); for (i = 0; i < 10; i++) { if (!(inb(e8390_base + EN0_ISR) & interrupts)) break; outb_p(0, e8390_base + EN0_ISR); outb_p(interrupts, e8390_base + EN0_ISR); } if (interrupts & ENISR_OVER) ei_rx_overrun(dev); else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) { /* Got a good (?) packet. */ ei_receive(dev); } /* Push the next to-transmit packet through. 
*/ if (interrupts & ENISR_TX) ei_tx_intr(dev); else if (interrupts & ENISR_TX_ERR) ei_tx_err(dev); if (interrupts & ENISR_COUNTERS) { ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0); ei_local->stat.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1); ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2); } } if (interrupts && ei_debug) { handled = 1; if (nr_serviced >= MAX_SERVICE) { /* 0xFF is valid for a card removal */ if(interrupts!=0xFF) printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n", dev->name, interrupts); outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */ } else { printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts); outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */ } } /* Turn 8390 interrupts back on. */ ei_local->irqlock = 0; outb_p(ENISR_ALL, e8390_base + EN0_IMR); spin_unlock(&ei_local->page_lock); return IRQ_RETVAL(handled); } /** * ei_tx_err - handle transmitter error * @dev: network device which threw the exception * * A transmitter error has happened. Most likely excess collisions (which * is a fairly normal condition). If the error is one where the Tx will * have been aborted, we try and send another one right away, instead of * letting the failed packet sit and collect dust in the Tx buffer. This * is a much better solution as it avoids kernel based Tx timeouts, and * an unnecessary card reset. * * Called with lock held. 
*/ static void ei_tx_err(struct net_device *dev) { long e8390_base = dev->base_addr; struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); unsigned char txsr = inb_p(e8390_base+EN0_TSR); unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU); #ifdef VERBOSE_ERROR_DUMP printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr); if (txsr & ENTSR_ABT) printk("excess-collisions "); if (txsr & ENTSR_ND) printk("non-deferral "); if (txsr & ENTSR_CRS) printk("lost-carrier "); if (txsr & ENTSR_FU) printk("FIFO-underrun "); if (txsr & ENTSR_CDH) printk("lost-heartbeat "); printk("\n"); #endif if (tx_was_aborted) ei_tx_intr(dev); else { ei_local->stat.tx_errors++; if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++; if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++; if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++; } } /** * ei_tx_intr - transmit interrupt handler * @dev: network device for which tx intr is handled * * We have finished a transmit: check for errors and then trigger the next * packet to be sent. Called with lock held. */ static void ei_tx_intr(struct net_device *dev) { long e8390_base = dev->base_addr; struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); int status = inb(e8390_base + EN0_TSR); /* * There are two Tx buffers, see which one finished, and trigger * the send of another one if it exists. 
*/ ei_local->txqueue--; if (ei_local->tx1 < 0) { if (ei_local->lasttx != 1 && ei_local->lasttx != -1) printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n", ei_local->name, ei_local->lasttx, ei_local->tx1); ei_local->tx1 = 0; if (ei_local->tx2 > 0) { ei_local->txing = 1; NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); dev->trans_start = jiffies; ei_local->tx2 = -1, ei_local->lasttx = 2; } else ei_local->lasttx = 20, ei_local->txing = 0; } else if (ei_local->tx2 < 0) { if (ei_local->lasttx != 2 && ei_local->lasttx != -2) printk("%s: bogus last_tx_buffer %d, tx2=%d.\n", ei_local->name, ei_local->lasttx, ei_local->tx2); ei_local->tx2 = 0; if (ei_local->tx1 > 0) { ei_local->txing = 1; NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); dev->trans_start = jiffies; ei_local->tx1 = -1; ei_local->lasttx = 1; } else ei_local->lasttx = 10, ei_local->txing = 0; } // else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n", // dev->name, ei_local->lasttx); /* Minimize Tx latency: update the statistics after we restart TXing. */ if (status & ENTSR_COL) ei_local->stat.collisions++; if (status & ENTSR_PTX) ei_local->stat.tx_packets++; else { ei_local->stat.tx_errors++; if (status & ENTSR_ABT) { ei_local->stat.tx_aborted_errors++; ei_local->stat.collisions += 16; } if (status & ENTSR_CRS) ei_local->stat.tx_carrier_errors++; if (status & ENTSR_FU) ei_local->stat.tx_fifo_errors++; if (status & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++; if (status & ENTSR_OWC) ei_local->stat.tx_window_errors++; } netif_wake_queue(dev); } /** * ei_receive - receive some packets * @dev: network device with which receive will be run * * We have a good packet(s), get it/them out of the buffers. * Called with lock held. 
*/ static void ei_receive(struct net_device *dev) { long e8390_base = dev->base_addr; struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); unsigned char rxing_page, this_frame, next_frame; unsigned short current_offset; int rx_pkt_count = 0; struct e8390_pkt_hdr rx_frame; while (++rx_pkt_count < 10) { int pkt_len, pkt_stat; /* Get the rx page (incoming packet pointer). */ rxing_page = inb_p(e8390_base + EN1_CURPAG -1); /* Remove one frame from the ring. Boundary is always a page behind. */ this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1; if (this_frame >= ei_local->stop_page) this_frame = ei_local->rx_start_page; /* Someday we'll omit the previous, iff we never get this message. (There is at least one clone claimed to have a problem.) Keep quiet if it looks like a card removal. One problem here is that some clones crash in roughly the same way. */ if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF)) printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n", dev->name, this_frame, ei_local->current_page); if (this_frame == rxing_page) /* Read all the frames? 
*/ break; /* Done for now */ current_offset = this_frame << 8; ei_get_8390_hdr(dev, &rx_frame, this_frame); pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr); pkt_stat = rx_frame.status; next_frame = this_frame + 1 + ((pkt_len+4)>>8); if (pkt_len < 60 || pkt_len > 1518) { if (ei_debug) printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n", dev->name, rx_frame.count, rx_frame.status, rx_frame.next); ei_local->stat.rx_errors++; ei_local->stat.rx_length_errors++; } else if ((pkt_stat & 0x0F) == ENRSR_RXOK) { struct sk_buff *skb; skb = dev_alloc_skb(pkt_len+2); if (skb == NULL) { if (ei_debug > 1) printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len); ei_local->stat.rx_dropped++; break; } else { skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ skb->dev = dev; skb_put(skb, pkt_len); /* Make room */ ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; ei_local->stat.rx_packets++; ei_local->stat.rx_bytes += pkt_len; if (pkt_stat & ENRSR_PHY) ei_local->stat.multicast++; } } else { if (ei_debug) printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n", dev->name, rx_frame.status, rx_frame.next, rx_frame.count); ei_local->stat.rx_errors++; /* NB: The NIC counts CRC, frame and missed errors. */ if (pkt_stat & ENRSR_FO) ei_local->stat.rx_fifo_errors++; } next_frame = rx_frame.next; /* This _should_ never happen: it's here for avoiding bad clones. */ if (next_frame >= ei_local->stop_page) { printk("%s: next frame inconsistency, %#2x\n", dev->name, next_frame); next_frame = ei_local->rx_start_page; } ei_local->current_page = next_frame; outb_p(next_frame-1, e8390_base+EN0_BOUNDARY); } return; } /** * ei_rx_overrun - handle receiver overrun * @dev: network device which threw exception * * We have a receiver overrun: we have to kick the 8390 to get it started * again. 
Problem is that you have to kick it exactly as NS prescribes in * the updated datasheets, or "the NIC may act in an unpredictable manner." * This includes causing "the NIC to defer indefinitely when it is stopped * on a busy network." Ugh. * Called with lock held. Don't call this with the interrupts off or your * computer will hate you - it takes 10ms or so. */ static void ei_rx_overrun(struct net_device *dev) { axnet_dev_t *info = PRIV(dev); long e8390_base = dev->base_addr; unsigned char was_txing, must_resend = 0; struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); /* * Record whether a Tx was in progress and then issue the * stop command. */ was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS; outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); if (ei_debug > 1) printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name); ei_local->stat.rx_over_errors++; /* * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. * Early datasheets said to poll the reset bit, but now they say that * it "is not a reliable indicator and subsequently should be ignored." * We wait at least 10ms. */ mdelay(10); /* * Reset RBCR[01] back to zero as per magic incantation. */ outb_p(0x00, e8390_base+EN0_RCNTLO); outb_p(0x00, e8390_base+EN0_RCNTHI); /* * See if any Tx was interrupted or not. According to NS, this * step is vital, and skipping it will cause no end of havoc. */ if (was_txing) { unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR); if (!tx_completed) must_resend = 1; } /* * Have to enter loopback mode and then restart the NIC before * you are allowed to slurp packets up off the ring. */ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD); /* * Clear the Rx ring of all the debris, and ack the interrupt. */ ei_receive(dev); /* * Leave loopback mode, and resend any packet that got stopped. 
*/ outb_p(E8390_TXCONFIG | info->duplex_flag, e8390_base + EN0_TXCR); if (must_resend) outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD); } /* * Collect the stats. This is called unlocked and from several contexts. */ static struct net_device_stats *get_stats(struct net_device *dev) { long ioaddr = dev->base_addr; struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); unsigned long flags; /* If the card is stopped, just return the present stats. */ if (!netif_running(dev)) return &ei_local->stat; spin_lock_irqsave(&ei_local->page_lock,flags); /* Read the counter registers, assuming we are in page 0. */ ei_local->stat.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0); ei_local->stat.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1); ei_local->stat.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2); spin_unlock_irqrestore(&ei_local->page_lock, flags); return &ei_local->stat; } /* * Form the 64 bit 8390 multicast table from the linked list of addresses * associated with this dev structure. */ static inline void make_mc_bits(u8 *bits, struct net_device *dev) { struct dev_mc_list *dmi; u32 crc; for (dmi=dev->mc_list; dmi; dmi=dmi->next) { crc = ether_crc(ETH_ALEN, dmi->dmi_addr); /* * The 8390 uses the 6 most significant bits of the * CRC to index the multicast table. */ bits[crc>>29] |= (1<<((crc>>26)&7)); } } /** * do_set_multicast_list - set/clear multicast filter * @dev: net device for which multicast filter is adjusted * * Set or clear the multicast filter for this adaptor. * Must be called with lock held. 
 */
static void do_set_multicast_list(struct net_device *dev)
{
	long e8390_base = dev->base_addr;
	int i;
	struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);

	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
		/* Normal case: hash only the configured multicast list. */
		memset(ei_local->mcfilter, 0, 8);
		if (dev->mc_list)
			make_mc_bits(ei_local->mcfilter, dev);
	} else {
		/* set to accept-all */
		memset(ei_local->mcfilter, 0xFF, 8);
	}

	/* Load the 64-bit hash filter into the page-1 multicast registers;
	   must switch back to page 0 before touching anything else. */
	outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
	for(i = 0; i < 8; i++)
	{
		outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
	}
	outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);

	/* RX config: 0x58 = promiscuous, 0x48 = multicast accept,
	   0x40 = own address + broadcast only. */
	if(dev->flags&IFF_PROMISC)
		outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR);
	else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
		outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
	else
		outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);

	outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
}

/*
 *	Called without lock held. This is invoked from user context and may
 *	be parallel to just about everything else. Its also fairly quick and
 *	not called too often. Must protect against both bh and irq users
 */
static void set_multicast_list(struct net_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_lock(dev), flags);
	do_set_multicast_list(dev);
	spin_unlock_irqrestore(&dev_lock(dev), flags);
}

/**
 * axdev_setup - init rest of 8390 device struct
 * @dev: network device structure to init
 *
 * Initialize the rest of the 8390 device structure.  Do NOT __init
 * this, as it is used by 8390 based modular drivers too.
*/ static void axdev_setup(struct net_device *dev) { struct ei_device *ei_local; if (ei_debug > 1) printk(version_8390); SET_MODULE_OWNER(dev); ei_local = (struct ei_device *)netdev_priv(dev); spin_lock_init(&ei_local->page_lock); dev->hard_start_xmit = &ei_start_xmit; dev->get_stats = get_stats; dev->set_multicast_list = &set_multicast_list; ether_setup(dev); } /* This page of functions should be 8390 generic */ /* Follow National Semi's recommendations for initializing the "NIC". */ /** * AX88190_init - initialize 8390 hardware * @dev: network device to initialize * @startp: boolean. non-zero value to initiate chip processing * * Must be called with lock held. */ static void AX88190_init(struct net_device *dev, int startp) { axnet_dev_t *info = PRIV(dev); long e8390_base = dev->base_addr; struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); int i; int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48; if(sizeof(struct e8390_pkt_hdr)!=4) panic("8390.c: header struct mispacked\n"); /* Follow National Semi's recommendations for initing the DP83902. */ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */ outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */ /* Clear the remote byte count registers. */ outb_p(0x00, e8390_base + EN0_RCNTLO); outb_p(0x00, e8390_base + EN0_RCNTHI); /* Set to monitor and loopback mode -- this is vital!. */ outb_p(E8390_RXOFF|0x40, e8390_base + EN0_RXCR); /* 0x60 */ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */ /* Set the transmit page and receive ring. */ outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR); ei_local->tx1 = ei_local->tx2 = 0; outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG); outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/ ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */ outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG); /* Clear the pending interrupts and mask. 
 */
	outb_p(0xFF, e8390_base + EN0_ISR);	/* ack everything pending */
	outb_p(0x00, e8390_base + EN0_IMR);	/* mask all while configuring */

	/* Copy the station address into the DS8390 registers,
	   read each byte back to verify the write took. */
	outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
	for(i = 0; i < 6; i++)
	{
		outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
		if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
			printk(KERN_ERR "Hw. address read/write mismap %d\n",i);
	}

	outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
	outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	netif_start_queue(dev);
	/* Both Tx slots empty, transmitter idle. */
	ei_local->tx1 = ei_local->tx2 = 0;
	ei_local->txing = 0;

	if (startp)
	{
		/* Bring the NIC out of stop: unmask IRQs, start, then
		   enable Tx/Rx in that order. */
		outb_p(0xff, e8390_base + EN0_ISR);
		outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
		outb_p(E8390_TXCONFIG | info->duplex_flag,
		       e8390_base + EN0_TXCR); /* xmit on. */
		/* 3c503 TechMan says rxconfig only after the NIC is started. */
		outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); /* rx on,  */
		do_set_multicast_list(dev);	/* (re)load the mcast table */
	}
}

/* Trigger a transmit start, assuming the length is valid.
   Always called with the page lock held */
static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
								int start_page)
{
	long e8390_base = dev->base_addr;
	struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);

	/* Refuse to touch the Tx registers while a send is in flight. */
	if (inb_p(e8390_base) & E8390_TRANS)
	{
		printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
			dev->name);
		return;
	}
	/* Program byte count and start page, then kick the transmitter. */
	outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
	outb_p(length >> 8, e8390_base + EN0_TCNTHI);
	outb_p(start_page, e8390_base + EN0_TPSR);
	outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
}
gpl-2.0
dgarnier/barebox
lib/show_progress.c
3
1423
/*
 * show_progress.c - simple progress bar functions
 *
 * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <common.h>
#include <progress.h>
#include <asm-generic/div64.h>
#include <linux/stringify.h>

#define HASHES_PER_LINE	65

/* Number of '#' marks emitted so far in the current bar. */
static int printed;
/* Value passed to init_progression_bar(); 0 means "total unknown". */
static int progress_max;
/* Rotating index into the spinner character set. */
static int spin;

/*
 * Advance the progress display.
 *
 * @now: current position. A negative value means the total is unknown
 *       and only the in-place spinner is animated. Otherwise 'now' is
 *       scaled against progress_max (when set) and enough '#' marks are
 *       printed to catch the bar up to that position.
 */
void show_progress(int now)
{
	static const char spinner[] = "\\|/-";

	if (now < 0) {
		/* Unknown total: rotate a one-character spinner in place. */
		printf("%c\b", spinner[spin++ % (sizeof(spinner) - 1)]);
		return;
	}

	if (progress_max) {
		/* Scale 'now' from [0, progress_max] to hash-mark units. */
		uint64_t scaled = (int64_t)now * HASHES_PER_LINE;

		do_div(scaled, progress_max);
		now = scaled;
	}

	/* Emit the marks needed to catch up, wrapping each full line. */
	for (; printed < now; printed++) {
		if (printed && !(printed % HASHES_PER_LINE))
			printf("\n\t");
		printf("#");
	}
}

/*
 * Reset the progress state and draw the empty bar frame.
 *
 * @max: expected final position; 0 selects spinner-only mode
 *       (no frame is drawn).
 */
void init_progression_bar(int max)
{
	printed = 0;
	spin = 0;
	progress_max = max;

	if (!progress_max) {
		printf("\t");
		return;
	}

	/* Draw "[  ...  ]" sized to HASHES_PER_LINE, then park the
	 * cursor just inside the opening bracket. */
	printf("\t[%"__stringify(HASHES_PER_LINE)"s]\r\t[", "");
}
gpl-2.0
ystk/debian-lesstif2
test/Xm/togglebg/test3.c
3
3536
/*
 * *LIBS: -lXm -lXt -lX11
 */

/*
 * LessTif ToggleButtonGadget test: builds a menu bar with one pulldown
 * containing a label, two toggle gadgets and a push gadget (all with
 * accelerator text), realizes the shell and checks its geometry against
 * an expected table.
 *
 * Modernized from pre-ANSI K&R function definitions to prototypes and
 * given <stdlib.h> for exit(); behavior is unchanged.
 */

#include <X11/Xatom.h>
#include <X11/Intrinsic.h>
#include <X11/Shell.h>

#include <Xm/Xm.h>
#include <Xm/DialogS.h>
#include <Xm/Form.h>
#include <stdio.h>
#include <stdlib.h>

#include <Xm/RowColumn.h>
#include <Xm/PushB.h>
#include <Xm/PushBG.h>
#include <Xm/CascadeB.h>
#include <Xm/Label.h>
#include <Xm/ToggleBG.h>

Widget appshell = (Widget) NULL;
Widget form = (Widget) NULL;

/*
 * Build the menu bar and its single "file" pulldown inside 'form'.
 * The pulldown holds a long label, two toggle button gadgets and a
 * push button gadget, each carrying accelerator text.
 */
void create_pulldown(Widget form)
{
	Arg al[1];
	int ac;
	Widget menubar;
	Widget file_cascade;
	Widget file_pulldown;
	Widget new_b, open_b;
	Widget save_cascade;
	Widget source_b;
	Widget children[3];

	ac = 0;
	menubar = XmCreateMenuBar(form, "menubar", al, ac);

	file_pulldown = XmCreatePulldownMenu(menubar, "file_pulldown", al, ac);
	ac = 0;

	XtManageChild(source_b = XmCreateLabel(file_pulldown,
					       "A Long Long Long Label", al, ac));

	new_b = XmCreateToggleButtonGadget(file_pulldown, "new_b", al, ac);
	XtVaSetValues(new_b,
		      XmNvisibleWhenOff, True,
		      XmNacceleratorText, XmStringCreateSimple("AccText"),
		      NULL);

	open_b = XmCreateToggleButtonGadget(file_pulldown, "open_b", al, ac);
	XtVaSetValues(open_b,
		      XmNvisibleWhenOff, True,
		      XmNacceleratorText, XmStringCreateSimple("AccText"),
		      NULL);

	save_cascade = XmCreatePushButtonGadget(file_pulldown, "push_b", al, ac);
	XtVaSetValues(save_cascade,
		      XmNacceleratorText, XmStringCreateSimple("AccText"),
		      NULL);

	ac = 0;
	children[ac++] = new_b;
	children[ac++] = open_b;
	children[ac++] = save_cascade;
	XtManageChildren(children, ac);
	ac = 0;

	/* Attach the pulldown to its cascade button in the menu bar. */
	XtSetArg(al[ac], XmNsubMenuId, file_pulldown); ac++;
	file_cascade = XmCreateCascadeButton(menubar, "file_cascade", al, ac);
	ac = 0;
	XtManageChild(file_cascade);

	XtManageChild(menubar);
}

/*
 * Create the application shell and the form that will host the menu bar.
 * The shell is allowed to resize itself and the form never auto-unmanages.
 */
void create_appshell(Display *display, char *app_name,
		     int app_argc, char **app_argv)
{
	Arg al[64];		/* Arg List */
	int ac = 0;		/* Arg Count */

	XtSetArg(al[ac], XmNallowShellResize, TRUE); ac++;
	XtSetArg(al[ac], XmNtitle, "Pulldown Menu"); ac++;
	XtSetArg(al[ac], XmNargc, app_argc); ac++;
	XtSetArg(al[ac], XmNargv, app_argv); ac++;
	appshell = XtAppCreateShell(app_name, "XApplication",
				    applicationShellWidgetClass,
				    display, al, ac);
	ac = 0;

	XtSetArg(al[ac], XmNautoUnmanage, FALSE); ac++;
	form = XmCreateForm(appshell, "form", al, ac);
	ac = 0;
	XtManageChild(form);
}

XtAppContext app_context;
Display *display;		/* Display */

int main(int argc, char **argv)
{
	XtSetLanguageProc((XtAppContext) NULL, (XtLanguageProc) NULL,
			  (XtPointer) NULL);
	XtToolkitInitialize();
	app_context = XtCreateApplicationContext();
	display = XtOpenDisplay(app_context, NULL, argv[0], "XApplication",
				NULL, 0, &argc, argv);
	if (!display) {
		printf("%s: can't open display, exiting...\n", argv[0]);
		exit(-1);
	}

	create_appshell(display, argv[0], argc, argv);
	create_pulldown(form);

	XtRealizeWidget(appshell);

	{
		/* Expected shell/form/menubar geometry after realize. */
		static XtWidgetGeometry Expected[] = {
			CWWidth | CWHeight,               0,  0,  98, 31, 0, 0, 0,
			CWWidth | CWHeight | CWX | CWY,   0,  0,  98, 31, 0, 0, 0,
			CWWidth | CWHeight | CWX | CWY,   5,  5,  88, 21, 0, 0, 0,
		};

		PrintDetails(appshell, Expected);
	}

	LessTifTestMainLoop(appshell);
	/* XtAppMainLoop(app_context); */
	exit(0);
}
gpl-2.0
12thmantec/novena-linux
drivers/staging/rtl8187se/ieee80211/dot11d.c
259
4897
#include "dot11d.h"

/*
 * Initialize 802.11d state for a device: dot11d disabled, no country IE
 * learned, channel map cleared and all per-channel TX power limits set
 * to the 0xFF "unspecified" marker.
 */
void Dot11d_Init(struct ieee80211_device *ieee)
{
	PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);

	pDot11dInfo->bEnabled = 0;

	pDot11dInfo->State = DOT11D_STATE_NONE;
	pDot11dInfo->CountryIeLen = 0;
	memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
	memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
	RESET_CIE_WATCHDOG(ieee);

	netdev_info(ieee->dev, "Dot11d_Init()\n");
}

/* Reset to the state as we are just entering a regulatory domain. */
void Dot11d_Reset(struct ieee80211_device *ieee)
{
	u32 i;
	PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);

	/* Clear old channel map */
	memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
	memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
	/* Set new channel map: 1-11 always usable, 12-14 passive-marked (2). */
	for (i = 1; i <= 11; i++)
		(pDot11dInfo->channel_map)[i] = 1;
	for (i = 12; i <= 14; i++)
		(pDot11dInfo->channel_map)[i] = 2;

	pDot11dInfo->State = DOT11D_STATE_NONE;
	pDot11dInfo->CountryIeLen = 0;
	RESET_CIE_WATCHDOG(ieee);
}

/*
 * Description:
 *	Update country IE from Beacon or Probe Response and configure PHY for
 *	operation in the regulatory domain.
 *
 * TODO:
 *	Configure Tx power.
 *
 * Assumption:
 *	1. IS_DOT11D_ENABLE() is TRUE.
 *	2. Input IE is an valid one.
 */
void Dot11d_UpdateCountryIe(struct ieee80211_device *dev, u8 *pTaddr,
	u16 CoutryIeLen, u8 *pCoutryIe)
{
	PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
	u8 i, j, NumTriples, MaxChnlNum;
	u8 index, MaxTxPowerInDbm;
	PCHNL_TXPOWER_TRIPLE pTriple;

	/* IE body must be the 3-byte country string plus whole triples. */
	if ((CoutryIeLen - 3)%3 != 0) {
		netdev_info(dev->dev,
			    "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
		Dot11d_Reset(dev);
		return;
	}

	memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
	memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
	MaxChnlNum = 0;
	NumTriples = (CoutryIeLen - 3) / 3; /* skip 3-byte country string. */
	pTriple = (PCHNL_TXPOWER_TRIPLE)(pCoutryIe + 3);
	for (i = 0; i < NumTriples; i++) {
		if (MaxChnlNum >= pTriple->FirstChnl) {
			/*
			 * It is not in a monotonically increasing order,
			 * so stop processing.
			 */
			netdev_info(dev->dev,
				    "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
			Dot11d_Reset(dev);
			return;
		}
		if (MAX_CHANNEL_NUMBER <
		    (pTriple->FirstChnl + pTriple->NumChnls)) {
			/*
			 * It is not a valid set of channel id,
			 * so stop processing
			 */
			netdev_info(dev->dev,
				    "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........3\n");
			Dot11d_Reset(dev);
			return;
		}

		for (j = 0; j < pTriple->NumChnls; j++) {
			index = pTriple->FirstChnl + j;
			pDot11dInfo->channel_map[index] = 1;
			MaxTxPowerInDbm = pTriple->MaxTxPowerInDbm;
			pDot11dInfo->MaxTxPwrDbmList[index] = MaxTxPowerInDbm;
			MaxChnlNum = pTriple->FirstChnl + j;
		}

		/* Advance by the 3 on-air octets of a triple, not by
		 * sizeof(*pTriple) — presumably the struct may be padded;
		 * TODO confirm PCHNL_TXPOWER_TRIPLE packing. */
		pTriple = (PCHNL_TXPOWER_TRIPLE)((u8 *)pTriple + 3);
	}
#if 1
	netdev_info(dev->dev, "Channel List:");
	for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
		if (pDot11dInfo->channel_map[i] > 0)
			netdev_info(dev->dev, " %d", i);
	netdev_info(dev->dev, "\n");
#endif

	UPDATE_CIE_SRC(dev, pTaddr);

	pDot11dInfo->CountryIeLen = CoutryIeLen;
	memcpy(pDot11dInfo->CountryIeBuf, pCoutryIe, CoutryIeLen);
	pDot11dInfo->State = DOT11D_STATE_LEARNED;
}

/*
 * Return the regulatory TX power limit (dBm) for 'Channel', or 255 if
 * the channel is out of range or not present in the learned channel map.
 */
u8 DOT11D_GetMaxTxPwrInDbm(struct ieee80211_device *dev, u8 Channel)
{
	PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
	u8 MaxTxPwrInDbm = 255;

	if (MAX_CHANNEL_NUMBER < Channel) {
		netdev_info(dev->dev, "DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
		return MaxTxPwrInDbm;
	}
	if (pDot11dInfo->channel_map[Channel])
		MaxTxPwrInDbm = pDot11dInfo->MaxTxPwrDbmList[Channel];

	return MaxTxPwrInDbm;
}

/*
 * Advance the dot11d state machine at scan completion: a learned country
 * IE becomes final (DONE); a DONE state is dropped back to scratch when
 * the country-IE watchdog has expired.
 */
void DOT11D_ScanComplete(struct ieee80211_device *dev)
{
	PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);

	switch (pDot11dInfo->State) {
	case DOT11D_STATE_LEARNED:
		pDot11dInfo->State = DOT11D_STATE_DONE;
		break;
	case DOT11D_STATE_DONE:
		if (GET_CIE_WATCHDOG(dev) == 0) {
			/* Reset country IE if previous one is gone. */
			Dot11d_Reset(dev);
		}
		break;
	case DOT11D_STATE_NONE:
		break;
	}
}

/* Return 1 when 'channel' is in range and allowed by the channel map. */
int IsLegalChannel(struct ieee80211_device *dev, u8 channel)
{
	PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);

	if (MAX_CHANNEL_NUMBER < channel) {
		netdev_info(dev->dev, "IsLegalChannel(): Invalid Channel\n");
		return 0;
	}
	if (pDot11dInfo->channel_map[channel] > 0)
		return 1;
	return 0;
}

/*
 * Return 'channel' when it is legal, otherwise the first legal channel
 * found in the map (0 when none is available).
 */
int ToLegalChannel(struct ieee80211_device *dev, u8 channel)
{
	PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
	u8 default_chn = 0;
	u32 i = 0;

	for (i = 1; i <= MAX_CHANNEL_NUMBER; i++) {
		if (pDot11dInfo->channel_map[i] > 0) {
			default_chn = i;
			break;
		}
	}

	if (MAX_CHANNEL_NUMBER < channel) {
		/* Was mislabeled "IsLegalChannel()" — report this function. */
		netdev_info(dev->dev, "ToLegalChannel(): Invalid Channel\n");
		return default_chn;
	}

	if (pDot11dInfo->channel_map[channel] > 0)
		return channel;

	return default_chn;
}
gpl-2.0
drhonk/SGH-T959V-GB
arch/arm/mach-w90x900/nuc950.c
771
1193
/*
 * linux/arch/arm/mach-w90x900/nuc950.c
 *
 * Based on linux/arch/arm/plat-s3c24xx/s3c244x.c by Ben Dooks
 *
 * Copyright (c) 2008 Nuvoton technology corporation.
 *
 * Wan ZongShun <mcuos.com@gmail.com>
 *
 * NUC950 cpu support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation;version 2 of the License.
 *
 */

#include <linux/platform_device.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>

#include "cpu.h"

/* NUC950-specific platform devices: keypad and flash media interfaces,
 * plus the LCD controller when framebuffer support is configured in. */
static struct platform_device *nuc950_dev[] __initdata = {
	&nuc900_device_kpi,
	&nuc900_device_fmi,
#ifdef CONFIG_FB_NUC900
	&nuc900_device_lcd,
#endif
};

/* NUC950 EVB static I/O map — empty: nothing beyond the common nuc900
 * mappings installed by nuc900_map_io(). */
static struct map_desc nuc950evb_iodesc[] __initdata = {
};

/* Init NUC950 evb io: install the (empty) board map via the shared
 * nuc900 helper, which also sets up the common SoC mappings. */
void __init nuc950_map_io(void)
{
	nuc900_map_io(nuc950evb_iodesc, ARRAY_SIZE(nuc950evb_iodesc));
}

/* Init NUC950 clocks — delegates entirely to the common nuc900 code. */
void __init nuc950_init_clocks(void)
{
	nuc900_init_clocks();
}

/* Init NUC950 board info: register the platform devices listed in
 * nuc950_dev[]. */
void __init nuc950_board_init(void)
{
	nuc900_board_init(nuc950_dev, ARRAY_SIZE(nuc950_dev));
}
gpl-2.0
virtuous/kernel-7x30-gingerbread-v3
net/mac80211/mesh_plink.c
771
19955
/* * Copyright (c) 2008, 2009 open80211s Ltd. * Author: Luis Carlos Cobo <luisca@cozybit.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/random.h> #include "ieee80211_i.h" #include "rate.h" #include "mesh.h" #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG #define mpl_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args) #else #define mpl_dbg(fmt, args...) do { (void)(0); } while (0) #endif #define PLINK_GET_LLID(p) (p + 4) #define PLINK_GET_PLID(p) (p + 6) #define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \ jiffies + HZ * t / 1000)) /* Peer link cancel reasons, all subject to ANA approval */ #define MESH_LINK_CANCELLED 2 #define MESH_MAX_NEIGHBORS 3 #define MESH_CAPABILITY_POLICY_VIOLATION 4 #define MESH_CLOSE_RCVD 5 #define MESH_MAX_RETRIES 6 #define MESH_CONFIRM_TIMEOUT 7 #define MESH_SECURITY_ROLE_NEGOTIATION_DIFFERS 8 #define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9 #define MESH_SECURITY_FAILED_VERIFICATION 10 #define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries) #define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout) #define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout) #define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout) #define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks) enum plink_frame_type { PLINK_OPEN = 0, PLINK_CONFIRM, PLINK_CLOSE }; enum plink_event { PLINK_UNDEFINED, OPN_ACPT, OPN_RJCT, OPN_IGNR, CNF_ACPT, CNF_RJCT, CNF_IGNR, CLS_ACPT, CLS_IGNR }; static inline void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) { atomic_inc(&sdata->u.mesh.mshstats.estab_plinks); mesh_accept_plinks_update(sdata); } static inline void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) { 
atomic_dec(&sdata->u.mesh.mshstats.estab_plinks); mesh_accept_plinks_update(sdata); } /** * mesh_plink_fsm_restart - restart a mesh peer link finite state machine * * @sta: mesh peer link to restart * * Locking: this function must be called holding sta->lock */ static inline void mesh_plink_fsm_restart(struct sta_info *sta) { sta->plink_state = PLINK_LISTEN; sta->llid = sta->plid = sta->reason = 0; sta->plink_retries = 0; } /* * NOTE: This is just an alias for sta_info_alloc(), see notes * on it in the lifecycle management section! */ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr, u32 rates) { struct ieee80211_local *local = sdata->local; struct sta_info *sta; if (local->num_sta >= MESH_MAX_PLINKS) return NULL; sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL); if (!sta) return NULL; sta->flags = WLAN_STA_AUTHORIZED; sta->sta.supp_rates[local->hw.conf.channel->band] = rates; rate_control_rate_init(sta); return sta; } /** * __mesh_plink_deactivate - deactivate mesh peer link * * @sta: mesh peer link to deactivate * * All mesh paths with this peer as next hop will be flushed * * Locking: the caller must hold sta->lock */ static bool __mesh_plink_deactivate(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; bool deactivated = false; if (sta->plink_state == PLINK_ESTAB) { mesh_plink_dec_estab_count(sdata); deactivated = true; } sta->plink_state = PLINK_BLOCKED; mesh_path_flush_by_nexthop(sta); return deactivated; } /** * mesh_plink_deactivate - deactivate mesh peer link * * @sta: mesh peer link to deactivate * * All mesh paths with this peer as next hop will be flushed */ void mesh_plink_deactivate(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; bool deactivated; spin_lock_bh(&sta->lock); deactivated = __mesh_plink_deactivate(sta); spin_unlock_bh(&sta->lock); if (deactivated) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); } static int mesh_plink_frame_tx(struct 
ieee80211_sub_if_data *sdata, enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid, __le16 reason) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); struct ieee80211_mgmt *mgmt; bool include_plid = false; static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A }; u8 *pos; int ie_len; if (!skb) return -1; skb_reserve(skb, local->hw.extra_tx_headroom); /* 25 is the size of the common mgmt part (24) plus the size of the * common action part (1) */ mgmt = (struct ieee80211_mgmt *) skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action)); memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action)); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); /* BSSID is left zeroed, wildcard value */ mgmt->u.action.category = WLAN_CATEGORY_MESH_PLINK; mgmt->u.action.u.plink_action.action_code = action; if (action == PLINK_CLOSE) mgmt->u.action.u.plink_action.aux = reason; else { mgmt->u.action.u.plink_action.aux = cpu_to_le16(0x0); if (action == PLINK_CONFIRM) { pos = skb_put(skb, 4); /* two-byte status code followed by two-byte AID */ memset(pos, 0, 2); memcpy(pos + 2, &plid, 2); } mesh_mgmt_ies_add(skb, sdata); } /* Add Peer Link Management element */ switch (action) { case PLINK_OPEN: ie_len = 6; break; case PLINK_CONFIRM: ie_len = 8; include_plid = true; break; case PLINK_CLOSE: default: if (!plid) ie_len = 8; else { ie_len = 10; include_plid = true; } break; } pos = skb_put(skb, 2 + ie_len); *pos++ = WLAN_EID_PEER_LINK; *pos++ = ie_len; memcpy(pos, meshpeeringproto, sizeof(meshpeeringproto)); pos += 4; memcpy(pos, &llid, 2); if (include_plid) { pos += 2; memcpy(pos, &plid, 2); } if (action == PLINK_CLOSE) { pos += 2; memcpy(pos, &reason, 2); } ieee80211_tx_skb(sdata, skb); return 0; } void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data *sdata, bool 
peer_accepting_plinks) { struct ieee80211_local *local = sdata->local; struct sta_info *sta; rcu_read_lock(); sta = sta_info_get(sdata, hw_addr); if (!sta) { rcu_read_unlock(); sta = mesh_plink_alloc(sdata, hw_addr, rates); if (!sta) return; if (sta_info_insert_rcu(sta)) { rcu_read_unlock(); return; } } sta->last_rx = jiffies; sta->sta.supp_rates[local->hw.conf.channel->band] = rates; if (peer_accepting_plinks && sta->plink_state == PLINK_LISTEN && sdata->u.mesh.accepting_plinks && sdata->u.mesh.mshcfg.auto_open_plinks) mesh_plink_open(sta); rcu_read_unlock(); } static void mesh_plink_timer(unsigned long data) { struct sta_info *sta; __le16 llid, plid, reason; struct ieee80211_sub_if_data *sdata; /* * This STA is valid because sta_info_destroy() will * del_timer_sync() this timer after having made sure * it cannot be readded (by deleting the plink.) */ sta = (struct sta_info *) data; if (sta->sdata->local->quiescing) { sta->plink_timer_was_running = true; return; } spin_lock_bh(&sta->lock); if (sta->ignore_plink_timer) { sta->ignore_plink_timer = false; spin_unlock_bh(&sta->lock); return; } mpl_dbg("Mesh plink timer for %pM fired on state %d\n", sta->sta.addr, sta->plink_state); reason = 0; llid = sta->llid; plid = sta->plid; sdata = sta->sdata; switch (sta->plink_state) { case PLINK_OPN_RCVD: case PLINK_OPN_SNT: /* retry timer */ if (sta->plink_retries < dot11MeshMaxRetries(sdata)) { u32 rand; mpl_dbg("Mesh plink for %pM (retry, timeout): %d %d\n", sta->sta.addr, sta->plink_retries, sta->plink_timeout); get_random_bytes(&rand, sizeof(u32)); sta->plink_timeout = sta->plink_timeout + rand % sta->plink_timeout; ++sta->plink_retries; mod_plink_timer(sta, sta->plink_timeout); spin_unlock_bh(&sta->lock); mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid, 0, 0); break; } reason = cpu_to_le16(MESH_MAX_RETRIES); /* fall through on else */ case PLINK_CNF_RCVD: /* confirm timer */ if (!reason) reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT); sta->plink_state = 
PLINK_HOLDING; mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); spin_unlock_bh(&sta->lock); mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid, reason); break; case PLINK_HOLDING: /* holding timer */ del_timer(&sta->plink_timer); mesh_plink_fsm_restart(sta); spin_unlock_bh(&sta->lock); break; default: spin_unlock_bh(&sta->lock); break; } } #ifdef CONFIG_PM void mesh_plink_quiesce(struct sta_info *sta) { if (del_timer_sync(&sta->plink_timer)) sta->plink_timer_was_running = true; } void mesh_plink_restart(struct sta_info *sta) { if (sta->plink_timer_was_running) { add_timer(&sta->plink_timer); sta->plink_timer_was_running = false; } } #endif static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout) { sta->plink_timer.expires = jiffies + (HZ * timeout / 1000); sta->plink_timer.data = (unsigned long) sta; sta->plink_timer.function = mesh_plink_timer; sta->plink_timeout = timeout; add_timer(&sta->plink_timer); } int mesh_plink_open(struct sta_info *sta) { __le16 llid; struct ieee80211_sub_if_data *sdata = sta->sdata; spin_lock_bh(&sta->lock); get_random_bytes(&llid, 2); sta->llid = llid; if (sta->plink_state != PLINK_LISTEN) { spin_unlock_bh(&sta->lock); return -EBUSY; } sta->plink_state = PLINK_OPN_SNT; mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); spin_unlock_bh(&sta->lock); mpl_dbg("Mesh plink: starting establishment with %pM\n", sta->sta.addr); return mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid, 0, 0); } void mesh_plink_block(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; bool deactivated; spin_lock_bh(&sta->lock); deactivated = __mesh_plink_deactivate(sta); sta->plink_state = PLINK_BLOCKED; spin_unlock_bh(&sta->lock); if (deactivated) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); } void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { struct ieee80211_local *local = 
sdata->local; struct ieee802_11_elems elems; struct sta_info *sta; enum plink_event event; enum plink_frame_type ftype; size_t baselen; bool deactivated; u8 ie_len; u8 *baseaddr; __le16 plid, llid, reason; #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG static const char *mplstates[] = { [PLINK_LISTEN] = "LISTEN", [PLINK_OPN_SNT] = "OPN-SNT", [PLINK_OPN_RCVD] = "OPN-RCVD", [PLINK_CNF_RCVD] = "CNF_RCVD", [PLINK_ESTAB] = "ESTAB", [PLINK_HOLDING] = "HOLDING", [PLINK_BLOCKED] = "BLOCKED" }; #endif /* need action_code, aux */ if (len < IEEE80211_MIN_ACTION_SIZE + 3) return; if (is_multicast_ether_addr(mgmt->da)) { mpl_dbg("Mesh plink: ignore frame from multicast address"); return; } baseaddr = mgmt->u.action.u.plink_action.variable; baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt; if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) { baseaddr += 4; baselen += 4; } ieee802_11_parse_elems(baseaddr, len - baselen, &elems); if (!elems.peer_link) { mpl_dbg("Mesh plink: missing necessary peer link ie\n"); return; } ftype = mgmt->u.action.u.plink_action.action_code; ie_len = elems.peer_link_len; if ((ftype == PLINK_OPEN && ie_len != 6) || (ftype == PLINK_CONFIRM && ie_len != 8) || (ftype == PLINK_CLOSE && ie_len != 8 && ie_len != 10)) { mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n", ftype, ie_len); return; } if (ftype != PLINK_CLOSE && (!elems.mesh_id || !elems.mesh_config)) { mpl_dbg("Mesh plink: missing necessary ie\n"); return; } /* Note the lines below are correct, the llid in the frame is the plid * from the point of view of this host. 
*/ memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2); if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 10)) memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2); rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); if (!sta && ftype != PLINK_OPEN) { mpl_dbg("Mesh plink: cls or cnf from unknown peer\n"); rcu_read_unlock(); return; } if (sta && sta->plink_state == PLINK_BLOCKED) { rcu_read_unlock(); return; } /* Now we will figure out the appropriate event... */ event = PLINK_UNDEFINED; if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) { switch (ftype) { case PLINK_OPEN: event = OPN_RJCT; break; case PLINK_CONFIRM: event = CNF_RJCT; break; case PLINK_CLOSE: /* avoid warning */ break; } spin_lock_bh(&sta->lock); } else if (!sta) { /* ftype == PLINK_OPEN */ u32 rates; rcu_read_unlock(); if (!mesh_plink_free_count(sdata)) { mpl_dbg("Mesh plink error: no more free plinks\n"); return; } rates = ieee80211_sta_get_rates(local, &elems, rx_status->band); sta = mesh_plink_alloc(sdata, mgmt->sa, rates); if (!sta) { mpl_dbg("Mesh plink error: plink table full\n"); return; } if (sta_info_insert_rcu(sta)) { rcu_read_unlock(); return; } event = OPN_ACPT; spin_lock_bh(&sta->lock); } else { spin_lock_bh(&sta->lock); switch (ftype) { case PLINK_OPEN: if (!mesh_plink_free_count(sdata) || (sta->plid && sta->plid != plid)) event = OPN_IGNR; else event = OPN_ACPT; break; case PLINK_CONFIRM: if (!mesh_plink_free_count(sdata) || (sta->llid != llid || sta->plid != plid)) event = CNF_IGNR; else event = CNF_ACPT; break; case PLINK_CLOSE: if (sta->plink_state == PLINK_ESTAB) /* Do not check for llid or plid. This does not * follow the standard but since multiple plinks * per sta are not supported, it is necessary in * order to avoid a livelock when MP A sees an * establish peer link to MP B but MP B does not * see it. This can be caused by a timeout in * B's peer link establishment or B beign * restarted. 
*/ event = CLS_ACPT; else if (sta->plid != plid) event = CLS_IGNR; else if (ie_len == 7 && sta->llid != llid) event = CLS_IGNR; else event = CLS_ACPT; break; default: mpl_dbg("Mesh plink: unknown frame subtype\n"); spin_unlock_bh(&sta->lock); rcu_read_unlock(); return; } } mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n", mgmt->sa, mplstates[sta->plink_state], le16_to_cpu(sta->llid), le16_to_cpu(sta->plid), event); reason = 0; switch (sta->plink_state) { /* spin_unlock as soon as state is updated at each case */ case PLINK_LISTEN: switch (event) { case CLS_ACPT: mesh_plink_fsm_restart(sta); spin_unlock_bh(&sta->lock); break; case OPN_ACPT: sta->plink_state = PLINK_OPN_RCVD; sta->plid = plid; get_random_bytes(&llid, 2); sta->llid = llid; mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); spin_unlock_bh(&sta->lock); mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid, 0, 0); mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, plid, 0); break; default: spin_unlock_bh(&sta->lock); break; } break; case PLINK_OPN_SNT: switch (event) { case OPN_RJCT: case CNF_RJCT: reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); case CLS_ACPT: if (!reason) reason = cpu_to_le16(MESH_CLOSE_RCVD); sta->reason = reason; sta->plink_state = PLINK_HOLDING; if (!mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata))) sta->ignore_plink_timer = true; llid = sta->llid; spin_unlock_bh(&sta->lock); mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid, reason); break; case OPN_ACPT: /* retry timer is left untouched */ sta->plink_state = PLINK_OPN_RCVD; sta->plid = plid; llid = sta->llid; spin_unlock_bh(&sta->lock); mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, plid, 0); break; case CNF_ACPT: sta->plink_state = PLINK_CNF_RCVD; if (!mod_plink_timer(sta, dot11MeshConfirmTimeout(sdata))) sta->ignore_plink_timer = true; spin_unlock_bh(&sta->lock); break; default: spin_unlock_bh(&sta->lock); break; } break; case 
PLINK_OPN_RCVD: switch (event) { case OPN_RJCT: case CNF_RJCT: reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); case CLS_ACPT: if (!reason) reason = cpu_to_le16(MESH_CLOSE_RCVD); sta->reason = reason; sta->plink_state = PLINK_HOLDING; if (!mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata))) sta->ignore_plink_timer = true; llid = sta->llid; spin_unlock_bh(&sta->lock); mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid, reason); break; case OPN_ACPT: llid = sta->llid; spin_unlock_bh(&sta->lock); mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, plid, 0); break; case CNF_ACPT: del_timer(&sta->plink_timer); sta->plink_state = PLINK_ESTAB; spin_unlock_bh(&sta->lock); mesh_plink_inc_estab_count(sdata); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); mpl_dbg("Mesh plink with %pM ESTABLISHED\n", sta->sta.addr); break; default: spin_unlock_bh(&sta->lock); break; } break; case PLINK_CNF_RCVD: switch (event) { case OPN_RJCT: case CNF_RJCT: reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); case CLS_ACPT: if (!reason) reason = cpu_to_le16(MESH_CLOSE_RCVD); sta->reason = reason; sta->plink_state = PLINK_HOLDING; if (!mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata))) sta->ignore_plink_timer = true; llid = sta->llid; spin_unlock_bh(&sta->lock); mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid, reason); break; case OPN_ACPT: del_timer(&sta->plink_timer); sta->plink_state = PLINK_ESTAB; spin_unlock_bh(&sta->lock); mesh_plink_inc_estab_count(sdata); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); mpl_dbg("Mesh plink with %pM ESTABLISHED\n", sta->sta.addr); mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, plid, 0); break; default: spin_unlock_bh(&sta->lock); break; } break; case PLINK_ESTAB: switch (event) { case CLS_ACPT: reason = cpu_to_le16(MESH_CLOSE_RCVD); sta->reason = reason; deactivated = __mesh_plink_deactivate(sta); sta->plink_state = PLINK_HOLDING; llid = sta->llid; 
mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); spin_unlock_bh(&sta->lock); if (deactivated) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid, reason); break; case OPN_ACPT: llid = sta->llid; spin_unlock_bh(&sta->lock); mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, plid, 0); break; default: spin_unlock_bh(&sta->lock); break; } break; case PLINK_HOLDING: switch (event) { case CLS_ACPT: if (del_timer(&sta->plink_timer)) sta->ignore_plink_timer = 1; mesh_plink_fsm_restart(sta); spin_unlock_bh(&sta->lock); break; case OPN_ACPT: case CNF_ACPT: case OPN_RJCT: case CNF_RJCT: llid = sta->llid; reason = sta->reason; spin_unlock_bh(&sta->lock); mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid, reason); break; default: spin_unlock_bh(&sta->lock); } break; default: /* should not get here, PLINK_BLOCKED is dealt with at the * beginning of the function */ spin_unlock_bh(&sta->lock); break; } rcu_read_unlock(); }
gpl-2.0
alexax66/CM12.1_kernel_serranodsxx
drivers/media/tdmb/fc8050/fc8050_tun.c
771
6857
/***************************************************************************** Copyright(c) 2009 FCI Inc. All Rights Reserved File name : fc8050_tun.c Description : fc8050 host interface This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA History : ---------------------------------------------------------------------- 2009/09/14 jason initial 2009/11/26 MPW Config1p0 2010/02/24 SLR Config1p0 2010/05/04 SLR Config1p2 *******************************************************************************/ #include "fci_types.h" #include "fci_oal.h" #include "fci_tun.h" #include "fci_hal.h" #include "fc8050_regs.h" #if !defined(CONFIG_TDMB_EBI) #define FEATURE_TUNER_BURST_MODE #endif static int fc8050_write(HANDLE hDevice, u8 addr, u8 data) { int res; u8 tmp; tmp = data; res = tuner_i2c_write(hDevice, addr, 1, &tmp, 1); return res; } static int fc8050_read(HANDLE hDevice, u8 addr, u8 *data) { int res; res = tuner_i2c_read(hDevice, addr, 1, data, 1); return res; } static int fc8050_set_filter(HANDLE hDevice) { int i; u8 cal_mon = 0; #if (FC8050_FREQ_XTAL == 19200) fc8050_write(hDevice, 0x02, 0x86); fc8050_write(hDevice, 0x3B, 0x52); fc8050_write(hDevice, 0x32, 0x09); #elif (FC8050_FREQ_XTAL == 16384) fc8050_write(hDevice, 0x02, 0x86); fc8050_write(hDevice, 0x3B, 0x45); fc8050_write(hDevice, 0x32, 0x09); #elif (FC8050_FREQ_XTAL == 24576) fc8050_write(hDevice, 0x02, 
0x86); fc8050_write(hDevice, 0x3B, 0x68); fc8050_write(hDevice, 0x32, 0x09); #elif (FC8050_FREQ_XTAL == 27000) fc8050_write(hDevice, 0x02, 0x86); fc8050_write(hDevice, 0x3B, 0x71); fc8050_write(hDevice, 0x32, 0x09); #elif (FC8050_FREQ_XTAL == 27120) fc8050_write(hDevice, 0x02, 0x86); fc8050_write(hDevice, 0x3B, 0x74); fc8050_write(hDevice, 0x32, 0x09); #elif (FC8050_FREQ_XTAL == 38400) fc8050_write(hDevice, 0x02, 0x86); fc8050_write(hDevice, 0x3B, 0xA1); fc8050_write(hDevice, 0x32, 0x09); #else return BBM_NOK; #endif for (i = 0; i < 10; i++) { ms_wait(5); fc8050_read(hDevice, 0x33, &cal_mon); if ((cal_mon & 0xC0) == 0xC0) break; fc8050_write(hDevice, 0x32, 0x01); fc8050_write(hDevice, 0x32, 0x09); } fc8050_write(hDevice, 0x32, 0x01); return BBM_OK; } static int fc8050_lband_init(HANDLE hDevice) { print_log(hDevice, "fc8050_lband_init\n"); return BBM_NOK; } static int fc8050_band3_init(HANDLE hDevice) { print_log(hDevice, "fc8050_band3_init\n"); fc8050_write(hDevice, 0x00, 0x00); fc8050_write(hDevice, 0x00, 0x00); fc8050_write(hDevice, 0x02, 0x86); fc8050_write(hDevice, 0x05, 0xD8); fc8050_write(hDevice, 0x0A, 0x83); fc8050_write(hDevice, 0x16, 0x0d); fc8050_write(hDevice, 0x13, 0x88); fc8050_write(hDevice, 0x15, 0x00); fc8050_write(hDevice, 0x21, 0x73); fc8050_write(hDevice, 0x57, 0x40); fc8050_write(hDevice, 0x69, 0x8C); fc8050_write(hDevice, 0x51, 0x04); fc8050_write(hDevice, 0x53, 0x00); fc8050_write(hDevice, 0x54, 0x28); fc8050_write(hDevice, 0x45, 0x40); fc8050_write(hDevice, 0x46, 0x32); fc8050_write(hDevice, 0x48, 0x40); fc8050_write(hDevice, 0x49, 0x32); fc8050_write(hDevice, 0x7A, 0x88); fc8050_write(hDevice, 0x53, 0x01); fc8050_write(hDevice, 0x58, 0x34); fc8050_write(hDevice, 0x59, 0x2A); fc8050_write(hDevice, 0x5A, 0x1D); fc8050_write(hDevice, 0x5B, 0x14); fc8050_write(hDevice, 0x61, 0x64); fc8050_write(hDevice, 0x74, 0x3A); fc8050_write(hDevice, 0x75, 0x1E); fc8050_write(hDevice, 0x6A, 0x0C); fc8050_write(hDevice, 0x6C, 0x0C); fc8050_write(hDevice, 
0x6E, 0x0C); fc8050_write(hDevice, 0x70, 0x0C); fc8050_write(hDevice, 0x72, 0x0C); fc8050_write(hDevice, 0x7C, 0x0C); fc8050_write(hDevice, 0x4E, 0x26); fc8050_write(hDevice, 0x31, 0x13); fc8050_write(hDevice, 0x34, 0x53); fc8050_write(hDevice, 0x43, 0x20); fc8050_write(hDevice, 0x2e, 0x70); fc8050_set_filter(hDevice); return BBM_OK; } int fc8050_tuner_init(HANDLE hDevice, u32 band) { int res = BBM_NOK; bbm_write(hDevice, BBM_QDD_COMMAN, 0x5C); bbm_write(hDevice, BBM_QDD_AGC_STEP, 0x03); bbm_write(hDevice, BBM_QDD_TUN_COMMA, 0x40); bbm_write(hDevice, BBM_QDD_TUN_GAIN, 0x24); bbm_write(hDevice, BBM_QDD_AGC_PERIOD, 0x14); bbm_write(hDevice, BBM_QDD_TRAGET_RMS, 0x60); bbm_write(hDevice, BBM_QDD_TUN_GAIN_LOC, 0x44); bbm_write(hDevice, BBM_QDD_GAIN_MAX, 0x38); if (band == LBAND_TYPE) res = fc8050_lband_init(hDevice); else if (band == BAND3_TYPE) res = fc8050_band3_init(hDevice); else return BBM_NOK; if (res != BBM_OK) return res; return res; } int fc8050_set_freq(HANDLE hDevice, u32 band, u32 f_lo) { u32 f_diff, f_diff_shifted, n_val, k_val; u32 f_vco = f_lo * 12; u32 r_val = (f_vco >= 25 * FC8050_FREQ_XTAL) ? 1 : 2; u32 f_comp = FC8050_FREQ_XTAL/r_val; u8 pre_shift_bits = 4; u8 data_0x0E; fc8050_write(hDevice, 0x0a, 0x85); fc8050_write(hDevice, 0x16, 0x0d); n_val = f_vco / f_comp; f_diff = f_vco - f_comp * n_val; f_diff_shifted = f_diff << (20 - pre_shift_bits); k_val = f_diff_shifted / ((f_comp) >> pre_shift_bits); k_val = (f_diff_shifted + (f_comp >> (pre_shift_bits+1))) / (f_comp >> pre_shift_bits); data_0x0E = ((r_val == 1) ? 
0x40 : 0x50) + (unsigned char)(k_val >> 16); fc8050_write(hDevice, 0x0E, data_0x0E); fc8050_write(hDevice, 0x0F, (unsigned char)(k_val >> 8)); fc8050_write(hDevice, 0x10, (unsigned char)(k_val)); fc8050_write(hDevice, 0x11, (unsigned char)(n_val)); fc8050_write(hDevice, 0x0a, 0x83); return BBM_OK; } int fc8050_get_rssi(HANDLE hDevice, int *rssi) { int res = BBM_OK; u8 LNA, RFVGA, PREAMP_PGA, CSF = 0x00; int K = -66; #ifdef FEATURE_TUNER_BURST_MODE u32 burst_data; res = tuner_i2c_read(hDevice, 0x76, 1, (unsigned char *)&burst_data, 4); LNA = burst_data&0x000000ff; RFVGA = (burst_data&0x0000ff00)>>8; CSF = (burst_data&0x00ff0000)>>16; PREAMP_PGA = (burst_data&0xff000000)>>24; #else res = fc8050_read(hDevice, 0x76, &LNA); res |= fc8050_read(hDevice, 0x77, &RFVGA); res |= fc8050_read(hDevice, 0x78, &CSF); res |= fc8050_read(hDevice, 0x79, &PREAMP_PGA); #endif if (res != BBM_OK) return res; *rssi = (((LNA & 0x07) * 5) + (RFVGA * 7 / 10) + ((PREAMP_PGA >> 7) * 6) + ((CSF & 0x7) * 6) - ((PREAMP_PGA & 0x7F) >> 1) + K); return BBM_OK; }
gpl-2.0
paladin74/linux
drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
771
9659
/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. 
* * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <net/mac80211.h> #include "fw-api.h" #include "mvm.h" /* Maps the driver specific channel width definition to the fw values */ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef) { switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: return PHY_VHT_CHANNEL_MODE20; case NL80211_CHAN_WIDTH_40: return PHY_VHT_CHANNEL_MODE40; case NL80211_CHAN_WIDTH_80: return PHY_VHT_CHANNEL_MODE80; case NL80211_CHAN_WIDTH_160: return PHY_VHT_CHANNEL_MODE160; default: WARN(1, "Invalid channel width=%u", chandef->width); return PHY_VHT_CHANNEL_MODE20; } } /* * Maps the driver specific control channel position (relative to the center * freq) definitions to the the fw values */ u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef) { switch (chandef->chan->center_freq - chandef->center_freq1) { case -70: return PHY_VHT_CTRL_POS_4_BELOW; case -50: return PHY_VHT_CTRL_POS_3_BELOW; case -30: return 
PHY_VHT_CTRL_POS_2_BELOW; case -10: return PHY_VHT_CTRL_POS_1_BELOW; case 10: return PHY_VHT_CTRL_POS_1_ABOVE; case 30: return PHY_VHT_CTRL_POS_2_ABOVE; case 50: return PHY_VHT_CTRL_POS_3_ABOVE; case 70: return PHY_VHT_CTRL_POS_4_ABOVE; default: WARN(1, "Invalid channel definition"); case 0: /* * The FW is expected to check the control channel position only * when in HT/VHT and the channel width is not 20MHz. Return * this value as the default one. */ return PHY_VHT_CTRL_POS_1_BELOW; } } /* * Construct the generic fields of the PHY context command */ static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt, struct iwl_phy_context_cmd *cmd, u32 action, u32 apply_time) { memset(cmd, 0, sizeof(struct iwl_phy_context_cmd)); cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id, ctxt->color)); cmd->action = cpu_to_le32(action); cmd->apply_time = cpu_to_le32(apply_time); } /* * Add the phy configuration to the PHY context command */ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, struct iwl_phy_context_cmd *cmd, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { u8 active_cnt, idle_cnt; /* Set the channel info data */ cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ? PHY_BAND_24 : PHY_BAND_5); cmd->ci.channel = chandef->chan->hw_value; cmd->ci.width = iwl_mvm_get_channel_width(chandef); cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef); /* Set rx the chains */ idle_cnt = chains_static; active_cnt = chains_dynamic; /* In scenarios where we only ever use a single-stream rates, * i.e. legacy 11b/g/a associations, single-stream APs or even * static SMPS, enable both chains to get diversity, improving * the case where we're far enough from the AP that attenuation * between the two antennas is sufficiently different to impact * performance. 
*/ if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) { idle_cnt = 2; active_cnt = 2; } cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) << PHY_RX_CHAIN_VALID_POS); cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); cmd->rxchain_info |= cpu_to_le32(active_cnt << PHY_RX_CHAIN_MIMO_CNT_POS); #ifdef CONFIG_IWLWIFI_DEBUGFS if (unlikely(mvm->dbgfs_rx_phyinfo)) cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo); #endif cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); } /* * Send a command to apply the current phy configuration. The command is send * only if something in the configuration changed: in case that this is the * first time that the phy configuration is applied or in case that the phy * configuration changed from the previous apply. */ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic, u32 action, u32 apply_time) { struct iwl_phy_context_cmd cmd; int ret; /* Set the command header fields */ iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time); /* Set the command data */ iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef, chains_static, chains_dynamic); ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, sizeof(struct iwl_phy_context_cmd), &cmd); if (ret) IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret); return ret; } /* * Send a command to add a PHY context based on the current HW configuration. */ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && ctxt->ref); lockdep_assert_held(&mvm->mutex); ctxt->channel = chandef->chan; return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, FW_CTXT_ACTION_ADD, 0); } /* * Update the number of references to the given PHY context. 
This is valid only * in case the PHY context was already created, i.e., its reference count > 0. */ void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) { lockdep_assert_held(&mvm->mutex); ctxt->ref++; } /* * Send a command to modify the PHY context based on the current HW * configuration. Note that the function does not check that the configuration * changed. */ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { lockdep_assert_held(&mvm->mutex); ctxt->channel = chandef->chan; return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, FW_CTXT_ACTION_MODIFY, 0); } void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) { lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(!ctxt)) return; ctxt->ref--; } static void iwl_mvm_binding_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { unsigned long *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (!mvmvif->phy_ctxt) return; if (vif->type == NL80211_IFTYPE_STATION || vif->type == NL80211_IFTYPE_AP) __set_bit(mvmvif->phy_ctxt->id, data); } int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm) { unsigned long phy_ctxt_counter = 0; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_binding_iterator, &phy_ctxt_counter); return hweight8(phy_ctxt_counter); }
gpl-2.0
Team-Blackout/Rezound_ION
drivers/net/stmmac/dwmac1000_core.c
2307
7482
/******************************************************************************* This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for developing this code. This only implements the mac core functions for this chip. Copyright (C) 2007-2009 STMicroelectronics Ltd This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". 
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ #include <linux/crc32.h> #include <linux/slab.h> #include "dwmac1000.h" static void dwmac1000_core_init(void __iomem *ioaddr) { u32 value = readl(ioaddr + GMAC_CONTROL); value |= GMAC_CORE_INIT; writel(value, ioaddr + GMAC_CONTROL); /* STBus Bridge Configuration */ /*writel(0xc5608, ioaddr + 0x00007000);*/ /* Freeze MMC counters */ writel(0x8, ioaddr + GMAC_MMC_CTRL); /* Mask GMAC interrupts */ writel(0x207, ioaddr + GMAC_INT_MASK); #ifdef STMMAC_VLAN_TAG_USED /* Tag detection without filtering */ writel(0x0, ioaddr + GMAC_VLAN_TAG); #endif } static int dwmac1000_rx_coe_supported(void __iomem *ioaddr) { u32 value = readl(ioaddr + GMAC_CONTROL); value |= GMAC_CONTROL_IPC; writel(value, ioaddr + GMAC_CONTROL); value = readl(ioaddr + GMAC_CONTROL); return !!(value & GMAC_CONTROL_IPC); } static void dwmac1000_dump_regs(void __iomem *ioaddr) { int i; pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr); for (i = 0; i < 55; i++) { int offset = i * 4; pr_info("\tReg No. 
%d (offset 0x%x): 0x%08x\n", i, offset, readl(ioaddr + offset)); } } static void dwmac1000_set_umac_addr(void __iomem *ioaddr, unsigned char *addr, unsigned int reg_n) { stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), GMAC_ADDR_LOW(reg_n)); } static void dwmac1000_get_umac_addr(void __iomem *ioaddr, unsigned char *addr, unsigned int reg_n) { stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), GMAC_ADDR_LOW(reg_n)); } static void dwmac1000_set_filter(struct net_device *dev) { void __iomem *ioaddr = (void __iomem *) dev->base_addr; unsigned int value = 0; CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", __func__, netdev_mc_count(dev), netdev_uc_count(dev)); if (dev->flags & IFF_PROMISC) value = GMAC_FRAME_FILTER_PR; else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE) || (dev->flags & IFF_ALLMULTI)) { value = GMAC_FRAME_FILTER_PM; /* pass all multi */ writel(0xffffffff, ioaddr + GMAC_HASH_HIGH); writel(0xffffffff, ioaddr + GMAC_HASH_LOW); } else if (!netdev_mc_empty(dev)) { u32 mc_filter[2]; struct netdev_hw_addr *ha; /* Hash filter for multicast */ value = GMAC_FRAME_FILTER_HMC; memset(mc_filter, 0, sizeof(mc_filter)); netdev_for_each_mc_addr(ha, dev) { /* The upper 6 bits of the calculated CRC are used to index the contens of the hash table */ int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26; /* The most significant bit determines the register to * use (H/L) while the other 5 bits determine the bit * within the register. 
*/ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); } writel(mc_filter[0], ioaddr + GMAC_HASH_LOW); writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH); } /* Handle multiple unicast addresses (perfect filtering)*/ if (netdev_uc_count(dev) > GMAC_MAX_UNICAST_ADDRESSES) /* Switch to promiscuous mode is more than 16 addrs are required */ value |= GMAC_FRAME_FILTER_PR; else { int reg = 1; struct netdev_hw_addr *ha; netdev_for_each_uc_addr(ha, dev) { dwmac1000_set_umac_addr(ioaddr, ha->addr, reg); reg++; } } #ifdef FRAME_FILTER_DEBUG /* Enable Receive all mode (to debug filtering_fail errors) */ value |= GMAC_FRAME_FILTER_RA; #endif writel(value, ioaddr + GMAC_FRAME_FILTER); CHIP_DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: " "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER), readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW)); } static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex, unsigned int fc, unsigned int pause_time) { unsigned int flow = 0; CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n"); if (fc & FLOW_RX) { CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n"); flow |= GMAC_FLOW_CTRL_RFE; } if (fc & FLOW_TX) { CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n"); flow |= GMAC_FLOW_CTRL_TFE; } if (duplex) { CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time); flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT); } writel(flow, ioaddr + GMAC_FLOW_CTRL); } static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode) { unsigned int pmt = 0; if (mode & WAKE_MAGIC) { CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n"); pmt |= power_down | magic_pkt_en; } if (mode & WAKE_UCAST) { CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n"); pmt |= global_unicast; } writel(pmt, ioaddr + GMAC_PMT); } static void dwmac1000_irq_status(void __iomem *ioaddr) { u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); /* Not used events (e.g. MMC interrupts) are not handled. 
*/ if ((intr_status & mmc_tx_irq)) CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n", readl(ioaddr + GMAC_MMC_TX_INTR)); if (unlikely(intr_status & mmc_rx_irq)) CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n", readl(ioaddr + GMAC_MMC_RX_INTR)); if (unlikely(intr_status & mmc_rx_csum_offload_irq)) CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n", readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD)); if (unlikely(intr_status & pmt_irq)) { CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n"); /* clear the PMT bits 5 and 6 by reading the PMT * status register. */ readl(ioaddr + GMAC_PMT); } } static const struct stmmac_ops dwmac1000_ops = { .core_init = dwmac1000_core_init, .rx_coe = dwmac1000_rx_coe_supported, .dump_regs = dwmac1000_dump_regs, .host_irq_status = dwmac1000_irq_status, .set_filter = dwmac1000_set_filter, .flow_ctrl = dwmac1000_flow_ctrl, .pmt = dwmac1000_pmt, .set_umac_addr = dwmac1000_set_umac_addr, .get_umac_addr = dwmac1000_get_umac_addr, }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr) { struct mac_device_info *mac; u32 uid = readl(ioaddr + GMAC_VERSION); pr_info("\tDWMAC1000 - user ID: 0x%x, Synopsys ID: 0x%x\n", ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff)); mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); if (!mac) return NULL; mac->mac = &dwmac1000_ops; mac->dma = &dwmac1000_dma_ops; mac->link.port = GMAC_CONTROL_PS; mac->link.duplex = GMAC_CONTROL_DM; mac->link.speed = GMAC_CONTROL_FES; mac->mii.addr = GMAC_MII_ADDR; mac->mii.data = GMAC_MII_DATA; return mac; }
gpl-2.0
t2m-foxfone/kernel_msm
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
3331
25617
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/moduleparam.h> #include <linux/ip.h> #include <linux/in.h> #include <linux/igmp.h> #include <linux/inetdevice.h> #include <linux/delay.h> #include <linux/completion.h> #include <linux/slab.h> #include <net/dst.h> #include "ipoib.h" #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG static int mcast_debug_level; module_param(mcast_debug_level, int, 0644); MODULE_PARM_DESC(mcast_debug_level, "Enable multicast debug tracing if > 0"); #endif static DEFINE_MUTEX(mcast_mutex); struct ipoib_mcast_iter { struct net_device *dev; union ib_gid mgid; unsigned long created; unsigned int queuelen; unsigned int complete; unsigned int send_only; }; static void ipoib_mcast_free(struct ipoib_mcast *mcast) { struct net_device *dev = mcast->dev; struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_neigh *neigh, *tmp; int tx_dropped = 0; ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group %pI6\n", mcast->mcmember.mgid.raw); spin_lock_irq(&priv->lock); list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) { /* * It's safe to call ipoib_put_ah() inside priv->lock * here, because we know that mcast->ah will always * hold one more reference, so ipoib_put_ah() will * never do more than decrement the ref count. */ if (neigh->ah) ipoib_put_ah(neigh->ah); ipoib_neigh_free(dev, neigh); } spin_unlock_irq(&priv->lock); if (mcast->ah) ipoib_put_ah(mcast->ah); while (!skb_queue_empty(&mcast->pkt_queue)) { ++tx_dropped; dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); } netif_tx_lock_bh(dev); dev->stats.tx_dropped += tx_dropped; netif_tx_unlock_bh(dev); kfree(mcast); } static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev, int can_sleep) { struct ipoib_mcast *mcast; mcast = kzalloc(sizeof *mcast, can_sleep ? 
GFP_KERNEL : GFP_ATOMIC); if (!mcast) return NULL; mcast->dev = dev; mcast->created = jiffies; mcast->backoff = 1; INIT_LIST_HEAD(&mcast->list); INIT_LIST_HEAD(&mcast->neigh_list); skb_queue_head_init(&mcast->pkt_queue); return mcast; } static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct rb_node *n = priv->multicast_tree.rb_node; while (n) { struct ipoib_mcast *mcast; int ret; mcast = rb_entry(n, struct ipoib_mcast, rb_node); ret = memcmp(mgid, mcast->mcmember.mgid.raw, sizeof (union ib_gid)); if (ret < 0) n = n->rb_left; else if (ret > 0) n = n->rb_right; else return mcast; } return NULL; } static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL; while (*n) { struct ipoib_mcast *tmcast; int ret; pn = *n; tmcast = rb_entry(pn, struct ipoib_mcast, rb_node); ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw, sizeof (union ib_gid)); if (ret < 0) n = &pn->rb_left; else if (ret > 0) n = &pn->rb_right; else return -EEXIST; } rb_link_node(&mcast->rb_node, pn, n); rb_insert_color(&mcast->rb_node, &priv->multicast_tree); return 0; } static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, struct ib_sa_mcmember_rec *mcmember) { struct net_device *dev = mcast->dev; struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_ah *ah; int ret; int set_qkey = 0; mcast->mcmember = *mcmember; /* Set the cached Q_Key before we attach if it's the broadcast group */ if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4, sizeof (union ib_gid))) { spin_lock_irq(&priv->lock); if (!priv->broadcast) { spin_unlock_irq(&priv->lock); return -EAGAIN; } priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey); spin_unlock_irq(&priv->lock); priv->tx_wr.wr.ud.remote_qkey = priv->qkey; set_qkey = 1; } if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, 
&mcast->flags)) { if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { ipoib_warn(priv, "multicast group %pI6 already attached\n", mcast->mcmember.mgid.raw); return 0; } ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid), &mcast->mcmember.mgid, set_qkey); if (ret < 0) { ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n", mcast->mcmember.mgid.raw); clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags); return ret; } } { struct ib_ah_attr av = { .dlid = be16_to_cpu(mcast->mcmember.mlid), .port_num = priv->port, .sl = mcast->mcmember.sl, .ah_flags = IB_AH_GRH, .static_rate = mcast->mcmember.rate, .grh = { .flow_label = be32_to_cpu(mcast->mcmember.flow_label), .hop_limit = mcast->mcmember.hop_limit, .sgid_index = 0, .traffic_class = mcast->mcmember.traffic_class } }; av.grh.dgid = mcast->mcmember.mgid; ah = ipoib_create_ah(dev, priv->pd, &av); if (IS_ERR(ah)) { ipoib_warn(priv, "ib_address_create failed %ld\n", -PTR_ERR(ah)); /* use original error */ return PTR_ERR(ah); } else { spin_lock_irq(&priv->lock); mcast->ah = ah; spin_unlock_irq(&priv->lock); ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n", mcast->mcmember.mgid.raw, mcast->ah->ah, be16_to_cpu(mcast->mcmember.mlid), mcast->mcmember.sl); } } /* actually send any queued packets */ netif_tx_lock_bh(dev); while (!skb_queue_empty(&mcast->pkt_queue)) { struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue); netif_tx_unlock_bh(dev); skb->dev = dev; if (dev_queue_xmit(skb)) ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n"); netif_tx_lock_bh(dev); } netif_tx_unlock_bh(dev); return 0; } static int ipoib_mcast_sendonly_join_complete(int status, struct ib_sa_multicast *multicast) { struct ipoib_mcast *mcast = multicast->context; struct net_device *dev = mcast->dev; /* We trap for port events ourselves. 
*/ if (status == -ENETRESET) return 0; if (!status) status = ipoib_mcast_join_finish(mcast, &multicast->rec); if (status) { if (mcast->logcount++ < 20) ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n", mcast->mcmember.mgid.raw, status); /* Flush out any queued packets */ netif_tx_lock_bh(dev); while (!skb_queue_empty(&mcast->pkt_queue)) { ++dev->stats.tx_dropped; dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); } netif_tx_unlock_bh(dev); /* Clear the busy flag so we try again */ status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); } return status; } static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) { struct net_device *dev = mcast->dev; struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_sa_mcmember_rec rec = { #if 0 /* Some SMs don't support send-only yet */ .join_state = 4 #else .join_state = 1 #endif }; int ret = 0; if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n"); return -ENODEV; } if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n"); return -EBUSY; } rec.mgid = mcast->mcmember.mgid; rec.port_gid = priv->local_gid; rec.pkey = cpu_to_be16(priv->pkey); mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, &rec, IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE, GFP_ATOMIC, ipoib_mcast_sendonly_join_complete, mcast); if (IS_ERR(mcast->mc)) { ret = PTR_ERR(mcast->mc); clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n", ret); } else { ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n", mcast->mcmember.mgid.raw); } return ret; } void ipoib_mcast_carrier_on_task(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, carrier_on_task); struct ib_port_attr attr; /* * Take 
rtnl_lock to avoid racing with ipoib_stop() and * turning the carrier back on while a device is being * removed. */ if (ib_query_port(priv->ca, priv->port, &attr) || attr.state != IB_PORT_ACTIVE) { ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); return; } rtnl_lock(); netif_carrier_on(priv->dev); rtnl_unlock(); } static int ipoib_mcast_join_complete(int status, struct ib_sa_multicast *multicast) { struct ipoib_mcast *mcast = multicast->context; struct net_device *dev = mcast->dev; struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n", mcast->mcmember.mgid.raw, status); /* We trap for port events ourselves. */ if (status == -ENETRESET) return 0; if (!status) status = ipoib_mcast_join_finish(mcast, &multicast->rec); if (!status) { mcast->backoff = 1; mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0); mutex_unlock(&mcast_mutex); /* * Defer carrier on work to ipoib_workqueue to avoid a * deadlock on rtnl_lock here. 
*/ if (mcast == priv->broadcast) queue_work(ipoib_workqueue, &priv->carrier_on_task); return 0; } if (mcast->logcount++ < 20) { if (status == -ETIMEDOUT || status == -EAGAIN) { ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", mcast->mcmember.mgid.raw, status); } else { ipoib_warn(priv, "multicast join failed for %pI6, status %d\n", mcast->mcmember.mgid.raw, status); } } mcast->backoff *= 2; if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; /* Clear the busy flag so we try again */ status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); mutex_lock(&mcast_mutex); spin_lock_irq(&priv->lock); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) queue_delayed_work(ipoib_workqueue, &priv->mcast_task, mcast->backoff * HZ); spin_unlock_irq(&priv->lock); mutex_unlock(&mcast_mutex); return status; } static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, int create) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_sa_mcmember_rec rec = { .join_state = 1 }; ib_sa_comp_mask comp_mask; int ret = 0; ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw); rec.mgid = mcast->mcmember.mgid; rec.port_gid = priv->local_gid; rec.pkey = cpu_to_be16(priv->pkey); comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE; if (create) { comp_mask |= IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_MTU_SELECTOR | IB_SA_MCMEMBER_REC_MTU | IB_SA_MCMEMBER_REC_TRAFFIC_CLASS | IB_SA_MCMEMBER_REC_RATE_SELECTOR | IB_SA_MCMEMBER_REC_RATE | IB_SA_MCMEMBER_REC_SL | IB_SA_MCMEMBER_REC_FLOW_LABEL | IB_SA_MCMEMBER_REC_HOP_LIMIT; rec.qkey = priv->broadcast->mcmember.qkey; rec.mtu_selector = IB_SA_EQ; rec.mtu = priv->broadcast->mcmember.mtu; rec.traffic_class = priv->broadcast->mcmember.traffic_class; rec.rate_selector = IB_SA_EQ; rec.rate = priv->broadcast->mcmember.rate; rec.sl = priv->broadcast->mcmember.sl; rec.flow_label = 
priv->broadcast->mcmember.flow_label; rec.hop_limit = priv->broadcast->mcmember.hop_limit; } set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, &rec, comp_mask, GFP_KERNEL, ipoib_mcast_join_complete, mcast); if (IS_ERR(mcast->mc)) { clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); ret = PTR_ERR(mcast->mc); ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret); mcast->backoff *= 2; if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) queue_delayed_work(ipoib_workqueue, &priv->mcast_task, mcast->backoff * HZ); mutex_unlock(&mcast_mutex); } } void ipoib_mcast_join_task(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, mcast_task.work); struct net_device *dev = priv->dev; if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) return; if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid)) ipoib_warn(priv, "ib_query_gid() failed\n"); else memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); { struct ib_port_attr attr; if (!ib_query_port(priv->ca, priv->port, &attr)) priv->local_lid = attr.lid; else ipoib_warn(priv, "ib_query_port failed\n"); } if (!priv->broadcast) { struct ipoib_mcast *broadcast; if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) return; broadcast = ipoib_mcast_alloc(dev, 1); if (!broadcast) { ipoib_warn(priv, "failed to allocate broadcast group\n"); mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) queue_delayed_work(ipoib_workqueue, &priv->mcast_task, HZ); mutex_unlock(&mcast_mutex); return; } spin_lock_irq(&priv->lock); memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4, sizeof (union ib_gid)); priv->broadcast = broadcast; __ipoib_mcast_add(dev, priv->broadcast); spin_unlock_irq(&priv->lock); } if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { 
if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) ipoib_mcast_join(dev, priv->broadcast, 0); return; } while (1) { struct ipoib_mcast *mcast = NULL; spin_lock_irq(&priv->lock); list_for_each_entry(mcast, &priv->multicast_list, list) { if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { /* Found the next unjoined group */ break; } } spin_unlock_irq(&priv->lock); if (&mcast->list == &priv->multicast_list) { /* All done */ break; } ipoib_mcast_join(dev, mcast, 1); return; } priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)); if (!ipoib_cm_admin_enabled(dev)) { rtnl_lock(); dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu)); rtnl_unlock(); } ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n"); clear_bit(IPOIB_MCAST_RUN, &priv->flags); } int ipoib_mcast_start_thread(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_dbg_mcast(priv, "starting multicast thread\n"); mutex_lock(&mcast_mutex); if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0); mutex_unlock(&mcast_mutex); return 0; } int ipoib_mcast_stop_thread(struct net_device *dev, int flush) { struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_dbg_mcast(priv, "stopping multicast thread\n"); mutex_lock(&mcast_mutex); clear_bit(IPOIB_MCAST_RUN, &priv->flags); cancel_delayed_work(&priv->mcast_task); mutex_unlock(&mcast_mutex); if (flush) flush_workqueue(ipoib_workqueue); return 0; } static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret = 0; if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) ib_sa_free_multicast(mcast->mc); if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { ipoib_dbg_mcast(priv, "leaving MGID %pI6\n", mcast->mcmember.mgid.raw); /* 
Remove ourselves from the multicast group */ ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid, be16_to_cpu(mcast->mcmember.mlid)); if (ret) ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret); } return 0; } void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_mcast *mcast; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) || !priv->broadcast || !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); goto unlock; } mcast = __ipoib_mcast_find(dev, mgid); if (!mcast) { /* Let's create a new send only group now */ ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n", mgid); mcast = ipoib_mcast_alloc(dev, 0); if (!mcast) { ipoib_warn(priv, "unable to allocate memory for " "multicast structure\n"); ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); goto out; } set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags); memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid)); __ipoib_mcast_add(dev, mcast); list_add_tail(&mcast->list, &priv->multicast_list); } if (!mcast->ah) { if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) skb_queue_tail(&mcast->pkt_queue, skb); else { ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); } if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) ipoib_dbg_mcast(priv, "no address vector, " "but multicast join already started\n"); else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) ipoib_mcast_sendonly_join(mcast); /* * If lookup completes between here and out:, don't * want to send packet twice. 
*/ mcast = NULL; } out: if (mcast && mcast->ah) { struct dst_entry *dst = skb_dst(skb); struct neighbour *n = NULL; rcu_read_lock(); if (dst) n = dst_get_neighbour_noref(dst); if (n && !*to_ipoib_neigh(n)) { struct ipoib_neigh *neigh = ipoib_neigh_alloc(n, skb->dev); if (neigh) { kref_get(&mcast->ah->ref); neigh->ah = mcast->ah; list_add_tail(&neigh->list, &mcast->neigh_list); } } rcu_read_unlock(); spin_unlock_irqrestore(&priv->lock, flags); ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); return; } unlock: spin_unlock_irqrestore(&priv->lock, flags); } void ipoib_mcast_dev_flush(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); LIST_HEAD(remove_list); struct ipoib_mcast *mcast, *tmcast; unsigned long flags; ipoib_dbg_mcast(priv, "flushing multicast list\n"); spin_lock_irqsave(&priv->lock, flags); list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) { list_del(&mcast->list); rb_erase(&mcast->rb_node, &priv->multicast_tree); list_add_tail(&mcast->list, &remove_list); } if (priv->broadcast) { rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree); list_add_tail(&priv->broadcast->list, &remove_list); priv->broadcast = NULL; } spin_unlock_irqrestore(&priv->lock, flags); list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { ipoib_mcast_leave(dev, mcast); ipoib_mcast_free(mcast); } } static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast) { /* reserved QPN, prefix, scope */ if (memcmp(addr, broadcast, 6)) return 0; /* signature lower, pkey */ if (memcmp(addr + 7, broadcast + 7, 3)) return 0; return 1; } void ipoib_mcast_restart_task(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, restart_task); struct net_device *dev = priv->dev; struct netdev_hw_addr *ha; struct ipoib_mcast *mcast, *tmcast; LIST_HEAD(remove_list); unsigned long flags; struct ib_sa_mcmember_rec rec; ipoib_dbg_mcast(priv, "restarting multicast task\n"); ipoib_mcast_stop_thread(dev, 
0); local_irq_save(flags); netif_addr_lock(dev); spin_lock(&priv->lock); /* * Unfortunately, the networking core only gives us a list of all of * the multicast hardware addresses. We need to figure out which ones * are new and which ones have been removed */ /* Clear out the found flag */ list_for_each_entry(mcast, &priv->multicast_list, list) clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); /* Mark all of the entries that are found or don't exist */ netdev_for_each_mc_addr(ha, dev) { union ib_gid mgid; if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast)) continue; memcpy(mgid.raw, ha->addr + 4, sizeof mgid); mcast = __ipoib_mcast_find(dev, &mgid); if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { struct ipoib_mcast *nmcast; /* ignore group which is directly joined by userspace */ if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) && !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) { ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n", mgid.raw); continue; } /* Not found or send-only group, let's add a new entry */ ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n", mgid.raw); nmcast = ipoib_mcast_alloc(dev, 0); if (!nmcast) { ipoib_warn(priv, "unable to allocate memory for multicast structure\n"); continue; } set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags); nmcast->mcmember.mgid = mgid; if (mcast) { /* Destroy the send only entry */ list_move_tail(&mcast->list, &remove_list); rb_replace_node(&mcast->rb_node, &nmcast->rb_node, &priv->multicast_tree); } else __ipoib_mcast_add(dev, nmcast); list_add_tail(&nmcast->list, &priv->multicast_list); } if (mcast) set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); } /* Remove all of the entries don't exist anymore */ list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) { if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) && !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n", 
mcast->mcmember.mgid.raw); rb_erase(&mcast->rb_node, &priv->multicast_tree); /* Move to the remove list */ list_move_tail(&mcast->list, &remove_list); } } spin_unlock(&priv->lock); netif_addr_unlock(dev); local_irq_restore(flags); /* We have to cancel outside of the spinlock */ list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { ipoib_mcast_leave(mcast->dev, mcast); ipoib_mcast_free(mcast); } if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) ipoib_mcast_start_thread(dev); } #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev) { struct ipoib_mcast_iter *iter; iter = kmalloc(sizeof *iter, GFP_KERNEL); if (!iter) return NULL; iter->dev = dev; memset(iter->mgid.raw, 0, 16); if (ipoib_mcast_iter_next(iter)) { kfree(iter); return NULL; } return iter; } int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter) { struct ipoib_dev_priv *priv = netdev_priv(iter->dev); struct rb_node *n; struct ipoib_mcast *mcast; int ret = 1; spin_lock_irq(&priv->lock); n = rb_first(&priv->multicast_tree); while (n) { mcast = rb_entry(n, struct ipoib_mcast, rb_node); if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw, sizeof (union ib_gid)) < 0) { iter->mgid = mcast->mcmember.mgid; iter->created = mcast->created; iter->queuelen = skb_queue_len(&mcast->pkt_queue); iter->complete = !!mcast->ah; iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY)); ret = 0; break; } n = rb_next(n); } spin_unlock_irq(&priv->lock); return ret; } void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter, union ib_gid *mgid, unsigned long *created, unsigned int *queuelen, unsigned int *complete, unsigned int *send_only) { *mgid = iter->mgid; *created = iter->created; *queuelen = iter->queuelen; *complete = iter->complete; *send_only = iter->send_only; } #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
gpl-2.0
boa19861105/android_443_KitKat_kernel_htc_dlxub1
arch/x86/kernel/crash.c
4355
2391
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 *
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>

/*
 * Set to 1 by kdump_nmi_shootdown_cpus() once a crash-kexec shutdown is in
 * progress; other code can test it to tell a crash path from a normal one.
 */
int in_crash_kexec;

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

/*
 * Per-CPU NMI handler run on every other CPU during the crash shootdown:
 * saves that CPU's registers for the dump, disables virtualization and
 * turns off its local APIC before the CPU is parked.
 */
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;
#endif

#ifdef CONFIG_X86_32
	/*
	 * On 32-bit, regs taken in kernel mode lack usable ss/esp; rebuild
	 * a consistent pt_regs before saving it for the crash dump.
	 */
	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	disable_local_APIC();
}

/*
 * Stop all other CPUs via NMI (each runs kdump_nmi_callback), then shut
 * down this CPU's local APIC as well.
 */
static void kdump_nmi_shootdown_cpus(void)
{
	in_crash_kexec = 1;
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

#else
static void kdump_nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
#endif

/*
 * Crash-time machine shutdown: the minimal, carefully ordered teardown
 * (interrupts off -> CPU shootdown -> virt off -> APIC/IO-APIC/HPET off ->
 * save this CPU's registers) needed before jumping into the kdump kernel.
 */
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC();
#endif
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	/* safe_smp_processor_id() rather than smp_processor_id(): the
	 * system is unstable and preemption checks must be avoided. */
	crash_save_cpu(regs, safe_smp_processor_id());
}
gpl-2.0
misko/linux-sunxi
sound/isa/gus/interwave.c
4867
27258
/*
 * Driver for AMD InterWave soundcard
 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *   1999/07/22 Erik Inge Bolso <knan@mo.himolde.no>
 *              mixer group handlers
 *
 */

#include <linux/init.h>
#include <linux/err.h>
#include <linux/isa.h>
#include <linux/delay.h>
#include <linux/pnp.h>
#include <linux/module.h>
#include <asm/dma.h>
#include <sound/core.h>
#include <sound/gus.h>
#include <sound/wss.h>
#ifdef SNDRV_STB
#include <sound/tea6330t.h>
#endif
#define SNDRV_LEGACY_FIND_FREE_IRQ
#define SNDRV_LEGACY_FIND_FREE_DMA
#include <sound/initval.h>

MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_LICENSE("GPL");
/*
 * The same source builds two drivers: the plain InterWave driver and,
 * when SNDRV_STB is defined, the STB variant with a TEA6330T tone
 * control on an I2C bus.
 */
#ifndef SNDRV_STB
MODULE_DESCRIPTION("AMD InterWave");
MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound Plug & Play},"
		"{STB,SoundRage32},"
		"{MED,MED3210},"
		"{Dynasonix,Dynasonix Pro},"
		"{Panasonic,PCA761AW}}");
#else
MODULE_DESCRIPTION("AMD InterWave STB with TEA6330T");
MODULE_SUPPORTED_DEVICE("{{AMD,InterWave STB with TEA6330T}}");
#endif

/* Per-card module parameters; each array is indexed by card number. */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;	/* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;	/* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */
#ifdef CONFIG_PNP
static bool isapnp[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1};
#endif
static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;	/* 0x210,0x220,0x230,0x240,0x250,0x260 */
#ifdef SNDRV_STB
static long port_tc[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;	/* 0x350,0x360,0x370,0x380 */
#endif
static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;	/* 2,3,5,9,11,12,15 */
static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;	/* 0,1,3,5,6,7 */
static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;	/* 0,1,3,5,6,7 */
static int joystick_dac[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 29};
				/* 0 to 31, (0.59V-4.52V or 0.389V-2.98V) */
static int midi[SNDRV_CARDS];
static int pcm_channels[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};
static int effect[SNDRV_CARDS];

#ifdef SNDRV_STB
#define PFX	"interwave-stb: "
#define INTERWAVE_DRIVER	"snd_interwave_stb"
#define INTERWAVE_PNP_DRIVER	"interwave-stb"
#else
#define PFX	"interwave: "
#define INTERWAVE_DRIVER	"snd_interwave"
#define INTERWAVE_PNP_DRIVER	"interwave"
#endif

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for InterWave soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for InterWave soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable InterWave soundcard.");
#ifdef CONFIG_PNP
module_param_array(isapnp, bool, NULL, 0444);
MODULE_PARM_DESC(isapnp, "ISA PnP detection for specified soundcard.");
#endif
module_param_array(port, long, NULL, 0444);
MODULE_PARM_DESC(port, "Port # for InterWave driver.");
#ifdef SNDRV_STB
module_param_array(port_tc, long, NULL, 0444);
MODULE_PARM_DESC(port_tc, "Tone control (TEA6330T - i2c bus) port # for InterWave driver.");
#endif
module_param_array(irq, int, NULL, 0444);
MODULE_PARM_DESC(irq, "IRQ # for InterWave driver.");
module_param_array(dma1, int, NULL, 0444);
MODULE_PARM_DESC(dma1, "DMA1 # for InterWave driver.");
module_param_array(dma2, int, NULL, 0444);
MODULE_PARM_DESC(dma2, "DMA2 # for InterWave driver.");
module_param_array(joystick_dac, int, NULL, 0444); MODULE_PARM_DESC(joystick_dac, "Joystick DAC level 0.59V-4.52V or 0.389V-2.98V for InterWave driver."); module_param_array(midi, int, NULL, 0444); MODULE_PARM_DESC(midi, "MIDI UART enable for InterWave driver."); module_param_array(pcm_channels, int, NULL, 0444); MODULE_PARM_DESC(pcm_channels, "Reserved PCM channels for InterWave driver."); module_param_array(effect, int, NULL, 0444); MODULE_PARM_DESC(effect, "Effects enable for InterWave driver."); struct snd_interwave { int irq; struct snd_card *card; struct snd_gus_card *gus; struct snd_wss *wss; #ifdef SNDRV_STB struct resource *i2c_res; #endif unsigned short gus_status_reg; unsigned short pcm_status_reg; #ifdef CONFIG_PNP struct pnp_dev *dev; #ifdef SNDRV_STB struct pnp_dev *devtc; #endif #endif }; #ifdef CONFIG_PNP static int isa_registered; static int pnp_registered; static struct pnp_card_device_id snd_interwave_pnpids[] = { #ifndef SNDRV_STB /* Gravis UltraSound Plug & Play */ { .id = "GRV0001", .devs = { { .id = "GRV0000" } } }, /* STB SoundRage32 */ { .id = "STB011a", .devs = { { .id = "STB0010" } } }, /* MED3210 */ { .id = "DXP3201", .devs = { { .id = "DXP0010" } } }, /* Dynasonic Pro */ /* This device also have CDC1117:DynaSonix Pro Audio Effects Processor */ { .id = "CDC1111", .devs = { { .id = "CDC1112" } } }, /* Panasonic PCA761AW Audio Card */ { .id = "ADV55ff", .devs = { { .id = "ADV0010" } } }, /* InterWave STB without TEA6330T */ { .id = "ADV550a", .devs = { { .id = "ADV0010" } } }, #else /* InterWave STB with TEA6330T */ { .id = "ADV550a", .devs = { { .id = "ADV0010" }, { .id = "ADV0015" } } }, #endif { .id = "" } }; MODULE_DEVICE_TABLE(pnp_card, snd_interwave_pnpids); #endif /* CONFIG_PNP */ #ifdef SNDRV_STB static void snd_interwave_i2c_setlines(struct snd_i2c_bus *bus, int ctrl, int data) { unsigned long port = bus->private_value; #if 0 printk(KERN_DEBUG "i2c_setlines - 0x%lx <- %i,%i\n", port, ctrl, data); #endif outb((data << 1) | ctrl, 
port); udelay(10); } static int snd_interwave_i2c_getclockline(struct snd_i2c_bus *bus) { unsigned long port = bus->private_value; unsigned char res; res = inb(port) & 1; #if 0 printk(KERN_DEBUG "i2c_getclockline - 0x%lx -> %i\n", port, res); #endif return res; } static int snd_interwave_i2c_getdataline(struct snd_i2c_bus *bus, int ack) { unsigned long port = bus->private_value; unsigned char res; if (ack) udelay(10); res = (inb(port) & 2) >> 1; #if 0 printk(KERN_DEBUG "i2c_getdataline - 0x%lx -> %i\n", port, res); #endif return res; } static struct snd_i2c_bit_ops snd_interwave_i2c_bit_ops = { .setlines = snd_interwave_i2c_setlines, .getclock = snd_interwave_i2c_getclockline, .getdata = snd_interwave_i2c_getdataline, }; static int __devinit snd_interwave_detect_stb(struct snd_interwave *iwcard, struct snd_gus_card * gus, int dev, struct snd_i2c_bus **rbus) { unsigned long port; struct snd_i2c_bus *bus; struct snd_card *card = iwcard->card; char name[32]; int err; *rbus = NULL; port = port_tc[dev]; if (port == SNDRV_AUTO_PORT) { port = 0x350; if (gus->gf1.port == 0x250) { port = 0x360; } while (port <= 0x380) { if ((iwcard->i2c_res = request_region(port, 1, "InterWave (I2C bus)")) != NULL) break; port += 0x10; } } else { iwcard->i2c_res = request_region(port, 1, "InterWave (I2C bus)"); } if (iwcard->i2c_res == NULL) { snd_printk(KERN_ERR "interwave: can't grab i2c bus port\n"); return -ENODEV; } sprintf(name, "InterWave-%i", card->number); if ((err = snd_i2c_bus_create(card, name, NULL, &bus)) < 0) return err; bus->private_value = port; bus->hw_ops.bit = &snd_interwave_i2c_bit_ops; if ((err = snd_tea6330t_detect(bus, 0)) < 0) return err; *rbus = bus; return 0; } #endif static int __devinit snd_interwave_detect(struct snd_interwave *iwcard, struct snd_gus_card * gus, int dev #ifdef SNDRV_STB , struct snd_i2c_bus **rbus #endif ) { unsigned long flags; unsigned char rev1, rev2; int d; snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 0); /* reset GF1 */ if (((d = 
snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)) & 0x07) != 0) { snd_printdd("[0x%lx] check 1 failed - 0x%x\n", gus->gf1.port, d); return -ENODEV; } udelay(160); snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 1); /* release reset */ udelay(160); if (((d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)) & 0x07) != 1) { snd_printdd("[0x%lx] check 2 failed - 0x%x\n", gus->gf1.port, d); return -ENODEV; } spin_lock_irqsave(&gus->reg_lock, flags); rev1 = snd_gf1_look8(gus, SNDRV_GF1_GB_VERSION_NUMBER); snd_gf1_write8(gus, SNDRV_GF1_GB_VERSION_NUMBER, ~rev1); rev2 = snd_gf1_look8(gus, SNDRV_GF1_GB_VERSION_NUMBER); snd_gf1_write8(gus, SNDRV_GF1_GB_VERSION_NUMBER, rev1); spin_unlock_irqrestore(&gus->reg_lock, flags); snd_printdd("[0x%lx] InterWave check - rev1=0x%x, rev2=0x%x\n", gus->gf1.port, rev1, rev2); if ((rev1 & 0xf0) == (rev2 & 0xf0) && (rev1 & 0x0f) != (rev2 & 0x0f)) { snd_printdd("[0x%lx] InterWave check - passed\n", gus->gf1.port); gus->interwave = 1; strcpy(gus->card->shortname, "AMD InterWave"); gus->revision = rev1 >> 4; #ifndef SNDRV_STB return 0; /* ok.. We have an InterWave board */ #else return snd_interwave_detect_stb(iwcard, gus, dev, rbus); #endif } snd_printdd("[0x%lx] InterWave check - failed\n", gus->gf1.port); return -ENODEV; } static irqreturn_t snd_interwave_interrupt(int irq, void *dev_id) { struct snd_interwave *iwcard = dev_id; int loop, max = 5; int handled = 0; do { loop = 0; if (inb(iwcard->gus_status_reg)) { handled = 1; snd_gus_interrupt(irq, iwcard->gus); loop++; } if (inb(iwcard->pcm_status_reg) & 0x01) { /* IRQ bit is set? 
*/ handled = 1; snd_wss_interrupt(irq, iwcard->wss); loop++; } } while (loop && --max > 0); return IRQ_RETVAL(handled); } static void __devinit snd_interwave_reset(struct snd_gus_card * gus) { snd_gf1_write8(gus, SNDRV_GF1_GB_RESET, 0x00); udelay(160); snd_gf1_write8(gus, SNDRV_GF1_GB_RESET, 0x01); udelay(160); } static void __devinit snd_interwave_bank_sizes(struct snd_gus_card * gus, int *sizes) { unsigned int idx; unsigned int local; unsigned char d; for (idx = 0; idx < 4; idx++) { sizes[idx] = 0; d = 0x55; for (local = idx << 22; local < (idx << 22) + 0x400000; local += 0x40000, d++) { snd_gf1_poke(gus, local, d); snd_gf1_poke(gus, local + 1, d + 1); #if 0 printk(KERN_DEBUG "d = 0x%x, local = 0x%x, " "local + 1 = 0x%x, idx << 22 = 0x%x\n", d, snd_gf1_peek(gus, local), snd_gf1_peek(gus, local + 1), snd_gf1_peek(gus, idx << 22)); #endif if (snd_gf1_peek(gus, local) != d || snd_gf1_peek(gus, local + 1) != d + 1 || snd_gf1_peek(gus, idx << 22) != 0x55) break; sizes[idx]++; } } #if 0 printk(KERN_DEBUG "sizes: %i %i %i %i\n", sizes[0], sizes[1], sizes[2], sizes[3]); #endif } struct rom_hdr { /* 000 */ unsigned char iwave[8]; /* 008 */ unsigned char rom_hdr_revision; /* 009 */ unsigned char series_number; /* 010 */ unsigned char series_name[16]; /* 026 */ unsigned char date[10]; /* 036 */ unsigned short vendor_revision_major; /* 038 */ unsigned short vendor_revision_minor; /* 040 */ unsigned int rom_size; /* 044 */ unsigned char copyright[128]; /* 172 */ unsigned char vendor_name[64]; /* 236 */ unsigned char rom_description[128]; /* 364 */ unsigned char pad[147]; /* 511 */ unsigned char csum; }; static void __devinit snd_interwave_detect_memory(struct snd_gus_card * gus) { static unsigned int lmc[13] = { 0x00000001, 0x00000101, 0x01010101, 0x00000401, 0x04040401, 0x00040101, 0x04040101, 0x00000004, 0x00000404, 0x04040404, 0x00000010, 0x00001010, 0x10101010 }; int bank_pos, pages; unsigned int i, lmct; int psizes[4]; unsigned char iwave[8]; unsigned char csum; 
snd_interwave_reset(gus); snd_gf1_write8(gus, SNDRV_GF1_GB_GLOBAL_MODE, snd_gf1_read8(gus, SNDRV_GF1_GB_GLOBAL_MODE) | 0x01); /* enhanced mode */ snd_gf1_write8(gus, SNDRV_GF1_GB_MEMORY_CONTROL, 0x01); /* DRAM I/O cycles selected */ snd_gf1_write16(gus, SNDRV_GF1_GW_MEMORY_CONFIG, (snd_gf1_look16(gus, SNDRV_GF1_GW_MEMORY_CONFIG) & 0xff10) | 0x004c); /* ok.. simple test of memory size */ pages = 0; snd_gf1_poke(gus, 0, 0x55); snd_gf1_poke(gus, 1, 0xaa); #if 1 if (snd_gf1_peek(gus, 0) == 0x55 && snd_gf1_peek(gus, 1) == 0xaa) #else if (0) /* ok.. for testing of 0k RAM */ #endif { snd_interwave_bank_sizes(gus, psizes); lmct = (psizes[3] << 24) | (psizes[2] << 16) | (psizes[1] << 8) | psizes[0]; #if 0 printk(KERN_DEBUG "lmct = 0x%08x\n", lmct); #endif for (i = 0; i < ARRAY_SIZE(lmc); i++) if (lmct == lmc[i]) { #if 0 printk(KERN_DEBUG "found !!! %i\n", i); #endif snd_gf1_write16(gus, SNDRV_GF1_GW_MEMORY_CONFIG, (snd_gf1_look16(gus, SNDRV_GF1_GW_MEMORY_CONFIG) & 0xfff0) | i); snd_interwave_bank_sizes(gus, psizes); break; } if (i >= ARRAY_SIZE(lmc) && !gus->gf1.enh_mode) snd_gf1_write16(gus, SNDRV_GF1_GW_MEMORY_CONFIG, (snd_gf1_look16(gus, SNDRV_GF1_GW_MEMORY_CONFIG) & 0xfff0) | 2); for (i = 0; i < 4; i++) { gus->gf1.mem_alloc.banks_8[i].address = gus->gf1.mem_alloc.banks_16[i].address = i << 22; gus->gf1.mem_alloc.banks_8[i].size = gus->gf1.mem_alloc.banks_16[i].size = psizes[i] << 18; pages += psizes[i]; } } pages <<= 18; gus->gf1.memory = pages; snd_gf1_write8(gus, SNDRV_GF1_GB_MEMORY_CONTROL, 0x03); /* select ROM */ snd_gf1_write16(gus, SNDRV_GF1_GW_MEMORY_CONFIG, (snd_gf1_look16(gus, SNDRV_GF1_GW_MEMORY_CONFIG) & 0xff1f) | (4 << 5)); gus->gf1.rom_banks = 0; gus->gf1.rom_memory = 0; for (bank_pos = 0; bank_pos < 16L * 1024L * 1024L; bank_pos += 4L * 1024L * 1024L) { for (i = 0; i < 8; ++i) iwave[i] = snd_gf1_peek(gus, bank_pos + i); #ifdef CONFIG_SND_DEBUG_ROM printk(KERN_DEBUG "ROM at 0x%06x = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", bank_pos, iwave[0], iwave[1], 
iwave[2], iwave[3], iwave[4], iwave[5], iwave[6], iwave[7]); #endif if (strncmp(iwave, "INTRWAVE", 8)) continue; /* first check */ csum = 0; for (i = 0; i < sizeof(struct rom_hdr); i++) csum += snd_gf1_peek(gus, bank_pos + i); #ifdef CONFIG_SND_DEBUG_ROM printk(KERN_DEBUG "ROM checksum = 0x%x (computed)\n", csum); #endif if (csum != 0) continue; /* not valid rom */ gus->gf1.rom_banks++; gus->gf1.rom_present |= 1 << (bank_pos >> 22); gus->gf1.rom_memory = snd_gf1_peek(gus, bank_pos + 40) | (snd_gf1_peek(gus, bank_pos + 41) << 8) | (snd_gf1_peek(gus, bank_pos + 42) << 16) | (snd_gf1_peek(gus, bank_pos + 43) << 24); } #if 0 if (gus->gf1.rom_memory > 0) { if (gus->gf1.rom_banks == 1 && gus->gf1.rom_present == 8) gus->card->type = SNDRV_CARD_TYPE_IW_DYNASONIC; } #endif snd_gf1_write8(gus, SNDRV_GF1_GB_MEMORY_CONTROL, 0x00); /* select RAM */ if (!gus->gf1.enh_mode) snd_interwave_reset(gus); } static void __devinit snd_interwave_init(int dev, struct snd_gus_card * gus) { unsigned long flags; /* ok.. 
some InterWave specific initialization */ spin_lock_irqsave(&gus->reg_lock, flags); snd_gf1_write8(gus, SNDRV_GF1_GB_SOUND_BLASTER_CONTROL, 0x00); snd_gf1_write8(gus, SNDRV_GF1_GB_COMPATIBILITY, 0x1f); snd_gf1_write8(gus, SNDRV_GF1_GB_DECODE_CONTROL, 0x49); snd_gf1_write8(gus, SNDRV_GF1_GB_VERSION_NUMBER, 0x11); snd_gf1_write8(gus, SNDRV_GF1_GB_MPU401_CONTROL_A, 0x00); snd_gf1_write8(gus, SNDRV_GF1_GB_MPU401_CONTROL_B, 0x30); snd_gf1_write8(gus, SNDRV_GF1_GB_EMULATION_IRQ, 0x00); spin_unlock_irqrestore(&gus->reg_lock, flags); gus->equal_irq = 1; gus->codec_flag = 1; gus->interwave = 1; gus->max_flag = 1; gus->joystick_dac = joystick_dac[dev]; } static struct snd_kcontrol_new snd_interwave_controls[] = { WSS_DOUBLE("Master Playback Switch", 0, CS4231_LINE_LEFT_OUTPUT, CS4231_LINE_RIGHT_OUTPUT, 7, 7, 1, 1), WSS_DOUBLE("Master Playback Volume", 0, CS4231_LINE_LEFT_OUTPUT, CS4231_LINE_RIGHT_OUTPUT, 0, 0, 31, 1), WSS_DOUBLE("Mic Playback Switch", 0, CS4231_LEFT_MIC_INPUT, CS4231_RIGHT_MIC_INPUT, 7, 7, 1, 1), WSS_DOUBLE("Mic Playback Volume", 0, CS4231_LEFT_MIC_INPUT, CS4231_RIGHT_MIC_INPUT, 0, 0, 31, 1) }; static int __devinit snd_interwave_mixer(struct snd_wss *chip) { struct snd_card *card = chip->card; struct snd_ctl_elem_id id1, id2; unsigned int idx; int err; memset(&id1, 0, sizeof(id1)); memset(&id2, 0, sizeof(id2)); id1.iface = id2.iface = SNDRV_CTL_ELEM_IFACE_MIXER; #if 0 /* remove mono microphone controls */ strcpy(id1.name, "Mic Playback Switch"); if ((err = snd_ctl_remove_id(card, &id1)) < 0) return err; strcpy(id1.name, "Mic Playback Volume"); if ((err = snd_ctl_remove_id(card, &id1)) < 0) return err; #endif /* add new master and mic controls */ for (idx = 0; idx < ARRAY_SIZE(snd_interwave_controls); idx++) if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_interwave_controls[idx], chip))) < 0) return err; snd_wss_out(chip, CS4231_LINE_LEFT_OUTPUT, 0x9f); snd_wss_out(chip, CS4231_LINE_RIGHT_OUTPUT, 0x9f); snd_wss_out(chip, CS4231_LEFT_MIC_INPUT, 0x9f); 
snd_wss_out(chip, CS4231_RIGHT_MIC_INPUT, 0x9f); /* reassign AUXA to SYNTHESIZER */ strcpy(id1.name, "Aux Playback Switch"); strcpy(id2.name, "Synth Playback Switch"); if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) return err; strcpy(id1.name, "Aux Playback Volume"); strcpy(id2.name, "Synth Playback Volume"); if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) return err; /* reassign AUXB to CD */ strcpy(id1.name, "Aux Playback Switch"); id1.index = 1; strcpy(id2.name, "CD Playback Switch"); if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) return err; strcpy(id1.name, "Aux Playback Volume"); strcpy(id2.name, "CD Playback Volume"); if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) return err; return 0; } #ifdef CONFIG_PNP static int __devinit snd_interwave_pnp(int dev, struct snd_interwave *iwcard, struct pnp_card_link *card, const struct pnp_card_device_id *id) { struct pnp_dev *pdev; int err; iwcard->dev = pnp_request_card_device(card, id->devs[0].id, NULL); if (iwcard->dev == NULL) return -EBUSY; #ifdef SNDRV_STB iwcard->devtc = pnp_request_card_device(card, id->devs[1].id, NULL); if (iwcard->devtc == NULL) return -EBUSY; #endif /* Synth & Codec initialization */ pdev = iwcard->dev; err = pnp_activate_dev(pdev); if (err < 0) { snd_printk(KERN_ERR "InterWave PnP configure failure (out of resources?)\n"); return err; } if (pnp_port_start(pdev, 0) + 0x100 != pnp_port_start(pdev, 1) || pnp_port_start(pdev, 0) + 0x10c != pnp_port_start(pdev, 2)) { snd_printk(KERN_ERR "PnP configure failure (wrong ports)\n"); return -ENOENT; } port[dev] = pnp_port_start(pdev, 0); dma1[dev] = pnp_dma(pdev, 0); if (dma2[dev] >= 0) dma2[dev] = pnp_dma(pdev, 1); irq[dev] = pnp_irq(pdev, 0); snd_printdd("isapnp IW: sb port=0x%llx, gf1 port=0x%llx, codec port=0x%llx\n", (unsigned long long)pnp_port_start(pdev, 0), (unsigned long long)pnp_port_start(pdev, 1), (unsigned long long)pnp_port_start(pdev, 2)); snd_printdd("isapnp IW: dma1=%i, dma2=%i, irq=%i\n", dma1[dev], dma2[dev], 
irq[dev]); #ifdef SNDRV_STB /* Tone Control initialization */ pdev = iwcard->devtc; err = pnp_activate_dev(pdev); if (err < 0) { snd_printk(KERN_ERR "InterWave ToneControl PnP configure failure (out of resources?)\n"); return err; } port_tc[dev] = pnp_port_start(pdev, 0); snd_printdd("isapnp IW: tone control port=0x%lx\n", port_tc[dev]); #endif return 0; } #endif /* CONFIG_PNP */ static void snd_interwave_free(struct snd_card *card) { struct snd_interwave *iwcard = card->private_data; if (iwcard == NULL) return; #ifdef SNDRV_STB release_and_free_resource(iwcard->i2c_res); #endif if (iwcard->irq >= 0) free_irq(iwcard->irq, (void *)iwcard); } static int snd_interwave_card_new(int dev, struct snd_card **cardp) { struct snd_card *card; struct snd_interwave *iwcard; int err; err = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct snd_interwave), &card); if (err < 0) return err; iwcard = card->private_data; iwcard->card = card; iwcard->irq = -1; card->private_free = snd_interwave_free; *cardp = card; return 0; } static int __devinit snd_interwave_probe(struct snd_card *card, int dev) { int xirq, xdma1, xdma2; struct snd_interwave *iwcard = card->private_data; struct snd_wss *wss; struct snd_gus_card *gus; #ifdef SNDRV_STB struct snd_i2c_bus *i2c_bus; #endif struct snd_pcm *pcm; char *str; int err; xirq = irq[dev]; xdma1 = dma1[dev]; xdma2 = dma2[dev]; if ((err = snd_gus_create(card, port[dev], -xirq, xdma1, xdma2, 0, 32, pcm_channels[dev], effect[dev], &gus)) < 0) return err; if ((err = snd_interwave_detect(iwcard, gus, dev #ifdef SNDRV_STB , &i2c_bus #endif )) < 0) return err; iwcard->gus_status_reg = gus->gf1.reg_irqstat; iwcard->pcm_status_reg = gus->gf1.port + 0x10c + 2; snd_interwave_init(dev, gus); snd_interwave_detect_memory(gus); if ((err = snd_gus_initialize(gus)) < 0) return err; if (request_irq(xirq, snd_interwave_interrupt, 0, "InterWave", iwcard)) { snd_printk(KERN_ERR PFX "unable to grab IRQ %d\n", xirq); return -EBUSY; } iwcard->irq = xirq; 
err = snd_wss_create(card, gus->gf1.port + 0x10c, -1, xirq, xdma2 < 0 ? xdma1 : xdma2, xdma1, WSS_HW_INTERWAVE, WSS_HWSHARE_IRQ | WSS_HWSHARE_DMA1 | WSS_HWSHARE_DMA2, &wss); if (err < 0) return err; err = snd_wss_pcm(wss, 0, &pcm); if (err < 0) return err; sprintf(pcm->name + strlen(pcm->name), " rev %c", gus->revision + 'A'); strcat(pcm->name, " (codec)"); err = snd_wss_timer(wss, 2, NULL); if (err < 0) return err; err = snd_wss_mixer(wss); if (err < 0) return err; if (pcm_channels[dev] > 0) { err = snd_gf1_pcm_new(gus, 1, 1, NULL); if (err < 0) return err; } err = snd_interwave_mixer(wss); if (err < 0) return err; #ifdef SNDRV_STB { struct snd_ctl_elem_id id1, id2; memset(&id1, 0, sizeof(id1)); memset(&id2, 0, sizeof(id2)); id1.iface = id2.iface = SNDRV_CTL_ELEM_IFACE_MIXER; strcpy(id1.name, "Master Playback Switch"); strcpy(id2.name, id1.name); id2.index = 1; if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) return err; strcpy(id1.name, "Master Playback Volume"); strcpy(id2.name, id1.name); if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) return err; if ((err = snd_tea6330t_update_mixer(card, i2c_bus, 0, 1)) < 0) return err; } #endif gus->uart_enable = midi[dev]; if ((err = snd_gf1_rawmidi_new(gus, 0, NULL)) < 0) return err; #ifndef SNDRV_STB str = "AMD InterWave"; if (gus->gf1.rom_banks == 1 && gus->gf1.rom_present == 8) str = "Dynasonic 3-D"; #else str = "InterWave STB"; #endif strcpy(card->driver, str); strcpy(card->shortname, str); sprintf(card->longname, "%s at 0x%lx, irq %i, dma %d", str, gus->gf1.port, xirq, xdma1); if (xdma2 >= 0) sprintf(card->longname + strlen(card->longname), "&%d", xdma2); err = snd_card_register(card); if (err < 0) return err; iwcard->wss = wss; iwcard->gus = gus; return 0; } static int __devinit snd_interwave_isa_probe1(int dev, struct device *devptr) { struct snd_card *card; int err; err = snd_interwave_card_new(dev, &card); if (err < 0) return err; snd_card_set_dev(card, devptr); if ((err = snd_interwave_probe(card, dev)) 
< 0) { snd_card_free(card); return err; } dev_set_drvdata(devptr, card); return 0; } static int __devinit snd_interwave_isa_match(struct device *pdev, unsigned int dev) { if (!enable[dev]) return 0; #ifdef CONFIG_PNP if (isapnp[dev]) return 0; #endif return 1; } static int __devinit snd_interwave_isa_probe(struct device *pdev, unsigned int dev) { int err; static int possible_irqs[] = {5, 11, 12, 9, 7, 15, 3, -1}; static int possible_dmas[] = {0, 1, 3, 5, 6, 7, -1}; if (irq[dev] == SNDRV_AUTO_IRQ) { if ((irq[dev] = snd_legacy_find_free_irq(possible_irqs)) < 0) { snd_printk(KERN_ERR PFX "unable to find a free IRQ\n"); return -EBUSY; } } if (dma1[dev] == SNDRV_AUTO_DMA) { if ((dma1[dev] = snd_legacy_find_free_dma(possible_dmas)) < 0) { snd_printk(KERN_ERR PFX "unable to find a free DMA1\n"); return -EBUSY; } } if (dma2[dev] == SNDRV_AUTO_DMA) { if ((dma2[dev] = snd_legacy_find_free_dma(possible_dmas)) < 0) { snd_printk(KERN_ERR PFX "unable to find a free DMA2\n"); return -EBUSY; } } if (port[dev] != SNDRV_AUTO_PORT) return snd_interwave_isa_probe1(dev, pdev); else { static long possible_ports[] = {0x210, 0x220, 0x230, 0x240, 0x250, 0x260}; int i; for (i = 0; i < ARRAY_SIZE(possible_ports); i++) { port[dev] = possible_ports[i]; err = snd_interwave_isa_probe1(dev, pdev); if (! 
err) return 0; } return err; } } static int __devexit snd_interwave_isa_remove(struct device *devptr, unsigned int dev) { snd_card_free(dev_get_drvdata(devptr)); dev_set_drvdata(devptr, NULL); return 0; } static struct isa_driver snd_interwave_driver = { .match = snd_interwave_isa_match, .probe = snd_interwave_isa_probe, .remove = __devexit_p(snd_interwave_isa_remove), /* FIXME: suspend,resume */ .driver = { .name = INTERWAVE_DRIVER }, }; #ifdef CONFIG_PNP static int __devinit snd_interwave_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { static int dev; struct snd_card *card; int res; for ( ; dev < SNDRV_CARDS; dev++) { if (enable[dev] && isapnp[dev]) break; } if (dev >= SNDRV_CARDS) return -ENODEV; res = snd_interwave_card_new(dev, &card); if (res < 0) return res; if ((res = snd_interwave_pnp(dev, card->private_data, pcard, pid)) < 0) { snd_card_free(card); return res; } snd_card_set_dev(card, &pcard->card->dev); if ((res = snd_interwave_probe(card, dev)) < 0) { snd_card_free(card); return res; } pnp_set_card_drvdata(pcard, card); dev++; return 0; } static void __devexit snd_interwave_pnp_remove(struct pnp_card_link * pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } static struct pnp_card_driver interwave_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = INTERWAVE_PNP_DRIVER, .id_table = snd_interwave_pnpids, .probe = snd_interwave_pnp_detect, .remove = __devexit_p(snd_interwave_pnp_remove), /* FIXME: suspend,resume */ }; #endif /* CONFIG_PNP */ static int __init alsa_card_interwave_init(void) { int err; err = isa_register_driver(&snd_interwave_driver, SNDRV_CARDS); #ifdef CONFIG_PNP if (!err) isa_registered = 1; err = pnp_register_card_driver(&interwave_pnpc_driver); if (!err) pnp_registered = 1; if (isa_registered) err = 0; #endif return err; } static void __exit alsa_card_interwave_exit(void) { #ifdef CONFIG_PNP if (pnp_registered) pnp_unregister_card_driver(&interwave_pnpc_driver); 
if (isa_registered) #endif isa_unregister_driver(&snd_interwave_driver); } module_init(alsa_card_interwave_init) module_exit(alsa_card_interwave_exit)
gpl-2.0
fentensoft/kernel_xt701
lib/syscall.c
5123
2475
#include <linux/ptrace.h> #include <linux/sched.h> #include <linux/module.h> #include <asm/syscall.h> static int collect_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc) { struct pt_regs *regs = task_pt_regs(target); if (unlikely(!regs)) return -EAGAIN; *sp = user_stack_pointer(regs); *pc = instruction_pointer(regs); *callno = syscall_get_nr(target, regs); if (*callno != -1L && maxargs > 0) syscall_get_arguments(target, regs, 0, maxargs, args); return 0; } /** * task_current_syscall - Discover what a blocked task is doing. * @target: thread to examine * @callno: filled with system call number or -1 * @args: filled with @maxargs system call arguments * @maxargs: number of elements in @args to fill * @sp: filled with user stack pointer * @pc: filled with user PC * * If @target is blocked in a system call, returns zero with *@callno * set to the the call's number and @args filled in with its arguments. * Registers not used for system call arguments may not be available and * it is not kosher to use &struct user_regset calls while the system * call is still in progress. Note we may get this result if @target * has finished its system call but not yet returned to user mode, such * as when it's stopped for signal handling or syscall exit tracing. * * If @target is blocked in the kernel during a fault or exception, * returns zero with *@callno set to -1 and does not fill in @args. * If so, it's now safe to examine @target using &struct user_regset * get() calls as long as we're sure @target won't return to user mode. * * Returns -%EAGAIN if @target does not remain blocked. * * Returns -%EINVAL if @maxargs is too large (maximum is six). 
*/ int task_current_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc) { long state; unsigned long ncsw; if (unlikely(maxargs > 6)) return -EINVAL; if (target == current) return collect_syscall(target, callno, args, maxargs, sp, pc); state = target->state; if (unlikely(!state)) return -EAGAIN; ncsw = wait_task_inactive(target, state); if (unlikely(!ncsw) || unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) || unlikely(wait_task_inactive(target, state) != ncsw)) return -EAGAIN; return 0; } EXPORT_SYMBOL_GPL(task_current_syscall);
gpl-2.0
PsychoGame/android_kernel_lge_msm8974-caf
arch/blackfin/mach-common/dpmc.c
7427
3873
/*
 * Copyright 2008 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

/*
 * Blackfin dynamic power management (DPMC) driver: scales the core
 * voltage (VLEV field of VR_CTL) in lockstep with cpufreq frequency
 * transitions, using a board-supplied CCLK/VLEV pairing table.
 */

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/cpufreq.h>

#include <asm/delay.h>
#include <asm/dpmc.h>

#define DRIVER_NAME "bfin dpmc"

/* Board data bound at probe time; NULL until then and after remove. */
struct bfin_dpmc_platform_data *pdata;

/**
 * bfin_set_vlev - Update VLEV field in VR_CTL Reg.
 * Avoid BYPASS sequence
 */
static void bfin_set_vlev(unsigned int vlev)
{
	unsigned pll_lcnt;

	/*
	 * Temporarily set PLL_LOCKCNT to 1 around the VR_CTL update
	 * (presumably to minimize the relock delay the write triggers
	 * -- TODO confirm against the HRM), then restore the old count.
	 */
	pll_lcnt = bfin_read_PLL_LOCKCNT();

	bfin_write_PLL_LOCKCNT(1);
	bfin_write_VR_CTL((bfin_read_VR_CTL() & ~VLEV) | vlev);
	bfin_write_PLL_LOCKCNT(pll_lcnt);
}

/**
 * bfin_get_vlev - Get CPU specific VLEV from platform device data
 */
static unsigned int bfin_get_vlev(unsigned int freq)
{
	int i;

	if (!pdata)
		goto err_out;

	/*
	 * Each tuple_tab entry packs VLEV in the high 16 bits and the
	 * matching max frequency in the low 16 bits; scale @freq down
	 * by the same factor before comparing.
	 */
	freq >>= 16;

	for (i = 0; i < pdata->tabsize; i++)
		if (freq <= (pdata->tuple_tab[i] & 0xFFFF))
			return pdata->tuple_tab[i] >> 16;

err_out:
	/* No match: warn and fall back to a safe (high) voltage level. */
	printk(KERN_WARNING "DPMC: No suitable CCLK VDDINT voltage pair found\n");
	return VLEV_120;
}

#ifdef CONFIG_CPU_FREQ
# ifdef CONFIG_SMP
/* Park the calling CPU in IDLE until woken by an IRQ_SUPPLE_0 IPI. */
static void bfin_idle_this_cpu(void *info)
{
	unsigned long flags = 0;
	unsigned long iwr0, iwr1, iwr2;
	unsigned int cpu = smp_processor_id();

	local_irq_save_hw(flags);
	bfin_iwr_set_sup0(&iwr0, &iwr1, &iwr2);
	platform_clear_ipi(cpu, IRQ_SUPPLE_0);

	SSYNC();
	asm("IDLE;");
	bfin_iwr_restore(iwr0, iwr1, iwr2);

	local_irq_restore_hw(flags);
}

/* Idle all other CPUs so the voltage change happens with cores quiescent. */
static void bfin_idle_cpu(void)
{
	smp_call_function(bfin_idle_this_cpu, NULL, 0);
}

/* Kick every other online CPU out of IDLE via an IPI. */
static void bfin_wakeup_cpu(void)
{
	unsigned int cpu;
	unsigned int this_cpu = smp_processor_id();
	cpumask_t mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, &mask);
	for_each_cpu(cpu, &mask)
		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
}
# else
static void bfin_idle_cpu(void) {}
static void bfin_wakeup_cpu(void) {}
# endif

/*
 * cpufreq transition hook: raise VDDINT *before* a frequency increase
 * (PRECHANGE, old < new) and lower it *after* a decrease (POSTCHANGE,
 * old > new), so the core never runs fast at too low a voltage.
 */
static int vreg_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;

	if (freq->cpu != CPUFREQ_CPU)
		return 0;

	if (val == CPUFREQ_PRECHANGE && freq->old < freq->new) {
		bfin_idle_cpu();
		bfin_set_vlev(bfin_get_vlev(freq->new));
		udelay(pdata->vr_settling_time); /* Wait until Voltage settled */
		bfin_wakeup_cpu();
	} else if (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) {
		bfin_idle_cpu();
		bfin_set_vlev(bfin_get_vlev(freq->new));
		bfin_wakeup_cpu();
	}

	return 0;
}

static struct notifier_block vreg_cpufreq_notifier_block = {
	.notifier_call	= vreg_cpufreq_notifier
};
#endif /* CONFIG_CPU_FREQ */

/**
 * bfin_dpmc_probe -
 *
 * Stash the platform data and register for cpufreq transitions.
 */
static int __devinit bfin_dpmc_probe(struct platform_device *pdev)
{
	if (pdev->dev.platform_data)
		pdata = pdev->dev.platform_data;
	else
		return -EINVAL;

	return cpufreq_register_notifier(&vreg_cpufreq_notifier_block,
					 CPUFREQ_TRANSITION_NOTIFIER);
}

/**
 * bfin_dpmc_remove -
 */
static int __devexit bfin_dpmc_remove(struct platform_device *pdev)
{
	pdata = NULL;
	return cpufreq_unregister_notifier(&vreg_cpufreq_notifier_block,
					   CPUFREQ_TRANSITION_NOTIFIER);
}

struct platform_driver bfin_dpmc_device_driver = {
	.probe   = bfin_dpmc_probe,
	.remove  = __devexit_p(bfin_dpmc_remove),
	.driver  = {
		.name = DRIVER_NAME,
	}
};

/**
 * bfin_dpmc_init - Init driver
 */
static int __init bfin_dpmc_init(void)
{
	return platform_driver_register(&bfin_dpmc_device_driver);
}
module_init(bfin_dpmc_init);

/**
 * bfin_dpmc_exit - break down driver
 */
static void __exit bfin_dpmc_exit(void)
{
	platform_driver_unregister(&bfin_dpmc_device_driver);
}
module_exit(bfin_dpmc_exit);

MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("cpu power management driver for Blackfin");
MODULE_LICENSE("GPL");
gpl-2.0
zarboz/Monarudo_M7_port
arch/mips/pmc-sierra/msp71xx/msp_usb.c
7683
7447
/*
 * The setup file for USB related hardware on PMC-Sierra MSP processors.
 *
 * Copyright 2006 PMC-Sierra, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#if defined(CONFIG_USB_EHCI_HCD) || defined(CONFIG_USB_GADGET)

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

#include <asm/mipsregs.h>

#include <msp_regs.h>
#include <msp_int.h>
#include <msp_prom.h>
#include <msp_usb.h>

#if defined(CONFIG_USB_EHCI_HCD)
static struct resource msp_usbhost0_resources[] = {
	[0] = { /* EHCI-HS operational and capabilities registers */
		.start	= MSP_USB0_HS_START,
		.end	= MSP_USB0_HS_END,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= MSP_INT_USB,
		.end	= MSP_INT_USB,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = { /* MSBus-to-AMBA bridge register space */
		.start	= MSP_USB0_MAB_START,
		.end	= MSP_USB0_MAB_END,
		.flags	= IORESOURCE_MEM,
	},
	[3] = { /* Identification and general hardware parameters */
		.start	= MSP_USB0_ID_START,
		.end	= MSP_USB0_ID_END,
		.flags	= IORESOURCE_MEM,
	},
};

static u64 msp_usbhost0_dma_mask = 0xffffffffUL;

static struct mspusb_device msp_usbhost0_device = {
	.dev	= {
		.name	= "pmcmsp-ehci",
		.id	= 0,
		.dev	= {
			.dma_mask = &msp_usbhost0_dma_mask,
			.coherent_dma_mask = 0xffffffffUL,
		},
		.num_resources	= ARRAY_SIZE(msp_usbhost0_resources),
		.resource	= msp_usbhost0_resources,
	},
};

/* MSP7140/MSP82XX has two USB2 hosts. */
#ifdef CONFIG_MSP_HAS_DUAL_USB
static u64 msp_usbhost1_dma_mask = 0xffffffffUL;

static struct resource msp_usbhost1_resources[] = {
	[0] = { /* EHCI-HS operational and capabilities registers */
		.start	= MSP_USB1_HS_START,
		.end	= MSP_USB1_HS_END,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= MSP_INT_USB,
		.end	= MSP_INT_USB,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = { /* MSBus-to-AMBA bridge register space */
		.start	= MSP_USB1_MAB_START,
		.end	= MSP_USB1_MAB_END,
		.flags	= IORESOURCE_MEM,
	},
	[3] = { /* Identification and general hardware parameters */
		.start	= MSP_USB1_ID_START,
		.end	= MSP_USB1_ID_END,
		.flags	= IORESOURCE_MEM,
	},
};

static struct mspusb_device msp_usbhost1_device = {
	.dev	= {
		.name	= "pmcmsp-ehci",
		.id	= 1,
		.dev	= {
			.dma_mask = &msp_usbhost1_dma_mask,
			.coherent_dma_mask = 0xffffffffUL,
		},
		.num_resources	= ARRAY_SIZE(msp_usbhost1_resources),
		.resource	= msp_usbhost1_resources,
	},
};
#endif /* CONFIG_MSP_HAS_DUAL_USB */
#endif /* CONFIG_USB_EHCI_HCD */

#if defined(CONFIG_USB_GADGET)
static struct resource msp_usbdev0_resources[] = {
	[0] = { /* EHCI-HS operational and capabilities registers */
		.start	= MSP_USB0_HS_START,
		.end	= MSP_USB0_HS_END,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= MSP_INT_USB,
		.end	= MSP_INT_USB,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = { /* MSBus-to-AMBA bridge register space */
		.start	= MSP_USB0_MAB_START,
		.end	= MSP_USB0_MAB_END,
		.flags	= IORESOURCE_MEM,
	},
	[3] = { /* Identification and general hardware parameters */
		.start	= MSP_USB0_ID_START,
		.end	= MSP_USB0_ID_END,
		.flags	= IORESOURCE_MEM,
	},
};

static u64 msp_usbdev_dma_mask = 0xffffffffUL;

/* This may need to be converted to a mspusb_device, too. */
static struct mspusb_device msp_usbdev0_device = {
	.dev	= {
		.name	= "msp71xx_udc",
		.id	= 0,
		.dev	= {
			.dma_mask = &msp_usbdev_dma_mask,
			.coherent_dma_mask = 0xffffffffUL,
		},
		.num_resources	= ARRAY_SIZE(msp_usbdev0_resources),
		.resource	= msp_usbdev0_resources,
	},
};

#ifdef CONFIG_MSP_HAS_DUAL_USB
static struct resource msp_usbdev1_resources[] = {
	[0] = { /* EHCI-HS operational and capabilities registers */
		.start	= MSP_USB1_HS_START,
		.end	= MSP_USB1_HS_END,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= MSP_INT_USB,
		.end	= MSP_INT_USB,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = { /* MSBus-to-AMBA bridge register space */
		.start	= MSP_USB1_MAB_START,
		.end	= MSP_USB1_MAB_END,
		.flags	= IORESOURCE_MEM,
	},
	[3] = { /* Identification and general hardware parameters */
		.start	= MSP_USB1_ID_START,
		.end	= MSP_USB1_ID_END,
		.flags	= IORESOURCE_MEM,
	},
};

/* This may need to be converted to a mspusb_device, too. */
static struct mspusb_device msp_usbdev1_device = {
	.dev	= {
		.name	= "msp71xx_udc",
		/*
		 * Was 0 (copy-paste from usbdev0), which collided with
		 * msp_usbdev0_device -- platform devices must have a
		 * unique name+id pair.  The host pair uses ids 0/1.
		 */
		.id	= 1,
		.dev	= {
			.dma_mask = &msp_usbdev_dma_mask,
			.coherent_dma_mask = 0xffffffffUL,
		},
		.num_resources	= ARRAY_SIZE(msp_usbdev1_resources),
		.resource	= msp_usbdev1_resources,
	},
};
#endif /* CONFIG_MSP_HAS_DUAL_USB */
#endif /* CONFIG_USB_GADGET */

/*
 * msp_usb_setup - register the on-chip USB controller(s) as either EHCI
 * hosts or UDC gadget devices, according to the PMON "usbmode"
 * environment variable (default: host).
 *
 * Returns 0 or the error from platform_add_devices().
 */
static int __init msp_usb_setup(void)
{
	char		*strp;
	char		envstr[32];
	struct platform_device *msp_devs[NUM_USB_DEVS];
	unsigned int val;
	int		num_devs = 0;	/* slots of msp_devs[] actually filled */

	/* construct environment name usbmode */
	/* set usbmode <host/device> as pmon environment var */
	/*
	 * Could this perhaps be integrated into the "features" env var?
	 * Use the features key "U", and follow with "H" for host-mode,
	 * "D" for device-mode.  If it works for Ethernet, why not USB...
	 *	-- hammtrev, 2007/03/22
	 */
	snprintf((char *)&envstr[0], sizeof(envstr), "usbmode");

	/* set default host mode */
	val = 1;

	/* get environment string */
	strp = prom_getenv((char *)&envstr[0]);
	if (strp) {
		/* compare string */
		if (!strcmp(strp, "device"))
			val = 0;
	}

	if (val) {
#if defined(CONFIG_USB_EHCI_HCD)
		msp_devs[num_devs++] = &msp_usbhost0_device.dev;
		ppfinit("platform add USB HOST done %s.\n",
			    msp_devs[0]->name);
#ifdef CONFIG_MSP_HAS_DUAL_USB
		msp_devs[num_devs++] = &msp_usbhost1_device.dev;
		ppfinit("platform add USB HOST done %s.\n",
			    msp_devs[1]->name);
#endif
#else
		ppfinit("%s: ehci_hcd not supported\n", __FILE__);
#endif /* CONFIG_USB_EHCI_HCD */
	} else {
#if defined(CONFIG_USB_GADGET)
		/* get device mode structure */
		msp_devs[num_devs++] = &msp_usbdev0_device.dev;
		ppfinit("platform add USB DEVICE done %s.\n",
			    msp_devs[0]->name);
#ifdef CONFIG_MSP_HAS_DUAL_USB
		msp_devs[num_devs++] = &msp_usbdev1_device.dev;
		ppfinit("platform add USB DEVICE done %s.\n",
			    msp_devs[1]->name);
#endif
#else
		ppfinit("%s: usb_gadget not supported\n", __FILE__);
#endif /* CONFIG_USB_GADGET */
	}

	/*
	 * Register only the slots filled above.  The old code passed
	 * ARRAY_SIZE(msp_devs), which handed uninitialized stack
	 * pointers to the platform core whenever a mode's driver
	 * support was compiled out (the #else branches fill nothing).
	 */
	if (num_devs)
		return platform_add_devices(msp_devs, num_devs);

	return 0;
}

subsys_initcall(msp_usb_setup);
#endif /* CONFIG_USB_EHCI_HCD || CONFIG_USB_GADGET */
gpl-2.0
sub77/kernel_msm
fs/afs/misc.c
13315
2353
/* miscellaneous bits
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <rxrpc/packet.h>
#include "internal.h"
#include "afs_fs.h"

/*
 * convert an AFS abort code to a Linux error number
 *
 * Unrecognised codes map to -EREMOTEIO ("the server aborted for a
 * reason we can't translate").
 */
int afs_abort_to_error(u32 abort_code)
{
	switch (abort_code) {
	/* Literal Unix errno values sent over the wire. */
	case 13:		return -EACCES;
	case 27:		return -EFBIG;
	case 30:		return -EROFS;
	/* Volume-server / volume-location abort codes. */
	case VSALVAGE:		return -EIO;
	case VNOVNODE:		return -ENOENT;
	case VNOVOL:		return -ENOMEDIUM;
	case VVOLEXISTS:	return -EEXIST;
	case VNOSERVICE:	return -EIO;
	case VOFFLINE:		return -ENOENT;
	case VONLINE:		return -EEXIST;
	case VDISKFULL:		return -ENOSPC;
	case VOVERQUOTA:	return -EDQUOT;
	case VBUSY:		return -EBUSY;
	case VMOVED:		return -ENXIO;
	/*
	 * 0x2f6df.. codes mirror Unix errnos (presumably the AFS
	 * "unified error table" namespace -- NOTE(review): confirm
	 * against the AFS protocol documentation).
	 */
	case 0x2f6df0a:		return -EWOULDBLOCK;
	case 0x2f6df0c:		return -EACCES;
	case 0x2f6df0f:		return -EBUSY;
	case 0x2f6df10:		return -EEXIST;
	case 0x2f6df11:		return -EXDEV;
	case 0x2f6df13:		return -ENOTDIR;
	case 0x2f6df14:		return -EISDIR;
	case 0x2f6df15:		return -EINVAL;
	case 0x2f6df1a:		return -EFBIG;
	case 0x2f6df1b:		return -ENOSPC;
	case 0x2f6df1d:		return -EROFS;
	case 0x2f6df1e:		return -EMLINK;
	case 0x2f6df20:		return -EDOM;
	case 0x2f6df21:		return -ERANGE;
	case 0x2f6df22:		return -EDEADLK;
	case 0x2f6df23:		return -ENAMETOOLONG;
	case 0x2f6df24:		return -ENOLCK;
	case 0x2f6df26:		return -ENOTEMPTY;
	case 0x2f6df78:		return -EDQUOT;
	/* rxkad security-layer abort codes. */
	case RXKADINCONSISTENCY: return -EPROTO;
	case RXKADPACKETSHORT:	return -EPROTO;
	case RXKADLEVELFAIL:	return -EKEYREJECTED;
	case RXKADTICKETLEN:	return -EKEYREJECTED;
	case RXKADOUTOFSEQUENCE: return -EPROTO;
	case RXKADNOAUTH:	return -EKEYREJECTED;
	case RXKADBADKEY:	return -EKEYREJECTED;
	case RXKADBADTICKET:	return -EKEYREJECTED;
	case RXKADUNKNOWNKEY:	return -EKEYREJECTED;
	case RXKADEXPIRED:	return -EKEYEXPIRED;
	case RXKADSEALEDINCON:	return -EKEYREJECTED;
	case RXKADDATALEN:	return -EKEYREJECTED;
	case RXKADILLEGALLEVEL:	return -EKEYREJECTED;
	default:		return -EREMOTEIO;
	}
}
gpl-2.0
nychitman1/android_kernel_samsung_manta
fs/afs/misc.c
13315
2353
/* miscellaneous bits
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <rxrpc/packet.h>
#include "internal.h"
#include "afs_fs.h"

/*
 * convert an AFS abort code to a Linux error number
 *
 * Unrecognised codes map to -EREMOTEIO ("the server aborted for a
 * reason we can't translate").
 */
int afs_abort_to_error(u32 abort_code)
{
	switch (abort_code) {
	/* Literal Unix errno values sent over the wire. */
	case 13:		return -EACCES;
	case 27:		return -EFBIG;
	case 30:		return -EROFS;
	/* Volume-server / volume-location abort codes. */
	case VSALVAGE:		return -EIO;
	case VNOVNODE:		return -ENOENT;
	case VNOVOL:		return -ENOMEDIUM;
	case VVOLEXISTS:	return -EEXIST;
	case VNOSERVICE:	return -EIO;
	case VOFFLINE:		return -ENOENT;
	case VONLINE:		return -EEXIST;
	case VDISKFULL:		return -ENOSPC;
	case VOVERQUOTA:	return -EDQUOT;
	case VBUSY:		return -EBUSY;
	case VMOVED:		return -ENXIO;
	/*
	 * 0x2f6df.. codes mirror Unix errnos (presumably the AFS
	 * "unified error table" namespace -- NOTE(review): confirm
	 * against the AFS protocol documentation).
	 */
	case 0x2f6df0a:		return -EWOULDBLOCK;
	case 0x2f6df0c:		return -EACCES;
	case 0x2f6df0f:		return -EBUSY;
	case 0x2f6df10:		return -EEXIST;
	case 0x2f6df11:		return -EXDEV;
	case 0x2f6df13:		return -ENOTDIR;
	case 0x2f6df14:		return -EISDIR;
	case 0x2f6df15:		return -EINVAL;
	case 0x2f6df1a:		return -EFBIG;
	case 0x2f6df1b:		return -ENOSPC;
	case 0x2f6df1d:		return -EROFS;
	case 0x2f6df1e:		return -EMLINK;
	case 0x2f6df20:		return -EDOM;
	case 0x2f6df21:		return -ERANGE;
	case 0x2f6df22:		return -EDEADLK;
	case 0x2f6df23:		return -ENAMETOOLONG;
	case 0x2f6df24:		return -ENOLCK;
	case 0x2f6df26:		return -ENOTEMPTY;
	case 0x2f6df78:		return -EDQUOT;
	/* rxkad security-layer abort codes. */
	case RXKADINCONSISTENCY: return -EPROTO;
	case RXKADPACKETSHORT:	return -EPROTO;
	case RXKADLEVELFAIL:	return -EKEYREJECTED;
	case RXKADTICKETLEN:	return -EKEYREJECTED;
	case RXKADOUTOFSEQUENCE: return -EPROTO;
	case RXKADNOAUTH:	return -EKEYREJECTED;
	case RXKADBADKEY:	return -EKEYREJECTED;
	case RXKADBADTICKET:	return -EKEYREJECTED;
	case RXKADUNKNOWNKEY:	return -EKEYREJECTED;
	case RXKADEXPIRED:	return -EKEYEXPIRED;
	case RXKADSEALEDINCON:	return -EKEYREJECTED;
	case RXKADDATALEN:	return -EKEYREJECTED;
	case RXKADILLEGALLEVEL:	return -EKEYREJECTED;
	default:		return -EREMOTEIO;
	}
}
gpl-2.0
przemo27/mid712-kernel
fs/afs/misc.c
13315
2353
/* miscellaneous bits
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <rxrpc/packet.h>
#include "internal.h"
#include "afs_fs.h"

/*
 * convert an AFS abort code to a Linux error number
 *
 * Unrecognised codes map to -EREMOTEIO ("the server aborted for a
 * reason we can't translate").
 */
int afs_abort_to_error(u32 abort_code)
{
	switch (abort_code) {
	/* Literal Unix errno values sent over the wire. */
	case 13:		return -EACCES;
	case 27:		return -EFBIG;
	case 30:		return -EROFS;
	/* Volume-server / volume-location abort codes. */
	case VSALVAGE:		return -EIO;
	case VNOVNODE:		return -ENOENT;
	case VNOVOL:		return -ENOMEDIUM;
	case VVOLEXISTS:	return -EEXIST;
	case VNOSERVICE:	return -EIO;
	case VOFFLINE:		return -ENOENT;
	case VONLINE:		return -EEXIST;
	case VDISKFULL:		return -ENOSPC;
	case VOVERQUOTA:	return -EDQUOT;
	case VBUSY:		return -EBUSY;
	case VMOVED:		return -ENXIO;
	/*
	 * 0x2f6df.. codes mirror Unix errnos (presumably the AFS
	 * "unified error table" namespace -- NOTE(review): confirm
	 * against the AFS protocol documentation).
	 */
	case 0x2f6df0a:		return -EWOULDBLOCK;
	case 0x2f6df0c:		return -EACCES;
	case 0x2f6df0f:		return -EBUSY;
	case 0x2f6df10:		return -EEXIST;
	case 0x2f6df11:		return -EXDEV;
	case 0x2f6df13:		return -ENOTDIR;
	case 0x2f6df14:		return -EISDIR;
	case 0x2f6df15:		return -EINVAL;
	case 0x2f6df1a:		return -EFBIG;
	case 0x2f6df1b:		return -ENOSPC;
	case 0x2f6df1d:		return -EROFS;
	case 0x2f6df1e:		return -EMLINK;
	case 0x2f6df20:		return -EDOM;
	case 0x2f6df21:		return -ERANGE;
	case 0x2f6df22:		return -EDEADLK;
	case 0x2f6df23:		return -ENAMETOOLONG;
	case 0x2f6df24:		return -ENOLCK;
	case 0x2f6df26:		return -ENOTEMPTY;
	case 0x2f6df78:		return -EDQUOT;
	/* rxkad security-layer abort codes. */
	case RXKADINCONSISTENCY: return -EPROTO;
	case RXKADPACKETSHORT:	return -EPROTO;
	case RXKADLEVELFAIL:	return -EKEYREJECTED;
	case RXKADTICKETLEN:	return -EKEYREJECTED;
	case RXKADOUTOFSEQUENCE: return -EPROTO;
	case RXKADNOAUTH:	return -EKEYREJECTED;
	case RXKADBADKEY:	return -EKEYREJECTED;
	case RXKADBADTICKET:	return -EKEYREJECTED;
	case RXKADUNKNOWNKEY:	return -EKEYREJECTED;
	case RXKADEXPIRED:	return -EKEYEXPIRED;
	case RXKADSEALEDINCON:	return -EKEYREJECTED;
	case RXKADDATALEN:	return -EKEYREJECTED;
	case RXKADILLEGALLEVEL:	return -EKEYREJECTED;
	default:		return -EREMOTEIO;
	}
}
gpl-2.0
goto456/linux-2.6.26
drivers/scsi/libsas/sas_scsi_host.c
4
29233
/* * Serial Attached SCSI (SAS) class SCSI Host glue. * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * */ #include <linux/kthread.h> #include <linux/firmware.h> #include <linux/ctype.h> #include "sas_internal.h" #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> #include <scsi/sas_ata.h> #include "../scsi_sas_internal.h" #include "../scsi_transport_api.h" #include "../scsi_priv.h" #include <linux/err.h> #include <linux/blkdev.h> #include <linux/freezer.h> #include <linux/scatterlist.h> #include <linux/libata.h> /* ---------- SCSI Host glue ---------- */ static void sas_scsi_task_done(struct sas_task *task) { struct task_status_struct *ts = &task->task_status; struct scsi_cmnd *sc = task->uldd_task; int hs = 0, stat = 0; if (unlikely(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { /* Aborted tasks will be completed by the error handler */ SAS_DPRINTK("task done but aborted\n"); return; } if (unlikely(!sc)) { SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); list_del_init(&task->list); 
sas_free_task(task); return; } if (ts->resp == SAS_TASK_UNDELIVERED) { /* transport error */ hs = DID_NO_CONNECT; } else { /* ts->resp == SAS_TASK_COMPLETE */ /* task delivered, what happened afterwards? */ switch (ts->stat) { case SAS_DEV_NO_RESPONSE: case SAS_INTERRUPTED: case SAS_PHY_DOWN: case SAS_NAK_R_ERR: case SAS_OPEN_TO: hs = DID_NO_CONNECT; break; case SAS_DATA_UNDERRUN: scsi_set_resid(sc, ts->residual); if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow) hs = DID_ERROR; break; case SAS_DATA_OVERRUN: hs = DID_ERROR; break; case SAS_QUEUE_FULL: hs = DID_SOFT_ERROR; /* retry */ break; case SAS_DEVICE_UNKNOWN: hs = DID_BAD_TARGET; break; case SAS_SG_ERR: hs = DID_PARITY; break; case SAS_OPEN_REJECT: if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY) hs = DID_SOFT_ERROR; /* retry */ else hs = DID_ERROR; break; case SAS_PROTO_RESPONSE: SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP " "task; please report this\n", task->dev->port->ha->sas_ha_name); break; case SAS_ABORTED_TASK: hs = DID_ABORT; break; case SAM_CHECK_COND: memcpy(sc->sense_buffer, ts->buf, min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size)); stat = SAM_CHECK_COND; break; default: stat = ts->stat; break; } } ASSIGN_SAS_TASK(sc, NULL); sc->result = (hs << 16) | stat; list_del_init(&task->list); sas_free_task(task); sc->scsi_done(sc); } static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd) { enum task_attribute ta = TASK_ATTR_SIMPLE; if (cmd->request && blk_rq_tagged(cmd->request)) { if (cmd->device->ordered_tags && (cmd->request->cmd_flags & REQ_HARDBARRIER)) ta = TASK_ATTR_ORDERED; } return ta; } static struct sas_task *sas_create_task(struct scsi_cmnd *cmd, struct domain_device *dev, gfp_t gfp_flags) { struct sas_task *task = sas_alloc_task(gfp_flags); struct scsi_lun lun; if (!task) return NULL; task->uldd_task = cmd; ASSIGN_SAS_TASK(cmd, task); task->dev = dev; task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */ task->ssp_task.retry_count = 1; 
int_to_scsilun(cmd->device->lun, &lun); memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8); task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd); memcpy(task->ssp_task.cdb, cmd->cmnd, 16); task->scatter = scsi_sglist(cmd); task->num_scatter = scsi_sg_count(cmd); task->total_xfer_len = scsi_bufflen(cmd); task->data_dir = cmd->sc_data_direction; task->task_done = sas_scsi_task_done; return task; } int sas_queue_up(struct sas_task *task) { struct sas_ha_struct *sas_ha = task->dev->port->ha; struct scsi_core *core = &sas_ha->core; unsigned long flags; LIST_HEAD(list); spin_lock_irqsave(&core->task_queue_lock, flags); if (sas_ha->lldd_queue_size < core->task_queue_size + 1) { spin_unlock_irqrestore(&core->task_queue_lock, flags); return -SAS_QUEUE_FULL; } list_add_tail(&task->list, &core->task_queue); core->task_queue_size += 1; spin_unlock_irqrestore(&core->task_queue_lock, flags); wake_up_process(core->queue_thread); return 0; } /** * sas_queuecommand -- Enqueue a command for processing * @parameters: See SCSI Core documentation * * Note: XXX: Remove the host unlock/lock pair when SCSI Core can * call us without holding an IRQ spinlock... 
*/ int sas_queuecommand(struct scsi_cmnd *cmd, void (*scsi_done)(struct scsi_cmnd *)) __releases(host->host_lock) __acquires(dev->sata_dev.ap->lock) __releases(dev->sata_dev.ap->lock) __acquires(host->host_lock) { int res = 0; struct domain_device *dev = cmd_to_domain_dev(cmd); struct Scsi_Host *host = cmd->device->host; struct sas_internal *i = to_sas_internal(host->transportt); spin_unlock_irq(host->host_lock); { struct sas_ha_struct *sas_ha = dev->port->ha; struct sas_task *task; if (dev_is_sata(dev)) { unsigned long flags; spin_lock_irqsave(dev->sata_dev.ap->lock, flags); res = ata_sas_queuecmd(cmd, scsi_done, dev->sata_dev.ap); spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags); goto out; } res = -ENOMEM; task = sas_create_task(cmd, dev, GFP_ATOMIC); if (!task) goto out; cmd->scsi_done = scsi_done; /* Queue up, Direct Mode or Task Collector Mode. */ if (sas_ha->lldd_max_execute_num < 2) res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC); else res = sas_queue_up(task); /* Examine */ if (res) { SAS_DPRINTK("lldd_execute_task returned: %d\n", res); ASSIGN_SAS_TASK(cmd, NULL); sas_free_task(task); if (res == -SAS_QUEUE_FULL) { cmd->result = DID_SOFT_ERROR << 16; /* retry */ res = 0; scsi_done(cmd); } goto out; } } out: spin_lock_irq(host->host_lock); return res; } static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) { struct sas_task *task = TO_SAS_TASK(cmd); struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host); /* remove the aborted task flag to allow the task to be * completed now. At this point, we only get called following * an actual abort of the task, so we should be guaranteed not * to be racing with any completions from the LLD (hence we * don't need the task state lock to clear the flag) */ task->task_state_flags &= ~SAS_TASK_STATE_ABORTED; /* Now call task_done. 
However, task will be free'd after * this */ task->task_done(task); /* now finish the command and move it on to the error * handler done list, this also takes it off the * error handler pending list */ scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q); } static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) { struct scsi_cmnd *cmd, *n; list_for_each_entry_safe(cmd, n, error_q, eh_entry) { if (cmd->device->sdev_target == my_cmd->device->sdev_target && cmd->device->lun == my_cmd->device->lun) sas_eh_finish_cmd(cmd); } } static void sas_scsi_clear_queue_I_T(struct list_head *error_q, struct domain_device *dev) { struct scsi_cmnd *cmd, *n; list_for_each_entry_safe(cmd, n, error_q, eh_entry) { struct domain_device *x = cmd_to_domain_dev(cmd); if (x == dev) sas_eh_finish_cmd(cmd); } } static void sas_scsi_clear_queue_port(struct list_head *error_q, struct asd_sas_port *port) { struct scsi_cmnd *cmd, *n; list_for_each_entry_safe(cmd, n, error_q, eh_entry) { struct domain_device *dev = cmd_to_domain_dev(cmd); struct asd_sas_port *x = dev->port; if (x == port) sas_eh_finish_cmd(cmd); } } enum task_disposition { TASK_IS_DONE, TASK_IS_ABORTED, TASK_IS_AT_LU, TASK_IS_NOT_AT_LU, TASK_ABORT_FAILED, }; static enum task_disposition sas_scsi_find_task(struct sas_task *task) { struct sas_ha_struct *ha = task->dev->port->ha; unsigned long flags; int i, res; struct sas_internal *si = to_sas_internal(task->dev->port->ha->core.shost->transportt); if (ha->lldd_max_execute_num > 1) { struct scsi_core *core = &ha->core; struct sas_task *t, *n; spin_lock_irqsave(&core->task_queue_lock, flags); list_for_each_entry_safe(t, n, &core->task_queue, list) { if (task == t) { list_del_init(&t->list); spin_unlock_irqrestore(&core->task_queue_lock, flags); SAS_DPRINTK("%s: task 0x%p aborted from " "task_queue\n", __FUNCTION__, task); return TASK_IS_ABORTED; } } spin_unlock_irqrestore(&core->task_queue_lock, flags); } for (i = 0; i < 5; i++) { SAS_DPRINTK("%s: aborting task 
0x%p\n", __FUNCTION__, task); res = si->dft->lldd_abort_task(task); spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_DONE) { spin_unlock_irqrestore(&task->task_state_lock, flags); SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, task); return TASK_IS_DONE; } spin_unlock_irqrestore(&task->task_state_lock, flags); if (res == TMF_RESP_FUNC_COMPLETE) { SAS_DPRINTK("%s: task 0x%p is aborted\n", __FUNCTION__, task); return TASK_IS_ABORTED; } else if (si->dft->lldd_query_task) { SAS_DPRINTK("%s: querying task 0x%p\n", __FUNCTION__, task); res = si->dft->lldd_query_task(task); switch (res) { case TMF_RESP_FUNC_SUCC: SAS_DPRINTK("%s: task 0x%p at LU\n", __FUNCTION__, task); return TASK_IS_AT_LU; case TMF_RESP_FUNC_COMPLETE: SAS_DPRINTK("%s: task 0x%p not at LU\n", __FUNCTION__, task); return TASK_IS_NOT_AT_LU; case TMF_RESP_FUNC_FAILED: SAS_DPRINTK("%s: task 0x%p failed to abort\n", __FUNCTION__, task); return TASK_ABORT_FAILED; } } } return res; } static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd) { int res = TMF_RESP_FUNC_FAILED; struct scsi_lun lun; struct sas_internal *i = to_sas_internal(dev->port->ha->core.shost->transportt); int_to_scsilun(cmd->device->lun, &lun); SAS_DPRINTK("eh: device %llx LUN %x has the task\n", SAS_ADDR(dev->sas_addr), cmd->device->lun); if (i->dft->lldd_abort_task_set) res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun); if (res == TMF_RESP_FUNC_FAILED) { if (i->dft->lldd_clear_task_set) res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun); } if (res == TMF_RESP_FUNC_FAILED) { if (i->dft->lldd_lu_reset) res = i->dft->lldd_lu_reset(dev, lun.scsi_lun); } return res; } static int sas_recover_I_T(struct domain_device *dev) { int res = TMF_RESP_FUNC_FAILED; struct sas_internal *i = to_sas_internal(dev->port->ha->core.shost->transportt); SAS_DPRINTK("I_T nexus reset for dev %016llx\n", SAS_ADDR(dev->sas_addr)); if (i->dft->lldd_I_T_nexus_reset) res = 
i->dft->lldd_I_T_nexus_reset(dev); return res; } /* Find the sas_phy that's attached to this device */ struct sas_phy *sas_find_local_phy(struct domain_device *dev) { struct domain_device *pdev = dev->parent; struct ex_phy *exphy = NULL; int i; /* Directly attached device */ if (!pdev) return dev->port->phy; /* Otherwise look in the expander */ for (i = 0; i < pdev->ex_dev.num_phys; i++) if (!memcmp(dev->sas_addr, pdev->ex_dev.ex_phy[i].attached_sas_addr, SAS_ADDR_SIZE)) { exphy = &pdev->ex_dev.ex_phy[i]; break; } BUG_ON(!exphy); return exphy->phy; } EXPORT_SYMBOL_GPL(sas_find_local_phy); /* Attempt to send a LUN reset message to a device */ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) { struct domain_device *dev = cmd_to_domain_dev(cmd); struct sas_internal *i = to_sas_internal(dev->port->ha->core.shost->transportt); struct scsi_lun lun; int res; int_to_scsilun(cmd->device->lun, &lun); if (!i->dft->lldd_lu_reset) return FAILED; res = i->dft->lldd_lu_reset(dev, lun.scsi_lun); if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) return SUCCESS; return FAILED; } /* Attempt to send a phy (bus) reset */ int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) { struct domain_device *dev = cmd_to_domain_dev(cmd); struct sas_phy *phy = sas_find_local_phy(dev); int res; res = sas_phy_reset(phy, 1); if (res) SAS_DPRINTK("Bus reset of %s failed 0x%x\n", kobject_name(&phy->dev.kobj), res); if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) return SUCCESS; return FAILED; } /* Try to reset a device */ static int try_to_reset_cmd_device(struct scsi_cmnd *cmd) { int res; struct Scsi_Host *shost = cmd->device->host; if (!shost->hostt->eh_device_reset_handler) goto try_bus_reset; res = shost->hostt->eh_device_reset_handler(cmd); if (res == SUCCESS) return res; try_bus_reset: if (shost->hostt->eh_bus_reset_handler) return shost->hostt->eh_bus_reset_handler(cmd); return FAILED; } static int sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct 
list_head *work_q, struct list_head *done_q) { struct scsi_cmnd *cmd, *n; enum task_disposition res = TASK_IS_DONE; int tmf_resp, need_reset; struct sas_internal *i = to_sas_internal(shost->transportt); unsigned long flags; struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); Again: list_for_each_entry_safe(cmd, n, work_q, eh_entry) { struct sas_task *task = TO_SAS_TASK(cmd); if (!task) continue; list_del_init(&cmd->eh_entry); spin_lock_irqsave(&task->task_state_lock, flags); need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; spin_unlock_irqrestore(&task->task_state_lock, flags); if (need_reset) { SAS_DPRINTK("%s: task 0x%p requests reset\n", __FUNCTION__, task); goto reset; } SAS_DPRINTK("trying to find task 0x%p\n", task); res = sas_scsi_find_task(task); cmd->eh_eflags = 0; switch (res) { case TASK_IS_DONE: SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, task); sas_eh_finish_cmd(cmd); continue; case TASK_IS_ABORTED: SAS_DPRINTK("%s: task 0x%p is aborted\n", __FUNCTION__, task); sas_eh_finish_cmd(cmd); continue; case TASK_IS_AT_LU: SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); reset: tmf_resp = sas_recover_lu(task->dev, cmd); if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { SAS_DPRINTK("dev %016llx LU %x is " "recovered\n", SAS_ADDR(task->dev), cmd->device->lun); sas_eh_finish_cmd(cmd); sas_scsi_clear_queue_lu(work_q, cmd); goto Again; } /* fallthrough */ case TASK_IS_NOT_AT_LU: case TASK_ABORT_FAILED: SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n", task); tmf_resp = sas_recover_I_T(task->dev); if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { struct domain_device *dev = task->dev; SAS_DPRINTK("I_T %016llx recovered\n", SAS_ADDR(task->dev->sas_addr)); sas_eh_finish_cmd(cmd); sas_scsi_clear_queue_I_T(work_q, dev); goto Again; } /* Hammer time :-) */ try_to_reset_cmd_device(cmd); if (i->dft->lldd_clear_nexus_port) { struct asd_sas_port *port = task->dev->port; SAS_DPRINTK("clearing nexus for port:%d\n", port->id); res = 
i->dft->lldd_clear_nexus_port(port); if (res == TMF_RESP_FUNC_COMPLETE) { SAS_DPRINTK("clear nexus port:%d " "succeeded\n", port->id); sas_eh_finish_cmd(cmd); sas_scsi_clear_queue_port(work_q, port); goto Again; } } if (i->dft->lldd_clear_nexus_ha) { SAS_DPRINTK("clear nexus ha\n"); res = i->dft->lldd_clear_nexus_ha(ha); if (res == TMF_RESP_FUNC_COMPLETE) { SAS_DPRINTK("clear nexus ha " "succeeded\n"); sas_eh_finish_cmd(cmd); goto clear_q; } } /* If we are here -- this means that no amount * of effort could recover from errors. Quite * possibly the HA just disappeared. */ SAS_DPRINTK("error from device %llx, LUN %x " "couldn't be recovered in any way\n", SAS_ADDR(task->dev->sas_addr), cmd->device->lun); sas_eh_finish_cmd(cmd); goto clear_q; } } return list_empty(work_q); clear_q: SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__); list_for_each_entry_safe(cmd, n, work_q, eh_entry) sas_eh_finish_cmd(cmd); return list_empty(work_q); } void sas_scsi_recover_host(struct Scsi_Host *shost) { struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); unsigned long flags; LIST_HEAD(eh_work_q); spin_lock_irqsave(shost->host_lock, flags); list_splice_init(&shost->eh_cmd_q, &eh_work_q); spin_unlock_irqrestore(shost->host_lock, flags); SAS_DPRINTK("Enter %s\n", __FUNCTION__); /* * Deal with commands that still have SAS tasks (i.e. they didn't * complete via the normal sas_task completion mechanism) */ if (sas_eh_handle_sas_errors(shost, &eh_work_q, &ha->eh_done_q)) goto out; /* * Now deal with SCSI commands that completed ok but have a an error * code (and hopefully sense data) attached. This is roughly what * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any * command we see here has no sas_task and is thus unknown to the HA. 
*/ if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q)) scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q); out: scsi_eh_flush_done_q(&ha->eh_done_q); SAS_DPRINTK("--- Exit %s\n", __FUNCTION__); return; } enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) { struct sas_task *task = TO_SAS_TASK(cmd); unsigned long flags; if (!task) { cmd->timeout_per_command /= 2; SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n", cmd, task, (cmd->timeout_per_command ? "EH_RESET_TIMER" : "EH_NOT_HANDLED")); if (!cmd->timeout_per_command) return EH_NOT_HANDLED; return EH_RESET_TIMER; } spin_lock_irqsave(&task->task_state_lock, flags); BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED); if (task->task_state_flags & SAS_TASK_STATE_DONE) { spin_unlock_irqrestore(&task->task_state_lock, flags); SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", cmd, task); return EH_HANDLED; } if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) { spin_unlock_irqrestore(&task->task_state_lock, flags); SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: " "EH_RESET_TIMER\n", cmd, task); return EH_RESET_TIMER; } task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n", cmd, task); return EH_NOT_HANDLED; } int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) { struct domain_device *dev = sdev_to_domain_dev(sdev); if (dev_is_sata(dev)) return ata_scsi_ioctl(sdev, cmd, arg); return -EINVAL; } struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy) { struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent); struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); struct domain_device *found_dev = NULL; int i; unsigned long flags; spin_lock_irqsave(&ha->phy_port_lock, flags); for (i = 0; i < ha->num_phys; i++) { struct asd_sas_port *port = ha->sas_port[i]; struct domain_device *dev; spin_lock(&port->dev_list_lock); list_for_each_entry(dev, 
&port->dev_list, dev_list_node) { if (rphy == dev->rphy) { found_dev = dev; spin_unlock(&port->dev_list_lock); goto found; } } spin_unlock(&port->dev_list_lock); } found: spin_unlock_irqrestore(&ha->phy_port_lock, flags); return found_dev; } static inline struct domain_device *sas_find_target(struct scsi_target *starget) { struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent); return sas_find_dev_by_rphy(rphy); } int sas_target_alloc(struct scsi_target *starget) { struct domain_device *found_dev = sas_find_target(starget); int res; if (!found_dev) return -ENODEV; if (dev_is_sata(found_dev)) { res = sas_ata_init_host_and_port(found_dev, starget); if (res) return res; } starget->hostdata = found_dev; return 0; } #define SAS_DEF_QD 32 #define SAS_MAX_QD 64 int sas_slave_configure(struct scsi_device *scsi_dev) { struct domain_device *dev = sdev_to_domain_dev(scsi_dev); struct sas_ha_struct *sas_ha; BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE); if (dev_is_sata(dev)) { ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap); return 0; } sas_ha = dev->port->ha; sas_read_port_mode_page(scsi_dev); if (scsi_dev->tagged_supported) { scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG); scsi_activate_tcq(scsi_dev, SAS_DEF_QD); } else { SAS_DPRINTK("device %llx, LUN %x doesn't support " "TCQ\n", SAS_ADDR(dev->sas_addr), scsi_dev->lun); scsi_dev->tagged_supported = 0; scsi_set_tag_type(scsi_dev, 0); scsi_deactivate_tcq(scsi_dev, 1); } scsi_dev->allow_restart = 1; return 0; } void sas_slave_destroy(struct scsi_device *scsi_dev) { struct domain_device *dev = sdev_to_domain_dev(scsi_dev); if (dev_is_sata(dev)) ata_port_disable(dev->sata_dev.ap); } int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth) { int res = min(new_depth, SAS_MAX_QD); if (scsi_dev->tagged_supported) scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), res); else { struct domain_device *dev = sdev_to_domain_dev(scsi_dev); sas_printk("device %llx LUN %x queue depth changed to 1\n", 
SAS_ADDR(dev->sas_addr), scsi_dev->lun); scsi_adjust_queue_depth(scsi_dev, 0, 1); res = 1; } return res; } int sas_change_queue_type(struct scsi_device *scsi_dev, int qt) { if (!scsi_dev->tagged_supported) return 0; scsi_deactivate_tcq(scsi_dev, 1); scsi_set_tag_type(scsi_dev, qt); scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth); return qt; } int sas_bios_param(struct scsi_device *scsi_dev, struct block_device *bdev, sector_t capacity, int *hsc) { hsc[0] = 255; hsc[1] = 63; sector_div(capacity, 255*63); hsc[2] = capacity; return 0; } /* ---------- Task Collector Thread implementation ---------- */ static void sas_queue(struct sas_ha_struct *sas_ha) { struct scsi_core *core = &sas_ha->core; unsigned long flags; LIST_HEAD(q); int can_queue; int res; struct sas_internal *i = to_sas_internal(core->shost->transportt); spin_lock_irqsave(&core->task_queue_lock, flags); while (!kthread_should_stop() && !list_empty(&core->task_queue)) { can_queue = sas_ha->lldd_queue_size - core->task_queue_size; if (can_queue >= 0) { can_queue = core->task_queue_size; list_splice_init(&core->task_queue, &q); } else { struct list_head *a, *n; can_queue = sas_ha->lldd_queue_size; list_for_each_safe(a, n, &core->task_queue) { list_move_tail(a, &q); if (--can_queue == 0) break; } can_queue = sas_ha->lldd_queue_size; } core->task_queue_size -= can_queue; spin_unlock_irqrestore(&core->task_queue_lock, flags); { struct sas_task *task = list_entry(q.next, struct sas_task, list); list_del_init(&q); res = i->dft->lldd_execute_task(task, can_queue, GFP_KERNEL); if (unlikely(res)) __list_add(&q, task->list.prev, &task->list); } spin_lock_irqsave(&core->task_queue_lock, flags); if (res) { list_splice_init(&q, &core->task_queue); /*at head*/ core->task_queue_size += can_queue; } } spin_unlock_irqrestore(&core->task_queue_lock, flags); } /** * sas_queue_thread -- The Task Collector thread * @_sas_ha: pointer to struct sas_ha */ static int sas_queue_thread(void *_sas_ha) { struct sas_ha_struct *sas_ha 
= _sas_ha; while (1) { set_current_state(TASK_INTERRUPTIBLE); schedule(); sas_queue(sas_ha); if (kthread_should_stop()) break; } return 0; } int sas_init_queue(struct sas_ha_struct *sas_ha) { struct scsi_core *core = &sas_ha->core; spin_lock_init(&core->task_queue_lock); core->task_queue_size = 0; INIT_LIST_HEAD(&core->task_queue); core->queue_thread = kthread_run(sas_queue_thread, sas_ha, "sas_queue_%d", core->shost->host_no); if (IS_ERR(core->queue_thread)) return PTR_ERR(core->queue_thread); return 0; } void sas_shutdown_queue(struct sas_ha_struct *sas_ha) { unsigned long flags; struct scsi_core *core = &sas_ha->core; struct sas_task *task, *n; kthread_stop(core->queue_thread); if (!list_empty(&core->task_queue)) SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n", SAS_ADDR(sas_ha->sas_addr)); spin_lock_irqsave(&core->task_queue_lock, flags); list_for_each_entry_safe(task, n, &core->task_queue, list) { struct scsi_cmnd *cmd = task->uldd_task; list_del_init(&task->list); ASSIGN_SAS_TASK(cmd, NULL); sas_free_task(task); cmd->result = DID_ABORT << 16; cmd->scsi_done(cmd); } spin_unlock_irqrestore(&core->task_queue_lock, flags); } /* * Call the LLDD task abort routine directly. This function is intended for * use by upper layers that need to tell the LLDD to abort a task. 
*/ int __sas_task_abort(struct sas_task *task) { struct sas_internal *si = to_sas_internal(task->dev->port->ha->core.shost->transportt); unsigned long flags; int res; spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_ABORTED || task->task_state_flags & SAS_TASK_STATE_DONE) { spin_unlock_irqrestore(&task->task_state_lock, flags); SAS_DPRINTK("%s: Task %p already finished.\n", __FUNCTION__, task); return 0; } task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); if (!si->dft->lldd_abort_task) return -ENODEV; res = si->dft->lldd_abort_task(task); spin_lock_irqsave(&task->task_state_lock, flags); if ((task->task_state_flags & SAS_TASK_STATE_DONE) || (res == TMF_RESP_FUNC_COMPLETE)) { spin_unlock_irqrestore(&task->task_state_lock, flags); task->task_done(task); return 0; } if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) task->task_state_flags &= ~SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); return -EAGAIN; } /* * Tell an upper layer that it needs to initiate an abort for a given task. * This should only ever be called by an LLDD. 
*/ void sas_task_abort(struct sas_task *task) { struct scsi_cmnd *sc = task->uldd_task; /* Escape for libsas internal commands */ if (!sc) { if (!del_timer(&task->timer)) return; task->timer.function(task->timer.data); return; } if (dev_is_sata(task->dev)) { sas_ata_task_abort(task); return; } scsi_req_abort_cmd(sc); scsi_schedule_eh(sc->device->host); } int sas_slave_alloc(struct scsi_device *scsi_dev) { struct domain_device *dev = sdev_to_domain_dev(scsi_dev); if (dev_is_sata(dev)) return ata_sas_port_init(dev->sata_dev.ap); return 0; } void sas_target_destroy(struct scsi_target *starget) { struct domain_device *found_dev = sas_find_target(starget); if (!found_dev) return; if (dev_is_sata(found_dev)) ata_sas_port_destroy(found_dev->sata_dev.ap); return; } static void sas_parse_addr(u8 *sas_addr, const char *p) { int i; for (i = 0; i < SAS_ADDR_SIZE; i++) { u8 h, l; if (!*p) break; h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10; p++; l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10; p++; sas_addr[i] = (h<<4) | l; } } #define SAS_STRING_ADDR_SIZE 16 int sas_request_addr(struct Scsi_Host *shost, u8 *addr) { int res; const struct firmware *fw; res = request_firmware(&fw, "sas_addr", &shost->shost_gendev); if (res) return res; if (fw->size < SAS_STRING_ADDR_SIZE) { res = -ENODEV; goto out; } sas_parse_addr(addr, fw->data); out: release_firmware(fw); return res; } EXPORT_SYMBOL_GPL(sas_request_addr); EXPORT_SYMBOL_GPL(sas_queuecommand); EXPORT_SYMBOL_GPL(sas_target_alloc); EXPORT_SYMBOL_GPL(sas_slave_configure); EXPORT_SYMBOL_GPL(sas_slave_destroy); EXPORT_SYMBOL_GPL(sas_change_queue_depth); EXPORT_SYMBOL_GPL(sas_change_queue_type); EXPORT_SYMBOL_GPL(sas_bios_param); EXPORT_SYMBOL_GPL(__sas_task_abort); EXPORT_SYMBOL_GPL(sas_task_abort); EXPORT_SYMBOL_GPL(sas_phy_reset); EXPORT_SYMBOL_GPL(sas_phy_enable); EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler); EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler); EXPORT_SYMBOL_GPL(sas_slave_alloc); EXPORT_SYMBOL_GPL(sas_target_destroy); 
EXPORT_SYMBOL_GPL(sas_ioctl);
gpl-2.0
polaretto/TrinityCore
src/server/scripts/EasternKingdoms/SunwellPlateau/instance_sunwell_plateau.cpp
4
9073
/* * Copyright (C) 2008-2014 TrinityCore <http://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ScriptMgr.h" #include "InstanceScript.h" #include "Player.h" #include "sunwell_plateau.h" /* Sunwell Plateau: 0 - Kalecgos and Sathrovarr 1 - Brutallus 2 - Felmyst 3 - Eredar Twins (Alythess and Sacrolash) 4 - M'uru 5 - Kil'Jaeden */ DoorData const doorData[] = { { GO_FIRE_BARRIER, DATA_FELMYST, DOOR_TYPE_PASSAGE, BOUNDARY_NONE }, { GO_MURUS_GATE_1, DATA_MURU, DOOR_TYPE_ROOM, BOUNDARY_NONE }, { GO_MURUS_GATE_2, DATA_MURU, DOOR_TYPE_PASSAGE, BOUNDARY_NONE }, { GO_BOSS_COLLISION_1, DATA_KALECGOS, DOOR_TYPE_ROOM, BOUNDARY_NONE }, { GO_BOSS_COLLISION_2, DATA_KALECGOS, DOOR_TYPE_ROOM, BOUNDARY_NONE }, { GO_FORCE_FIELD, DATA_KALECGOS, DOOR_TYPE_ROOM, BOUNDARY_NONE }, { 0, 0, DOOR_TYPE_ROOM, BOUNDARY_NONE } // END }; class instance_sunwell_plateau : public InstanceMapScript { public: instance_sunwell_plateau() : InstanceMapScript(SunwellPlateauScriptName, 580) { } struct instance_sunwell_plateau_InstanceMapScript : public InstanceScript { instance_sunwell_plateau_InstanceMapScript(Map* map) : InstanceScript(map) { SetHeaders(DataHeader); SetBossNumber(EncounterCount); LoadDoorData(doorData); KalecgosDragonGUID = 0; KalecgosHumanGUID = 0; SathrovarrGUID = 0; BrutallusGUID = 0; MadrigosaGUID = 0; FelmystGUID = 0; AlythessGUID = 0; SacrolashGUID = 0; MuruGUID = 0; 
KilJaedenGUID = 0; KilJaedenControllerGUID = 0; AnveenaGUID = 0; KalecgosKjGUID = 0; SpectralPlayers = 0; SpectralRealmTimer = 5000; } Player const* GetPlayerInMap() const { Map::PlayerList const& players = instance->GetPlayers(); if (!players.isEmpty()) { for (Map::PlayerList::const_iterator itr = players.begin(); itr != players.end(); ++itr) { Player* player = itr->GetSource(); if (player && !player->HasAura(45839, 0)) return player; } } else TC_LOG_DEBUG("scripts", "Instance Sunwell Plateau: GetPlayerInMap, but PlayerList is empty!"); return NULL; } void OnCreatureCreate(Creature* creature) override { switch (creature->GetEntry()) { case NPC_KALECGOS: KalecgosDragonGUID = creature->GetGUID(); break; case NPC_KALEC: KalecgosHumanGUID = creature->GetGUID(); break; case NPC_SATHROVARR: SathrovarrGUID = creature->GetGUID(); break; case NPC_BRUTALLUS: BrutallusGUID = creature->GetGUID(); break; case NPC_MADRIGOSA: MadrigosaGUID = creature->GetGUID(); break; case NPC_FELMYST: FelmystGUID = creature->GetGUID(); break; case NPC_GRAND_WARLOCK_ALYTHESS: AlythessGUID = creature->GetGUID(); break; case NPC_LADY_SACROLASH: SacrolashGUID = creature->GetGUID(); break; case NPC_MURU: MuruGUID = creature->GetGUID(); break; case NPC_KILJAEDEN: KilJaedenGUID = creature->GetGUID(); break; case NPC_KILJAEDEN_CONTROLLER: KilJaedenControllerGUID = creature->GetGUID(); break; case NPC_ANVEENA: AnveenaGUID = creature->GetGUID(); break; case NPC_KALECGOS_KJ: KalecgosKjGUID = creature->GetGUID(); break; default: break; } } void OnGameObjectCreate(GameObject* go) override { switch (go->GetEntry()) { case GO_FORCE_FIELD: case GO_BOSS_COLLISION_1: case GO_BOSS_COLLISION_2: case GO_FIRE_BARRIER: case GO_MURUS_GATE_1: case GO_MURUS_GATE_2: AddDoor(go, true); break; default: break; } } void OnGameObjectRemove(GameObject* go) override { switch (go->GetEntry()) { case GO_FIRE_BARRIER: case GO_MURUS_GATE_1: case GO_MURUS_GATE_2: case GO_BOSS_COLLISION_1: case GO_BOSS_COLLISION_2: case 
GO_FORCE_FIELD: AddDoor(go, false); break; default: break; } } uint64 GetData64(uint32 id) const override { switch (id) { case DATA_KALECGOS_DRAGON: return KalecgosDragonGUID; case DATA_KALECGOS_HUMAN: return KalecgosHumanGUID; case DATA_SATHROVARR: return SathrovarrGUID; case DATA_BRUTALLUS: return BrutallusGUID; case DATA_MADRIGOSA: return MadrigosaGUID; case DATA_FELMYST: return FelmystGUID; case DATA_ALYTHESS: return AlythessGUID; case DATA_SACROLASH: return SacrolashGUID; case DATA_MURU: return MuruGUID; case DATA_KILJAEDEN: return KilJaedenGUID; case DATA_KILJAEDEN_CONTROLLER: return KilJaedenControllerGUID; case DATA_ANVEENA: return AnveenaGUID; case DATA_KALECGOS_KJ: return KalecgosKjGUID; case DATA_PLAYER_GUID: { Player const* target = GetPlayerInMap(); return target ? target->GetGUID() : 0; } default: break; } return 0; } protected: uint64 KalecgosDragonGUID; uint64 KalecgosHumanGUID; uint64 SathrovarrGUID; uint64 BrutallusGUID; uint64 MadrigosaGUID; uint64 FelmystGUID; uint64 AlythessGUID; uint64 SacrolashGUID; uint64 MuruGUID; uint64 KilJaedenGUID; uint64 KilJaedenControllerGUID; uint64 AnveenaGUID; uint64 KalecgosKjGUID; uint32 SpectralPlayers; uint32 SpectralRealmTimer; std::vector<uint64> SpectralRealmList; }; InstanceScript* GetInstanceScript(InstanceMap* map) const override { return new instance_sunwell_plateau_InstanceMapScript(map); } }; void AddSC_instance_sunwell_plateau() { new instance_sunwell_plateau(); }
gpl-2.0
gidsola/mangos-tbc
src/game/TaxiHandler.cpp
4
10002
/* * This file is part of the CMaNGOS Project. See AUTHORS file for Copyright information * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "Common.h" #include "Database/DatabaseEnv.h" #include "WorldPacket.h" #include "WorldSession.h" #include "Opcodes.h" #include "Log.h" #include "ObjectMgr.h" #include "Player.h" #include "UpdateMask.h" #include "Path.h" #include "WaypointMovementGenerator.h" void WorldSession::HandleTaxiNodeStatusQueryOpcode(WorldPacket& recv_data) { DEBUG_LOG("WORLD: Received opcode CMSG_TAXINODE_STATUS_QUERY"); ObjectGuid guid; recv_data >> guid; SendTaxiStatus(guid); } void WorldSession::SendTaxiStatus(ObjectGuid guid) { // cheating checks Creature* unit = GetPlayer()->GetMap()->GetCreature(guid); if (!unit) { DEBUG_LOG("WorldSession::SendTaxiStatus - %s not found or you can't interact with it.", guid.GetString().c_str()); return; } uint32 curloc = sObjectMgr.GetNearestTaxiNode(unit->GetPositionX(), unit->GetPositionY(), unit->GetPositionZ(), unit->GetMapId(), GetPlayer()->GetTeam()); // not found nearest if (curloc == 0) return; DEBUG_LOG("WORLD: current location %u ", curloc); WorldPacket data(SMSG_TAXINODE_STATUS, 9); data << ObjectGuid(guid); data << uint8(GetPlayer()->m_taxi.IsTaximaskNodeKnown(curloc) ? 
1 : 0); SendPacket(&data); DEBUG_LOG("WORLD: Sent SMSG_TAXINODE_STATUS"); } void WorldSession::HandleTaxiQueryAvailableNodes(WorldPacket& recv_data) { DEBUG_LOG("WORLD: Received opcode CMSG_TAXIQUERYAVAILABLENODES"); ObjectGuid guid; recv_data >> guid; // cheating checks Creature* unit = GetPlayer()->GetNPCIfCanInteractWith(guid, UNIT_NPC_FLAG_FLIGHTMASTER); if (!unit) { DEBUG_LOG("WORLD: HandleTaxiQueryAvailableNodes - %s not found or you can't interact with him.", guid.GetString().c_str()); return; } // remove fake death if (GetPlayer()->hasUnitState(UNIT_STAT_DIED)) GetPlayer()->RemoveSpellsCausingAura(SPELL_AURA_FEIGN_DEATH); // unknown taxi node case if (SendLearnNewTaxiNode(unit)) return; // known taxi node case SendTaxiMenu(unit); } void WorldSession::SendTaxiMenu(Creature* unit) { // find current node uint32 curloc = sObjectMgr.GetNearestTaxiNode(unit->GetPositionX(), unit->GetPositionY(), unit->GetPositionZ(), unit->GetMapId(), GetPlayer()->GetTeam()); if (curloc == 0) return; DEBUG_LOG("WORLD: CMSG_TAXINODE_STATUS_QUERY %u ", curloc); WorldPacket data(SMSG_SHOWTAXINODES, (4 + 8 + 4 + 8 * 4)); data << uint32(1); data << unit->GetObjectGuid(); data << uint32(curloc); GetPlayer()->m_taxi.AppendTaximaskTo(data, GetPlayer()->isTaxiCheater()); SendPacket(&data); DEBUG_LOG("WORLD: Sent SMSG_SHOWTAXINODES"); } void WorldSession::SendDoFlight(uint32 mountDisplayId, uint32 path, uint32 pathNode) { // remove fake death if (GetPlayer()->hasUnitState(UNIT_STAT_DIED)) GetPlayer()->RemoveSpellsCausingAura(SPELL_AURA_FEIGN_DEATH); while (GetPlayer()->GetMotionMaster()->GetCurrentMovementGeneratorType() == FLIGHT_MOTION_TYPE) GetPlayer()->GetMotionMaster()->MovementExpired(false); if (mountDisplayId) GetPlayer()->Mount(mountDisplayId); GetPlayer()->GetMotionMaster()->MoveTaxiFlight(path, pathNode); } bool WorldSession::SendLearnNewTaxiNode(Creature* unit) { // find current node uint32 curloc = sObjectMgr.GetNearestTaxiNode(unit->GetPositionX(), unit->GetPositionY(), 
unit->GetPositionZ(), unit->GetMapId(), GetPlayer()->GetTeam()); if (curloc == 0) return true; // `true` send to avoid WorldSession::SendTaxiMenu call with one more curlock seartch with same false result. if (GetPlayer()->m_taxi.SetTaximaskNode(curloc)) { WorldPacket msg(SMSG_NEW_TAXI_PATH, 0); SendPacket(&msg); WorldPacket update(SMSG_TAXINODE_STATUS, 9); update << ObjectGuid(unit->GetObjectGuid()); update << uint8(1); SendPacket(&update); return true; } else return false; } void WorldSession::SendActivateTaxiReply(ActivateTaxiReply reply) { WorldPacket data(SMSG_ACTIVATETAXIREPLY, 4); data << uint32(reply); SendPacket(&data); DEBUG_LOG("WORLD: Sent SMSG_ACTIVATETAXIREPLY"); } void WorldSession::HandleActivateTaxiExpressOpcode(WorldPacket& recv_data) { DEBUG_LOG("WORLD: Received opcode CMSG_ACTIVATETAXIEXPRESS"); ObjectGuid guid; uint32 node_count, _totalcost; recv_data >> guid >> _totalcost >> node_count; Creature* npc = GetPlayer()->GetNPCIfCanInteractWith(guid, UNIT_NPC_FLAG_FLIGHTMASTER); if (!npc) { DEBUG_LOG("WORLD: HandleActivateTaxiExpressOpcode - %s not found or you can't interact with it.", guid.GetString().c_str()); return; } std::vector<uint32> nodes; for (uint32 i = 0; i < node_count; ++i) { uint32 node; recv_data >> node; if (!_player->m_taxi.IsTaximaskNodeKnown(node) && !_player->isTaxiCheater()) { SendActivateTaxiReply(ERR_TAXINOTVISITED); recv_data.rpos(recv_data.wpos()); // prevent additional spam at rejected packet return; } nodes.push_back(node); } if (nodes.empty()) return; DEBUG_LOG("WORLD: Received opcode CMSG_ACTIVATETAXIEXPRESS from %d to %d" , nodes.front(), nodes.back()); GetPlayer()->ActivateTaxiPathTo(nodes, npc); } void WorldSession::HandleMoveSplineDoneOpcode(WorldPacket& recv_data) { DEBUG_LOG("WORLD: Received opcode CMSG_MOVE_SPLINE_DONE"); MovementInfo movementInfo; // used only for proper packet read recv_data >> movementInfo; recv_data >> Unused<uint32>(); // unk // in taxi flight packet received in 2 case: // 1) end taxi path 
in far (multi-node) flight // 2) switch from one map to other in case multi-map taxi path // we need process only (1) uint32 curDest = GetPlayer()->m_taxi.GetTaxiDestination(); if (!curDest) return; TaxiNodesEntry const* curDestNode = sTaxiNodesStore.LookupEntry(curDest); // far teleport case if (curDestNode && curDestNode->map_id != GetPlayer()->GetMapId()) { if (GetPlayer()->GetMotionMaster()->GetCurrentMovementGeneratorType() == FLIGHT_MOTION_TYPE) { // short preparations to continue flight FlightPathMovementGenerator* flight = (FlightPathMovementGenerator*)(GetPlayer()->GetMotionMaster()->top()); flight->Interrupt(*GetPlayer()); // will reset at map landing flight->SetCurrentNodeAfterTeleport(); TaxiPathNodeEntry const& node = flight->GetPath()[flight->GetCurrentNode()]; flight->SkipCurrentNode(); GetPlayer()->TeleportTo(curDestNode->map_id, node.x, node.y, node.z, GetPlayer()->GetOrientation()); } return; } uint32 destinationnode = GetPlayer()->m_taxi.NextTaxiDestination(); if (destinationnode > 0) // if more destinations to go { // current source node for next destination uint32 sourcenode = GetPlayer()->m_taxi.GetTaxiSource(); // Add to taximask middle hubs in taxicheat mode (to prevent having player with disabled taxicheat and not having back flight path) if (GetPlayer()->isTaxiCheater()) { if (GetPlayer()->m_taxi.SetTaximaskNode(sourcenode)) { WorldPacket data(SMSG_NEW_TAXI_PATH, 0); _player->GetSession()->SendPacket(&data); } } DEBUG_LOG("WORLD: Taxi has to go from %u to %u", sourcenode, destinationnode); uint32 mountDisplayId = sObjectMgr.GetTaxiMountDisplayId(sourcenode, GetPlayer()->GetTeam()); uint32 path, cost; sObjectMgr.GetTaxiPath(sourcenode, destinationnode, path, cost); if (path && mountDisplayId) SendDoFlight(mountDisplayId, path, 1); // skip start fly node else GetPlayer()->m_taxi.ClearTaxiDestinations(); // clear problematic path and next } else GetPlayer()->m_taxi.ClearTaxiDestinations(); // not destinations, clear source node } void 
WorldSession::HandleActivateTaxiOpcode(WorldPacket& recv_data) { DEBUG_LOG("WORLD: Received opcode CMSG_ACTIVATETAXI"); ObjectGuid guid; std::vector<uint32> nodes; nodes.resize(2); recv_data >> guid >> nodes[0] >> nodes[1]; DEBUG_LOG("WORLD: Received opcode CMSG_ACTIVATETAXI from %d to %d" , nodes[0], nodes[1]); Creature* npc = GetPlayer()->GetNPCIfCanInteractWith(guid, UNIT_NPC_FLAG_FLIGHTMASTER); if (!npc) { DEBUG_LOG("WORLD: HandleActivateTaxiOpcode - %s not found or you can't interact with it.", guid.GetString().c_str()); return; } if (!_player->isTaxiCheater()) { if (!_player->m_taxi.IsTaximaskNodeKnown(nodes[0]) || !_player->m_taxi.IsTaximaskNodeKnown(nodes[1])) { SendActivateTaxiReply(ERR_TAXINOTVISITED); return; } } GetPlayer()->ActivateTaxiPathTo(nodes, npc); }
gpl-2.0
pccr10001/Kernel-2.6.32.61-for-PDK-7105
arch/sh/kernel/cpu/sh4/stm_suspend.c
4
3924
/* * ------------------------------------------------------------------------- * Copyright (C) 2008 STMicroelectronics * Copyright (C) 2010 STMicroelectronics * Author: Francesco M. Virlinzi <francesco.virlinzi@st.com> * * May be copied or modified under the terms of the GNU General Public * License V.2 ONLY. See linux/COPYING for more information. * * ------------------------------------------------------------------------- */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/suspend.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/delay.h> #include <linux/irqflags.h> #include <linux/kobject.h> #include <linux/clk.h> #include <linux/hardirq.h> #include <linux/jiffies.h> #include <linux/io.h> #include <linux/stm/pm_notify.h> #include <linux/stm/wakeup_devices.h> #include <asm/system.h> #include <asm/cacheflush.h> #include <asm-generic/bug.h> #include <cpu/mmu_context.h> #include "stm_suspend.h" #undef dbg_print #ifdef CONFIG_PM_DEBUG #define dbg_print(fmt, args...) \ printk(KERN_INFO "%s: " fmt, __func__, ## args) #else #define dbg_print(fmt, args...) #endif static struct stm_platform_suspend_t *platform_suspend; static unsigned long stm_read_intevt(void) { return ctrl_inl(INTEVT); } static int stm_suspend_enter(suspend_state_t state) { unsigned long soc_flags; unsigned long tbl, tbl_size; unsigned long lpj = (cpu_data[raw_smp_processor_id()].loops_per_jiffy * HZ) / 1000; enum stm_pm_type type = (state == PM_SUSPEND_STANDBY) ? STM_PM_SUSPEND : STM_PM_MEMSUSPEND; enum stm_pm_notify_return notify_ret; int err = 0, wokenup_by = 0; /* Must wait for serial buffers to clear */ printk(KERN_INFO "CPU is sleeping\n"); mdelay(100); flush_cache_all(); if (platform_suspend->pre_enter) err = platform_suspend->pre_enter(state); /* * If the platform pre_enter returns an error suspend is * aborted. 
*/ if (err) goto on_error; /* sets the right instruction table */ if (state == PM_SUSPEND_STANDBY) { tbl = platform_suspend->stby_tbl; tbl_size = platform_suspend->stby_size; soc_flags = ((platform_suspend->flags & NO_SLEEP_ON_STANDBY) ? 1 : 0); soc_flags += ((platform_suspend->flags & EARLY_ACTION_ON_STANDBY) ? 2 : 0); } else { tbl = platform_suspend->mem_tbl; tbl_size = platform_suspend->mem_size; soc_flags = ((platform_suspend->flags & NO_SLEEP_ON_MEMSTANDBY) ? 1 : 0); soc_flags += ((platform_suspend->flags & EARLY_ACTION_ON_MEMSTANDBY) ? 2 : 0); } BUG_ON(in_irq()); __stm_again_suspend: notify_ret = stm_pm_prepare_enter(type); if (notify_ret == STM_PM_RET_ERROR) goto __stm_skip_suspend; stm_exec_table(tbl, tbl_size, lpj, soc_flags); BUG_ON(in_irq()); wokenup_by = stm_read_intevt(); if (platform_suspend->evt_to_irq) wokenup_by = platform_suspend->evt_to_irq(wokenup_by); else wokenup_by = evt2irq(wokenup_by); stm_set_wakeup_reason(wokenup_by); __stm_skip_suspend: notify_ret = stm_pm_post_enter(type, wokenup_by); if (notify_ret == STM_PM_RET_AGAIN) goto __stm_again_suspend; if (platform_suspend->post_enter) platform_suspend->post_enter(state); printk(KERN_INFO "CPU woken up by: 0x%x\n", wokenup_by); return 0; on_error: if (platform_suspend->post_enter) platform_suspend->post_enter(state); pr_err("[STM][PM] Error on Core Suspend\n"); return -EINVAL; } static int stm_suspend_valid_both(suspend_state_t state) { return 1; } int __init stm_suspend_register(struct stm_platform_suspend_t *_suspend) { if (!_suspend) return -EINVAL; platform_suspend = _suspend; platform_suspend->ops.enter = stm_suspend_enter; if (platform_suspend->stby_tbl && platform_suspend->stby_size) platform_suspend->ops.valid = stm_suspend_valid_both; else platform_suspend->ops.valid = suspend_valid_only_mem; suspend_set_ops(&platform_suspend->ops); printk(KERN_INFO "[STM]: [PM]: Suspend support registered\n"); return 0; }
gpl-2.0
STJrInuyasha/SRB2
src/win32ce/win_sys.c
4
94656
// Emacs style mode select -*- C++ -*- //----------------------------------------------------------------------------- // // Copyright (C) 1998-2000 by DooM Legacy Team. // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. //----------------------------------------------------------------------------- /// \file /// \brief win32 system i/o /// /// Startup & Shutdown routines for music,sound,timer,keyboard,... /// Signal handler to trap errors and exit cleanly. #include "../doomdef.h" #include <stdlib.h> #include <signal.h> #include <stdio.h> #include <string.h> #include <fcntl.h> #include <io.h> #include <stdarg.h> #include <direct.h> #include <mmsystem.h> #include "../m_misc.h" #include "../i_video.h" #include "../i_sound.h" #include "../i_system.h" #include "../d_net.h" #include "../g_game.h" #include "../d_main.h" #include "../m_argv.h" #include "../w_wad.h" #include "../z_zone.h" #include "../g_input.h" #include "../keys.h" #include "../screen.h" // Wheel support for Win95/WinNT3.51 #include <zmouse.h> // Taken from Win98/NT4.0 #ifndef SM_MOUSEWHEELPRESENT #define SM_MOUSEWHEELPRESENT 75 #endif #ifndef MSH_MOUSEWHEEL #ifdef UNICODE #define MSH_MOUSEWHEEL L"MSWHEEL_ROLLMSG" #else #define MSH_MOUSEWHEEL "MSWHEEL_ROLLMSG" #endif #endif #include "win_main.h" #include "../i_joy.h" #define DIRECTINPUT_VERSION 0x700 // Force dinput.h to generate old DX3 headers. 
#define DXVERSION_NTCOMPATIBLE 0x0300 #include <dinput.h> #include "fabdxlib.h" #ifdef __DEBUG__ #undef NDEBUG #endif /// \brief max number of joystick buttons #define JOYBUTTONS_MAX 32 // rgbButtons[32] /// \brief max number of joystick button events #define JOYBUTTONS_MIN min((JOYBUTTONS),(JOYBUTTONS_MAX)) /// \brief max number of joysick axies #define JOYAXISSET_MAX 4 // (lX, lY), (lZ,lRx), (lRy, lRz), rglSlider[2] is very diff /// \brief max number ofjoystick axis events #define JOYAXISSET_MIN min((JOYAXISSET),(JOYAXISSET_MAX)) /// \brief max number of joystick hats #define JOYHATS_MAX 4 // rgdwPOV[4]; /// \brief max number of joystick hat events #define JOYHATS_MIN min((JOYHATS),(JOYHATS_MAX)) /// \brief max number of mouse buttons #define MOUSEBUTTONS_MAX 8 // 8 bit of BYTE and DIMOFS_BUTTON7 /// \brief max number of muse button events #define MOUSEBUTTONS_MIN min((MOUSEBUTTONS),(MOUSEBUTTONS_MAX)) // ================== // DIRECT INPUT STUFF // ================== BOOL bDX0300; // if true, we created a DirectInput 0x0300 version static LPDIRECTINPUT lpDI = NULL; static LPDIRECTINPUTDEVICE lpDIK = NULL; // Keyboard static LPDIRECTINPUTDEVICE lpDIM = NULL; // mice static LPDIRECTINPUTDEVICE lpDIJ = NULL; // joystick 1P static LPDIRECTINPUTEFFECT lpDIE[NumberofForces]; // joystick 1Es static LPDIRECTINPUTDEVICE2 lpDIJA = NULL; // joystick 1I static LPDIRECTINPUTDEVICE lpDIJ2 = NULL; // joystick 2P static LPDIRECTINPUTEFFECT lpDIE2[NumberofForces]; // joystick 1Es static LPDIRECTINPUTDEVICE2 lpDIJ2A = NULL;// joystick 2I // Do not execute cleanup code more than once. See Shutdown_xxx() routines. 
UINT8 graphics_started = 0; UINT8 keyboard_started = 0; UINT8 sound_started = 0; static boolean mouse_enabled = false; static boolean joystick_detected = false; static boolean joystick2_detected = false; static void I_ShutdownKeyboard(void); static void I_GetKeyboardEvents(void); static void I_ShutdownJoystick(void); static void I_ShutdownJoystick2 (void); static boolean entering_con_command = false; // // Why would this be system specific?? hmmmm.... // // it is for virtual reality system, next incoming feature :) static ticcmd_t emptycmd; ticcmd_t *I_BaseTiccmd(void) { return &emptycmd; } static ticcmd_t emptycmd2; ticcmd_t *I_BaseTiccmd2(void) { return &emptycmd2; } // Allocates the base zone memory, // this function returns a valid pointer and size, // else it should interrupt the program immediately. // // now checks if mem could be allocated, this is still // prehistoric... there's a lot to do here: memory locking, detection // of win95 etc... // BOOL win9x; /** \brief WinNT system platform */ static BOOL winnt; static void I_DetectWin9x(void) { OSVERSIONINFO osvi; osvi.dwOSVersionInfoSize = sizeof (OSVERSIONINFO); GetVersionEx(&osvi); winnt = (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT); // 95 or 98 what the hell win9x = true; } // return free and total memory in the system UINT32 I_GetFreeMem(UINT32* total) { MEMORYSTATUS info; info.dwLength = sizeof (MEMORYSTATUS); GlobalMemoryStatus(&info); if (total) *total = (ULONG)info.dwTotalPhys; return (ULONG)info.dwAvailPhys; } // --------- // I_Profile // Two little functions to profile our code using the high resolution timer // --------- static LARGE_INTEGER ProfileCount; void I_BeginProfile(void) { if (!QueryPerformanceCounter(&ProfileCount)) I_Error("I_BeginProfile failed"); // can't profile without the high res timer } // we're supposed to use this to measure very small amounts of time, // that's why we return a DWORD and not a 64bit value DWORD I_EndProfile(void) { LARGE_INTEGER CurrTime; DWORD ret; if 
(!QueryPerformanceCounter (&CurrTime)) I_Error("I_EndProfile failed"); if (CurrTime.QuadPart - ProfileCount.QuadPart > (LONGLONG)0xFFFFFFFFUL) I_Error("I_EndProfile overflow"); ret = (DWORD)(CurrTime.QuadPart - ProfileCount.QuadPart); // we can call I_EndProfile() several time, I_BeginProfile() need be called just once ProfileCount = CurrTime; return ret; } // --------- // I_GetTime // Use the High Resolution Timer if available, // else use the multimedia timer which has 1 millisecond precision on Windowz 95, // but lower precision on Windows NT // --------- static long hacktics = 0; // used locally for keyboard repeat keys static DWORD starttickcount = 0; // hack for win2k time bug tic_t I_GetTime(void) { tic_t newtics = 0; if (!starttickcount) // high precision timer { LARGE_INTEGER currtime; // use only LowPart if high resolution counter is not available static LARGE_INTEGER basetime = {{0, 0}}; // use this if High Resolution timer is found static LARGE_INTEGER frequency; if (!basetime.LowPart) { if (!QueryPerformanceFrequency(&frequency)) frequency.QuadPart = 0; else QueryPerformanceCounter(&basetime); } if (frequency.LowPart && QueryPerformanceCounter(&currtime)) { newtics = (int)((currtime.QuadPart - basetime.QuadPart) * TICRATE / frequency.QuadPart); } else { currtime.LowPart = timeGetTime(); if (!basetime.LowPart) basetime.LowPart = currtime.LowPart; newtics = ((currtime.LowPart - basetime.LowPart)/(1000/TICRATE)); } } else newtics = (GetTickCount() - starttickcount)/(1000/TICRATE); hacktics = newtics; // a local counter for keyboard repeat key return newtics; } void I_Sleep(void) { if (cv_sleep.value != -1) Sleep(cv_sleep.value); } // should move to i_video void I_WaitVBL(INT32 count) { count = 0; } // this is probably to activate the 'loading' disc icon // it should set a flag, that I_FinishUpdate uses to know // whether it draws a small 'loading' disc icon on the screen or not // // also it should explicitly draw the disc because the screen is // 
possibly not refreshed while loading // void I_BeginRead(void) {} // see above, end the 'loading' disc icon, set the flag false // void I_EndRead(void) {} // =========================================================================================== // EVENTS // =========================================================================================== static inline BOOL I_ReadyConsole(HANDLE ci) { DWORD gotinput; if (ci == (HANDLE)-1) return FALSE; if (WaitForSingleObject(ci,0) != WAIT_OBJECT_0) return FALSE; if (GetFileType(ci) != FILE_TYPE_CHAR) return FALSE; return (GetNumberOfConsoleInputEvents(ci, &gotinput) && gotinput); } static inline VOID I_GetConsoleEvents(VOID) { event_t ev = {0,0,0,0}; HANDLE ci = GetStdHandle(STD_INPUT_HANDLE); HANDLE co = GetStdHandle(STD_OUTPUT_HANDLE); CONSOLE_SCREEN_BUFFER_INFO CSBI; INPUT_RECORD input; DWORD t; while (I_ReadyConsole(ci) && ReadConsoleInput(ci, &input, 1, &t) && t) { memset(&ev,0x00,sizeof (ev)); switch (input.EventType) { case KEY_EVENT: if (input.Event.KeyEvent.bKeyDown) { ev.type = ev_console; entering_con_command = true; switch (input.Event.KeyEvent.wVirtualKeyCode) { case VK_ESCAPE: case VK_TAB: ev.data1 = KEY_NULL; break; case VK_SHIFT: ev.data1 = KEY_SHIFT; break; case VK_RETURN: entering_con_command = false; // Fall through. 
default: ev.data1 = MapVirtualKey(input.Event.KeyEvent.wVirtualKeyCode,2); // convert in to char } if (co != (HANDLE)-1 && GetFileType(co) == FILE_TYPE_CHAR) { if (ev.data1 && ev.data1 != KEY_SHIFT) { #ifdef _UNICODE WriteConsole(co, &input.Event.KeyEvent.uChar.UnicodeChar, 1, &t, NULL); #else WriteConsole(co, &input.Event.KeyEvent.uChar.AsciiChar, 1, &t, NULL); #endif } if (input.Event.KeyEvent.wVirtualKeyCode == VK_BACK && GetConsoleScreenBufferInfo(co,&CSBI)) { WriteConsoleOutputCharacterA(co, " ",1, CSBI.dwCursorPosition, &t); } } } else { ev.type = ev_keyup; switch (input.Event.KeyEvent.wVirtualKeyCode) { case VK_SHIFT: ev.data1 = KEY_SHIFT; break; default: break; } } if (ev.data1) D_PostEvent(&ev); break; case MOUSE_EVENT: case WINDOW_BUFFER_SIZE_EVENT: case MENU_EVENT: case FOCUS_EVENT: break; } } } // ---------- // I_GetEvent // Post new events for all sorts of user-input // ---------- void I_GetEvent(void) { I_GetConsoleEvents(); I_GetKeyboardEvents(); I_GetMouseEvents(); I_GetJoystickEvents(); I_GetJoystick2Events(); } // ---------- // I_OsPolling // ---------- void I_OsPolling(void) { MSG msg; HANDLE ci = GetStdHandle(STD_INPUT_HANDLE); // we need to dispatch messages to the window // so the window procedure can respond to messages and PostEvent() for keys // during D_SRB2Main startup. 
// this one replaces the main loop of windows since I_OsPolling is called in the main loop do { while (PeekMessage(&msg, NULL, 0, 0, PM_NOREMOVE)) { if (GetMessage(&msg, NULL, 0, 0)) { TranslateMessage(&msg); DispatchMessage(&msg); } else // winspec : this is quit message I_Quit(); } if (!appActive && !netgame && !I_ReadyConsole(ci)) WaitMessage(); } while (!appActive && !netgame && !I_ReadyConsole(ci)); // this is called by the network synchronization, // check keys and allow escaping I_GetEvent(); // reset "emulated keys" gamekeydown[KEY_MOUSEWHEELUP] = 0; gamekeydown[KEY_MOUSEWHEELDOWN] = 0; } // =========================================================================================== // TIMER // =========================================================================================== static void I_ShutdownTimer(void) { timeEndPeriod(1); } // // Installs the timer interrupt handler with timer speed as TICRATE. // #define TIMER_ID 1 #define TIMER_RATE (1000/TICRATE) void I_StartupTimer(void) { // for win2k time bug if (M_CheckParm("-gettickcount")) { starttickcount = GetTickCount(); CONS_Printf("Using GetTickCount()\n"); } timeBeginPeriod(1); I_AddExitFunc(I_ShutdownTimer); } // =========================================================================================== // EXIT CODE, ERROR HANDLING // =========================================================================================== static int errorcount = 0; // phuck recursive errors static int shutdowning = false; // // Used to trap various signals, to make sure things get shut down cleanly. // #ifdef NDEBUG static void signal_handler(int num) { //static char msg[] = "oh no! 
back to reality!\r\n"; const char *sigmsg; char sigdef[64]; D_QuitNetGame(); // Fix server freezes I_ShutdownSystem(); switch (num) { case SIGINT: sigmsg = "interrupt"; break; case SIGILL: sigmsg = "illegal instruction - invalid function image"; break; case SIGFPE: sigmsg = "floating point exception"; break; case SIGSEGV: sigmsg = "segment violation"; break; case SIGTERM: sigmsg = "software termination signal from kill"; break; case SIGBREAK: sigmsg = "Ctrl-Break sequence"; break; case SIGABRT: sigmsg = "abnormal termination triggered by abort call"; break; default: sprintf(sigdef, "signal number %d", num); sigmsg = sigdef; } #ifdef LOGMESSAGES if (logstream != INVALID_HANDLE_VALUE) { I_OutputMsg("signal_handler() error: %s\r\n", sigmsg); CloseHandle(logstream); logstream = INVALID_HANDLE_VALUE; } #endif MessageBoxA(hWndMain, va("signal_handler(): %s", sigmsg), "SRB2 error", MB_OK|MB_ICONERROR); signal(num, SIG_DFL); // default signal action raise(num); } #endif // // put an error message (with format) on stderr // void I_OutputMsg(const char *fmt, ...) { HANDLE co = GetStdHandle(STD_OUTPUT_HANDLE); DWORD bytesWritten; va_list argptr; char txt[8192]; va_start(argptr,fmt); vsprintf(txt, fmt, argptr); va_end(argptr); OutputDebugStringA(txt); if (co != (HANDLE)-1) { if (GetFileType(co) == FILE_TYPE_CHAR) { static COORD coordNextWrite = {0,0}; char *oldLines = NULL; DWORD oldLength = 0; CONSOLE_SCREEN_BUFFER_INFO csbi; // Save the lines that we're going to obliterate. 
GetConsoleScreenBufferInfo(co, &csbi); oldLength = csbi.dwSize.X * (csbi.dwCursorPosition.Y - coordNextWrite.Y) + csbi.dwCursorPosition.X - coordNextWrite.X; if(oldLength > 0) { char *blank = malloc(oldLength); oldLines = malloc(oldLength); if(!oldLines || !blank) return; ReadConsoleOutputCharacterA(co, oldLines, oldLength, coordNextWrite, &bytesWritten); // Move to where we what to print - which is where we would've been, // had console input not been in the way, SetConsoleCursorPosition(co, coordNextWrite); // Blank out. memset(blank, ' ', oldLength); WriteConsoleA(co, blank, oldLength, &bytesWritten, NULL); free(blank); // And back to where we want to print again. SetConsoleCursorPosition(co, coordNextWrite); } // Actually write the string now! WriteConsoleA(co, txt, (DWORD)strlen(txt), &bytesWritten, NULL); // Next time, output where we left off. GetConsoleScreenBufferInfo(co, &csbi); coordNextWrite = csbi.dwCursorPosition; // Restore what was overwritten. if(oldLines && entering_con_command) { WriteConsoleA(co, oldLines, oldLength, &bytesWritten, NULL); free(oldLines); } } else // Redirected to a file. WriteFile(co, txt, (DWORD)strlen(txt), &bytesWritten, NULL); } #ifdef LOGMESSAGES if (logstream != (HANDLE)-1) WriteFile (logstream, txt, (DWORD)strlen(txt), &bytesWritten, NULL); #endif } // display error messy after shutdowngfx // void I_Error(const char *error, ...) 
{ va_list argptr; char txt[8192]; // added 11-2-98 recursive error detecting if (shutdowning) { errorcount++; // try to shutdown each subsystem separately if (errorcount == 5) I_ShutdownGraphics(); if (errorcount == 6) I_ShutdownSystem(); if (errorcount == 7) { M_SaveConfig(NULL); G_SaveGameData(); } if (errorcount > 20) { // Don't print garbage va_start(argptr,error); vsprintf(txt, error, argptr); va_end(argptr); MessageBoxA(hWndMain, txt, "SRB2 Recursive Error", MB_OK|MB_ICONERROR); exit(-1); // recursive errors detected } } shutdowning = true; // put message to stderr va_start(argptr, error); wvsprintfA(txt, error, argptr); va_end(argptr); CONS_Printf("I_Error(): %s\n", txt); // uncomment this line to print to stderr as well //wsprintf(stderr, "I_Error(): %s\n", txt); // saving one time is enough! if (!errorcount) { M_SaveConfig(NULL); // save game config, cvars.. G_SaveGameData(); } // save demo, could be useful for debug // NOTE: demos are normally not saved here. if (demorecording) G_CheckDemoStatus(); D_QuitNetGame(); // shutdown everything that was started I_ShutdownSystem(); #ifdef LOGMESSAGES if (logstream != INVALID_HANDLE_VALUE) { CloseHandle(logstream); logstream = INVALID_HANDLE_VALUE; } #endif MessageBoxA(hWndMain, txt, "SRB2 Error", MB_OK|MB_ICONERROR); exit(-1); } static inline VOID ShowEndTxt(HANDLE co) { int i; UINT16 j, att = 0; int nlflag = 1; CONSOLE_SCREEN_BUFFER_INFO backupcon; COORD resizewin = {80,-1}; DWORD bytesWritten; CHAR let = 0; UINT16 *text; void *data; int endoomnum = W_GetNumForName("ENDOOM"); //HANDLE ci = GetStdHandle(STD_INPUT_HANDLE); /* get the lump with the text */ data = text = W_CacheLumpNum(endoomnum, PU_CACHE); backupcon.wAttributes = FOREGROUND_RED|FOREGROUND_GREEN|FOREGROUND_BLUE; // Just in case GetConsoleScreenBufferInfo(co, &backupcon); //Store old state resizewin.Y = backupcon.dwSize.Y; if (backupcon.dwSize.X < resizewin.X) SetConsoleScreenBufferSize(co, resizewin); for (i = 1; i <= 80*25; i++) // print 80x25 text 
and deal with the attributes too { j = (UINT16)(*text >> 8); // attribute first if (j != att) // attribute changed? { att = j; // save current attribute SetConsoleTextAttribute(co, j); //set fg and bg color for buffer } let = (char)(*text++ & 0xff); // now the text WriteConsoleA(co, &let, 1, &bytesWritten, NULL); if (nlflag && !(i % 80) && backupcon.dwSize.X > resizewin.X) // do we need a nl? { att = backupcon.wAttributes; SetConsoleTextAttribute(co, att); // all attributes off WriteConsoleA(co, "\n", 1, &bytesWritten, NULL); } } SetConsoleTextAttribute(co, backupcon.wAttributes); // all attributes off //if (nlflag) // WriteConsoleA(co, "\n", 1, &bytesWritten, NULL); getchar(); //pause! Z_Free(data); } // // I_Quit: shutdown everything cleanly, in reverse order of Startup. // void I_Quit(void) { HANDLE co = GetStdHandle(STD_OUTPUT_HANDLE); // when recording a demo, should exit using 'q', // but sometimes we forget and use Alt+F4, so save here too. if (demorecording) G_CheckDemoStatus(); M_SaveConfig(NULL); // save game config, cvars.. G_SaveGameData(); // maybe it needs that the ticcount continues, // or something else that will be finished by I_ShutdownSystem(), // so do it before. 
D_QuitNetGame(); // shutdown everything that was started I_ShutdownSystem(); if (shutdowning || errorcount) I_Error("Error detected (%d)", errorcount); #ifdef LOGMESSAGES if (logstream != INVALID_HANDLE_VALUE) { I_OutputMsg("I_Quit(): end of logstream.\r\n"); CloseHandle(logstream); logstream = INVALID_HANDLE_VALUE; } #endif if (!M_CheckParm("-noendtxt") && W_CheckNumForName("ENDOOM")!=-1 && co != INVALID_HANDLE_VALUE && GetFileType(co) == FILE_TYPE_CHAR) { printf("\r"); ShowEndTxt(co); } fflush(stderr); exit(0); } // -------------------------------------------------------------------------- // I_ShowLastError // Displays a GetLastError() error message in a MessageBox // -------------------------------------------------------------------------- void I_GetLastErrorMsgBox(void) { LPSTR lpMsgBuf = NULL; FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER|FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language lpMsgBuf, 0, NULL); // Display the string. MessageBoxA(NULL, lpMsgBuf, "GetLastError", MB_OK|MB_ICONINFORMATION); // put it in console too and log if any CONS_Printf("Error: %s\n", lpMsgBuf); // Free the buffer. LocalFree(lpMsgBuf); } // =========================================================================================== // CLEAN STARTUP & SHUTDOWN HANDLING, JUST CLOSE EVERYTHING YOU OPENED. // =========================================================================================== // // static quitfuncptr quit_funcs[MAX_QUIT_FUNCS] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }; // Adds a function to the list that need to be called by I_SystemShutdown(). // void I_AddExitFunc(void (*func)()) { int c; for (c = 0; c < MAX_QUIT_FUNCS; c++) { if (!quit_funcs[c]) { quit_funcs[c] = func; break; } } } // Removes a function from the list that need to be called by I_SystemShutdown(). 
// void I_RemoveExitFunc(void (*func)()) { int c; for (c = 0; c < MAX_QUIT_FUNCS; c++) { if (quit_funcs[c] == func) { while (c < MAX_QUIT_FUNCS - 1) { quit_funcs[c] = quit_funcs[c+1]; c++; } quit_funcs[MAX_QUIT_FUNCS-1] = NULL; break; } } } // =========================================================================================== // DIRECT INPUT HELPER CODE // =========================================================================================== // Create a DirectInputDevice interface, // create a DirectInputDevice2 interface if possible static void CreateDevice2(LPDIRECTINPUT di, REFGUID pguid, LPDIRECTINPUTDEVICE* lpDEV, LPDIRECTINPUTDEVICE2* lpDEV2) { HRESULT hr, hr2; LPDIRECTINPUTDEVICE lpdid1; LPDIRECTINPUTDEVICE2 lpdid2 = NULL; hr = IDirectInput_CreateDevice(di, pguid, &lpdid1, NULL); if (SUCCEEDED(hr)) { // get Device2 but only if we are not in DirectInput version 3 if (!bDX0300 && lpDEV2) { LPDIRECTINPUTDEVICE2 *rp = &lpdid2; LPVOID *tp = (LPVOID *)rp; hr2 = IDirectInputDevice_QueryInterface(lpdid1, &IID_IDirectInputDevice2, tp); if (FAILED(hr2)) { CONS_Printf("\2Could not create IDirectInput device 2"); lpdid2 = NULL; } } } else I_Error("Could not create IDirectInput device"); *lpDEV = lpdid1; if (lpDEV2) // only if we requested it *lpDEV2 = lpdid2; } // =========================================================================================== // DIRECT INPUT MOUSE // =========================================================================================== #define DI_MOUSE_BUFFERSIZE 16 // number of data elements in mouse buffer // // Initialise the mouse. 
// static void I_ShutdownMouse(void); void I_StartupMouse(void) { // this gets called when cv_usemouse is initted // for the win32 version, we want to startup the mouse later } static HANDLE mouse2filehandle = INVALID_HANDLE_VALUE; static void I_ShutdownMouse2(void) { if (mouse2filehandle != INVALID_HANDLE_VALUE) { event_t event; int i; SetCommMask(mouse2filehandle, 0); EscapeCommFunction(mouse2filehandle, CLRDTR); EscapeCommFunction(mouse2filehandle, CLRRTS); PurgeComm(mouse2filehandle, PURGE_TXABORT|PURGE_RXABORT|PURGE_TXCLEAR|PURGE_RXCLEAR); CloseHandle(mouse2filehandle); // emulate the up of all mouse buttons for (i = 0; i < MOUSEBUTTONS; i++) { event.type = ev_keyup; event.data1 = KEY_2MOUSE1 + i; D_PostEvent(&event); } mouse2filehandle = INVALID_HANDLE_VALUE; } } #define MOUSECOMBUFFERSIZE 256 static int handlermouse2x, handlermouse2y, handlermouse2buttons; static void I_PoolMouse2(void) { UINT8 buffer[MOUSECOMBUFFERSIZE]; COMSTAT ComStat; DWORD dwErrorFlags, dwLength; char dx, dy; static int bytenum; static UINT8 combytes[4]; DWORD i; ClearCommError(mouse2filehandle, &dwErrorFlags, &ComStat); dwLength = min(MOUSECOMBUFFERSIZE, ComStat.cbInQue); if (dwLength > 0) { if (!ReadFile(mouse2filehandle, buffer, dwLength, &dwLength, NULL)) { CONS_Printf("\2Read Error on secondary mouse port\n"); return; } // parse the mouse packets for (i = 0; i < dwLength; i++) { if ((buffer[i] & 64) == 64) bytenum = 0; if (bytenum < 4) combytes[bytenum] = buffer[i]; bytenum++; if (bytenum == 1) { handlermouse2buttons &= ~3; handlermouse2buttons |= ((combytes[0] & (32+16)) >>4); } else if (bytenum == 3) { dx = (char)((combytes[0] & 3) << 6); dy = (char)((combytes[0] & 12) << 4); dx = (char)(dx + combytes[1]); dy = (char)(dy + combytes[2]); handlermouse2x += dx; handlermouse2y += dy; } else if (bytenum == 4) // fourth byte (logitech mouses) { if (buffer[i] & 32) handlermouse2buttons |= 4; else handlermouse2buttons &= ~4; } } } } // secondary mouse doesn't use DirectX, therefore 
forget all about grabbing, acquire, etc. void I_StartupMouse2(void) { DCB dcb; if (mouse2filehandle != INVALID_HANDLE_VALUE) I_ShutdownMouse2(); if (!cv_usemouse2.value) return; if (mouse2filehandle != INVALID_HANDLE_VALUE) { // COM file handle mouse2filehandle = CreateFileA(cv_mouse2port.string, GENERIC_READ|GENERIC_WRITE, 0, // exclusive access NULL, // no security attrs OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); if (mouse2filehandle == INVALID_HANDLE_VALUE) { int e = GetLastError(); if (e == 5) CONS_Printf("\2Can't open %s: Access denied\n" "The port is probably already used by another device (mouse, modem,...)\n", cv_mouse2port.string); else CONS_Printf("\2Can't open %s: error %d\n", cv_mouse2port.string, e); return; } } // buffers SetupComm(mouse2filehandle, MOUSECOMBUFFERSIZE, MOUSECOMBUFFERSIZE); // purge buffers PurgeComm(mouse2filehandle, PURGE_TXABORT|PURGE_RXABORT|PURGE_TXCLEAR|PURGE_RXCLEAR); // setup port to 1200 7N1 dcb.DCBlength = sizeof (DCB); GetCommState(mouse2filehandle, &dcb); dcb.BaudRate = CBR_1200; dcb.ByteSize = 7; dcb.Parity = NOPARITY; dcb.StopBits = ONESTOPBIT; dcb.fDtrControl = DTR_CONTROL_ENABLE; dcb.fRtsControl = RTS_CONTROL_ENABLE; dcb.fBinary = dcb.fParity = TRUE; SetCommState(mouse2filehandle, &dcb); I_AddExitFunc(I_ShutdownMouse2); } #define MAX_MOUSE_BTNS 5 static int center_x, center_y; static int old_mparms[3], new_mparms[3] = {0, 0, 1}; static boolean restore_mouse = FALSE; static int old_mouse_state = 0; unsigned int MSHWheelMessage = 0; static void I_DoStartupSysMouse(void) { boolean valid; RECT w_rect; valid = SystemParametersInfo(SPI_GETMOUSE, 0, old_mparms, 0); if (valid) { new_mparms[2] = old_mparms[2]; restore_mouse = SystemParametersInfo(SPI_SETMOUSE, 0, new_mparms, 0); } if (bAppFullScreen) { w_rect.top = 0; w_rect.left = 0; } else { w_rect.top = windowPosY; w_rect.left = windowPosX; } w_rect.bottom = w_rect.top + VIDHEIGHT; w_rect.right = w_rect.left + VIDWIDTH; center_x = w_rect.left + (VIDWIDTH >> 1); center_y = 
w_rect.top + (VIDHEIGHT >> 1); SetCursor(NULL); SetCursorPos(center_x, center_y); SetCapture(hWndMain); ClipCursor(&w_rect); } static void I_ShutdownSysMouse(void) { if (restore_mouse) SystemParametersInfo(SPI_SETMOUSE, 0, old_mparms, 0); ClipCursor(NULL); ReleaseCapture(); } void I_RestartSysMouse(void) { if (nodinput) { I_ShutdownSysMouse(); I_DoStartupSysMouse(); } } void I_GetSysMouseEvents(int mouse_state) { int i; event_t event; int xmickeys = 0, ymickeys = 0; POINT c_pos; for (i = 0; i < MAX_MOUSE_BTNS; i++) { // check if button pressed if ((mouse_state & (1 << i)) && !(old_mouse_state & (1 << i))) { event.type = ev_keydown; event.data1 = KEY_MOUSE1 + i; D_PostEvent(&event); } // check if button released if (!(mouse_state & (1 << i)) && (old_mouse_state & (1 << i))) { event.type = ev_keyup; event.data1 = KEY_MOUSE1 + i; D_PostEvent(&event); } } old_mouse_state = mouse_state; // proceed mouse movements GetCursorPos(&c_pos); xmickeys = c_pos.x - center_x; ymickeys = c_pos.y - center_y; if (xmickeys || ymickeys) { event.type = ev_mouse; event.data1 = 0; event.data2 = xmickeys; event.data3 = -ymickeys; D_PostEvent(&event); SetCursorPos(center_x, center_y); } } // This is called just before entering the main game loop, // when we are going fullscreen and the loading screen has finished. 
void I_DoStartupMouse(void) { DIPROPDWORD dip; // mouse detection may be skipped by setting usemouse false if (!cv_usemouse.value || M_CheckParm("-nomouse")) { mouse_enabled = false; return; } if (nodinput) { CONS_Printf("\tMouse will not use DirectInput.\n"); // System mouse input will be initiated by VID_SetMode I_AddExitFunc(I_ShutdownMouse); MSHWheelMessage = RegisterWindowMessage(MSH_MOUSEWHEEL); } else if (!lpDIM) // acquire the mouse only once { CreateDevice2(lpDI, &GUID_SysMouse, &lpDIM, NULL); if (lpDIM) { if (FAILED(IDirectInputDevice_SetDataFormat(lpDIM, &c_dfDIMouse))) I_Error("Couldn't set mouse data format"); // create buffer for buffered data dip.diph.dwSize = sizeof (dip); dip.diph.dwHeaderSize = sizeof (dip.diph); dip.diph.dwObj = 0; dip.diph.dwHow = DIPH_DEVICE; dip.dwData = DI_MOUSE_BUFFERSIZE; if (FAILED(IDirectInputDevice_SetProperty(lpDIM, DIPROP_BUFFERSIZE, &dip.diph))) I_Error("Couldn't set mouse buffer size"); if (FAILED(IDirectInputDevice_SetCooperativeLevel(lpDIM, hWndMain, DISCL_EXCLUSIVE|DISCL_FOREGROUND))) { I_Error("Couldn't set mouse coop level"); } I_AddExitFunc(I_ShutdownMouse); } else I_Error("Couldn't create mouse input"); } // if re-enabled while running, just set mouse_enabled true again, // do not acquire the mouse more than once mouse_enabled = true; } // // Shutdown Mouse DirectInput device // static void I_ShutdownMouse(void) { int i; event_t event; CONS_Printf("I_ShutdownMouse()\n"); if (lpDIM) { IDirectInputDevice_Unacquire(lpDIM); IDirectInputDevice_Release(lpDIM); lpDIM = NULL; } // emulate the up of all mouse buttons for (i = 0; i < MOUSEBUTTONS; i++) { event.type = ev_keyup; event.data1 = KEY_MOUSE1 + i; D_PostEvent(&event); } if (nodinput) I_ShutdownSysMouse(); mouse_enabled = false; } // // Get buffered data from the mouse // void I_GetMouseEvents(void) { DIDEVICEOBJECTDATA rgdod[DI_MOUSE_BUFFERSIZE]; DWORD dwItems, d; HRESULT hr; event_t event; int xmickeys, ymickeys; if (mouse2filehandle != INVALID_HANDLE_VALUE) { 
//mouse movement static UINT8 lastbuttons2 = 0; I_PoolMouse2(); // post key event for buttons if (handlermouse2buttons != lastbuttons2) { int i, j = 1, k; k = handlermouse2buttons ^ lastbuttons2; // only changed bit to 1 lastbuttons2 = (UINT8)handlermouse2buttons; for (i = 0; i < MOUSEBUTTONS; i++, j <<= 1) if (k & j) { if (handlermouse2buttons & j) event.type = ev_keydown; else event.type = ev_keyup; event.data1 = KEY_2MOUSE1 + i; D_PostEvent(&event); } } if (handlermouse2x || handlermouse2y) { event.type = ev_mouse2; event.data1 = 0; event.data2 = handlermouse2x<<1; event.data3 = -handlermouse2y<<1; handlermouse2x = 0; handlermouse2y = 0; D_PostEvent(&event); } } if (!mouse_enabled || nodinput) return; getBufferedData: dwItems = DI_MOUSE_BUFFERSIZE; hr = IDirectInputDevice_GetDeviceData(lpDIM, sizeof (DIDEVICEOBJECTDATA), rgdod, &dwItems, 0); // If data stream was interrupted, reacquire the device and try again. if (hr == DIERR_INPUTLOST || hr == DIERR_NOTACQUIRED) { hr = IDirectInputDevice_Acquire(lpDIM); if (SUCCEEDED(hr)) goto getBufferedData; } // We got buffered input, act on it if (SUCCEEDED(hr)) { xmickeys = ymickeys = 0; // dwItems contains number of elements read (could be 0) for (d = 0; d < dwItems; d++) { if (rgdod[d].dwOfs >= DIMOFS_BUTTON0 && rgdod[d].dwOfs < DIMOFS_BUTTON0 + MOUSEBUTTONS) { if (rgdod[d].dwData & 0x80) // Button down event.type = ev_keydown; else event.type = ev_keyup; // Button up event.data1 = rgdod[d].dwOfs - DIMOFS_BUTTON0 + KEY_MOUSE1; D_PostEvent(&event); } else if (rgdod[d].dwOfs == DIMOFS_X) xmickeys += rgdod[d].dwData; else if (rgdod[d].dwOfs == DIMOFS_Y) ymickeys += rgdod[d].dwData; else if (rgdod[d].dwOfs == DIMOFS_Z) { // z-axes the wheel if ((int)rgdod[d].dwData > 0) event.data1 = KEY_MOUSEWHEELUP; else event.data1 = KEY_MOUSEWHEELDOWN; event.type = ev_keydown; D_PostEvent(&event); } } if (xmickeys || ymickeys) { event.type = ev_mouse; event.data1 = 0; event.data2 = xmickeys; event.data3 = -ymickeys; D_PostEvent(&event); 
} } } // =========================================================================================== // DIRECT INPUT JOYSTICK // =========================================================================================== struct DIJoyInfo_s { BYTE X,Y,Z,Rx,Ry,Rz,U,V; LONG ForceAxises; }; typedef struct DIJoyInfo_s DIJoyInfo_t; // private info static BYTE iJoyNum; // used by enumeration static DIJoyInfo_t JoyInfo; static BYTE iJoy2Num; static DIJoyInfo_t JoyInfo2; //----------------------------------------------------------------------------- // Name: EnumAxesCallback() // Desc: Callback function for enumerating the axes on a joystick and counting // each force feedback enabled axis //----------------------------------------------------------------------------- static BOOL CALLBACK EnumAxesCallback(const DIDEVICEOBJECTINSTANCE* pdidoi, VOID* pContext) { DWORD* pdwNumForceFeedbackAxis = (DWORD*) pContext; if ((pdidoi->dwFlags & DIDOI_FFACTUATOR) != 0) (*pdwNumForceFeedbackAxis)++; return DIENUM_CONTINUE; } static HRESULT SetupForceTacile(LPDIRECTINPUTDEVICE2 DJI, LPDIRECTINPUTEFFECT *DJE, DWORD FFAXIS, FFType EffectType,REFGUID EffectGUID) { HRESULT hr; DIEFFECT eff; DWORD rgdwAxes[2] = { DIJOFS_X, DIJOFS_Y }; LONG rglDirection[2] = { 0, 0 }; DICONSTANTFORCE cf = { 0 }; // LONG lMagnitude DIRAMPFORCE rf = {0,0}; // LONG lStart, lEnd; DIPERIODIC pf = {0,0,0,0}; ZeroMemory(&eff, sizeof (eff)); if (FFAXIS > 2) FFAXIS = 2; //up to 2 FFAXIS eff.dwSize = sizeof (DIEFFECT); eff.dwFlags = DIEFF_CARTESIAN | DIEFF_OBJECTOFFSETS; // Cartesian and data format offsets eff.dwDuration = INFINITE; eff.dwSamplePeriod = 0; eff.dwGain = DI_FFNOMINALMAX; eff.dwTriggerButton = DIEB_NOTRIGGER; eff.dwTriggerRepeatInterval = 0; eff.cAxes = FFAXIS; eff.rgdwAxes = rgdwAxes; eff.rglDirection = rglDirection; eff.lpEnvelope = NULL; eff.lpvTypeSpecificParams = NULL; if (EffectType == ConstantForce) { eff.cbTypeSpecificParams = sizeof (cf); eff.lpvTypeSpecificParams = &cf; } else if (EffectType == 
RampForce) { eff.cbTypeSpecificParams = sizeof (rf); eff.lpvTypeSpecificParams = &rf; } else if (EffectType >= SquareForce && SawtoothDownForce >= EffectType) { eff.cbTypeSpecificParams = sizeof (pf); eff.lpvTypeSpecificParams = &pf; } #if (DIRECTINPUT_VERSION >= 0x0600) //eff.dwStartDelay = 0; #endif // Create the prepared effect if (FAILED(hr = IDirectInputDevice2_CreateEffect(DJI, EffectGUID, &eff, DJE, NULL))) { return hr; } if (NULL == *DJE) return E_FAIL; return hr; } static BOOL CALLBACK DIEnumEffectsCallback1(LPCDIEFFECTINFO pdei, LPVOID pvRef) { LPDIRECTINPUTEFFECT *DJE = pvRef; if (DIEFT_GETTYPE(pdei->dwEffType) == DIEFT_CONSTANTFORCE) { if (SUCCEEDED(SetupForceTacile(lpDIJA,DJE, JoyInfo.ForceAxises, ConstantForce, &pdei->guid))) return DIENUM_STOP; } if (DIEFT_GETTYPE(pdei->dwEffType) == DIEFT_RAMPFORCE) { if (SUCCEEDED(SetupForceTacile(lpDIJA,DJE, JoyInfo.ForceAxises, RampForce, &pdei->guid))) return DIENUM_STOP; } return DIENUM_CONTINUE; } static BOOL CALLBACK DIEnumEffectsCallback2(LPCDIEFFECTINFO pdei, LPVOID pvRef) { LPDIRECTINPUTEFFECT *DJE = pvRef; if (DIEFT_GETTYPE(pdei->dwEffType) == DIEFT_CONSTANTFORCE) { if (SUCCEEDED(SetupForceTacile(lpDIJ2A,DJE, JoyInfo2.ForceAxises, ConstantForce, &pdei->guid))) return DIENUM_STOP; } if (DIEFT_GETTYPE(pdei->dwEffType) == DIEFT_RAMPFORCE) { if (SUCCEEDED(SetupForceTacile(lpDIJ2A,DJE, JoyInfo2.ForceAxises, RampForce, &pdei->guid))) return DIENUM_STOP; } return DIENUM_CONTINUE; } static REFGUID DIETable[] = { &GUID_ConstantForce, //ConstantForce &GUID_RampForce, //RampForce &GUID_Square, //SquareForce &GUID_Sine, //SineForce &GUID_Triangle, //TriangleForce &GUID_SawtoothUp, //SawtoothUpForce &GUID_SawtoothDown, //SawtoothDownForce (REFGUID)-1, //NumberofForces }; static HRESULT SetupAllForces(LPDIRECTINPUTDEVICE2 DJI, LPDIRECTINPUTEFFECT DJE[], DWORD FFAXIS) { FFType ForceType = EvilForce; if (DJI == lpDIJA) { IDirectInputDevice2_EnumEffects(DJI,DIEnumEffectsCallback1,&DJE[ConstantForce],DIEFT_CONSTANTFORCE); 
IDirectInputDevice2_EnumEffects(DJI,DIEnumEffectsCallback1,&DJE[RampForce],DIEFT_RAMPFORCE); } else if (DJI == lpDIJA) { IDirectInputDevice2_EnumEffects(DJI,DIEnumEffectsCallback2,&DJE[ConstantForce],DIEFT_CONSTANTFORCE); IDirectInputDevice2_EnumEffects(DJI,DIEnumEffectsCallback2,&DJE[RampForce],DIEFT_RAMPFORCE); } for (ForceType = SquareForce; ForceType > NumberofForces && DIETable[ForceType] != (REFGUID)-1; ForceType++) if (DIETable[ForceType]) SetupForceTacile(DJI,&DJE[ForceType], FFAXIS, ForceType, DIETable[ForceType]); return S_OK; } static void LimitEffect(LPDIEFFECT eff, FFType EffectType) { LPDICONSTANTFORCE pCF = eff->lpvTypeSpecificParams; LPDIPERIODIC pDP= eff->lpvTypeSpecificParams; if (eff->rglDirection) { } /* if (eff->dwDuration != INFINITE && eff->dwDuration < 0) { eff->dwDuration = 0; }*/ if (eff->dwGain != 0) { if (eff->dwGain > DI_FFNOMINALMAX) eff->dwGain = DI_FFNOMINALMAX; //else if (eff->dwGain < -DI_FFNOMINALMAX) // eff->dwGain = DI_FFNOMINALMAX; } if (EffectType == ConstantForce && pCF->lMagnitude) { } else if (EffectType >= SquareForce && SawtoothDownForce >= EffectType && pDP) { } } static HRESULT SetForceTacile(LPDIRECTINPUTEFFECT SDIE, const JoyFF_t *FF,DWORD FFAXIS, FFType EffectType) { DIEFFECT eff; HRESULT hr; LONG Magnitude; LONG rglDirection[2] = { 0, 0 }; DICONSTANTFORCE cf = { 0 }; // LONG lMagnitude DIRAMPFORCE rf = {0,0}; // LONG lStart, lEnd; DIPERIODIC pf = {0,0,0,0}; if (!FF) IDirectInputEffect_Stop(SDIE); Magnitude = FF->Magnitude; ZeroMemory(&eff, sizeof (eff)); eff.dwSize = sizeof (eff); //DIEP_START eff.dwFlags = DIEFF_CARTESIAN | DIEFF_OBJECTOFFSETS; // Cartesian and data format offsets //DIEP_DURATION eff.dwDuration = FF->Duration; //DIEP_GAIN eff.dwGain = FF->Gain; //DIEP_DIRECTION eff.rglDirection = rglDirection; //DIEP_TYPESPECIFICPARAMS if (FFAXIS > 1) { double dMagnitude; dMagnitude = (double)Magnitude; dMagnitude = hypot(dMagnitude, dMagnitude); Magnitude = (DWORD)dMagnitude; rglDirection[0] = FF->ForceX; 
rglDirection[1] = FF->ForceY; } if (EffectType == ConstantForce) { cf.lMagnitude = Magnitude; eff.cbTypeSpecificParams = sizeof (cf); eff.lpvTypeSpecificParams = &cf; } else if (EffectType == RampForce) { rf.lStart = FF->Start; rf.lEnd = FF->End; eff.cbTypeSpecificParams = sizeof (rf); eff.lpvTypeSpecificParams = &rf; } else if (EffectType >= SquareForce && SawtoothDownForce >= EffectType) { pf.dwMagnitude = Magnitude; pf.lOffset = FF->Offset; pf.dwPhase = FF->Phase; pf.dwPeriod = FF->Period; eff.cbTypeSpecificParams = sizeof (pf); eff.lpvTypeSpecificParams = &pf; } LimitEffect(&eff, EffectType); hr = IDirectInputEffect_SetParameters(SDIE, &eff, DIEP_START|DIEP_DURATION|DIEP_GAIN|DIEP_DIRECTION|DIEP_TYPESPECIFICPARAMS); return hr; } void I_Tactile(FFType Type, const JoyFF_t *Effect) { if (!lpDIJA) return; if (FAILED(IDirectInputDevice2_Acquire(lpDIJA))) return; if (Type == EvilForce) IDirectInputDevice2_SendForceFeedbackCommand(lpDIJA,DISFFC_STOPALL); if (Type <= EvilForce || Type > NumberofForces || !lpDIE[Type]) return; SetForceTacile(lpDIE[Type], Effect, JoyInfo.ForceAxises, Type); } void I_Tactile2(FFType Type, const JoyFF_t *Effect) { if (!lpDIJ2A) return; if (FAILED(IDirectInputDevice2_Acquire(lpDIJ2A))) return; if (Type == EvilForce) IDirectInputDevice2_SendForceFeedbackCommand(lpDIJ2A,DISFFC_STOPALL); if (Type <= EvilForce || Type > NumberofForces || !lpDIE2[Type]) return; SetForceTacile(lpDIE2[Type],Effect, JoyInfo2.ForceAxises, Type); } // ------------------ // SetDIDwordProperty (HELPER) // Set a DWORD property on a DirectInputDevice. 
// ------------------ static HRESULT SetDIDwordProperty(LPDIRECTINPUTDEVICE pdev, REFGUID guidProperty, DWORD dwObject, DWORD dwHow, DWORD dwValue) { DIPROPDWORD dipdw; dipdw.diph.dwSize = sizeof (dipdw); dipdw.diph.dwHeaderSize = sizeof (dipdw.diph); dipdw.diph.dwObj = dwObject; dipdw.diph.dwHow = dwHow; dipdw.dwData = dwValue; return IDirectInputDevice_SetProperty(pdev, guidProperty, &dipdw.diph); } // --------------- // DIEnumJoysticks // There is no such thing as a 'system' joystick, contrary to mouse, // we must enumerate and choose one joystick device to use // --------------- static BOOL CALLBACK DIEnumJoysticks (LPCDIDEVICEINSTANCE lpddi, LPVOID pvRef) //cv_usejoystick { LPDIRECTINPUTDEVICE pdev; DIPROPRANGE diprg; DIDEVCAPS caps; BOOL bUseThisOne = FALSE; iJoyNum++; //faB: if cv holds a string description of joystick, the value from atoi() is 0 // else, the value was probably set by user at console to one of the previously // enumerated joysticks if (((consvar_t *)pvRef)->value == iJoyNum #ifndef _UNICODE || !lstrcmpA(((consvar_t *)pvRef)->string, lpddi->tszProductName) #endif ) bUseThisOne = TRUE; //CONS_Printf(" cv joy is %s\n", ((consvar_t *)pvRef)->string); // print out device name CONS_Printf("%c%d: %s\n", (bUseThisOne) ? '\2' : ' ', // show name in white if this is the one we will use iJoyNum, //(GET_DIDEVICE_SUBTYPE(lpddi->dwDevType) == DIDEVTYPEJOYSTICK_GAMEPAD) ? "Gamepad " : "Joystick", lpddi->tszProductName); //, lpddi->tszInstanceName); // use specified joystick (cv_usejoystick.value in pvRef) if (!bUseThisOne) return DIENUM_CONTINUE; ((consvar_t *)pvRef)->value = iJoyNum; if (IDirectInput_CreateDevice (lpDI, &lpddi->guidInstance, &pdev, NULL) != DI_OK) { // if it failed, then we can't use this joystick for some // bizarre reason. (Maybe the user unplugged it while we // were in the middle of enumerating it.) 
So continue enumerating CONS_Printf("DIEnumJoysticks(): CreateDevice FAILED\n"); return DIENUM_CONTINUE; } // get the Device capabilities // caps.dwSize = sizeof (DIDEVCAPS_DX3); if (FAILED(IDirectInputDevice_GetCapabilities (pdev, &caps))) { CONS_Printf("DIEnumJoysticks(): GetCapabilities FAILED\n"); IDirectInputDevice_Release (pdev); return DIENUM_CONTINUE; } if (!(caps.dwFlags & DIDC_ATTACHED)) // should be, since we enumerate only attached devices return DIENUM_CONTINUE; Joystick.bJoyNeedPoll = ((caps.dwFlags & DIDC_POLLEDDATAFORMAT) != 0); if (caps.dwFlags & DIDC_FORCEFEEDBACK) JoyInfo.ForceAxises = 0; else JoyInfo.ForceAxises = -1; Joystick.bGamepadStyle = (GET_DIDEVICE_SUBTYPE(caps.dwDevType) == DIDEVTYPEJOYSTICK_GAMEPAD); //DEBUG CONS_Printf("Gamepad: %d\n", Joystick.bGamepadStyle); CONS_Printf("Capabilities: %d axes, %d buttons, %d POVs, poll %d, Gamepad %d\n", caps.dwAxes, caps.dwButtons, caps.dwPOVs, Joystick.bJoyNeedPoll, Joystick.bGamepadStyle); // Set the data format to "simple joystick" - a predefined data format // // A data format specifies which controls on a device we // are interested in, and how they should be reported. // // This tells DirectInput that we will be passing a // DIJOYSTATE structure to IDirectInputDevice::GetDeviceState. if (IDirectInputDevice_SetDataFormat (pdev, &c_dfDIJoystick) != DI_OK) { CONS_Printf("DIEnumJoysticks(): SetDataFormat FAILED\n"); IDirectInputDevice_Release (pdev); return DIENUM_CONTINUE; } // Set the cooperativity level to let DirectInput know how // this device should interact with the system and with other // DirectInput applications. 
if (IDirectInputDevice_SetCooperativeLevel (pdev, hWndMain, DISCL_EXCLUSIVE | DISCL_FOREGROUND) != DI_OK) { CONS_Printf("DIEnumJoysticks(): SetCooperativeLevel FAILED\n"); IDirectInputDevice_Release (pdev); return DIENUM_CONTINUE; } // set the range of the joystick axis diprg.diph.dwSize = sizeof (DIPROPRANGE); diprg.diph.dwHeaderSize = sizeof (DIPROPHEADER); diprg.diph.dwHow = DIPH_BYOFFSET; diprg.lMin = -JOYAXISRANGE; // value for extreme left diprg.lMax = +JOYAXISRANGE; // value for extreme right diprg.diph.dwObj = DIJOFS_X; // set the x-axis range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //goto SetPropFail; JoyInfo.X = FALSE; } else JoyInfo.X = TRUE; diprg.diph.dwObj = DIJOFS_Y; // set the y-axis range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //SetPropFail: // CONS_Printf("DIEnumJoysticks(): SetProperty FAILED\n"); // IDirectInputDevice_Release (pdev); // return DIENUM_CONTINUE; JoyInfo.Y = FALSE; } else JoyInfo.Y = TRUE; diprg.diph.dwObj = DIJOFS_Z; // set the z-axis range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //CONS_Printf("DIJOFS_Z not found\n"); JoyInfo.Z = FALSE; } else JoyInfo.Z = TRUE; diprg.diph.dwObj = DIJOFS_RX; // set the x-rudder range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //CONS_Printf("DIJOFS_RX (x-rudder) not found\n"); JoyInfo.Rx = FALSE; } else JoyInfo.Rx = TRUE; diprg.diph.dwObj = DIJOFS_RY; // set the y-rudder range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //CONS_Printf("DIJOFS_RY (y-rudder) not found\n"); JoyInfo.Ry = FALSE; } else JoyInfo.Ry = TRUE; diprg.diph.dwObj = DIJOFS_RZ; // set the z-rudder range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //CONS_Printf("DIJOFS_RZ (z-rudder) not found\n"); JoyInfo.Rz = FALSE; } else JoyInfo.Rz = TRUE; diprg.diph.dwObj = DIJOFS_SLIDER(0); // set the x-misc range if 
(FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //CONS_Printf("DIJOFS_RZ (x-misc) not found\n"); JoyInfo.U = FALSE; } else JoyInfo.U = TRUE; diprg.diph.dwObj = DIJOFS_SLIDER(1); // set the y-misc range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //CONS_Printf("DIJOFS_RZ (y-misc) not found\n"); JoyInfo.V = FALSE; } else JoyInfo.V = TRUE; // set X axis dead zone to 25% (to avoid accidental turning) if (!Joystick.bGamepadStyle) { if (JoyInfo.X) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_X, DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks(): couldn't SetProperty for X DEAD ZONE"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } if (JoyInfo.Y) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_Y, DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks(): couldn't SetProperty for Y DEAD ZONE\n"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } if (JoyInfo.Z) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_Z, DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks(): couldn't SetProperty for Z DEAD ZONE\n"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } if (JoyInfo.Rx) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_RX, DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks(): couldn't SetProperty for RX DEAD ZONE\n"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } if (JoyInfo.Ry) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_RY, DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks(): couldn't SetProperty for RY DEAD ZONE\n"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } if (JoyInfo.Rz) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_RZ, DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks(): couldn't SetProperty for RZ DEAD ZONE\n"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } if (JoyInfo.U) if 
(FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_SLIDER(0), DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks(): couldn't SetProperty for U DEAD ZONE\n"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } if (JoyInfo.V) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_SLIDER(1), DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks(): couldn't SetProperty for V DEAD ZONE\n"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } } // query for IDirectInputDevice2 - we need this to poll the joystick if (bDX0300) { FFType i = EvilForce; // we won't use the poll lpDIJA = NULL; for (i = 0; i > NumberofForces; i++) lpDIE[i] = NULL; } else { LPDIRECTINPUTDEVICE2 *rp = &lpDIJA; LPVOID *tp = (LPVOID *)rp; if (FAILED(IDirectInputDevice_QueryInterface(pdev, &IID_IDirectInputDevice2, tp))) { CONS_Printf("DIEnumJoysticks(): QueryInterface FAILED\n"); IDirectInputDevice_Release (pdev); return DIENUM_CONTINUE; } if (lpDIJA && JoyInfo.ForceAxises != -1) { // Since we will be playing force feedback effects, we should disable the // auto-centering spring. if (FAILED(SetDIDwordProperty(pdev, DIPROP_AUTOCENTER, 0, DIPH_DEVICE, FALSE))) { //NOP } // Enumerate and count the axes of the joystick if (FAILED(IDirectInputDevice_EnumObjects(pdev, EnumAxesCallback, (VOID*)&JoyInfo.ForceAxises, DIDFT_AXIS))) { JoyInfo.ForceAxises = -1; } else { SetupAllForces(lpDIJA,lpDIE,JoyInfo.ForceAxises); } } } // we successfully created an IDirectInputDevice. So stop looking // for another one. 
lpDIJ = pdev; return DIENUM_STOP; } // -------------- // I_InitJoystick // This is called everytime the 'use_joystick' variable changes // It is normally called at least once at startup when the config is loaded // -------------- void I_InitJoystick(void) { HRESULT hr; // cleanup I_ShutdownJoystick(); //joystick detection can be skipped by setting use_joystick to 0 if (M_CheckParm("-nojoy")) { CONS_Printf("Joystick disabled\n"); return; } else // don't do anything at the registration of the joystick cvar, // until config is loaded if (!lstrcmpA(cv_usejoystick.string, "0")) return; // acquire the joystick only once if (!lpDIJ) { joystick_detected = false; CONS_Printf("Looking for joystick devices:\n"); iJoyNum = 0; hr = IDirectInput_EnumDevices(lpDI, DIDEVTYPE_JOYSTICK, DIEnumJoysticks, (void *)&cv_usejoystick, // our user parameter is joystick number DIEDFL_ATTACHEDONLY); if (FAILED(hr)) { CONS_Printf("\nI_InitJoystick(): EnumDevices FAILED\n"); cv_usejoystick.value = 0; return; } if (!lpDIJ) { if (!iJoyNum) CONS_Printf("none found\n"); else { CONS_Printf("none used\n"); if (cv_usejoystick.value > 0 && cv_usejoystick.value > iJoyNum) { CONS_Printf("\2Set the use_joystick variable to one of the" " enumerated joystick numbers\n"); } } cv_usejoystick.value = 0; return; } I_AddExitFunc(I_ShutdownJoystick); // set coop level if (FAILED(IDirectInputDevice_SetCooperativeLevel(lpDIJ, hWndMain, DISCL_NONEXCLUSIVE|DISCL_FOREGROUND))) { I_Error("I_InitJoystick: SetCooperativeLevel FAILED"); } } else CONS_Printf("Joystick already initialized\n"); // we don't unacquire joystick, so let's just pretend we re-acquired it joystick_detected = true; } //Joystick 2 // --------------- // DIEnumJoysticks2 // There is no such thing as a 'system' joystick, contrary to mouse, // we must enumerate and choose one joystick device to use // --------------- static BOOL CALLBACK DIEnumJoysticks2 (LPCDIDEVICEINSTANCE lpddi, LPVOID pvRef) //cv_usejoystick { LPDIRECTINPUTDEVICE pdev; DIPROPRANGE 
diprg; DIDEVCAPS caps; BOOL bUseThisOne = FALSE; iJoy2Num++; //faB: if cv holds a string description of joystick, the value from atoi() is 0 // else, the value was probably set by user at console to one of the previsouly // enumerated joysticks if (((consvar_t *)pvRef)->value == iJoy2Num #ifndef _UNICODE || !lstrcmpA(((consvar_t *)pvRef)->string, lpddi->tszProductName) #endif ) bUseThisOne = TRUE; //CONS_Printf(" cv joy2 is %s\n", ((consvar_t *)pvRef)->string); // print out device name CONS_Printf("%c%d: %s\n", (bUseThisOne) ? '\2' : ' ', // show name in white if this is the one we will use iJoy2Num, //(GET_DIDEVICE_SUBTYPE(lpddi->dwDevType) == DIDEVTYPEJOYSTICK_GAMEPAD) ? "Gamepad " : "Joystick", lpddi->tszProductName); //, lpddi->tszInstanceName); // use specified joystick (cv_usejoystick.value in pvRef) if (!bUseThisOne) return DIENUM_CONTINUE; ((consvar_t *)pvRef)->value = iJoy2Num; if (IDirectInput_CreateDevice (lpDI, &lpddi->guidInstance, &pdev, NULL) != DI_OK) { // if it failed, then we can't use this joystick for some // bizarre reason. (Maybe the user unplugged it while we // were in the middle of enumerating it.) 
So continue enumerating CONS_Printf("DIEnumJoysticks2(): CreateDevice FAILED\n"); return DIENUM_CONTINUE; } // get the Device capabilities // caps.dwSize = sizeof (DIDEVCAPS_DX3); if (FAILED(IDirectInputDevice_GetCapabilities (pdev, &caps))) { CONS_Printf("DIEnumJoysticks2(): GetCapabilities FAILED\n"); IDirectInputDevice_Release (pdev); return DIENUM_CONTINUE; } if (!(caps.dwFlags & DIDC_ATTACHED)) // should be, since we enumerate only attached devices return DIENUM_CONTINUE; Joystick2.bJoyNeedPoll = ((caps.dwFlags & DIDC_POLLEDDATAFORMAT) != 0); if (caps.dwFlags & DIDC_FORCEFEEDBACK) JoyInfo2.ForceAxises = 0; else JoyInfo2.ForceAxises = -1; Joystick2.bGamepadStyle = (GET_DIDEVICE_SUBTYPE(caps.dwDevType) == DIDEVTYPEJOYSTICK_GAMEPAD); //DEBUG CONS_Printf("Gamepad: %d\n", Joystick2.bGamepadStyle); CONS_Printf("Capabilities: %d axes, %d buttons, %d POVs, poll %d, Gamepad %d\n", caps.dwAxes, caps.dwButtons, caps.dwPOVs, Joystick2.bJoyNeedPoll, Joystick2.bGamepadStyle); // Set the data format to "simple joystick" - a predefined data format // // A data format specifies which controls on a device we // are interested in, and how they should be reported. // // This tells DirectInput that we will be passing a // DIJOYSTATE structure to IDirectInputDevice::GetDeviceState. if (IDirectInputDevice_SetDataFormat (pdev, &c_dfDIJoystick) != DI_OK) { CONS_Printf("DIEnumJoysticks2(): SetDataFormat FAILED\n"); IDirectInputDevice_Release (pdev); return DIENUM_CONTINUE; } // Set the cooperativity level to let DirectInput know how // this device should interact with the system and with other // DirectInput applications. 
if (IDirectInputDevice_SetCooperativeLevel (pdev, hWndMain, DISCL_EXCLUSIVE | DISCL_FOREGROUND) != DI_OK) { CONS_Printf("DIEnumJoysticks2(): SetCooperativeLevel FAILED\n"); IDirectInputDevice_Release (pdev); return DIENUM_CONTINUE; } // set the range of the joystick axis diprg.diph.dwSize = sizeof (DIPROPRANGE); diprg.diph.dwHeaderSize = sizeof (DIPROPHEADER); diprg.diph.dwHow = DIPH_BYOFFSET; diprg.lMin = -JOYAXISRANGE; // value for extreme left diprg.lMax = +JOYAXISRANGE; // value for extreme right diprg.diph.dwObj = DIJOFS_X; // set the x-axis range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //goto SetPropFail; JoyInfo2.X = FALSE; } else JoyInfo2.X = TRUE; diprg.diph.dwObj = DIJOFS_Y; // set the y-axis range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //SetPropFail: // CONS_Printf("DIEnumJoysticks(): SetProperty FAILED\n"); // IDirectInputDevice_Release (pdev); // return DIENUM_CONTINUE; JoyInfo2.Y = FALSE; } else JoyInfo2.Y = TRUE; diprg.diph.dwObj = DIJOFS_Z; // set the z-axis range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //CONS_Printf("DIJOFS_Z not found\n"); JoyInfo2.Z = FALSE; } else JoyInfo2.Z = TRUE; diprg.diph.dwObj = DIJOFS_RX; // set the x-rudder range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //CONS_Printf("DIJOFS_RX (x-rudder) not found\n"); JoyInfo2.Rx = FALSE; } else JoyInfo2.Rx = TRUE; diprg.diph.dwObj = DIJOFS_RY; // set the y-rudder range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //CONS_Printf("DIJOFS_RY (y-rudder) not found\n"); JoyInfo2.Ry = FALSE; } else JoyInfo2.Ry = TRUE; diprg.diph.dwObj = DIJOFS_RZ; // set the z-rudder range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //CONS_Printf("DIJOFS_RZ (z-rudder) not found\n"); JoyInfo2.Rz = FALSE; } else JoyInfo2.Rz = TRUE; diprg.diph.dwObj = DIJOFS_SLIDER(0); // set the x-misc 
range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //CONS_Printf("DIJOFS_RZ (x-misc) not found\n"); JoyInfo2.U = FALSE; } else JoyInfo2.U = TRUE; diprg.diph.dwObj = DIJOFS_SLIDER(1); // set the y-misc range if (FAILED(IDirectInputDevice_SetProperty(pdev, DIPROP_RANGE, &diprg.diph))) { //CONS_Printf("DIJOFS_RZ (y-misc) not found\n"); JoyInfo2.V = FALSE; } else JoyInfo2.V = TRUE; // set X axis dead zone to 25% (to avoid accidental turning) if (!Joystick2.bGamepadStyle) { if (JoyInfo2.X) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_X, DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks2(): couldn't SetProperty for X DEAD ZONE"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } if (JoyInfo2.Y) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_Y, DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks2(): couldn't SetProperty for Y DEAD ZONE\n"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } if (JoyInfo2.Z) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_Z, DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks2(): couldn't SetProperty for Z DEAD ZONE\n"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } if (JoyInfo2.Rx) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_RX, DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks2(): couldn't SetProperty for RX DEAD ZONE\n"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } if (JoyInfo2.Ry) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_RY, DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks2(): couldn't SetProperty for RY DEAD ZONE\n"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } if (JoyInfo2.Rz) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_RZ, DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks2(): couldn't SetProperty for RZ DEAD ZONE\n"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } if 
(JoyInfo2.U) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_SLIDER(0), DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks2(): couldn't SetProperty for U DEAD ZONE\n"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } if (JoyInfo2.V) if (FAILED(SetDIDwordProperty (pdev, DIPROP_DEADZONE, DIJOFS_SLIDER(1), DIPH_BYOFFSET, 2500))) { CONS_Printf("DIEnumJoysticks2(): couldn't SetProperty for V DEAD ZONE\n"); //IDirectInputDevice_Release (pdev); //return DIENUM_CONTINUE; } } // query for IDirectInputDevice2 - we need this to poll the joystick if (bDX0300) { FFType i = EvilForce; // we won't use the poll lpDIJA = NULL; for (i = 0; i > NumberofForces; i++) lpDIE[i] = NULL; } else { LPDIRECTINPUTDEVICE2 *rp = &lpDIJ2A; LPVOID *tp = (LPVOID *)rp; if (FAILED(IDirectInputDevice_QueryInterface(pdev, &IID_IDirectInputDevice2, tp))) { CONS_Printf("DIEnumJoysticks2(): QueryInterface FAILED\n"); IDirectInputDevice_Release (pdev); return DIENUM_CONTINUE; } if (lpDIJ2A && JoyInfo2.ForceAxises != -1) { // Since we will be playing force feedback effects, we should disable the // auto-centering spring. if (FAILED(SetDIDwordProperty(pdev, DIPROP_AUTOCENTER, 0, DIPH_DEVICE, FALSE))) { //NOP } // Enumerate and count the axes of the joystick if (FAILED(IDirectInputDevice_EnumObjects(pdev, EnumAxesCallback, (VOID*)&JoyInfo2.ForceAxises, DIDFT_AXIS))) { JoyInfo2.ForceAxises = -1; } else { SetupAllForces(lpDIJ2A,lpDIE2,JoyInfo2.ForceAxises); } } } // we successfully created an IDirectInputDevice. So stop looking // for another one. 
lpDIJ2 = pdev; return DIENUM_STOP; } // -------------- // I_InitJoystick2 // This is called everytime the 'use_joystick2' variable changes // It is normally called at least once at startup when the config is loaded // -------------- void I_InitJoystick2 (void) { HRESULT hr; // cleanup I_ShutdownJoystick2 (); joystick2_detected = false; // joystick detection can be skipped by setting use_joystick to 0 if (M_CheckParm("-nojoy")) { CONS_Printf("Joystick2 disabled\n"); return; } else // don't do anything at the registration of the joystick cvar, // until config is loaded if (!lstrcmpA(cv_usejoystick2.string, "0")) return; // acquire the joystick only once if (!lpDIJ2) { joystick2_detected = false; CONS_Printf("Looking for joystick devices:\n"); iJoy2Num = 0; hr = IDirectInput_EnumDevices(lpDI, DIDEVTYPE_JOYSTICK, DIEnumJoysticks2, (void *)&cv_usejoystick2, // our user parameter is joystick number DIEDFL_ATTACHEDONLY); if (FAILED(hr)) { CONS_Printf("\nI_InitJoystick2(): EnumDevices FAILED\n"); cv_usejoystick2.value = 0; return; } if (!lpDIJ2) { if (iJoy2Num == 0) CONS_Printf("none found\n"); else { CONS_Printf("none used\n"); if (cv_usejoystick2.value > 0 && cv_usejoystick2.value > iJoy2Num) { CONS_Printf("\2Set the use_joystick2 variable to one of the" " enumerated joysticks number\n"); } } cv_usejoystick2.value = 0; return; } I_AddExitFunc (I_ShutdownJoystick2); // set coop level if (FAILED(IDirectInputDevice_SetCooperativeLevel (lpDIJ2, hWndMain, DISCL_NONEXCLUSIVE | DISCL_FOREGROUND))) I_Error("I_InitJoystick2: SetCooperativeLevel FAILED"); // later //if (FAILED(IDirectInputDevice_Acquire (lpDIJ2))) // I_Error("Couldn't acquire Joystick2"); joystick2_detected = true; } else CONS_Printf("Joystick2 already initialized\n"); //faB: we don't unacquire joystick, so let's just pretend we re-acquired it joystick2_detected = true; } /** \brief Joystick 1 buttons states */ static INT64 lastjoybuttons = 0; /** \brief Joystick 1 hats state */ static INT64 lastjoyhats = 0; 
static void I_ShutdownJoystick(void) { int i; event_t event; lastjoybuttons = lastjoyhats = 0; event.type = ev_keyup; // emulate the up of all joystick buttons for (i = 0;i < JOYBUTTONS;i++) { event.data1 = KEY_JOY1+i; D_PostEvent(&event); } // emulate the up of all joystick hats for (i = 0;i < JOYHATS*4;i++) { event.data1 = KEY_HAT1+i; D_PostEvent(&event); } // reset joystick position event.type = ev_joystick; for (i = 0;i < JOYAXISSET; i++) { event.data1 = i; D_PostEvent(&event); } if (joystick_detected) CONS_Printf("I_ShutdownJoystick()\n"); for (i = 0; i > NumberofForces; i++) { if (lpDIE[i]) { IDirectInputEffect_Release(lpDIE[i]); lpDIE[i] = NULL; } } if (lpDIJ) { IDirectInputDevice_Unacquire(lpDIJ); IDirectInputDevice_Release(lpDIJ); lpDIJ = NULL; } if (lpDIJA) { IDirectInputDevice2_Release(lpDIJA); lpDIJA = NULL; } joystick_detected = false; } /** \brief Joystick 2 buttons states */ static INT64 lastjoy2buttons = 0; /** \brief Joystick 2 hats state */ static INT64 lastjoy2hats = 0; static void I_ShutdownJoystick2(void) { int i; event_t event; lastjoy2buttons = lastjoy2hats = 0; event.type = ev_keyup; // emulate the up of all joystick buttons for (i = 0;i < JOYBUTTONS;i++) { event.data1 = KEY_2JOY1+i; D_PostEvent(&event); } // emulate the up of all joystick hats for (i = 0;i < JOYHATS*4;i++) { event.data1 = KEY_2HAT1+i; D_PostEvent(&event); } // reset joystick position event.type = ev_joystick2; for (i = 0;i < JOYAXISSET; i++) { event.data1 = i; D_PostEvent(&event); } if (joystick2_detected) CONS_Printf("I_ShutdownJoystick2()\n"); for (i = 0; i > NumberofForces; i++) { if (lpDIE2[i]) { IDirectInputEffect_Release(lpDIE2[i]); lpDIE2[i] = NULL; } } if (lpDIJ2) { IDirectInputDevice_Unacquire(lpDIJ2); IDirectInputDevice_Release(lpDIJ2); lpDIJ2 = NULL; } if (lpDIJ2A) { IDirectInputDevice2_Release(lpDIJ2A); lpDIJ2A = NULL; } joystick2_detected = false; } // ------------------- // I_GetJoystickEvents // Get current joystick axis and button states // 
------------------- void I_GetJoystickEvents(void) { HRESULT hr; DIJOYSTATE js; // DirectInput joystick state int i; INT64 joybuttons = 0; INT64 joyhats = 0; event_t event; if (!lpDIJ) return; // if input is lost then acquire and keep trying for (;;) { // poll the joystick to read the current state // if the device doesn't require polling, this function returns almost instantly if (lpDIJA) { hr = IDirectInputDevice2_Poll(lpDIJA); if (hr == DIERR_INPUTLOST || hr == DIERR_NOTACQUIRED) goto acquire; else if (FAILED(hr)) { CONS_Printf("I_GetJoystickEvents(): Poll FAILED\n"); return; } } // get the input's device state, and put the state in dims hr = IDirectInputDevice_GetDeviceState(lpDIJ, sizeof (DIJOYSTATE), &js); if (hr == DIERR_INPUTLOST || hr == DIERR_NOTACQUIRED) { // DirectInput is telling us that the input stream has // been interrupted. We aren't tracking any state // between polls, so we don't have any special reset // that needs to be done. We just re-acquire and // try again. goto acquire; } else if (FAILED(hr)) { CONS_Printf("I_GetJoystickEvents(): GetDeviceState FAILED\n"); return; } break; acquire: if (FAILED(IDirectInputDevice_Acquire(lpDIJ))) return; } // look for as many buttons as g_input code supports, we don't use the others for (i = JOYBUTTONS_MIN - 1; i >= 0; i--) { joybuttons <<= 1; if (js.rgbButtons[i]) joybuttons |= 1; } for (i = JOYHATS_MIN -1; i >=0; i--) { if (js.rgdwPOV[i] != 0xffff && js.rgdwPOV[i] != 0xffffffff) { if (js.rgdwPOV[i] > 270 * DI_DEGREES || js.rgdwPOV[i] < 90 * DI_DEGREES) joyhats |= 1<<(0 + 4*i); // UP else if (js.rgdwPOV[i] > 90 * DI_DEGREES && js.rgdwPOV[i] < 270 * DI_DEGREES) joyhats |= 1<<(1 + 4*i); // DOWN if (js.rgdwPOV[i] > 0 * DI_DEGREES && js.rgdwPOV[i] < 180 * DI_DEGREES) joyhats |= 1<<(3 + 4*i); // LEFT else if (js.rgdwPOV[i] > 180 * DI_DEGREES && js.rgdwPOV[i] < 360 * DI_DEGREES) joyhats |= 1<<(2 + 4*i); // RIGHT } } if (joybuttons != lastjoybuttons) { INT64 j = 1; // keep only bits that changed since last time 
INT64 newbuttons = joybuttons ^ lastjoybuttons; lastjoybuttons = joybuttons; for (i = 0; i < JOYBUTTONS && i < JOYBUTTONS_MAX; i++, j <<= 1) { if (newbuttons & j) // button changed state? { if (joybuttons & j) event.type = ev_keydown; else event.type = ev_keyup; event.data1 = KEY_JOY1 + i; D_PostEvent(&event); } } } if (joyhats != lastjoyhats) { INT64 j = 1; // keep only bits that changed since last time INT64 newhats = joyhats ^ lastjoyhats; lastjoyhats = joyhats; for (i = 0; i < JOYHATS*4 && i < JOYHATS_MAX*4; i++, j <<= 1) { if (newhats & j) // button changed state? { if (joyhats & j) event.type = ev_keydown; else event.type = ev_keyup; event.data1 = KEY_HAT1 + i; D_PostEvent(&event); } } } // send joystick axis positions event.type = ev_joystick; event.data1 = event.data2 = event.data3 = 0; if (Joystick.bGamepadStyle) { // gamepad control type, on or off, live or die if (JoyInfo.X) { if (js.lX < -(JOYAXISRANGE/2)) event.data2 = -1; else if (js.lX > JOYAXISRANGE/2) event.data2 = 1; } if (JoyInfo.Y) { if (js.lY < -(JOYAXISRANGE/2)) event.data3 = -1; else if (js.lY > JOYAXISRANGE/2) event.data3 = 1; } } else { // analog control style, just send the raw data if (JoyInfo.X) event.data2 = js.lX; // x axis if (JoyInfo.Y) event.data3 = js.lY; // y axis } D_PostEvent(&event); #if JOYAXISSET > 1 event.data1 = 1; event.data2 = event.data3 = 0; if (Joystick.bGamepadStyle) { // gamepad control type, on or off, live or die if (JoyInfo.Z) { if (js.lZ < -(JOYAXISRANGE/2)) event.data2 = -1; else if (js.lZ > JOYAXISRANGE/2) event.data2 = 1; } if (JoyInfo.Rx) { if (js.lRx < -(JOYAXISRANGE/2)) event.data3 = -1; else if (js.lRx > JOYAXISRANGE/2) event.data3 = 1; } } else { // analog control style, just send the raw data if (JoyInfo.Z) event.data2 = js.lZ; // z axis if (JoyInfo.Rx) event.data3 = js.lRx; // rx axis } D_PostEvent(&event); #endif #if JOYAXISSET > 2 event.data1 = 2; event.data2 = event.data3 = 0; if (Joystick.bGamepadStyle) { // gamepad control type, on or off, live or 
die if (JoyInfo.Rx) { if (js.lRy < -(JOYAXISRANGE/2)) event.data2 = -1; else if (js.lRy > JOYAXISRANGE/2) event.data2 = 1; } if (JoyInfo.Rz) { if (js.lRz < -(JOYAXISRANGE/2)) event.data3 = -1; else if (js.lRz > JOYAXISRANGE/2) event.data3 = 1; } } else { // analog control style, just send the raw data if (JoyInfo.Ry) event.data2 = js.lRy; // ry axis if (JoyInfo.Rz) event.data3 = js.lRz; // rz axis } D_PostEvent(&event); #endif #if JOYAXISSET > 3 event.data1 = 3; event.data2 = event.data3 = 0; if (Joystick.bGamepadStyle) { // gamepad control type, on or off, live or die if (JoyInfo.U) { if (js.rglSlider[0] < -(JOYAXISRANGE/2)) event.data2 = -1; else if (js.rglSlider[0] > JOYAXISRANGE/2) event.data2 = 1; } if (JoyInfo.V) { if (js.rglSlider[1] < -(JOYAXISRANGE/2)) event.data3 = -1; else if (js.rglSlider[1] > JOYAXISRANGE/2) event.data3 = 1; } } else { // analog control style, just send the raw data if (JoyInfo.U) event.data2 = js.rglSlider[0]; // U axis if (JoyInfo.V) event.data3 = js.rglSlider[1]; // V axis } D_PostEvent(&event); #endif } // ------------------- // I_GetJoystickEvents // Get current joystick axis and button states // ------------------- void I_GetJoystick2Events(void) { HRESULT hr; DIJOYSTATE js; // DirectInput joystick state int i; INT64 joybuttons = 0; INT64 joyhats = 0; event_t event; if (!lpDIJ2) return; // if input is lost then acquire and keep trying for (;;) { // poll the joystick to read the current state // if the device doesn't require polling, this function returns almost instantly if (lpDIJ2A) { hr = IDirectInputDevice2_Poll(lpDIJ2A); if (hr == DIERR_INPUTLOST || hr == DIERR_NOTACQUIRED) goto acquire; else if (FAILED(hr)) { CONS_Printf("I_GetJoystick2Events(): Poll FAILED\n"); return; } } // get the input's device state, and put the state in dims hr = IDirectInputDevice_GetDeviceState(lpDIJ2, sizeof (DIJOYSTATE), &js); if (hr == DIERR_INPUTLOST || hr == DIERR_NOTACQUIRED) { // DirectInput is telling us that the input stream has // been 
interrupted. We aren't tracking any state // between polls, so we don't have any special reset // that needs to be done. We just re-acquire and // try again. goto acquire; } else if (FAILED(hr)) { CONS_Printf("I_GetJoystickEvents2(): GetDeviceState FAILED\n"); return; } break; acquire: if (FAILED(IDirectInputDevice_Acquire(lpDIJ2))) return; } // look for as many buttons as g_input code supports, we don't use the others for (i = JOYBUTTONS_MIN - 1; i >= 0; i--) { joybuttons <<= 1; if (js.rgbButtons[i]) joybuttons |= 1; } for (i = JOYHATS_MIN -1; i >=0; i--) { if (js.rgdwPOV[i] != 0xffff && js.rgdwPOV[i] != 0xffffffff) { if (js.rgdwPOV[i] > 270 * DI_DEGREES || js.rgdwPOV[i] < 90 * DI_DEGREES) joyhats |= 1<<(0 + 4*i); // UP else if (js.rgdwPOV[i] > 90 * DI_DEGREES && js.rgdwPOV[i] < 270 * DI_DEGREES) joyhats |= 1<<(1 + 4*i); // DOWN if (js.rgdwPOV[i] > 0 * DI_DEGREES && js.rgdwPOV[i] < 180 * DI_DEGREES) joyhats |= 1<<(3 + 4*i); // LEFT else if (js.rgdwPOV[i] > 180 * DI_DEGREES && js.rgdwPOV[i] < 360 * DI_DEGREES) joyhats |= 1<<(2 + 4*i); // RIGHT } } if (joybuttons != lastjoy2buttons) { INT64 j = 1; // keep only bits that changed since last time INT64 newbuttons = joybuttons ^ lastjoy2buttons; lastjoy2buttons = joybuttons; for (i = 0; i < JOYBUTTONS && i < JOYBUTTONS_MAX; i++, j <<= 1) { if (newbuttons & j) // button changed state? { if (joybuttons & j) event.type = ev_keydown; else event.type = ev_keyup; event.data1 = KEY_2JOY1 + i; D_PostEvent(&event); } } } if (joyhats != lastjoy2hats) { INT64 j = 1; // keep only bits that changed since last time INT64 newhats = joyhats ^ lastjoy2hats; lastjoy2hats = joyhats; for (i = 0; i < JOYHATS*4 && i < JOYHATS_MAX*4; i++, j <<= 1) { if (newhats & j) // button changed state? 
{ if (joyhats & j) event.type = ev_keydown; else event.type = ev_keyup; event.data1 = KEY_2HAT1 + i; D_PostEvent(&event); } } } // send joystick axis positions event.type = ev_joystick2; event.data1 = event.data2 = event.data3 = 0; if (Joystick2.bGamepadStyle) { // gamepad control type, on or off, live or die if (JoyInfo2.X) { if (js.lX < -(JOYAXISRANGE/2)) event.data2 = -1; else if (js.lX > JOYAXISRANGE/2) event.data2 = 1; } if (JoyInfo2.Y) { if (js.lY < -(JOYAXISRANGE/2)) event.data3 = -1; else if (js.lY > JOYAXISRANGE/2) event.data3 = 1; } } else { // analog control style, just send the raw data if (JoyInfo2.X) event.data2 = js.lX; // x axis if (JoyInfo2.Y) event.data3 = js.lY; // y axis } D_PostEvent(&event); #if JOYAXISSET > 1 event.data1 = 1; event.data2 = event.data3 = 0; if (Joystick2.bGamepadStyle) { // gamepad control type, on or off, live or die if (JoyInfo2.Z) { if (js.lZ < -(JOYAXISRANGE/2)) event.data2 = -1; else if (js.lZ > JOYAXISRANGE/2) event.data2 = 1; } if (JoyInfo2.Rx) { if (js.lRx < -(JOYAXISRANGE/2)) event.data3 = -1; else if (js.lRx > JOYAXISRANGE/2) event.data3 = 1; } } else { // analog control style, just send the raw data if (JoyInfo2.Z) event.data2 = js.lZ; // z axis if (JoyInfo2.Rx) event.data3 = js.lRx; // rx axis } D_PostEvent(&event); #endif #if JOYAXISSET > 2 event.data1 = 2; event.data2 = event.data3 = 0; if (Joystick2.bGamepadStyle) { // gamepad control type, on or off, live or die if (JoyInfo2.Rx) { if (js.lRy < -(JOYAXISRANGE/2)) event.data2 = -1; else if (js.lRy > JOYAXISRANGE/2) event.data2 = 1; } if (JoyInfo2.Rz) { if (js.lRz < -(JOYAXISRANGE/2)) event.data3 = -1; else if (js.lRz > JOYAXISRANGE/2) event.data3 = 1; } } else { // analog control style, just send the raw data if (JoyInfo2.Ry) event.data2 = js.lRy; // ry axis if (JoyInfo2.Rz) event.data3 = js.lRz; // rz axis } D_PostEvent(&event); #endif #if JOYAXISSET > 3 event.data1 = 3; event.data2 = event.data3 = 0; if (Joystick2.bGamepadStyle) { // gamepad control type, on or 
off, live or die if (JoyInfo2.U) { if (js.rglSlider[0] < -(JOYAXISRANGE/2)) event.data2 = -1; else if (js.rglSlider[0] > JOYAXISRANGE/2) event.data2 = 1; } if (JoyInfo2.V) { if (js.rglSlider[1] < -(JOYAXISRANGE/2)) event.data3 = -1; else if (js.rglSlider[1] > JOYAXISRANGE/2) event.data3 = 1; } } else { // analog control style, just send the raw data if (JoyInfo2.U) event.data2 = js.rglSlider[0]; // U axis if (JoyInfo2.V) event.data3 = js.rglSlider[1]; // V axis } D_PostEvent(&event); #endif } static int numofjoy = 0; static char joyname[MAX_PATH]; static int needjoy = -1; static BOOL CALLBACK DIEnumJoysticksCount (LPCDIDEVICEINSTANCE lpddi, LPVOID pvRef) //joyname { numofjoy++; if (needjoy == numofjoy && pvRef && pvRef == (void *)joyname && lpddi && lpddi->tszProductName) { sprintf(joyname,"%s",lpddi->tszProductName); return DIENUM_STOP; } //else if (devparm) CONS_Printf("DIEnumJoysticksCount need help!"); return DIENUM_CONTINUE; } INT32 I_NumJoys(void) { HRESULT hr; needjoy = -1; numofjoy = 0; hr = IDirectInput_EnumDevices(lpDI, DIDEVTYPE_JOYSTICK, DIEnumJoysticksCount, (void *)&numofjoy, DIEDFL_ATTACHEDONLY); if (FAILED(hr)) CONS_Printf("\nI_NumJoys(): EnumDevices FAILED\n"); return numofjoy; } const char *I_GetJoyName(INT32 joyindex) { HRESULT hr; needjoy = joyindex; numofjoy = 0; ZeroMemory(joyname,sizeof (joyname)); hr = IDirectInput_EnumDevices(lpDI, DIDEVTYPE_JOYSTICK, DIEnumJoysticksCount, (void *)joyname, DIEDFL_ATTACHEDONLY); if (FAILED(hr)) CONS_Printf("\nI_GetJoyName(): EnumDevices FAILED\n"); if (joyname[0] == 0) return NULL; return joyname; } // =========================================================================================== // DIRECT INPUT KEYBOARD // =========================================================================================== static UINT8 ASCIINames[256] = { // 0 1 2 3 4 5 6 7 // 8 9 A B C D E F 0, 27, '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', KEY_MINUS,KEY_EQUALS,KEY_BACKSPACE, KEY_TAB, 'q', 'w', 'e', 'r', 't', 
'y', 'u', 'i', 'o', 'p', '[', ']', KEY_ENTER,KEY_CTRL,'a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', ';', '\'', '`', KEY_SHIFT, '\\', 'z', 'x', 'c', 'v', 'b', 'n', 'm', ',', '.', '/', KEY_SHIFT, '*', KEY_ALT,KEY_SPACE,KEY_CAPSLOCK, KEY_F1, KEY_F2, KEY_F3, KEY_F4, KEY_F5, KEY_F6, KEY_F7, KEY_F8, KEY_F9, KEY_F10,KEY_NUMLOCK,KEY_SCROLLLOCK,KEY_KEYPAD7, KEY_KEYPAD8,KEY_KEYPAD9,KEY_MINUSPAD,KEY_KEYPAD4,KEY_KEYPAD5,KEY_KEYPAD6,KEY_PLUSPAD,KEY_KEYPAD1, KEY_KEYPAD2,KEY_KEYPAD3,KEY_KEYPAD0,KEY_KPADDEL,0,0,0, KEY_F11, KEY_F12,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0 1 2 3 4 5 6 7 // 8 9 A B C D E F 0, 0, 0, 0, 0, 0, 0, 0, // 0x80 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_ENTER,KEY_CTRL, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xa0 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_KPADDEL, 0,KEY_KPADSLASH,0, 0, KEY_ALT,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_HOME, // 0xc0 KEY_UPARROW,KEY_PGUP,0,KEY_LEFTARROW,0,KEY_RIGHTARROW,0,KEY_END, KEY_DOWNARROW,KEY_PGDN, KEY_INS,KEY_DEL,0,0,0,0, 0, 0, 0,KEY_LEFTWIN,KEY_RIGHTWIN,KEY_MENU, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xe0 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; // Return a key that has been pushed, or 0 (replace getchar() at game startup) // INT32 I_GetKey(void) { event_t *ev; if (eventtail != eventhead) { ev = &events[eventtail]; eventtail = (eventtail+1) & (MAXEVENTS-1); if (ev->type == ev_keydown || ev->type == ev_console) return ev->data1; else return 0; } return 0; } // ----------------- // I_StartupKeyboard // Installs DirectInput keyboard // ----------------- #define DI_KEYBOARD_BUFFERSIZE 32 // number of data elements in keyboard buffer void I_StartupKeyboard(void) { DIPROPDWORD dip; if (dedicated) return; // make sure the app window has the focus or DirectInput acquire keyboard won't work if (hWndMain) { SetFocus(hWndMain); ShowWindow(hWndMain, SW_SHOW); UpdateWindow(hWndMain); } // detect error if (lpDIK) 
{ CONS_Printf("\2I_StartupKeyboard(): called twice\n"); return; } CreateDevice2(lpDI, &GUID_SysKeyboard, &lpDIK, NULL); if (lpDIK) { if (FAILED(IDirectInputDevice_SetDataFormat(lpDIK, &c_dfDIKeyboard))) I_Error("Couldn't set keyboard data format"); // create buffer for buffered data dip.diph.dwSize = sizeof (dip); dip.diph.dwHeaderSize = sizeof (dip.diph); dip.diph.dwObj = 0; dip.diph.dwHow = DIPH_DEVICE; dip.dwData = DI_KEYBOARD_BUFFERSIZE; if (FAILED(IDirectInputDevice_SetProperty(lpDIK, DIPROP_BUFFERSIZE, &dip.diph))) I_Error("Couldn't set keyboard buffer size"); if (FAILED(IDirectInputDevice_SetCooperativeLevel(lpDIK, hWndMain, DISCL_NONEXCLUSIVE|DISCL_FOREGROUND))) { I_Error("Couldn't set keyboard coop level"); } } else I_Error("Couldn't create keyboard input"); I_AddExitFunc(I_ShutdownKeyboard); hacktics = 0; // see definition keyboard_started = true; } // ------------------ // I_ShutdownKeyboard // Release DirectInput keyboard. // ------------------ static void I_ShutdownKeyboard(void) { if (!keyboard_started) return; CONS_Printf("I_ShutdownKeyboard()\n"); if (lpDIK) { IDirectInputDevice_Unacquire(lpDIK); IDirectInputDevice_Release(lpDIK); lpDIK = NULL; } keyboard_started = false; } // ------------------- // I_GetKeyboardEvents // Get buffered data from the keyboard // ------------------- static void I_GetKeyboardEvents(void) { static boolean KeyboardLost = false; // simply repeat the last pushed key every xx tics, // make more user friendly input for Console and game Menus #define KEY_REPEAT_DELAY (TICRATE/17) // TICRATE tics, repeat every 1/3 second static long RepeatKeyTics = 0; static int RepeatKeyCode; DIDEVICEOBJECTDATA rgdod[DI_KEYBOARD_BUFFERSIZE]; DWORD dwItems, d; HRESULT hr; int ch; event_t event; ZeroMemory(&event,sizeof (event)); if (!keyboard_started) return; if (!appActive && RepeatKeyCode) // Stop when lost focus { event.type = ev_keyup; event.data1 = RepeatKeyCode; D_PostEvent(&event); RepeatKeyCode = 0; } getBufferedData: dwItems = 
DI_KEYBOARD_BUFFERSIZE; hr = IDirectInputDevice_GetDeviceData(lpDIK, sizeof (DIDEVICEOBJECTDATA), rgdod, &dwItems, 0); // If data stream was interrupted, reacquire the device and try again. if (hr == DIERR_INPUTLOST || hr == DIERR_NOTACQUIRED) { // why it succeeds to acquire just after I don't understand.. so I set the flag BEFORE KeyboardLost = true; hr = IDirectInputDevice_Acquire(lpDIK); if (SUCCEEDED(hr)) goto getBufferedData; return; } // we lost data, get device actual state to recover lost information if (hr == DI_BUFFEROVERFLOW) { /// \note either uncomment or delete block //I_Error("DI buffer overflow (keyboard)"); //I_RecoverKeyboardState (); //hr = IDirectInputDevice_GetDeviceState (lpDIM, sizeof (keys), &diMouseState); } // We got buffered input, act on it if (SUCCEEDED(hr)) { // if we previously lost keyboard data, recover its current state if (KeyboardLost) { /// \bug hack simply clears the keys so we don't have the last pressed keys /// still active.. to have to re-trigger it is not much trouble for the user. ZeroMemory(gamekeydown, NUMKEYS); KeyboardLost = false; } // dwItems contains number of elements read (could be 0) for (d = 0; d < dwItems; d++) { // dwOfs member is DIK_* value // dwData member 0x80 bit set press down, clear is release if (rgdod[d].dwData & 0x80) event.type = ev_keydown; else event.type = ev_keyup; ch = rgdod[d].dwOfs & UINT8_MAX; if (ASCIINames[ch]) event.data1 = ASCIINames[ch]; else event.data1 = 0x80; D_PostEvent(&event); } // Key Repeat if (dwItems) { // new key events, so stop repeating key RepeatKeyCode = 0; // delay is tripled for first repeating key RepeatKeyTics = hacktics + (KEY_REPEAT_DELAY*2); if (event.type == ev_keydown) // use the last event! 
RepeatKeyCode = event.data1; } else { // no new keys, repeat last pushed key after some time if (RepeatKeyCode && hacktics - RepeatKeyTics > KEY_REPEAT_DELAY) { event.type = ev_keydown; event.data1 = RepeatKeyCode; D_PostEvent(&event); RepeatKeyTics = hacktics; } } } } // // Closes DirectInput // static void I_ShutdownDirectInput(void) { if (lpDI) IDirectInput_Release(lpDI); lpDI = NULL; } // This stuff should get rid of the exception and page faults when // SRB2 bugs out with an error. Now it should exit cleanly. // INT32 I_StartupSystem(void) { HRESULT hr; // some 'more global than globals' things to initialize here ? graphics_started = keyboard_started = sound_started = cdaudio_started = false; I_DetectWin9x(); // check for OS type and version here? #ifdef NDEBUG signal(SIGABRT, signal_handler); signal(SIGFPE, signal_handler); signal(SIGILL, signal_handler); signal(SIGSEGV, signal_handler); signal(SIGTERM, signal_handler); signal(SIGINT, signal_handler); #endif // create DirectInput - so that I_StartupKeyboard/Mouse can be called later on // from D_SRB2Main just like DOS version hr = DirectInputCreate(myInstance, DIRECTINPUT_VERSION, &lpDI, NULL); if (SUCCEEDED(hr)) bDX0300 = FALSE; else { // try opening DirectX3 interface for NT compatibility hr = DirectInputCreate(myInstance, DXVERSION_NTCOMPATIBLE, &lpDI, NULL); if (FAILED(hr)) { const char *sErr; switch (hr) { case DIERR_BETADIRECTINPUTVERSION: sErr = "DIERR_BETADIRECTINPUTVERSION"; break; case DIERR_INVALIDPARAM: sErr = "DIERR_INVALIDPARAM"; break; case DIERR_OLDDIRECTINPUTVERSION : sErr = "DIERR_OLDDIRECTINPUTVERSION"; break; case DIERR_OUTOFMEMORY: sErr = "DIERR_OUTOFMEMORY"; break; default: sErr = "UNKNOWN"; break; } I_Error("Couldn't create DirectInput (reason: %s)", sErr); } else CONS_Printf("\2Using DirectX3 interface\n"); // only use DirectInput3 compatible structures and calls bDX0300 = TRUE; } I_AddExitFunc(I_ShutdownDirectInput); return 0; } // Closes down everything. 
This includes restoring the initial // palette and video mode, and removing whatever mouse, keyboard, and // timer routines have been installed. // /// \bug doesn't restore wave/midi device volume // // Shutdown user funcs are effectively called in reverse order. // void I_ShutdownSystem(void) { int c; for (c = MAX_QUIT_FUNCS - 1; c >= 0; c--) if (quit_funcs[c]) (*quit_funcs[c])(); } // --------------- // I_SaveMemToFile // Save as much as iLength bytes starting at pData, to // a new file of given name. The file is overwritten if it is present. // --------------- BOOL I_SaveMemToFile(const void *pData, size_t iLength, const char *sFileName) { HANDLE fileHandle; DWORD bytesWritten; fileHandle = CreateFileA(sFileName, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL|FILE_FLAG_WRITE_THROUGH, NULL); if (fileHandle == (HANDLE)-1) { CONS_Printf("SaveMemToFile: Error opening file %s",sFileName); return FALSE; } WriteFile(fileHandle, pData, (DWORD)iLength, &bytesWritten, NULL); CloseHandle(fileHandle); return TRUE; } // my god how win32 suck typedef BOOL (WINAPI *MyFunc)(LPCSTR RootName, PULARGE_INTEGER pulA, PULARGE_INTEGER pulB, PULARGE_INTEGER pulFreeBytes); void I_GetDiskFreeSpace(INT64* freespace) { static MyFunc pfnGetDiskFreeSpaceEx = NULL; static boolean testwin95 = false; ULARGE_INTEGER usedbytes, lfrespace; if (!testwin95) { HMODULE h = GetModuleHandleA("kernel32.dll"); if (h) pfnGetDiskFreeSpaceEx = (MyFunc)GetProcAddress(h, "GetDiskFreeSpaceExA"); testwin95 = true; } if (pfnGetDiskFreeSpaceEx) { if (pfnGetDiskFreeSpaceEx(NULL, &lfreespace, &usedbytes, NULL)) *freespace = lfreespace.QuadPart; else *freespace = INT32_MAX; } else { DWORD SectorsPerCluster, BytesPerSector, NumberOfFreeClusters, TotalNumberOfClusters; GetDiskFreeSpace(NULL, &SectorsPerCluster, &BytesPerSector, &NumberOfFreeClusters, &TotalNumberOfClusters); *freespace = BytesPerSector * SectorsPerCluster * NumberOfFreeClusters; } } char *I_GetUserName(void) { static char 
username[MAXPLAYERNAME+1]; char *p; DWORD i = MAXPLAYERNAME; if (!GetUserNameA(username, &i)) { p = getenv("USER"); if (!p) { p = getenv("user"); if (!p) { p = getenv("USERNAME"); if (!p) { p = getenv("username"); if (!p) { return NULL; } } } } strncpy(username, p, MAXPLAYERNAME); } if (!strlen(username)) return NULL; return username; } INT32 I_mkdir(const char *dirname, INT32 unixright) { (void)unixright; /// \todo should implement ntright under nt... return CreateDirectoryA(dirname, NULL); } char * I_GetEnv(const char *name) { return getenv(name); } INT32 I_PutEnv(char *variable) { return putenv(variable); } typedef BOOL (WINAPI *MyFunc3) (DWORD); const CPUInfoFlags *I_CPUInfo(void) { static CPUInfoFlags WIN_CPUInfo; static MyFunc3 pfnCPUID = NULL; SYSTEM_INFO SI; HMODULE h = GetModuleHandleA("kernel32.dll"); if (h) pfnCPUID = (MyFunc3)GetProcAddress(h, "IsProcessorFeaturePresent"); ZeroMemory(&WIN_CPUInfo,sizeof (WIN_CPUInfo)); if(pfnCPUID) { WIN_CPUInfo.FPPE = pfnCPUID( 0); //PF_FLOATING_POINT_PRECISION_ERRATA WIN_CPUInfo.FPE = pfnCPUID( 1); //PF_FLOATING_POINT_EMULATED WIN_CPUInfo.cmpxchg = pfnCPUID( 2); //PF_COMPARE_EXCHANGE_DOUBLE WIN_CPUInfo.MMX = pfnCPUID( 3); //PF_MMX_INSTRUCTIONS_AVAILABLE WIN_CPUInfo.PPCMM64 = pfnCPUID( 4); //PF_PPC_MOVEMEM_64BIT_OK WIN_CPUInfo.ALPHAbyte = pfnCPUID( 5); //PF_ALPHA_BYTE_INSTRUCTIONS WIN_CPUInfo.SSE = pfnCPUID( 6); //PF_XMMI_INSTRUCTIONS_AVAILABLE WIN_CPUInfo.AMD3DNow = pfnCPUID( 7); //PF_3DNOW_INSTRUCTIONS_AVAILABLE WIN_CPUInfo.RDTSC = pfnCPUID( 8); //PF_RDTSC_INSTRUCTION_AVAILABLE WIN_CPUInfo.PAE = pfnCPUID( 9); //PF_PAE_ENABLED WIN_CPUInfo.SSE2 = pfnCPUID(10); //PF_XMMI64_INSTRUCTIONS_AVAILABLE //WIN_CPUInfo.blank = pfnCPUID(11); //PF_SSE_DAZ_MODE_AVAILABLE WIN_CPUInfo.DEP = pfnCPUID(12); //PF_NX_ENABLED WIN_CPUInfo.SSE3 = pfnCPUID(13); //PF_SSE3_INSTRUCTIONS_AVAILABLE WIN_CPUInfo.cmpxchg16b = pfnCPUID(14); //PF_COMPARE_EXCHANGE128 WIN_CPUInfo.cmp8xchg16 = pfnCPUID(15); //PF_COMPARE64_EXCHANGE128 WIN_CPUInfo.PFC = 
pfnCPUID(15); //PF_CHANNELS_ENABLED } GetSystemInfo(&SI); WIN_CPUInfo.CPUs = SI.dwNumberOfProcessors; WIN_CPUInfo.IA64 = (SI.dwProcessorType == 2200); // PROCESSOR_INTEL_IA64 WIN_CPUInfo.AMD64 = (SI.dwProcessorType == 8664); // PROCESSOR_AMD_X8664 return &WIN_CPUInfo; } UINT64 I_FileSize(const char *filename) { HANDLE fileHandle; DWORD dwSizeHigh, dwSizeLow; UINT64 fileSize = (UINT64)-1; fileHandle = CreateFileA(filename, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, 0, NULL); if (fileHandle == (HANDLE)-1) goto erroropening; dwSizeLow = GetFileSize(fileHandle,&dwSizeHigh); if (dwSizeLow == 0xFFFFFFFF && GetLastError() != NO_ERROR) goto errorsizing; fileSize = ((UINT64)(dwSizeHigh)<<32) + dwSizeLow; errorsizing: CloseHandle(fileHandle); erroropening: return fileSize; }
gpl-2.0
dan82840/Netgear-RBR40
git_home/linux.git/fs/yaffs2/yaffs_vfs.c
4
88276
/* * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. * * Copyright (C) 2002-2011 Aleph One Ltd. * for Toby Churchill Ltd and Brightstar Engineering * * Created by Charles Manning <charles@aleph1.co.uk> * Acknowledgements: * Luc van OostenRyck for numerous patches. * Nick Bane for numerous patches. * Nick Bane for 2.5/2.6 integration. * Andras Toth for mknod rdev issue. * Michael Fischer for finding the problem with inode inconsistency. * Some code bodily lifted from JFFS * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* * * This is the file system front-end to YAFFS that hooks it up to * the VFS. * * Special notes: * >> 2.4: sb->u.generic_sbp points to the struct yaffs_dev associated with * this superblock * >> 2.6: sb->s_fs_info points to the struct yaffs_dev associated with this * superblock * >> inode->u.generic_ip points to the associated struct yaffs_obj. */ /* * There are two variants of the VFS glue code. This variant should compile * for any version of Linux. 
*/ #include <linux/version.h> #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)) #define YAFFS_COMPILE_BACKGROUND #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)) #define YAFFS_COMPILE_FREEZER #endif #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) #define YAFFS_COMPILE_EXPORTFS #endif #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)) #define YAFFS_USE_SETATTR_COPY #define YAFFS_USE_TRUNCATE_SETSIZE #endif #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)) #define YAFFS_HAS_EVICT_INODE #endif #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13)) #define YAFFS_NEW_FOLLOW_LINK 1 #else #define YAFFS_NEW_FOLLOW_LINK 0 #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) #define YAFFS_HAS_WRITE_SUPER #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)) #include <linux/config.h> #endif #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/proc_fs.h> #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)) #include <linux/smp_lock.h> #endif #include <linux/pagemap.h> #include <linux/mtd/mtd.h> #include <linux/interrupt.h> #include <linux/string.h> #include <linux/ctype.h> #if (YAFFS_NEW_FOLLOW_LINK == 1) #include <linux/namei.h> #endif #ifdef YAFFS_COMPILE_EXPORTFS #include <linux/exportfs.h> #endif #ifdef YAFFS_COMPILE_BACKGROUND #include <linux/kthread.h> #include <linux/delay.h> #endif #ifdef YAFFS_COMPILE_FREEZER #include <linux/freezer.h> #endif #include <asm/div64.h> #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) #include <linux/statfs.h> #define UnlockPage(p) unlock_page(p) #define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags) /* FIXME: use sb->s_id instead ? 
*/ #define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf) #else #include <linux/locks.h> #define BDEVNAME_SIZE 0 #define yaffs_devname(sb, buf) kdevname(sb->s_dev) #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)) /* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */ #define __user #endif #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)) #define YPROC_ROOT (&proc_root) #else #define YPROC_ROOT NULL #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)) #define Y_INIT_TIMER(a) init_timer(a) #else #define Y_INIT_TIMER(a) init_timer_on_stack(a) #endif #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27)) #define YAFFS_USE_WRITE_BEGIN_END 1 #else #define YAFFS_USE_WRITE_BEGIN_END 0 #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) #define YAFFS_SUPER_HAS_DIRTY #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) #define set_nlink(inode, count) do { (inode)->i_nlink = (count); } while(0) #endif #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28)) static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size) { uint64_t result = partition_size; do_div(result, block_size); return (uint32_t) result; } #else #define YCALCBLOCKS(s, b) ((s)/(b)) #endif #include <linux/uaccess.h> #include <linux/mtd/mtd.h> #include "yportenv.h" #include "yaffs_trace.h" #include "yaffs_guts.h" #include "yaffs_attribs.h" #include "yaffs_linux.h" #include "yaffs_mtdif.h" #include "yaffs_packedtags2.h" #include "yaffs_getblockinfo.h" unsigned int yaffs_trace_mask = YAFFS_TRACE_BAD_BLOCKS | YAFFS_TRACE_ALWAYS | 0; unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS; unsigned int yaffs_auto_checkpoint = 1; unsigned int yaffs_gc_control = 1; unsigned int yaffs_bg_enable = 1; unsigned int yaffs_auto_select = 1; /* Module Parameters */ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) module_param(yaffs_trace_mask, uint, 0644); module_param(yaffs_wr_attempts, uint, 0644); module_param(yaffs_auto_checkpoint, uint, 0644); module_param(yaffs_gc_control, uint, 0644); 
module_param(yaffs_bg_enable, uint, 0644); #else MODULE_PARM(yaffs_trace_mask, "i"); MODULE_PARM(yaffs_wr_attempts, "i"); MODULE_PARM(yaffs_auto_checkpoint, "i"); MODULE_PARM(yaffs_gc_control, "i"); #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)) /* use iget and read_inode */ #define Y_IGET(sb, inum) iget((sb), (inum)) #else /* Call local equivalent */ #define YAFFS_USE_OWN_IGET #define Y_IGET(sb, inum) yaffs_iget((sb), (inum)) #endif #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)) #define yaffs_inode_to_obj_lv(iptr) ((iptr)->i_private) #else #define yaffs_inode_to_obj_lv(iptr) ((iptr)->u.generic_ip) #endif #define yaffs_inode_to_obj(iptr) \ ((struct yaffs_obj *)(yaffs_inode_to_obj_lv(iptr))) #define yaffs_dentry_to_obj(dptr) yaffs_inode_to_obj((dptr)->d_inode) #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) #define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->s_fs_info) #else #define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->u.generic_sbp) #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) #define Y_CLEAR_INODE(i) clear_inode(i) #else #define Y_CLEAR_INODE(i) end_writeback(i) #endif #define update_dir_time(dir) do {\ (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \ } while (0) static void yaffs_fill_inode_from_obj(struct inode *inode, struct yaffs_obj *obj); static void yaffs_gross_lock(struct yaffs_dev *dev) { yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locking %p", current); mutex_lock(&(yaffs_dev_to_lc(dev)->gross_lock)); yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locked %p", current); } static void yaffs_gross_unlock(struct yaffs_dev *dev) { yaffs_trace(YAFFS_TRACE_LOCK, "yaffs unlocking %p", current); mutex_unlock(&(yaffs_dev_to_lc(dev)->gross_lock)); } static int yaffs_readpage_nolock(struct file *f, struct page *pg) { /* Lifted from jffs2 */ struct yaffs_obj *obj; unsigned char *pg_buf; int ret; loff_t pos = ((loff_t) pg->index) << PAGE_CACHE_SHIFT; struct yaffs_dev *dev; yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage_nolock at %lld, 
size %08x", (long long)pos, (unsigned)PAGE_CACHE_SIZE); obj = yaffs_dentry_to_obj(f->f_dentry); dev = obj->my_dev; #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) BUG_ON(!PageLocked(pg)); #else if (!PageLocked(pg)) PAGE_BUG(pg); #endif pg_buf = kmap(pg); /* FIXME: Can kmap fail? */ yaffs_gross_lock(dev); ret = yaffs_file_rd(obj, pg_buf, pos, PAGE_CACHE_SIZE); yaffs_gross_unlock(dev); if (ret >= 0) ret = 0; if (ret) { ClearPageUptodate(pg); SetPageError(pg); } else { SetPageUptodate(pg); ClearPageError(pg); } flush_dcache_page(pg); kunmap(pg); yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage_nolock done"); return ret; } static int yaffs_readpage_unlock(struct file *f, struct page *pg) { int ret = yaffs_readpage_nolock(f, pg); UnlockPage(pg); return ret; } static int yaffs_readpage(struct file *f, struct page *pg) { int ret; yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage"); ret = yaffs_readpage_unlock(f, pg); yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage done"); return ret; } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) #define YCRED_FSUID() from_kuid(&init_user_ns, current_fsuid()) #define YCRED_FSGID() from_kgid(&init_user_ns, current_fsgid()) #else #define YCRED_FSUID() YCRED(current)->fsuid #define YCRED_FSGID() YCRED(current)->fsgid static inline uid_t i_uid_read(const struct inode *inode) { return inode->i_uid; } static inline gid_t i_gid_read(const struct inode *inode) { return inode->i_gid; } static inline void i_uid_write(struct inode *inode, uid_t uid) { inode->i_uid = uid; } static inline void i_gid_write(struct inode *inode, gid_t gid) { inode->i_gid = gid; } #endif static void yaffs_set_super_dirty_val(struct yaffs_dev *dev, int val) { struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev); if (lc) lc->dirty = val; # ifdef YAFFS_SUPER_HAS_DIRTY { struct super_block *sb = lc->super; if (sb) sb->s_dirt = val; } #endif } static void yaffs_set_super_dirty(struct yaffs_dev *dev) { yaffs_set_super_dirty_val(dev, 1); } static void yaffs_clear_super_dirty(struct 
yaffs_dev *dev) { yaffs_set_super_dirty_val(dev, 0); } static int yaffs_check_super_dirty(struct yaffs_dev *dev) { struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev); if (lc && lc->dirty) return 1; # ifdef YAFFS_SUPER_HAS_DIRTY { struct super_block *sb = lc->super; if (sb && sb->s_dirt) return 1; } #endif return 0; } #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) static int yaffs_writepage(struct page *page, struct writeback_control *wbc) #else static int yaffs_writepage(struct page *page) #endif { struct yaffs_dev *dev; struct address_space *mapping = page->mapping; struct inode *inode; unsigned long end_index; char *buffer; struct yaffs_obj *obj; int n_written = 0; unsigned n_bytes; loff_t i_size; if (!mapping) BUG(); inode = mapping->host; if (!inode) BUG(); i_size = i_size_read(inode); end_index = i_size >> PAGE_CACHE_SHIFT; if (page->index < end_index) n_bytes = PAGE_CACHE_SIZE; else { n_bytes = i_size & (PAGE_CACHE_SIZE - 1); if (page->index > end_index || !n_bytes) { yaffs_trace(YAFFS_TRACE_OS, "yaffs_writepage at %lld, inode size = %lld!!", ((loff_t)page->index) << PAGE_CACHE_SHIFT, inode->i_size); yaffs_trace(YAFFS_TRACE_OS, " -> don't care!!"); zero_user_segment(page, 0, PAGE_CACHE_SIZE); set_page_writeback(page); unlock_page(page); end_page_writeback(page); return 0; } } if (n_bytes != PAGE_CACHE_SIZE) zero_user_segment(page, n_bytes, PAGE_CACHE_SIZE); get_page(page); buffer = kmap(page); obj = yaffs_inode_to_obj(inode); dev = obj->my_dev; yaffs_gross_lock(dev); yaffs_trace(YAFFS_TRACE_OS, "yaffs_writepage at %lld, size %08x", ((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes); yaffs_trace(YAFFS_TRACE_OS, "writepag0: obj = %lld, ino = %lld", obj->variant.file_variant.file_size, inode->i_size); n_written = yaffs_wr_file(obj, buffer, ((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes, 0); yaffs_set_super_dirty(dev); yaffs_trace(YAFFS_TRACE_OS, "writepag1: obj = %lld, ino = %lld", obj->variant.file_variant.file_size, inode->i_size); 
yaffs_gross_unlock(dev); kunmap(page); set_page_writeback(page); unlock_page(page); end_page_writeback(page); put_page(page); return (n_written == n_bytes) ? 0 : -ENOSPC; } /* Space holding and freeing is done to ensure we have space available for write_begin/end */ /* For now we just assume few parallel writes and check against a small number. */ /* Todo: need to do this with a counter to handle parallel reads better */ static ssize_t yaffs_hold_space(struct file *f) { struct yaffs_obj *obj; struct yaffs_dev *dev; int n_free_chunks; obj = yaffs_dentry_to_obj(f->f_dentry); dev = obj->my_dev; yaffs_gross_lock(dev); n_free_chunks = yaffs_get_n_free_chunks(dev); yaffs_gross_unlock(dev); return (n_free_chunks > 20) ? 1 : 0; } static void yaffs_release_space(struct file *f) { struct yaffs_obj *obj; struct yaffs_dev *dev; obj = yaffs_dentry_to_obj(f->f_dentry); dev = obj->my_dev; yaffs_gross_lock(dev); yaffs_gross_unlock(dev); } #if (YAFFS_USE_WRITE_BEGIN_END > 0) static int yaffs_write_begin(struct file *filp, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct page *pg = NULL; pgoff_t index = pos >> PAGE_CACHE_SHIFT; int ret = 0; int space_held = 0; /* Get a page */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) pg = grab_cache_page_write_begin(mapping, index, flags); #else pg = __grab_cache_page(mapping, index); #endif *pagep = pg; if (!pg) { ret = -ENOMEM; goto out; } yaffs_trace(YAFFS_TRACE_OS, "start yaffs_write_begin index %d(%x) uptodate %d", (int)index, (int)index, Page_Uptodate(pg) ? 
1 : 0); /* Get fs space */ space_held = yaffs_hold_space(filp); if (!space_held) { ret = -ENOSPC; goto out; } /* Update page if required */ if (!Page_Uptodate(pg)) ret = yaffs_readpage_nolock(filp, pg); if (ret) goto out; /* Happy path return */ yaffs_trace(YAFFS_TRACE_OS, "end yaffs_write_begin - ok"); return 0; out: yaffs_trace(YAFFS_TRACE_OS, "end yaffs_write_begin fail returning %d", ret); if (space_held) yaffs_release_space(filp); if (pg) { unlock_page(pg); page_cache_release(pg); } return ret; } #else static int yaffs_prepare_write(struct file *f, struct page *pg, unsigned offset, unsigned to) { yaffs_trace(YAFFS_TRACE_OS, "yaffs_prepair_write"); if (!Page_Uptodate(pg)) return yaffs_readpage_nolock(f, pg); return 0; } #endif static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n, loff_t * pos) { struct yaffs_obj *obj; int n_written; loff_t ipos; struct inode *inode; struct yaffs_dev *dev; obj = yaffs_dentry_to_obj(f->f_dentry); if (!obj) { yaffs_trace(YAFFS_TRACE_OS, "yaffs_file_write: hey obj is null!"); return -EINVAL; } dev = obj->my_dev; yaffs_gross_lock(dev); inode = f->f_dentry->d_inode; if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND) ipos = inode->i_size; else ipos = *pos; yaffs_trace(YAFFS_TRACE_OS, "yaffs_file_write about to write writing %u(%x) bytes to object %d at %lld", (unsigned)n, (unsigned)n, obj->obj_id, ipos); n_written = yaffs_wr_file(obj, buf, ipos, n, 0); yaffs_set_super_dirty(dev); yaffs_trace(YAFFS_TRACE_OS, "yaffs_file_write: %d(%x) bytes written", (unsigned)n, (unsigned)n); if (n_written > 0) { ipos += n_written; *pos = ipos; if (ipos > inode->i_size) { inode->i_size = ipos; inode->i_blocks = (ipos + 511) >> 9; yaffs_trace(YAFFS_TRACE_OS, "yaffs_file_write size updated to %lld bytes, %d blocks", ipos, (int)(inode->i_blocks)); } } yaffs_gross_unlock(dev); return (n_written == 0) && (n > 0) ? 
-ENOSPC : n_written; } #if (YAFFS_USE_WRITE_BEGIN_END > 0) static int yaffs_write_end(struct file *filp, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *pg, void *fsdadata) { int ret = 0; void *addr, *kva; uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1); kva = kmap(pg); addr = kva + offset_into_page; yaffs_trace(YAFFS_TRACE_OS, "yaffs_write_end addr %p pos %lld n_bytes %d", addr, pos, copied); ret = yaffs_file_write(filp, addr, copied, &pos); if (ret != copied) { yaffs_trace(YAFFS_TRACE_OS, "yaffs_write_end not same size ret %d copied %d", ret, copied); SetPageError(pg); } kunmap(pg); yaffs_release_space(filp); unlock_page(pg); page_cache_release(pg); return ret; } #else static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset, unsigned to) { void *addr, *kva; loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset; int n_bytes = to - offset; int n_written; kva = kmap(pg); addr = kva + offset; yaffs_trace(YAFFS_TRACE_OS, "yaffs_commit_write addr %p pos %lld n_bytes %d", addr, pos, n_bytes); n_written = yaffs_file_write(f, addr, n_bytes, &pos); if (n_written != n_bytes) { yaffs_trace(YAFFS_TRACE_OS, "yaffs_commit_write not same size n_written %d n_bytes %d", n_written, n_bytes); SetPageError(pg); } kunmap(pg); yaffs_trace(YAFFS_TRACE_OS, "yaffs_commit_write returning %d", n_written == n_bytes ? 0 : n_written); return n_written == n_bytes ? 
0 : n_written; } #endif static struct address_space_operations yaffs_file_address_operations = { .readpage = yaffs_readpage, .writepage = yaffs_writepage, #if (YAFFS_USE_WRITE_BEGIN_END > 0) .write_begin = yaffs_write_begin, .write_end = yaffs_write_end, #else .prepare_write = yaffs_prepare_write, .commit_write = yaffs_commit_write, #endif }; #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) static int yaffs_file_flush(struct file *file, fl_owner_t id) #else static int yaffs_file_flush(struct file *file) #endif { struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_dentry); struct yaffs_dev *dev = obj->my_dev; yaffs_trace(YAFFS_TRACE_OS, "yaffs_file_flush object %d (%s)", obj->obj_id, obj->dirty ? "dirty" : "clean"); yaffs_gross_lock(dev); yaffs_flush_file(obj, 1, 0); yaffs_gross_unlock(dev); return 0; } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) static int yaffs_sync_object(struct file *file, loff_t start, loff_t end, int datasync) #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34)) static int yaffs_sync_object(struct file *file, int datasync) #else static int yaffs_sync_object(struct file *file, struct dentry *dentry, int datasync) #endif { struct yaffs_obj *obj; struct yaffs_dev *dev; #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34)) struct dentry *dentry = file->f_path.dentry; #endif obj = yaffs_dentry_to_obj(dentry); dev = obj->my_dev; yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC, "yaffs_sync_object"); yaffs_gross_lock(dev); yaffs_flush_file(obj, 1, datasync); yaffs_gross_unlock(dev); return 0; } #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22)) static const struct file_operations yaffs_file_operations = { .read = do_sync_read, .write = do_sync_write, .aio_read = generic_file_aio_read, .aio_write = generic_file_aio_write, .mmap = generic_file_mmap, .flush = yaffs_file_flush, .fsync = yaffs_sync_object, .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, .llseek = generic_file_llseek, }; #elif 
(LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)) static const struct file_operations yaffs_file_operations = { .read = do_sync_read, .write = do_sync_write, .aio_read = generic_file_aio_read, .aio_write = generic_file_aio_write, .mmap = generic_file_mmap, .flush = yaffs_file_flush, .fsync = yaffs_sync_object, .sendfile = generic_file_sendfile, }; #else static const struct file_operations yaffs_file_operations = { .read = generic_file_read, .write = generic_file_write, .mmap = generic_file_mmap, .flush = yaffs_file_flush, .fsync = yaffs_sync_object, #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) .sendfile = generic_file_sendfile, #endif }; #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)) static void zero_user_segment(struct page *page, unsigned start, unsigned end) { void *kaddr = kmap_atomic(page, KM_USER0); memset(kaddr + start, 0, end - start); kunmap_atomic(kaddr, KM_USER0); flush_dcache_page(page); } #endif static int yaffs_vfs_setsize(struct inode *inode, loff_t newsize) { #ifdef YAFFS_USE_TRUNCATE_SETSIZE truncate_setsize(inode, newsize); return 0; #else truncate_inode_pages(&inode->i_data, newsize); return 0; #endif } static int yaffs_vfs_setattr(struct inode *inode, struct iattr *attr) { #ifdef YAFFS_USE_SETATTR_COPY setattr_copy(inode, attr); return 0; #else return inode_setattr(inode, attr); #endif } static int yaffs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; int error = 0; struct yaffs_dev *dev; yaffs_trace(YAFFS_TRACE_OS, "yaffs_setattr of object %d", yaffs_inode_to_obj(inode)->obj_id); #if 0 /* Fail if a requested resize >= 2GB */ if (attr->ia_valid & ATTR_SIZE && (attr->ia_size >> 31)) error = -EINVAL; #endif if (error == 0) error = inode_change_ok(inode, attr); if (error == 0) { int result; if (!error) { error = yaffs_vfs_setattr(inode, attr); yaffs_trace(YAFFS_TRACE_OS, "inode_setattr called"); if (attr->ia_valid & ATTR_SIZE) { yaffs_vfs_setsize(inode, attr->ia_size); inode->i_blocks = 
(inode->i_size + 511) >> 9; } } dev = yaffs_inode_to_obj(inode)->my_dev; if (attr->ia_valid & ATTR_SIZE) { yaffs_trace(YAFFS_TRACE_OS, "resize to %d(%x)", (int)(attr->ia_size), (int)(attr->ia_size)); } yaffs_gross_lock(dev); result = yaffs_set_attribs(yaffs_inode_to_obj(inode), attr); if (result == YAFFS_OK) { error = 0; } else { error = -EPERM; } yaffs_gross_unlock(dev); } yaffs_trace(YAFFS_TRACE_OS, "yaffs_setattr done returning %d", error); return error; } static int yaffs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct inode *inode = dentry->d_inode; int error = 0; struct yaffs_dev *dev; struct yaffs_obj *obj = yaffs_inode_to_obj(inode); yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr of object %d", obj->obj_id); if (error == 0) { int result; dev = obj->my_dev; yaffs_gross_lock(dev); result = yaffs_set_xattrib(obj, name, value, size, flags); if (result == YAFFS_OK) error = 0; else if (result < 0) error = result; yaffs_gross_unlock(dev); } yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr done returning %d", error); return error; } static ssize_t yaffs_getxattr(struct dentry * dentry, const char *name, void *buff, size_t size) { struct inode *inode = dentry->d_inode; int error = 0; struct yaffs_dev *dev; struct yaffs_obj *obj = yaffs_inode_to_obj(inode); yaffs_trace(YAFFS_TRACE_OS, "yaffs_getxattr \"%s\" from object %d", name, obj->obj_id); if (error == 0) { dev = obj->my_dev; yaffs_gross_lock(dev); error = yaffs_get_xattrib(obj, name, buff, size); yaffs_gross_unlock(dev); } yaffs_trace(YAFFS_TRACE_OS, "yaffs_getxattr done returning %d", error); return error; } static int yaffs_removexattr(struct dentry *dentry, const char *name) { struct inode *inode = dentry->d_inode; int error = 0; struct yaffs_dev *dev; struct yaffs_obj *obj = yaffs_inode_to_obj(inode); yaffs_trace(YAFFS_TRACE_OS, "yaffs_removexattr of object %d", obj->obj_id); if (error == 0) { int result; dev = obj->my_dev; yaffs_gross_lock(dev); result = 
yaffs_remove_xattrib(obj, name); if (result == YAFFS_OK) error = 0; else if (result < 0) error = result; yaffs_gross_unlock(dev); } yaffs_trace(YAFFS_TRACE_OS, "yaffs_removexattr done returning %d", error); return error; } static ssize_t yaffs_listxattr(struct dentry * dentry, char *buff, size_t size) { struct inode *inode = dentry->d_inode; int error = 0; struct yaffs_dev *dev; struct yaffs_obj *obj = yaffs_inode_to_obj(inode); yaffs_trace(YAFFS_TRACE_OS, "yaffs_listxattr of object %d", obj->obj_id); if (error == 0) { dev = obj->my_dev; yaffs_gross_lock(dev); error = yaffs_list_xattrib(obj, buff, size); yaffs_gross_unlock(dev); } yaffs_trace(YAFFS_TRACE_OS, "yaffs_listxattr done returning %d", error); return error; } static const struct inode_operations yaffs_file_inode_operations = { .setattr = yaffs_setattr, .setxattr = yaffs_setxattr, .getxattr = yaffs_getxattr, .listxattr = yaffs_listxattr, .removexattr = yaffs_removexattr, }; static int yaffs_readlink(struct dentry *dentry, char __user * buffer, int buflen) { unsigned char *alias; int ret; struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev; yaffs_gross_lock(dev); alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry)); yaffs_gross_unlock(dev); if (!alias) return -ENOMEM; ret = vfs_readlink(dentry, buffer, buflen, alias); kfree(alias); return ret; } #if (YAFFS_NEW_FOLLOW_LINK == 1) static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd) { void *ret; #else static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd) { int ret #endif unsigned char *alias; int ret_int = 0; struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev; yaffs_gross_lock(dev); alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry)); yaffs_gross_unlock(dev); if (!alias) { ret_int = -ENOMEM; goto out; } #if (YAFFS_NEW_FOLLOW_LINK == 1) nd_set_link(nd, alias); ret = alias; out: if (ret_int) ret = ERR_PTR(ret_int); return ret; #else ret = vfs_follow_link(nd, alias); kfree(alias); out: if 
(ret_int) ret = ret_int; return ret; #endif } #ifdef YAFFS_HAS_PUT_INODE /* For now put inode is just for debugging * Put inode is called when the inode **structure** is put. */ static void yaffs_put_inode(struct inode *inode) { yaffs_trace(YAFFS_TRACE_OS, "yaffs_put_inode: ino %d, count %d"), (int)inode->i_ino, atomic_read(&inode->i_count); } #endif #if (YAFFS_NEW_FOLLOW_LINK == 1) void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias) { kfree(alias); } #endif static const struct inode_operations yaffs_symlink_inode_operations = { .readlink = yaffs_readlink, .follow_link = yaffs_follow_link, #if (YAFFS_NEW_FOLLOW_LINK == 1) .put_link = yaffs_put_link, #endif .setattr = yaffs_setattr, .setxattr = yaffs_setxattr, .getxattr = yaffs_getxattr, .listxattr = yaffs_listxattr, .removexattr = yaffs_removexattr, }; #ifdef YAFFS_USE_OWN_IGET static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino) { struct inode *inode; struct yaffs_obj *obj; struct yaffs_dev *dev = yaffs_super_to_dev(sb); yaffs_trace(YAFFS_TRACE_OS, "yaffs_iget for %lu", ino); inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; /* NB This is called as a side effect of other functions, but * we had to release the lock to prevent deadlocks, so * need to lock again. */ yaffs_gross_lock(dev); obj = yaffs_find_by_number(dev, inode->i_ino); yaffs_fill_inode_from_obj(inode, obj); yaffs_gross_unlock(dev); unlock_new_inode(inode); return inode; } #else static void yaffs_read_inode(struct inode *inode) { /* NB This is called as a side effect of other functions, but * we had to release the lock to prevent deadlocks, so * need to lock again. 
*/ struct yaffs_obj *obj; struct yaffs_dev *dev = yaffs_super_to_dev(inode->i_sb); yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_inode for %d", (int)inode->i_ino); if (current != yaffs_dev_to_lc(dev)->readdir_process) yaffs_gross_lock(dev); obj = yaffs_find_by_number(dev, inode->i_ino); yaffs_fill_inode_from_obj(inode, obj); if (current != yaffs_dev_to_lc(dev)->readdir_process) yaffs_gross_unlock(dev); } #endif struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev, struct yaffs_obj *obj) { struct inode *inode; if (!sb) { yaffs_trace(YAFFS_TRACE_OS, "yaffs_get_inode for NULL super_block!!"); return NULL; } if (!obj) { yaffs_trace(YAFFS_TRACE_OS, "yaffs_get_inode for NULL object!!"); return NULL; } yaffs_trace(YAFFS_TRACE_OS, "yaffs_get_inode for object %d", obj->obj_id); inode = Y_IGET(sb, obj->obj_id); if (IS_ERR(inode)) return NULL; /* NB Side effect: iget calls back to yaffs_read_inode(). */ /* iget also increments the inode's i_count */ /* NB You can't be holding gross_lock or deadlock will happen! */ return inode; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) #define YCRED(x) x #else #define YCRED(x) (x->cred) #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) #else static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode, int rdev) #endif { struct inode *inode; struct yaffs_obj *obj = NULL; struct yaffs_dev *dev; struct yaffs_obj *parent = yaffs_inode_to_obj(dir); int error = -ENOSPC; uid_t uid = YCRED_FSUID(); gid_t gid = (dir->i_mode & S_ISGID) ? 
i_gid_read(dir) : YCRED_FSGID(); if ((dir->i_mode & S_ISGID) && S_ISDIR(mode)) mode |= S_ISGID; if (parent) { yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: parent object %d type %d", parent->obj_id, parent->variant_type); } else { yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: could not get parent object"); return -EPERM; } yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making oject for %s, mode %x dev %x", dentry->d_name.name, mode, rdev); dev = parent->my_dev; yaffs_gross_lock(dev); switch (mode & S_IFMT) { default: /* Special (socket, fifo, device...) */ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making special"); #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) obj = yaffs_create_special(parent, dentry->d_name.name, mode, uid, gid, old_encode_dev(rdev)); #else obj = yaffs_create_special(parent, dentry->d_name.name, mode, uid, gid, rdev); #endif break; case S_IFREG: /* file */ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making file"); obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid, gid); break; case S_IFDIR: /* directory */ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making directory"); obj = yaffs_create_dir(parent, dentry->d_name.name, mode, uid, gid); break; case S_IFLNK: /* symlink */ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making symlink"); obj = NULL; /* Do we ever get here? 
*/ break; } /* Can not call yaffs_get_inode() with gross lock held */ yaffs_gross_unlock(dev); if (obj) { inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj); d_instantiate(dentry, inode); update_dir_time(dir); yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod created object %d count = %d", obj->obj_id, atomic_read(&inode->i_count)); error = 0; yaffs_fill_inode_from_obj(dir, parent); } else { yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod failed making object"); error = -ENOMEM; } return error; } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) #else static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode) #endif { int ret_val; yaffs_trace(YAFFS_TRACE_OS, "yaffs_mkdir"); ret_val = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0); return ret_val; } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool dummy) #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *n) #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *n) #else static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode) #endif { yaffs_trace(YAFFS_TRACE_OS, "yaffs_create"); return yaffs_mknod(dir, dentry, mode | S_IFREG, 0); } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry, unsigned int dummy) #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *n) #else static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry) #endif { struct yaffs_obj *obj; struct inode *inode = NULL; /* NCB 2.5/2.6 needs NULL here */ struct yaffs_dev *dev = 
yaffs_inode_to_obj(dir)->my_dev; if (current != yaffs_dev_to_lc(dev)->readdir_process) yaffs_gross_lock(dev); yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup for %d:%s", yaffs_inode_to_obj(dir)->obj_id, dentry->d_name.name); obj = yaffs_find_by_name(yaffs_inode_to_obj(dir), dentry->d_name.name); obj = yaffs_get_equivalent_obj(obj); /* in case it was a hardlink */ /* Can't hold gross lock when calling yaffs_get_inode() */ if (current != yaffs_dev_to_lc(dev)->readdir_process) yaffs_gross_unlock(dev); if (obj) { yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup found %d", obj->obj_id); inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj); } else { yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup not found"); } /* added NCB for 2.5/6 compatability - forces add even if inode is * NULL which creates dentry hash */ d_add(dentry, inode); return NULL; } /* * Create a link... */ static int yaffs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; struct yaffs_obj *obj = NULL; struct yaffs_obj *link = NULL; struct yaffs_dev *dev; yaffs_trace(YAFFS_TRACE_OS, "yaffs_link"); obj = yaffs_inode_to_obj(inode); dev = obj->my_dev; yaffs_gross_lock(dev); if (!S_ISDIR(inode->i_mode)) /* Don't link directories */ link = yaffs_link_obj(yaffs_inode_to_obj(dir), dentry->d_name.name, obj); if (link) { set_nlink(old_dentry->d_inode, yaffs_get_obj_link_count(obj)); d_instantiate(dentry, old_dentry->d_inode); atomic_inc(&old_dentry->d_inode->i_count); yaffs_trace(YAFFS_TRACE_OS, "yaffs_link link count %d i_count %d", old_dentry->d_inode->i_nlink, atomic_read(&old_dentry->d_inode->i_count)); } yaffs_gross_unlock(dev); if (link) { update_dir_time(dir); return 0; } return -EPERM; } static int yaffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { struct yaffs_obj *obj; struct yaffs_dev *dev; uid_t uid = YCRED_FSUID(); gid_t gid = (dir->i_mode & S_ISGID) ? 
i_gid_read(dir) : YCRED_FSGID(); yaffs_trace(YAFFS_TRACE_OS, "yaffs_symlink"); if (strnlen(dentry->d_name.name, YAFFS_MAX_NAME_LENGTH + 1) > YAFFS_MAX_NAME_LENGTH) return -ENAMETOOLONG; if (strnlen(symname, YAFFS_MAX_ALIAS_LENGTH + 1) > YAFFS_MAX_ALIAS_LENGTH) return -ENAMETOOLONG; dev = yaffs_inode_to_obj(dir)->my_dev; yaffs_gross_lock(dev); obj = yaffs_create_symlink(yaffs_inode_to_obj(dir), dentry->d_name.name, S_IFLNK | S_IRWXUGO, uid, gid, symname); yaffs_gross_unlock(dev); if (obj) { struct inode *inode; inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj); d_instantiate(dentry, inode); update_dir_time(dir); yaffs_trace(YAFFS_TRACE_OS, "symlink created OK"); return 0; } else { yaffs_trace(YAFFS_TRACE_OS, "symlink not created"); } return -ENOMEM; } /* * The VFS layer already does all the dentry stuff for rename. * * NB: POSIX says you can rename an object over an old object of the same name */ static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct yaffs_dev *dev; int ret_val = YAFFS_FAIL; struct yaffs_obj *target; yaffs_trace(YAFFS_TRACE_OS, "yaffs_rename"); dev = yaffs_inode_to_obj(old_dir)->my_dev; yaffs_gross_lock(dev); /* Check if the target is an existing directory that is not empty. 
*/ target = yaffs_find_by_name(yaffs_inode_to_obj(new_dir), new_dentry->d_name.name); if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY && !list_empty(&target->variant.dir_variant.children)) { yaffs_trace(YAFFS_TRACE_OS, "target is non-empty dir"); ret_val = YAFFS_FAIL; } else { /* Now does unlinking internally using shadowing mechanism */ yaffs_trace(YAFFS_TRACE_OS, "calling yaffs_rename_obj"); ret_val = yaffs_rename_obj(yaffs_inode_to_obj(old_dir), old_dentry->d_name.name, yaffs_inode_to_obj(new_dir), new_dentry->d_name.name); } yaffs_gross_unlock(dev); if (ret_val == YAFFS_OK) { if (target) inode_dec_link_count(new_dentry->d_inode); update_dir_time(old_dir); if (old_dir != new_dir) update_dir_time(new_dir); return 0; } else { return -ENOTEMPTY; } } static int yaffs_unlink(struct inode *dir, struct dentry *dentry) { int ret_val; struct yaffs_dev *dev; struct yaffs_obj *obj; yaffs_trace(YAFFS_TRACE_OS, "yaffs_unlink %d:%s", (int)(dir->i_ino), dentry->d_name.name); obj = yaffs_inode_to_obj(dir); dev = obj->my_dev; yaffs_gross_lock(dev); ret_val = yaffs_unlinker(obj, dentry->d_name.name); if (ret_val == YAFFS_OK) { inode_dec_link_count(dentry->d_inode); dir->i_version++; yaffs_gross_unlock(dev); update_dir_time(dir); return 0; } yaffs_gross_unlock(dev); return -ENOTEMPTY; } static const struct inode_operations yaffs_dir_inode_operations = { .create = yaffs_create, .lookup = yaffs_lookup, .link = yaffs_link, .unlink = yaffs_unlink, .symlink = yaffs_symlink, .mkdir = yaffs_mkdir, .rmdir = yaffs_unlink, .mknod = yaffs_mknod, .rename = yaffs_rename, .setattr = yaffs_setattr, .setxattr = yaffs_setxattr, .getxattr = yaffs_getxattr, .listxattr = yaffs_listxattr, .removexattr = yaffs_removexattr, }; /*-----------------------------------------------------------------*/ /* Directory search context allows us to unlock access to yaffs during * filldir without causing problems with the directory being modified. 
* This is similar to the tried and tested mechanism used in yaffs direct. * * A search context iterates along a doubly linked list of siblings in the * directory. If the iterating object is deleted then this would corrupt * the list iteration, likely causing a crash. The search context avoids * this by using the remove_obj_fn to move the search context to the * next object before the object is deleted. * * Many readdirs (and thus seach conexts) may be alive simulateously so * each struct yaffs_dev has a list of these. * * A seach context lives for the duration of a readdir. * * All these functions must be called while yaffs is locked. */ struct yaffs_search_context { struct yaffs_dev *dev; struct yaffs_obj *dir_obj; struct yaffs_obj *next_return; struct list_head others; }; /* * yaffs_new_search() creates a new search context, initialises it and * adds it to the device's search context list. * * Called at start of readdir. */ static struct yaffs_search_context *yaffs_new_search(struct yaffs_obj *dir) { struct yaffs_dev *dev = dir->my_dev; struct yaffs_search_context *sc = kmalloc(sizeof(struct yaffs_search_context), GFP_NOFS); if (sc) { sc->dir_obj = dir; sc->dev = dev; if (list_empty(&sc->dir_obj->variant.dir_variant.children)) sc->next_return = NULL; else sc->next_return = list_entry(dir->variant.dir_variant.children.next, struct yaffs_obj, siblings); INIT_LIST_HEAD(&sc->others); list_add(&sc->others, &(yaffs_dev_to_lc(dev)->search_contexts)); } return sc; } /* * yaffs_search_end() disposes of a search context and cleans up. */ static void yaffs_search_end(struct yaffs_search_context *sc) { if (sc) { list_del(&sc->others); kfree(sc); } } /* * yaffs_search_advance() moves a search context to the next object. * Called when the search iterates or when an object removal causes * the search context to be moved to the next object. 
*/ static void yaffs_search_advance(struct yaffs_search_context *sc) { if (!sc) return; if (sc->next_return == NULL || list_empty(&sc->dir_obj->variant.dir_variant.children)) sc->next_return = NULL; else { struct list_head *next = sc->next_return->siblings.next; if (next == &sc->dir_obj->variant.dir_variant.children) sc->next_return = NULL; /* end of list */ else sc->next_return = list_entry(next, struct yaffs_obj, siblings); } } /* * yaffs_remove_obj_callback() is called when an object is unlinked. * We check open search contexts and advance any which are currently * on the object being iterated. */ static void yaffs_remove_obj_callback(struct yaffs_obj *obj) { struct list_head *i; struct yaffs_search_context *sc; struct list_head *search_contexts = &(yaffs_dev_to_lc(obj->my_dev)->search_contexts); /* Iterate through the directory search contexts. * If any are currently on the object being removed, then advance * the search context to the next object to prevent a hanging pointer. */ list_for_each(i, search_contexts) { sc = list_entry(i, struct yaffs_search_context, others); if (sc->next_return == obj) yaffs_search_advance(sc); } } /*-----------------------------------------------------------------*/ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) static int yaffs_readdir(struct file *file, struct dir_context *ctx) { struct yaffs_obj *obj; struct yaffs_dev *dev; struct yaffs_search_context *sc; struct inode *inode = file->f_dentry->d_inode; unsigned long offset, curoffs; struct yaffs_obj *l; int ret_val = 0; char name[YAFFS_MAX_NAME_LENGTH + 1]; obj = yaffs_dentry_to_obj(file->f_dentry); dev = obj->my_dev; yaffs_gross_lock(dev); yaffs_dev_to_lc(dev)->readdir_process = current; offset = ctx->pos; sc = yaffs_new_search(obj); if (!sc) { ret_val = -ENOMEM; goto out; } yaffs_trace(YAFFS_TRACE_OS, "yaffs_readdir: starting at %d", (int)offset); if (offset == 0) { yaffs_trace(YAFFS_TRACE_OS, "yaffs_readdir: entry . 
ino %d", (int)inode->i_ino); yaffs_gross_unlock(dev); if (!dir_emit_dot(file, ctx)) { yaffs_gross_lock(dev); goto out; } yaffs_gross_lock(dev); offset++; ctx->pos++; } if (offset == 1) { yaffs_trace(YAFFS_TRACE_OS, "yaffs_readdir: entry .. ino %d", (int)file->f_dentry->d_parent->d_inode->i_ino); yaffs_gross_unlock(dev); if (!dir_emit_dotdot(file, ctx)) { yaffs_gross_lock(dev); goto out; } yaffs_gross_lock(dev); offset++; ctx->pos++; } curoffs = 1; /* If the directory has changed since the open or last call to readdir, rewind to after the 2 canned entries. */ if (file->f_version != inode->i_version) { offset = 2; ctx->pos = offset; file->f_version = inode->i_version; } while (sc->next_return) { curoffs++; l = sc->next_return; if (curoffs >= offset) { int this_inode = yaffs_get_obj_inode(l); int this_type = yaffs_get_obj_type(l); yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1); yaffs_trace(YAFFS_TRACE_OS, "yaffs_readdir: %s inode %d", name, yaffs_get_obj_inode(l)); yaffs_gross_unlock(dev); if (!dir_emit(ctx, name, strlen(name), this_inode, this_type) < 0) { yaffs_gross_lock(dev); goto out; } yaffs_gross_lock(dev); offset++; ctx->pos++; } yaffs_search_advance(sc); } out: yaffs_search_end(sc); yaffs_dev_to_lc(dev)->readdir_process = NULL; yaffs_gross_unlock(dev); return ret_val; } #else static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir) { struct yaffs_obj *obj; struct yaffs_dev *dev; struct yaffs_search_context *sc; struct inode *inode = f->f_dentry->d_inode; unsigned long offset, curoffs; struct yaffs_obj *l; int ret_val = 0; char name[YAFFS_MAX_NAME_LENGTH + 1]; obj = yaffs_dentry_to_obj(f->f_dentry); dev = obj->my_dev; yaffs_gross_lock(dev); yaffs_dev_to_lc(dev)->readdir_process = current; offset = f->f_pos; sc = yaffs_new_search(obj); if (!sc) { ret_val = -ENOMEM; goto out; } yaffs_trace(YAFFS_TRACE_OS, "yaffs_readdir: starting at %d", (int)offset); if (offset == 0) { yaffs_trace(YAFFS_TRACE_OS, "yaffs_readdir: entry . 
ino %d", (int)inode->i_ino); yaffs_gross_unlock(dev); if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0) { yaffs_gross_lock(dev); goto out; } yaffs_gross_lock(dev); offset++; f->f_pos++; } if (offset == 1) { yaffs_trace(YAFFS_TRACE_OS, "yaffs_readdir: entry .. ino %d", (int)f->f_dentry->d_parent->d_inode->i_ino); yaffs_gross_unlock(dev); if (filldir(dirent, "..", 2, offset, f->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) { yaffs_gross_lock(dev); goto out; } yaffs_gross_lock(dev); offset++; f->f_pos++; } curoffs = 1; /* If the directory has changed since the open or last call to readdir, rewind to after the 2 canned entries. */ if (f->f_version != inode->i_version) { offset = 2; f->f_pos = offset; f->f_version = inode->i_version; } while (sc->next_return) { curoffs++; l = sc->next_return; if (curoffs >= offset) { int this_inode = yaffs_get_obj_inode(l); int this_type = yaffs_get_obj_type(l); yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1); yaffs_trace(YAFFS_TRACE_OS, "yaffs_readdir: %s inode %d", name, yaffs_get_obj_inode(l)); yaffs_gross_unlock(dev); if (filldir(dirent, name, strlen(name), offset, this_inode, this_type) < 0) { yaffs_gross_lock(dev); goto out; } yaffs_gross_lock(dev); offset++; f->f_pos++; } yaffs_search_advance(sc); } out: yaffs_search_end(sc); yaffs_dev_to_lc(dev)->readdir_process = NULL; yaffs_gross_unlock(dev); return ret_val; } #endif static const struct file_operations yaffs_dir_operations = { .read = generic_read_dir, #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) .iterate = yaffs_readdir, #else .readdir = yaffs_readdir, #endif .fsync = yaffs_sync_object, .llseek = generic_file_llseek, }; static void yaffs_fill_inode_from_obj(struct inode *inode, struct yaffs_obj *obj) { if (inode && obj) { /* Check mode against the variant type and attempt to repair if broken. 
*/ u32 mode = obj->yst_mode; switch (obj->variant_type) { case YAFFS_OBJECT_TYPE_FILE: if (!S_ISREG(mode)) { obj->yst_mode &= ~S_IFMT; obj->yst_mode |= S_IFREG; } break; case YAFFS_OBJECT_TYPE_SYMLINK: if (!S_ISLNK(mode)) { obj->yst_mode &= ~S_IFMT; obj->yst_mode |= S_IFLNK; } break; case YAFFS_OBJECT_TYPE_DIRECTORY: if (!S_ISDIR(mode)) { obj->yst_mode &= ~S_IFMT; obj->yst_mode |= S_IFDIR; } break; case YAFFS_OBJECT_TYPE_UNKNOWN: case YAFFS_OBJECT_TYPE_HARDLINK: case YAFFS_OBJECT_TYPE_SPECIAL: default: /* TODO? */ break; } inode->i_flags |= S_NOATIME; inode->i_ino = obj->obj_id; inode->i_mode = obj->yst_mode; i_uid_write(inode, obj->yst_uid); i_gid_write(inode, obj->yst_gid); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)) inode->i_blksize = inode->i_sb->s_blocksize; #endif #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) inode->i_rdev = old_decode_dev(obj->yst_rdev); inode->i_atime.tv_sec = (time_t) (obj->yst_atime); inode->i_atime.tv_nsec = 0; inode->i_mtime.tv_sec = (time_t) obj->yst_mtime; inode->i_mtime.tv_nsec = 0; inode->i_ctime.tv_sec = (time_t) obj->yst_ctime; inode->i_ctime.tv_nsec = 0; #else inode->i_rdev = obj->yst_rdev; inode->i_atime = obj->yst_atime; inode->i_mtime = obj->yst_mtime; inode->i_ctime = obj->yst_ctime; #endif inode->i_size = yaffs_get_obj_length(obj); inode->i_blocks = (inode->i_size + 511) >> 9; set_nlink(inode, yaffs_get_obj_link_count(obj)); yaffs_trace(YAFFS_TRACE_OS, "yaffs_fill_inode mode %x uid %d gid %d size %lld count %d", inode->i_mode, i_uid_read(inode), i_gid_read(inode), inode->i_size, atomic_read(&inode->i_count)); switch (obj->yst_mode & S_IFMT) { default: /* fifo, device or socket */ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) init_special_inode(inode, obj->yst_mode, old_decode_dev(obj->yst_rdev)); #else init_special_inode(inode, obj->yst_mode, (dev_t) (obj->yst_rdev)); #endif break; case S_IFREG: /* file */ inode->i_op = &yaffs_file_inode_operations; inode->i_fop = &yaffs_file_operations; 
inode->i_mapping->a_ops = &yaffs_file_address_operations; break; case S_IFDIR: /* directory */ inode->i_op = &yaffs_dir_inode_operations; inode->i_fop = &yaffs_dir_operations; break; case S_IFLNK: /* symlink */ inode->i_op = &yaffs_symlink_inode_operations; break; } yaffs_inode_to_obj_lv(inode) = obj; obj->my_inode = inode; } else { yaffs_trace(YAFFS_TRACE_OS, "yaffs_fill_inode invalid parameters"); } } /* * yaffs background thread functions . * yaffs_bg_thread_fn() the thread function * yaffs_bg_start() launches the background thread. * yaffs_bg_stop() cleans up the background thread. * * NB: * The thread should only run after the yaffs is initialised * The thread should be stopped before yaffs is unmounted. * The thread should not do any writing while the fs is in read only. */ static unsigned yaffs_bg_gc_urgency(struct yaffs_dev *dev) { unsigned erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block; struct yaffs_linux_context *context = yaffs_dev_to_lc(dev); unsigned scattered = 0; /* Free chunks not in an erased block */ if (erased_chunks < dev->n_free_chunks) scattered = (dev->n_free_chunks - erased_chunks); if (!context->bg_running) return 0; else if (scattered < (dev->param.chunks_per_block * 2)) return 0; else if (erased_chunks > dev->n_free_chunks / 2) return 0; else if (erased_chunks > dev->n_free_chunks / 4) return 1; else return 2; } #ifdef YAFFS_COMPILE_BACKGROUND void yaffs_background_waker(unsigned long data) { wake_up_process((struct task_struct *)data); } static int yaffs_bg_thread_fn(void *data) { struct yaffs_dev *dev = (struct yaffs_dev *)data; struct yaffs_linux_context *context = yaffs_dev_to_lc(dev); unsigned long now = jiffies; unsigned long next_dir_update = now; unsigned long next_gc = now; unsigned long expires; unsigned int urgency; int gc_result; struct timer_list timer; yaffs_trace(YAFFS_TRACE_BACKGROUND, "yaffs_background starting for dev %p", (void *)dev); #ifdef YAFFS_COMPILE_FREEZER set_freezable(); #endif while 
(context->bg_running) { yaffs_trace(YAFFS_TRACE_BACKGROUND, "yaffs_background"); if (kthread_should_stop()) break; #ifdef YAFFS_COMPILE_FREEZER if (try_to_freeze()) continue; #endif yaffs_gross_lock(dev); now = jiffies; if (time_after(now, next_dir_update) && yaffs_bg_enable) { yaffs_update_dirty_dirs(dev); next_dir_update = now + HZ; } if (time_after(now, next_gc) && yaffs_bg_enable) { if (!dev->is_checkpointed) { urgency = yaffs_bg_gc_urgency(dev); gc_result = yaffs_bg_gc(dev, urgency); if (urgency > 1) next_gc = now + HZ / 20 + 1; else if (urgency > 0) next_gc = now + HZ / 10 + 1; else next_gc = now + HZ * 2; } else { /* * gc not running so set to next_dir_update * to cut down on wake ups */ next_gc = next_dir_update; } } yaffs_gross_unlock(dev); #if 1 expires = next_dir_update; if (time_before(next_gc, expires)) expires = next_gc; if (time_before(expires, now)) expires = now + HZ; Y_INIT_TIMER(&timer); timer.expires = expires + 1; timer.data = (unsigned long)current; timer.function = yaffs_background_waker; set_current_state(TASK_INTERRUPTIBLE); add_timer(&timer); schedule(); del_timer_sync(&timer); #else msleep(10); #endif } return 0; } static int yaffs_bg_start(struct yaffs_dev *dev) { int retval = 0; struct yaffs_linux_context *context = yaffs_dev_to_lc(dev); if (dev->read_only) return -1; context->bg_running = 1; context->bg_thread = kthread_run(yaffs_bg_thread_fn, (void *)dev, "yaffs-bg-%d", context->mount_id); if (IS_ERR(context->bg_thread)) { retval = PTR_ERR(context->bg_thread); context->bg_thread = NULL; context->bg_running = 0; } return retval; } static void yaffs_bg_stop(struct yaffs_dev *dev) { struct yaffs_linux_context *ctxt = yaffs_dev_to_lc(dev); ctxt->bg_running = 0; if (ctxt->bg_thread) { kthread_stop(ctxt->bg_thread); ctxt->bg_thread = NULL; } } #else static int yaffs_bg_thread_fn(void *data) { return 0; } static int yaffs_bg_start(struct yaffs_dev *dev) { return 0; } static void yaffs_bg_stop(struct yaffs_dev *dev) { } #endif static void 
yaffs_flush_inodes(struct super_block *sb)
{
	/* Flush every cached file object on this superblock. */
	struct inode *iptr;
	struct yaffs_obj *obj;

	list_for_each_entry(iptr, &sb->s_inodes, i_sb_list) {
		obj = yaffs_inode_to_obj(iptr);
		if (obj) {
			yaffs_trace(YAFFS_TRACE_OS,
				"flushing obj %d", obj->obj_id);
			/* update_time=1, data_sync=0: full flush */
			yaffs_flush_file(obj, 1, 0);
		}
	}
}

/* Flush all dirty state (files, dirty dirs, chunk cache) and optionally
 * save a checkpoint. Caller must hold the gross lock.
 */
static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
{
	struct yaffs_dev *dev = yaffs_super_to_dev(sb);

	if (!dev)
		return;

	yaffs_flush_inodes(sb);
	yaffs_update_dirty_dirs(dev);
	yaffs_flush_whole_cache(dev);
	if (do_checkpoint)
		yaffs_checkpoint_save(dev);
}

/* All mounted yaffs devices, protected by yaffs_context_lock. */
static LIST_HEAD(yaffs_context_list);
struct mutex yaffs_context_lock;

/* Unmount: stop the background thread first (it takes the gross lock),
 * then flush + checkpoint, tear down guts, and release the MTD.
 */
static void yaffs_put_super(struct super_block *sb)
{
	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);

	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
		"yaffs_put_super");

	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
		"Shutting down yaffs background thread");
	yaffs_bg_stop(dev);
	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
		"yaffs background thread shut down");

	yaffs_gross_lock(dev);

	yaffs_flush_super(sb, 1);

	yaffs_deinitialise(dev);

	yaffs_gross_unlock(dev);

	mutex_lock(&yaffs_context_lock);
	list_del_init(&(yaffs_dev_to_lc(dev)->context_list));
	mutex_unlock(&yaffs_context_lock);

	if (yaffs_dev_to_lc(dev)->spare_buffer) {
		kfree(yaffs_dev_to_lc(dev)->spare_buffer);
		yaffs_dev_to_lc(dev)->spare_buffer = NULL;
	}

	kfree(dev);

	yaffs_put_mtd_device(mtd);

	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
		"yaffs_put_super done");
}

/* Hook for the guts layer: report the module-wide GC control setting. */
static unsigned yaffs_gc_control_callback(struct yaffs_dev *dev)
{
	return yaffs_gc_control;
}

#ifdef YAFFS_COMPILE_EXPORTFS

/* NFS export support: map a file handle's inode number back to an inode. */
static struct inode *yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
					uint32_t generation)
{
	return Y_IGET(sb, ino);
}

static struct dentry *yaffs2_fh_to_dentry(struct super_block *sb,
					struct fid *fid, int fh_len,
					int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				yaffs2_nfs_get_inode);
}

static struct dentry 
*yaffs2_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, yaffs2_nfs_get_inode); } struct dentry *yaffs2_get_parent(struct dentry *dentry) { struct super_block *sb = dentry->d_inode->i_sb; struct dentry *parent = ERR_PTR(-ENOENT); struct inode *inode; unsigned long parent_ino; struct yaffs_obj *d_obj; struct yaffs_obj *parent_obj; d_obj = yaffs_inode_to_obj(dentry->d_inode); if (d_obj) { parent_obj = d_obj->parent; if (parent_obj) { parent_ino = yaffs_get_obj_inode(parent_obj); inode = Y_IGET(sb, parent_ino); if (IS_ERR(inode)) { parent = ERR_CAST(inode); } else { parent = d_obtain_alias(inode); if (!IS_ERR(parent)) { parent = ERR_PTR(-ENOMEM); iput(inode); } } } } return parent; } /* Just declare a zero structure as a NULL value implies * using the default functions of exportfs. */ static struct export_operations yaffs_export_ops = { .fh_to_dentry = yaffs2_fh_to_dentry, .fh_to_parent = yaffs2_fh_to_parent, .get_parent = yaffs2_get_parent, }; #endif static void yaffs_unstitch_obj(struct inode *inode, struct yaffs_obj *obj) { /* Clear the association between the inode and * the struct yaffs_obj. */ obj->my_inode = NULL; yaffs_inode_to_obj_lv(inode) = NULL; /* If the object freeing was deferred, then the real * free happens now. * This should fix the inode inconsistency problem. */ yaffs_handle_defered_free(obj); } #ifdef YAFFS_HAS_EVICT_INODE /* yaffs_evict_inode combines into one operation what was previously done in * yaffs_clear_inode() and yaffs_delete_inode() * */ static void yaffs_evict_inode(struct inode *inode) { struct yaffs_obj *obj; struct yaffs_dev *dev; int deleteme = 0; obj = yaffs_inode_to_obj(inode); yaffs_trace(YAFFS_TRACE_OS, "yaffs_evict_inode: ino %d, count %d %s", (int)inode->i_ino, atomic_read(&inode->i_count), obj ? 
"object exists" : "null object"); if (!inode->i_nlink && !is_bad_inode(inode)) deleteme = 1; truncate_inode_pages(&inode->i_data, 0); Y_CLEAR_INODE(inode); if (deleteme && obj) { dev = obj->my_dev; yaffs_gross_lock(dev); yaffs_del_obj(obj); yaffs_gross_unlock(dev); } if (obj) { dev = obj->my_dev; yaffs_gross_lock(dev); yaffs_unstitch_obj(inode, obj); yaffs_gross_unlock(dev); } } #else /* clear is called to tell the fs to release any per-inode data it holds. * The object might still exist on disk and is just being thrown out of the cache * or else the object has actually been deleted and we're being called via * the chain * yaffs_delete_inode() -> clear_inode()->yaffs_clear_inode() */ static void yaffs_clear_inode(struct inode *inode) { struct yaffs_obj *obj; struct yaffs_dev *dev; obj = yaffs_inode_to_obj(inode); yaffs_trace(YAFFS_TRACE_OS, "yaffs_clear_inode: ino %d, count %d %s", (int)inode->i_ino, atomic_read(&inode->i_count), obj ? "object exists" : "null object"); if (obj) { dev = obj->my_dev; yaffs_gross_lock(dev); yaffs_unstitch_obj(inode, obj); yaffs_gross_unlock(dev); } } /* delete is called when the link count is zero and the inode * is put (ie. nobody wants to know about it anymore, time to * delete the file). * NB Must call clear_inode() */ static void yaffs_delete_inode(struct inode *inode) { struct yaffs_obj *obj = yaffs_inode_to_obj(inode); struct yaffs_dev *dev; yaffs_trace(YAFFS_TRACE_OS, "yaffs_delete_inode: ino %d, count %d %s", (int)inode->i_ino, atomic_read(&inode->i_count), obj ? 
"object exists" : "null object"); if (obj) { dev = obj->my_dev; yaffs_gross_lock(dev); yaffs_del_obj(obj); yaffs_gross_unlock(dev); } #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13)) truncate_inode_pages(&inode->i_data, 0); #endif clear_inode(inode); } #endif #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev; struct super_block *sb = dentry->d_sb; #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf) { struct yaffs_dev *dev = yaffs_super_to_dev(sb); #else static int yaffs_statfs(struct super_block *sb, struct statfs *buf) { struct yaffs_dev *dev = yaffs_super_to_dev(sb); #endif yaffs_trace(YAFFS_TRACE_OS, "yaffs_statfs"); yaffs_gross_lock(dev); buf->f_type = YAFFS_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_namelen = 255; if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) { /* Do this if chunk size is not a power of 2 */ uint64_t bytes_in_dev; uint64_t bytes_free; bytes_in_dev = ((uint64_t) ((dev->param.end_block - dev->param.start_block + 1))) * ((uint64_t) (dev->param.chunks_per_block * dev->data_bytes_per_chunk)); do_div(bytes_in_dev, sb->s_blocksize); /* bytes_in_dev becomes the number of blocks */ buf->f_blocks = bytes_in_dev; bytes_free = ((uint64_t) (yaffs_get_n_free_chunks(dev))) * ((uint64_t) (dev->data_bytes_per_chunk)); do_div(bytes_free, sb->s_blocksize); buf->f_bfree = bytes_free; } else if (sb->s_blocksize > dev->data_bytes_per_chunk) { buf->f_blocks = (dev->param.end_block - dev->param.start_block + 1) * dev->param.chunks_per_block / (sb->s_blocksize / dev->data_bytes_per_chunk); buf->f_bfree = yaffs_get_n_free_chunks(dev) / (sb->s_blocksize / dev->data_bytes_per_chunk); } else { buf->f_blocks = (dev->param.end_block - dev->param.start_block + 1) * dev->param.chunks_per_block * (dev->data_bytes_per_chunk / sb->s_blocksize); 
buf->f_bfree = yaffs_get_n_free_chunks(dev) * (dev->data_bytes_per_chunk / sb->s_blocksize); } buf->f_files = 0; buf->f_ffree = 0; buf->f_bavail = buf->f_bfree; yaffs_gross_unlock(dev); return 0; } static int yaffs_do_sync_fs(struct super_block *sb, int request_checkpoint) { struct yaffs_dev *dev = yaffs_super_to_dev(sb); unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4); unsigned gc_urgent = yaffs_bg_gc_urgency(dev); int do_checkpoint; int dirty = yaffs_check_super_dirty(dev); yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND, "yaffs_do_sync_fs: gc-urgency %d %s %s%s", gc_urgent, dirty ? "dirty" : "clean", request_checkpoint ? "checkpoint requested" : "no checkpoint", oneshot_checkpoint ? " one-shot" : ""); yaffs_gross_lock(dev); do_checkpoint = ((request_checkpoint && !gc_urgent) || oneshot_checkpoint) && !dev->is_checkpointed; if (dirty || do_checkpoint) { yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint); yaffs_clear_super_dirty(dev); if (oneshot_checkpoint) yaffs_auto_checkpoint &= ~4; } yaffs_gross_unlock(dev); return 0; } #ifdef YAFFS_HAS_WRITE_SUPER #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) static void yaffs_write_super(struct super_block *sb) #else static int yaffs_write_super(struct super_block *sb) #endif { unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2); yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND, "yaffs_write_super %s", request_checkpoint ? " checkpt" : ""); yaffs_do_sync_fs(sb, request_checkpoint); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) return 0; #endif } #endif #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) static int yaffs_sync_fs(struct super_block *sb, int wait) #else static int yaffs_sync_fs(struct super_block *sb) #endif { unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1); yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC, "yaffs_sync_fs%s", request_checkpoint ? 
" checkpt" : ""); yaffs_do_sync_fs(sb, request_checkpoint); return 0; } static const struct super_operations yaffs_super_ops = { .statfs = yaffs_statfs, #ifndef YAFFS_USE_OWN_IGET .read_inode = yaffs_read_inode, #endif #ifdef YAFFS_HAS_PUT_INODE .put_inode = yaffs_put_inode, #endif .put_super = yaffs_put_super, #ifdef YAFFS_HAS_EVICT_INODE .evict_inode = yaffs_evict_inode, #else .delete_inode = yaffs_delete_inode, .clear_inode = yaffs_clear_inode, #endif .sync_fs = yaffs_sync_fs, #ifdef YAFFS_HAS_WRITE_SUPER .write_super = yaffs_write_super, #endif }; struct yaffs_options { int inband_tags; int tags_9bytes; int skip_checkpoint_read; int skip_checkpoint_write; int no_cache; int tags_ecc_on; int tags_ecc_overridden; int lazy_loading_enabled; int lazy_loading_overridden; int empty_lost_and_found; int empty_lost_and_found_overridden; int disable_summary; }; #define MAX_OPT_LEN 30 static int yaffs_parse_options(struct yaffs_options *options, const char *options_str) { char cur_opt[MAX_OPT_LEN + 1]; int p; int error = 0; /* Parse through the options which is a comma seperated list */ while (options_str && *options_str && !error) { memset(cur_opt, 0, MAX_OPT_LEN + 1); p = 0; while (*options_str == ',') options_str++; while (*options_str && *options_str != ',') { if (p < MAX_OPT_LEN) { cur_opt[p] = *options_str; p++; } options_str++; } if (!strcmp(cur_opt, "inband-tags")) { options->inband_tags = 1; } else if (!strcmp(cur_opt, "tags-9bytes")) { options->tags_9bytes = 1; } else if (!strcmp(cur_opt, "tags-ecc-off")) { options->tags_ecc_on = 0; options->tags_ecc_overridden = 1; } else if (!strcmp(cur_opt, "tags-ecc-on")) { options->tags_ecc_on = 1; options->tags_ecc_overridden = 1; } else if (!strcmp(cur_opt, "lazy-loading-off")) { options->lazy_loading_enabled = 0; options->lazy_loading_overridden = 1; } else if (!strcmp(cur_opt, "lazy-loading-on")) { options->lazy_loading_enabled = 1; options->lazy_loading_overridden = 1; } else if (!strcmp(cur_opt, "disable-summary")) { 
options->disable_summary = 1; } else if (!strcmp(cur_opt, "empty-lost-and-found-off")) { options->empty_lost_and_found = 0; options->empty_lost_and_found_overridden = 1; } else if (!strcmp(cur_opt, "empty-lost-and-found-on")) { options->empty_lost_and_found = 1; options->empty_lost_and_found_overridden = 1; } else if (!strcmp(cur_opt, "no-cache")) { options->no_cache = 1; } else if (!strcmp(cur_opt, "no-checkpoint-read")) { options->skip_checkpoint_read = 1; } else if (!strcmp(cur_opt, "no-checkpoint-write")) { options->skip_checkpoint_write = 1; } else if (!strcmp(cur_opt, "no-checkpoint")) { options->skip_checkpoint_read = 1; options->skip_checkpoint_write = 1; } else { printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n", cur_opt); error = 1; } } return error; } static struct dentry *yaffs_make_root(struct inode *inode) { #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) struct dentry *root = d_alloc_root(inode); if (!root) iput(inode); return root; #else return d_make_root(inode); #endif } static struct super_block *yaffs_internal_read_super(int yaffs_version, struct super_block *sb, void *data, int silent) { int n_blocks; struct inode *inode = NULL; struct dentry *root; struct yaffs_dev *dev = 0; char devname_buf[BDEVNAME_SIZE + 1]; struct mtd_info *mtd; int err; char *data_str = (char *)data; struct yaffs_linux_context *context = NULL; struct yaffs_param *param; int read_only = 0; struct yaffs_options options; unsigned mount_id; int found; struct yaffs_linux_context *context_iterator; struct list_head *l; if (!sb) { printk(KERN_INFO "yaffs: sb is NULL\n"); return NULL; } sb->s_magic = YAFFS_MAGIC; sb->s_op = &yaffs_super_ops; sb->s_flags |= MS_NOATIME; read_only = ((sb->s_flags & MS_RDONLY) != 0); #ifdef YAFFS_COMPILE_EXPORTFS sb->s_export_op = &yaffs_export_ops; #endif if (!sb->s_dev) printk(KERN_INFO "yaffs: sb->s_dev is NULL\n"); else if (!yaffs_devname(sb, devname_buf)) printk(KERN_INFO "yaffs: devname is NULL\n"); else printk(KERN_INFO "yaffs: dev is %d 
name is \"%s\" %s\n", sb->s_dev, yaffs_devname(sb, devname_buf), read_only ? "ro" : "rw"); if (!data_str) data_str = ""; printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str); memset(&options, 0, sizeof(options)); if (IS_ENABLED(CONFIG_YAFFS_9BYTE_TAGS)) options.tags_9bytes = 1; if (yaffs_parse_options(&options, data_str)) { /* Option parsing failed */ return NULL; } sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: Using yaffs%d", yaffs_version); yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: block size %d", (int)(sb->s_blocksize)); yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs: Attempting MTD mount of %u.%u,\"%s\"", MAJOR(sb->s_dev), MINOR(sb->s_dev), yaffs_devname(sb, devname_buf)); /* Get the device */ mtd = get_mtd_device(NULL, MINOR(sb->s_dev)); if (!mtd) { yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs: MTD device %u either not valid or unavailable", MINOR(sb->s_dev)); return NULL; } if (yaffs_auto_select && yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) { yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs2"); yaffs_version = 2; } /* Added NCB 26/5/2006 for completeness */ if (yaffs_version == 2 && (!options.inband_tags || options.tags_9bytes) && WRITE_SIZE(mtd) == 512) { yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1"); yaffs_version = 1; } if (yaffs_version == 2 && mtd->oobavail < sizeof(struct yaffs_packed_tags2)) { yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting inband tags"); options.inband_tags = 1; } err = yaffs_verify_mtd(mtd, yaffs_version, options.inband_tags, options.tags_9bytes); if (err < 0) return NULL; /* OK, so if we got here, we have an MTD that's NAND and looks * like it has the right capabilities * Set the struct yaffs_dev up for mtd */ if (!read_only && !(mtd->flags & MTD_WRITEABLE)) { read_only = 1; printk(KERN_INFO "yaffs: mtd is read only, setting superblock read only\n" ); sb->s_flags |= MS_RDONLY; } dev = kmalloc(sizeof(struct yaffs_dev), GFP_KERNEL); context = 
kmalloc(sizeof(struct yaffs_linux_context), GFP_KERNEL); if (!dev || !context) { if (dev) kfree(dev); if (context) kfree(context); dev = NULL; context = NULL; } if (!dev) { /* Deep shit could not allocate device structure */ yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs_read_super: Failed trying to allocate struct yaffs_dev." ); return NULL; } memset(dev, 0, sizeof(struct yaffs_dev)); param = &(dev->param); memset(context, 0, sizeof(struct yaffs_linux_context)); dev->os_context = context; INIT_LIST_HEAD(&(context->context_list)); context->dev = dev; context->super = sb; dev->read_only = read_only; #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) sb->s_fs_info = dev; #else sb->u.generic_sbp = dev; #endif dev->driver_context = mtd; param->name = mtd->name; /* Set up the memory size parameters.... */ param->n_reserved_blocks = 5; param->n_caches = (options.no_cache) ? 0 : 10; param->inband_tags = options.inband_tags; param->tags_9bytes = options.tags_9bytes; param->enable_xattr = 1; if (options.lazy_loading_overridden) param->disable_lazy_load = !options.lazy_loading_enabled; param->defered_dir_update = 1; if (options.tags_ecc_overridden) param->no_tags_ecc = !options.tags_ecc_on; param->empty_lost_n_found = 1; param->refresh_period = 500; param->disable_summary = options.disable_summary; #ifdef CONFIG_YAFFS_DISABLE_BAD_BLOCK_MARKING param->disable_bad_block_marking = 1; #endif if (options.empty_lost_and_found_overridden) param->empty_lost_n_found = options.empty_lost_and_found; /* ... and the functions. 
*/ if (yaffs_version == 2) { param->is_yaffs2 = 1; #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) param->total_bytes_per_chunk = mtd->writesize; param->chunks_per_block = mtd->erasesize / mtd->writesize; #else param->total_bytes_per_chunk = mtd->oobblock; param->chunks_per_block = mtd->erasesize / mtd->oobblock; #endif n_blocks = YCALCBLOCKS(mtd->size, mtd->erasesize); param->start_block = 0; param->end_block = n_blocks - 1; } else { param->is_yaffs2 = 0; n_blocks = YCALCBLOCKS(mtd->size, YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK); param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK; param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK; } param->start_block = 0; param->end_block = n_blocks - 1; yaffs_mtd_drv_install(dev); param->sb_dirty_fn = yaffs_set_super_dirty; param->gc_control_fn = yaffs_gc_control_callback; yaffs_dev_to_lc(dev)->super = sb; param->use_nand_ecc = 1; param->skip_checkpt_rd = options.skip_checkpoint_read; param->skip_checkpt_wr = options.skip_checkpoint_write; mutex_lock(&yaffs_context_lock); /* Get a mount id */ found = 0; for (mount_id = 0; !found; mount_id++) { found = 1; list_for_each(l, &yaffs_context_list) { context_iterator = list_entry(l, struct yaffs_linux_context, context_list); if (context_iterator->mount_id == mount_id) found = 0; } } context->mount_id = mount_id; list_add_tail(&(yaffs_dev_to_lc(dev)->context_list), &yaffs_context_list); mutex_unlock(&yaffs_context_lock); /* Directory search handling... */ INIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->search_contexts)); param->remove_obj_fn = yaffs_remove_obj_callback; mutex_init(&(yaffs_dev_to_lc(dev)->gross_lock)); yaffs_gross_lock(dev); err = yaffs_guts_initialise(dev); yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: guts initialised %s", (err == YAFFS_OK) ? 
"OK" : "FAILED"); if (err == YAFFS_OK) yaffs_bg_start(dev); if (!context->bg_thread) param->defered_dir_update = 0; sb->s_maxbytes = yaffs_max_file_size(dev); /* Release lock before yaffs_get_inode() */ yaffs_gross_unlock(dev); /* Create root inode */ if (err == YAFFS_OK) inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0, yaffs_root(dev)); if (!inode) return NULL; inode->i_op = &yaffs_dir_inode_operations; inode->i_fop = &yaffs_dir_operations; yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: got root inode"); root = yaffs_make_root(inode); if (!root) return NULL; sb->s_root = root; if(!dev->is_checkpointed) yaffs_set_super_dirty(dev); yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs_read_super: is_checkpointed %d", dev->is_checkpointed); yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: done"); return sb; } #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data, int silent) { return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL; } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) static struct dentry *yaffs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, yaffs_internal_read_super_mtd); } #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) static int yaffs_read_super(struct file_system_type *fs, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { return get_sb_bdev(fs, flags, dev_name, data, yaffs_internal_read_super_mtd, mnt); } #else static struct super_block *yaffs_read_super(struct file_system_type *fs, int flags, const char *dev_name, void *data) { return get_sb_bdev(fs, flags, dev_name, data, yaffs_internal_read_super_mtd); } #endif static struct file_system_type yaffs_fs_type = { .owner = THIS_MODULE, .name = "yaffs", #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) .mount = yaffs_mount, #else .get_sb = yaffs_read_super, #endif .kill_sb = kill_block_super, .fs_flags = 
FS_REQUIRES_DEV, }; #else static struct super_block *yaffs_read_super(struct super_block *sb, void *data, int silent) { return yaffs_internal_read_super(1, sb, data, silent); } static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super, FS_REQUIRES_DEV); #endif #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data, int silent) { return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL; } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) static struct dentry *yaffs2_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, yaffs2_internal_read_super_mtd); } #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) static int yaffs2_read_super(struct file_system_type *fs, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { return get_sb_bdev(fs, flags, dev_name, data, yaffs2_internal_read_super_mtd, mnt); } #else static struct super_block *yaffs2_read_super(struct file_system_type *fs, int flags, const char *dev_name, void *data) { return get_sb_bdev(fs, flags, dev_name, data, yaffs2_internal_read_super_mtd); } #endif static struct file_system_type yaffs2_fs_type = { .owner = THIS_MODULE, .name = "yaffs2", #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) .mount = yaffs2_mount, #else .get_sb = yaffs2_read_super, #endif .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; #else static struct super_block *yaffs2_read_super(struct super_block *sb, void *data, int silent) { return yaffs_internal_read_super(2, sb, data, silent); } static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super, FS_REQUIRES_DEV); #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) static struct proc_dir_entry *my_proc_entry; static char *yaffs_dump_dev_part0(char *buf, struct yaffs_dev *dev) { struct yaffs_param *param = &dev->param; int bs[10]; yaffs_count_blocks_by_state(dev,bs); buf += 
sprintf(buf, "start_block.......... %d\n", param->start_block); buf += sprintf(buf, "end_block............ %d\n", param->end_block); buf += sprintf(buf, "total_bytes_per_chunk %d\n", param->total_bytes_per_chunk); buf += sprintf(buf, "use_nand_ecc......... %d\n", param->use_nand_ecc); buf += sprintf(buf, "no_tags_ecc.......... %d\n", param->no_tags_ecc); buf += sprintf(buf, "is_yaffs2............ %d\n", param->is_yaffs2); buf += sprintf(buf, "inband_tags.......... %d\n", param->inband_tags); buf += sprintf(buf, "empty_lost_n_found... %d\n", param->empty_lost_n_found); buf += sprintf(buf, "disable_lazy_load.... %d\n", param->disable_lazy_load); buf += sprintf(buf, "disable_bad_block_mrk %d\n", param->disable_bad_block_marking); buf += sprintf(buf, "refresh_period....... %d\n", param->refresh_period); buf += sprintf(buf, "n_caches............. %d\n", param->n_caches); buf += sprintf(buf, "n_reserved_blocks.... %d\n", param->n_reserved_blocks); buf += sprintf(buf, "always_check_erased.. %d\n", param->always_check_erased); buf += sprintf(buf, "\n"); buf += sprintf(buf, "block count by state\n"); buf += sprintf(buf, "0:%d 1:%d 2:%d 3:%d 4:%d\n", bs[0], bs[1], bs[2], bs[3], bs[4]); buf += sprintf(buf, "5:%d 6:%d 7:%d 8:%d 9:%d\n", bs[5], bs[6], bs[7], bs[8], bs[9]); return buf; } static char *yaffs_dump_dev_part1(char *buf, struct yaffs_dev *dev) { buf += sprintf(buf, "max file size....... %lld\n", (long long) yaffs_max_file_size(dev)); buf += sprintf(buf, "data_bytes_per_chunk. %d\n", dev->data_bytes_per_chunk); buf += sprintf(buf, "chunk_grp_bits....... %d\n", dev->chunk_grp_bits); buf += sprintf(buf, "chunk_grp_size....... %d\n", dev->chunk_grp_size); buf += sprintf(buf, "n_erased_blocks...... %d\n", dev->n_erased_blocks); buf += sprintf(buf, "blocks_in_checkpt.... %d\n", dev->blocks_in_checkpt); buf += sprintf(buf, "\n"); buf += sprintf(buf, "n_tnodes............. %d\n", dev->n_tnodes); buf += sprintf(buf, "n_obj................ 
%d\n", dev->n_obj); buf += sprintf(buf, "n_free_chunks........ %d\n", dev->n_free_chunks); buf += sprintf(buf, "\n"); buf += sprintf(buf, "n_page_writes........ %u\n", dev->n_page_writes); buf += sprintf(buf, "n_page_reads......... %u\n", dev->n_page_reads); buf += sprintf(buf, "n_erasures........... %u\n", dev->n_erasures); buf += sprintf(buf, "n_gc_copies.......... %u\n", dev->n_gc_copies); buf += sprintf(buf, "all_gcs.............. %u\n", dev->all_gcs); buf += sprintf(buf, "passive_gc_count..... %u\n", dev->passive_gc_count); buf += sprintf(buf, "oldest_dirty_gc_count %u\n", dev->oldest_dirty_gc_count); buf += sprintf(buf, "n_gc_blocks.......... %u\n", dev->n_gc_blocks); buf += sprintf(buf, "bg_gcs............... %u\n", dev->bg_gcs); buf += sprintf(buf, "n_retried_writes..... %u\n", dev->n_retried_writes); buf += sprintf(buf, "n_retired_blocks..... %u\n", dev->n_retired_blocks); buf += sprintf(buf, "n_ecc_fixed.......... %u\n", dev->n_ecc_fixed); buf += sprintf(buf, "n_ecc_unfixed........ %u\n", dev->n_ecc_unfixed); buf += sprintf(buf, "n_tags_ecc_fixed..... %u\n", dev->n_tags_ecc_fixed); buf += sprintf(buf, "n_tags_ecc_unfixed... %u\n", dev->n_tags_ecc_unfixed); buf += sprintf(buf, "cache_hits........... %u\n", dev->cache_hits); buf += sprintf(buf, "n_deleted_files...... %u\n", dev->n_deleted_files); buf += sprintf(buf, "n_unlinked_files..... %u\n", dev->n_unlinked_files); buf += sprintf(buf, "refresh_count........ %u\n", dev->refresh_count); buf += sprintf(buf, "n_bg_deletions....... %u\n", dev->n_bg_deletions); buf += sprintf(buf, "tags_used............ %u\n", dev->tags_used); buf += sprintf(buf, "summary_used......... %u\n", dev->summary_used); return buf; } static int yaffs_proc_read(char *page, char **start, off_t offset, int count, int *eof, void *data) { struct list_head *item; char *buf = page; int step = offset; int n = 0; /* Get proc_file_read() to step 'offset' by one on each sucessive call. 
* We use 'offset' (*ppos) to indicate where we are in dev_list. * This also assumes the user has posted a read buffer large * enough to hold the complete output; but that's life in /proc. */ *(int *)start = 1; /* Print header first */ if (step == 0) buf += sprintf(buf, "Multi-version YAFFS built:" __DATE__ " " __TIME__ "\n"); else if (step == 1) buf += sprintf(buf, "\n"); else { step -= 2; mutex_lock(&yaffs_context_lock); /* Locate and print the Nth entry. Order N-squared but N is small. */ list_for_each(item, &yaffs_context_list) { struct yaffs_linux_context *dc = list_entry(item, struct yaffs_linux_context, context_list); struct yaffs_dev *dev = dc->dev; if (n < (step & ~1)) { n += 2; continue; } if ((step & 1) == 0) { buf += sprintf(buf, "\nDevice %d \"%s\"\n", n, dev->param.name); buf = yaffs_dump_dev_part0(buf, dev); } else { buf = yaffs_dump_dev_part1(buf, dev); } break; } mutex_unlock(&yaffs_context_lock); } return buf - page < count ? buf - page : count; } /** * Set the verbosity of the warnings and error messages. * * Note that the names can only be a..z or _ with the current code. 
*/ static struct { char *mask_name; unsigned mask_bitfield; } mask_flags[] = { {"allocate", YAFFS_TRACE_ALLOCATE}, {"always", YAFFS_TRACE_ALWAYS}, {"background", YAFFS_TRACE_BACKGROUND}, {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS}, {"buffers", YAFFS_TRACE_BUFFERS}, {"bug", YAFFS_TRACE_BUG}, {"checkpt", YAFFS_TRACE_CHECKPOINT}, {"deletion", YAFFS_TRACE_DELETION}, {"erase", YAFFS_TRACE_ERASE}, {"error", YAFFS_TRACE_ERROR}, {"gc_detail", YAFFS_TRACE_GC_DETAIL}, {"gc", YAFFS_TRACE_GC}, {"lock", YAFFS_TRACE_LOCK}, {"mtd", YAFFS_TRACE_MTD}, {"nandaccess", YAFFS_TRACE_NANDACCESS}, {"os", YAFFS_TRACE_OS}, {"scan_debug", YAFFS_TRACE_SCAN_DEBUG}, {"scan", YAFFS_TRACE_SCAN}, {"mount", YAFFS_TRACE_MOUNT}, {"tracing", YAFFS_TRACE_TRACING}, {"sync", YAFFS_TRACE_SYNC}, {"write", YAFFS_TRACE_WRITE}, {"verify", YAFFS_TRACE_VERIFY}, {"verify_nand", YAFFS_TRACE_VERIFY_NAND}, {"verify_full", YAFFS_TRACE_VERIFY_FULL}, {"verify_all", YAFFS_TRACE_VERIFY_ALL}, {"all", 0xffffffff}, {"none", 0}, {NULL, 0}, }; #define MAX_MASK_NAME_LENGTH 40 static int yaffs_proc_write_trace_options(struct file *file, const char *buf, unsigned long count, void *data) { unsigned rg = 0, mask_bitfield; char *end; char *mask_name; const char *x; char substring[MAX_MASK_NAME_LENGTH + 1]; int i; int done = 0; int add, len = 0; int pos = 0; rg = yaffs_trace_mask; while (!done && (pos < count)) { done = 1; while ((pos < count) && isspace(buf[pos])) pos++; switch (buf[pos]) { case '+': case '-': case '=': add = buf[pos]; pos++; break; default: add = ' '; break; } mask_name = NULL; mask_bitfield = simple_strtoul(buf + pos, &end, 0); if (end > buf + pos) { mask_name = "numeral"; len = end - (buf + pos); pos += len; done = 0; } else { for (x = buf + pos, i = 0; (*x == '_' || (*x >= 'a' && *x <= 'z')) && i < MAX_MASK_NAME_LENGTH; x++, i++, pos++) substring[i] = *x; substring[i] = '\0'; for (i = 0; mask_flags[i].mask_name != NULL; i++) { if (strcmp(substring, mask_flags[i].mask_name) == 0) { mask_name = 
mask_flags[i].mask_name; mask_bitfield = mask_flags[i].mask_bitfield; done = 0; break; } } } if (mask_name != NULL) { done = 0; switch (add) { case '-': rg &= ~mask_bitfield; break; case '+': rg |= mask_bitfield; break; case '=': rg = mask_bitfield; break; default: rg |= mask_bitfield; break; } } } yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS; printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask); if (rg & YAFFS_TRACE_ALWAYS) { for (i = 0; mask_flags[i].mask_name != NULL; i++) { char flag; flag = ((rg & mask_flags[i].mask_bitfield) == mask_flags[i].mask_bitfield) ? '+' : '-'; printk(KERN_DEBUG "%c%s\n", flag, mask_flags[i].mask_name); } } return count; } /* Debug strings are of the form: * .bnnn print info on block n * .cobjn,chunkn print nand chunk id for objn:chunkn */ static int yaffs_proc_debug_write(struct file *file, const char *buf, unsigned long count, void *data) { char str[100]; char *p0; char *p1; long p1_val; long p0_val; char cmd; struct list_head *item; memset(str, 0, sizeof(str)); memcpy(str, buf, min(count, sizeof(str) -1)); cmd = str[1]; p0 = str + 2; p1 = p0; while (*p1 && *p1 != ',') { p1++; } *p1 = '\0'; p1++; p0_val = simple_strtol(p0, NULL, 0); p1_val = simple_strtol(p1, NULL, 0); mutex_lock(&yaffs_context_lock); /* Locate and print the Nth entry. Order N-squared but N is small. 
*/
	list_for_each(item, &yaffs_context_list) {
		struct yaffs_linux_context *dc =
		    list_entry(item, struct yaffs_linux_context,
			       context_list);
		struct yaffs_dev *dev = dc->dev;

		if (cmd == 'b') {
			/* ".bNNN": print the state of block NNN on each
			 * mounted yaffs device. */
			struct yaffs_block_info *bi;

			bi = yaffs_get_block_info(dev, p0_val);

			if (bi) {
				printk("Block %d: state %d, retire %d, use %d, seq %d\n",
					(int)p0_val, bi->block_state,
					bi->needs_retiring, bi->pages_in_use,
					bi->seq_number);
			}
		} else if (cmd == 'c') {
			/* ".cOBJ,CHUNK": translate object:chunk to the NAND
			 * chunk id that backs it. */
			struct yaffs_obj *obj;
			int nand_chunk;

			obj = yaffs_find_by_number(dev, p0_val);
			if (!obj)
				printk("No obj %d\n", (int)p0_val);
			else {
				/* Chunk 0 means "the object header". */
				if (p1_val == 0)
					nand_chunk = obj->hdr_chunk;
				else
					nand_chunk =
					    yaffs_find_chunk_in_file(obj,
							p1_val, NULL);
				printk("Nand chunk for %d:%d is %d\n",
					(int)p0_val, (int)p1_val, nand_chunk);
			}
		}
	}
	mutex_unlock(&yaffs_context_lock);

	return count;
}

/*
 * Dispatch a write to /proc/yaffs: a leading '.' selects the debug
 * command parser, anything else adjusts the trace mask.
 */
static int yaffs_proc_write(struct file *file, const char *buf,
			    unsigned long count, void *data)
{
	if (buf[0] == '.')
		return yaffs_proc_debug_write(file, buf, count, data);
	return yaffs_proc_write_trace_options(file, buf, count, data);
}
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */

/* Stuff to handle installation of file systems */
struct file_system_to_install {
	struct file_system_type *fst;
	int installed;	/* non-zero once register_filesystem() succeeded */
};

/* NULL-terminated table of the filesystem flavours this module registers. */
static struct file_system_to_install fs_to_install[] = {
	{&yaffs_fs_type, 0},
	{&yaffs2_fs_type, 0},
	{NULL, 0}
};

/*
 * Module init: create the /proc/yaffs entry (pre-3.9 kernels only) and
 * register both filesystem types.  On any registration failure, every
 * type installed so far is unregistered and the error is returned.
 */
static int __init init_yaffs_fs(void)
{
	int error = 0;
	struct file_system_to_install *fsinst;

	yaffs_trace(YAFFS_TRACE_ALWAYS,
		"yaffs built " __DATE__ " " __TIME__ " Installing.");

	mutex_init(&yaffs_context_lock);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
	/* Install the proc_fs entries */
	my_proc_entry = create_proc_entry("yaffs",
					  S_IRUGO | S_IFREG, YPROC_ROOT);

	if (my_proc_entry) {
		my_proc_entry->write_proc = yaffs_proc_write;
		my_proc_entry->read_proc = yaffs_proc_read;
		my_proc_entry->data = NULL;
	} else {
		return -ENOMEM;
	}
#endif

	/* Now add the file system entries */
	fsinst = fs_to_install;

	while (fsinst->fst && !error) {
		error = register_filesystem(fsinst->fst);
		if (!error)
			fsinst->installed = 1;
		fsinst++;
	}

	/* Any errors? uninstall */
	if (error) {
		fsinst = fs_to_install;
		while (fsinst->fst) {
			if (fsinst->installed) {
				unregister_filesystem(fsinst->fst);
				fsinst->installed = 0;
			}
			fsinst++;
		}
	}

	return error;
}

/* Module exit: remove the proc entry and unregister whatever was installed. */
static void __exit exit_yaffs_fs(void)
{
	struct file_system_to_install *fsinst;

	yaffs_trace(YAFFS_TRACE_ALWAYS,
		"yaffs built " __DATE__ " " __TIME__ " removing.");

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
	remove_proc_entry("yaffs", YPROC_ROOT);
#endif

	fsinst = fs_to_install;

	while (fsinst->fst) {
		if (fsinst->installed) {
			unregister_filesystem(fsinst->fst);
			fsinst->installed = 0;
		}
		fsinst++;
	}
}

module_init(init_yaffs_fs)
module_exit(exit_yaffs_fs)

MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2011");
MODULE_LICENSE("GPL");
gpl-2.0
Predator-SD/Tarixy
busybox-1.7.2/coreutils/libcoreutils/getopt_mk_fifo_nod.c
4
1308
/* vi: set sw=4 ts=4: */
/*
 * coreutils utility routine
 *
 * Copyright (C) 2003 Manuel Novoa III <mjn3@codepoet.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include "libbb.h"
#include "coreutils.h"

/*
 * Shared option parsing for mkfifo/mknod: handle -m MODE (and -Z CONTEXT
 * when SELinux support is compiled in) and return the creation mode.
 */
mode_t getopt_mk_fifo_nod(int argc, char **argv)
{
	mode_t mode = 0666;
	char *mode_str = NULL;
#if ENABLE_SELINUX
	security_context_t sec_ctx;
#endif
	int opts;

	opts = getopt32(argv, "m:" USE_SELINUX("Z:"),
			&mode_str USE_SELINUX(, &sec_ctx));

	/* -m given and parsed ok: clear umask so the mode applies exactly */
	if ((opts & 1) && bb_parse_mode(mode_str, &mode))
		umask(0);
#if ENABLE_SELINUX
	if (opts & 2) {
		selinux_or_die();
		setfscreatecon_or_die(sec_ctx);
	}
#endif
	return mode;
}
gpl-2.0
tuliom/glibc
sysdeps/powerpc/powerpc64/multiarch/strstr-ppc64.c
4
1098
/* Copyright (C) 2015-2022 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <https://www.gnu.org/licenses/>. */ #include <string.h> #define STRSTR __strstr_ppc #if IS_IN (libc) && defined(SHARED) # undef libc_hidden_builtin_def # define libc_hidden_builtin_def(name) \ __hidden_ver1(__strstr_ppc, __GI_strstr, __strstr_ppc); #endif extern __typeof (strstr) __strstr_ppc attribute_hidden; #include <string/strstr.c>
gpl-2.0
tianya3796/linux2.6.9
arch/mips/pmc-sierra/yosemite/setup.c
4
5784
/* * Copyright (C) 2003 PMC-Sierra Inc. * Author: Manish Lachwani (lachwani@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/bcd.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/bootmem.h> #include <linux/swap.h> #include <linux/ioport.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/timex.h> #include <asm/time.h> #include <asm/bootinfo.h> #include <asm/page.h> #include <asm/bootinfo.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/reboot.h> #include <asm/pci_channel.h> #include <asm/serial.h> #include <linux/termios.h> #include <linux/tty.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <asm/titan_dep.h> #include "setup.h" unsigned char titan_ge_mac_addr_base[6] = { 0x00, 0x03, 0xcc, 0x1d, 0x22, 0x00 }; unsigned long cpu_clock; unsigned long yosemite_base; void __init bus_error_init(void) { /* Do nothing */ } unsigned long m48t37y_get_time(void) { //unsigned char *rtc_base = (unsigned char *) YOSEMITE_RTC_BASE; unsigned char *rtc_base = (unsigned char *) 0xfc000000UL; unsigned int year, month, day, hour, min, sec; return; /* Stop the update to the time */ rtc_base[0x7ff8] = 0x40; year = BCD2BIN(rtc_base[0x7fff]); year += BCD2BIN(rtc_base[0x7fff1]) * 100; month = BCD2BIN(rtc_base[0x7ffe]); day = BCD2BIN(rtc_base[0x7ffd]); hour = BCD2BIN(rtc_base[0x7ffb]); min = BCD2BIN(rtc_base[0x7ffa]); sec = BCD2BIN(rtc_base[0x7ff9]); /* Start the update to the time again */ rtc_base[0x7ff8] = 0x00; return mktime(year, month, day, hour, min, sec); } int m48t37y_set_time(unsigned long sec) { unsigned char *rtc_base = (unsigned char *) YOSEMITE_RTC_BASE; struct rtc_time tm; return; /* convert to a more useful format -- note months count from 0 */ to_tm(sec, &tm); tm.tm_mon += 1; /* enable writing */ rtc_base[0x7ff8] = 0x80; /* year */ rtc_base[0x7fff] = BIN2BCD(tm.tm_year % 100); rtc_base[0x7ff1] = BIN2BCD(tm.tm_year / 100); /* month */ rtc_base[0x7ffe] = BIN2BCD(tm.tm_mon); /* day */ rtc_base[0x7ffd] = 
BIN2BCD(tm.tm_mday); /* hour/min/sec */ rtc_base[0x7ffb] = BIN2BCD(tm.tm_hour); rtc_base[0x7ffa] = BIN2BCD(tm.tm_min); rtc_base[0x7ff9] = BIN2BCD(tm.tm_sec); /* day of week -- not really used, but let's keep it up-to-date */ rtc_base[0x7ffc] = BIN2BCD(tm.tm_wday + 1); /* disable writing */ rtc_base[0x7ff8] = 0x00; return 0; } void yosemite_timer_setup(struct irqaction *irq) { setup_irq(7, irq); } void yosemite_time_init(void) { board_timer_setup = yosemite_timer_setup; mips_hpt_frequency = cpu_clock / 2; rtc_get_time = m48t37y_get_time; rtc_set_time = m48t37y_set_time; } unsigned long uart_base = 0xfd000000L; /* No other usable initialization hook than this ... */ extern void (*late_time_init)(void); unsigned long ocd_base; EXPORT_SYMBOL(ocd_base); /* * Common setup before any secondaries are started */ #define TITAN_UART_CLK 3686400 #define TITAN_SERIAL_BASE_BAUD (TITAN_UART_CLK / 16) #define TITAN_SERIAL_IRQ 4 #define TITAN_SERIAL_BASE 0xfd000008UL static void __init py_map_ocd(void) { struct uart_port up; /* * Not specifically interrupt stuff but in case of SMP core_send_ipi * needs this first so I'm mapping it here ... */ ocd_base = (unsigned long) ioremap(OCD_BASE, OCD_SIZE); if (!ocd_base) panic("Mapping OCD failed - game over. Your score is 0."); /* * Register to interrupt zero because we share the interrupt with * the serial driver which we don't properly support yet. */ memset(&up, 0, sizeof(up)); up.membase = (unsigned char *) ioremap(TITAN_SERIAL_BASE, 8); up.irq = TITAN_SERIAL_IRQ; up.uartclk = TITAN_UART_CLK; up.regshift = 0; up.iotype = UPIO_MEM; up.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST; up.line = 0; if (early_serial_setup(&up)) printk(KERN_ERR "Early serial init of port 0 failed\n"); } static int __init pmc_yosemite_setup(void) { extern void pmon_smp_bootstrap(void); board_time_init = yosemite_time_init; late_time_init = py_map_ocd; /* Add memory regions */ add_memory_region(0x00000000, 0x10000000, BOOT_MEM_RAM); #if 0 /* XXX Crash ... 
*/ OCD_WRITE(RM9000x2_OCD_HTSC, OCD_READ(RM9000x2_OCD_HTSC) | HYPERTRANSPORT_ENABLE); /* Set the BAR. Shifted mode */ OCD_WRITE(RM9000x2_OCD_HTBAR0, HYPERTRANSPORT_BAR0_ADDR); OCD_WRITE(RM9000x2_OCD_HTMASK0, HYPERTRANSPORT_SIZE0); #endif return 0; } early_initcall(pmc_yosemite_setup);
gpl-2.0
capturePointer/codelite
Plugin/wxcrafter_plugin_bitmaps.cpp
4
27489
// // This file was automatically generated by wxrc, do not edit by hand. // #include <wx/wxprec.h> #ifdef __BORLANDC__ #pragma hdrstop #endif #include <wx/filesys.h> #include <wx/fs_mem.h> #include <wx/xrc/xmlres.h> #include <wx/xrc/xh_all.h> #if wxCHECK_VERSION(2,8,5) && wxABI_VERSION >= 20805 #define XRC_ADD_FILE(name, data, size, mime) \ wxMemoryFSHandler::AddFileWithMimeType(name, data, size, mime) #else #define XRC_ADD_FILE(name, data, size, mime) \ wxMemoryFSHandler::AddFile(name, data, size) #endif static size_t xml_res_size_0 = 648; static unsigned char xml_res_file_0[] = { 137,80,78,71,13,10,26,10,0,0,0,13,73,72,68,82,0,0,0,32,0,0,0,32,8,6,0,0, 0,115,122,122,244,0,0,0,6,98,75,71,68,0,255,0,255,0,255,160,189,167,147, 0,0,0,9,112,72,89,115,0,0,11,19,0,0,11,19,1,0,154,156,24,0,0,0,7,116,73, 77,69,7,222,2,11,18,7,57,70,199,106,79,0,0,2,21,73,68,65,84,88,195,237, 151,77,75,27,81,20,134,223,147,4,252,216,4,210,177,43,215,249,248,5,174, 149,150,34,173,21,4,81,247,18,164,5,41,34,82,44,157,224,8,93,118,89,16, 145,182,40,106,84,92,248,129,11,23,130,109,169,109,253,3,153,100,231,66, 186,200,166,27,45,73,238,113,161,209,123,147,155,201,76,108,18,44,30,56, 48,51,204,189,247,185,239,156,243,94,6,184,143,187,30,137,141,227,173,196, 198,49,215,58,222,247,15,24,250,174,64,184,89,0,178,26,220,112,0,102,86, 210,92,255,197,77,5,96,102,188,93,251,201,13,3,16,172,207,55,73,119,16, 4,0,211,171,63,230,0,196,235,209,37,239,134,187,168,170,2,130,57,46,152, 81,143,124,189,114,196,110,0,234,154,83,203,223,217,25,64,136,186,231,228, 210,55,110,154,2,197,156,88,252,202,21,20,224,134,229,171,207,95,184,169, 0,66,48,198,63,30,94,67,4,0,160,51,119,130,179,211,140,167,246,250,243, 32,134,191,237,15,111,221,166,1,0,56,59,205,192,52,77,79,3,45,203,194,239, 214,142,154,22,253,48,218,77,10,64,49,178,217,44,132,16,151,14,69,55,254, 81,188,38,34,248,253,126,4,131,65,0,64,65,120,63,0,231,226,61,84,166,192, 181,173,10,1,219,182,149,5,21,219,36,66,44,22,83,222,247,18,243,99,143, 
72,251,9,228,136,68,34,142,10,168,192,238,21,88,120,241,152,42,214,128, 188,144,172,64,217,193,81,162,64,129,221,41,240,233,229,19,114,44,66,57, 194,225,176,102,199,231,96,110,129,207,167,30,158,249,66,117,128,165,241, 94,170,218,5,114,164,211,105,229,62,26,141,2,104,3,112,14,160,181,108,242, 161,247,59,21,191,67,114,226,25,185,106,67,89,98,157,2,68,4,162,54,237, 4,185,124,65,251,124,115,170,159,92,251,128,28,182,109,59,22,223,165,34, 206,0,219,211,3,228,201,136,74,187,64,175,128,126,206,124,9,192,158,57, 72,158,157,80,222,113,42,149,210,238,188,88,253,165,32,114,33,238,207,12, 81,77,86,172,83,64,7,161,83,33,47,196,54,128,190,131,217,17,170,197,150, 21,128,80,40,228,121,130,131,217,145,231,183,62,140,12,195,216,181,44,235, 169,151,129,134,97,36,255,139,127,203,11,15,61,127,111,56,81,112,148,0, 0,0,0,73,69,78,68,174,66,96,130}; static size_t xml_res_size_1 = 649; static unsigned char xml_res_file_1[] = { 137,80,78,71,13,10,26,10,0,0,0,13,73,72,68,82,0,0,0,32,0,0,0,32,8,6,0,0, 0,115,122,122,244,0,0,0,6,98,75,71,68,0,255,0,255,0,255,160,189,167,147, 0,0,0,9,112,72,89,115,0,0,11,19,0,0,11,19,1,0,154,156,24,0,0,0,7,116,73, 77,69,7,222,2,11,18,8,5,238,48,10,7,0,0,2,22,73,68,65,84,88,195,99,96,24, 5,67,17,212,175,57,243,191,126,205,153,77,212,48,139,137,28,203,161,76, 95,186,59,0,201,114,170,1,22,98,21,214,173,62,253,255,255,127,170,219,79, 156,3,106,87,157,162,137,229,68,57,160,122,229,169,255,255,254,211,46,65, 51,226,147,172,90,113,146,86,86,207,106,139,48,79,199,155,8,43,150,159, 248,255,239,255,127,6,26,225,52,188,81,80,182,236,248,255,127,255,255,211, 165,76,193,112,64,201,146,163,255,255,253,251,71,183,66,13,197,1,69,139, 143,208,205,231,24,14,40,88,120,248,255,191,127,244,181,28,238,128,188, 249,135,200,182,92,246,247,19,134,111,207,111,147,164,231,179,176,54,170, 3,254,253,39,63,206,191,61,191,205,80,87,87,71,146,158,166,166,38,84,7, 76,73,114,96,204,154,179,159,162,240,127,251,246,45,3,44,241,50,50,34,138, 
23,24,155,145,145,145,129,153,153,153,129,159,159,31,123,26,152,150,226, 200,152,62,107,31,217,142,248,247,239,31,195,173,91,183,80,44,68,41,241, 24,25,25,52,53,53,241,231,130,153,105,78,140,169,51,246,144,237,8,117,117, 117,188,33,64,84,57,48,59,195,133,49,121,218,110,146,29,193,200,200,136, 18,2,216,228,9,134,0,12,204,205,114,101,76,152,186,147,100,71,168,169,169, 97,241,241,15,134,255,255,217,25,152,152,152,72,171,13,23,100,187,51,198, 76,218,78,146,35,110,223,70,205,142,26,26,26,12,12,12,156,12,12,12,63,24, 24,24,56,72,111,17,45,201,243,100,252,253,231,47,3,46,188,36,207,147,17, 57,136,213,212,212,24,212,213,213,25,212,213,213,161,150,67,196,153,152, 56,137,79,3,232,96,101,145,15,99,80,215,70,162,66,226,214,173,91,120,19, 31,204,81,36,183,136,214,149,249,51,250,182,173,251,79,76,46,64,183,152, 145,145,17,167,239,73,106,19,110,174,10,98,244,108,90,253,31,95,46,184, 121,243,38,86,159,195,82,63,54,135,176,144,146,200,182,215,133,50,186,214, 175,252,79,40,4,176,57,130,236,52,128,14,118,55,134,51,58,214,46,255,207, 192,192,176,25,89,92,72,72,136,182,205,114,100,176,191,57,18,238,29,17, 17,145,173,77,77,77,222,164,232,23,17,17,89,57,218,191,132,1,0,118,110, 255,14,215,21,22,68,0,0,0,0,73,69,78,68,174,66,96,130}; static size_t xml_res_size_2 = 261; static unsigned char xml_res_file_2[] = { 137,80,78,71,13,10,26,10,0,0,0,13,73,72,68,82,0,0,0,32,0,0,0,32,8,6,0,0, 0,115,122,122,244,0,0,0,204,73,68,65,84,120,94,237,215,65,10,194,48,20, 4,208,70,188,145,231,232,202,59,164,215,232,53,244,14,110,244,28,158,200, 228,143,191,32,204,34,171,180,101,190,160,129,240,151,243,152,79,161,73, 0,134,200,115,240,251,219,128,227,50,230,219,115,11,224,238,119,244,155, 122,0,243,249,180,91,3,227,103,226,27,86,128,85,43,0,176,55,34,233,1,253, 8,2,140,249,98,4,27,184,248,200,114,4,27,0,195,165,8,2,72,18,34,8,48,35, 64,134,104,27,208,35,216,0,1,42,68,32,128,8,2,96,131,250,132,54,208,172, 160,106,1,41,234,51,100,120,11,64,72,56,87,0,139,8,39,160,84,187,250,200, 
226,112,2,94,165,78,62,166,149,0,244,135,183,0,65,237,66,64,247,47,89,33, 64,26,78,64,181,144,112,2,108,19,224,193,135,73,255,249,63,78,195,1,111, 156,143,95,96,18,4,249,13,0,0,0,0,73,69,78,68,174,66,96,130}; static size_t xml_res_size_3 = 259; static unsigned char xml_res_file_3[] = { 137,80,78,71,13,10,26,10,0,0,0,13,73,72,68,82,0,0,0,32,0,0,0,32,8,6,0,0, 0,115,122,122,244,0,0,0,202,73,68,65,84,120,94,237,215,193,13,194,48,12, 5,208,6,177,17,115,192,133,29,96,141,206,209,29,56,117,14,38,34,241,39, 8,161,47,148,3,138,69,190,15,80,201,189,254,87,219,169,148,4,96,138,124, 54,181,126,27,176,125,188,230,203,181,23,128,90,107,173,131,23,48,31,119, 238,14,188,150,102,31,49,2,12,25,1,0,71,184,22,128,161,75,104,112,132,139, 58,128,65,157,89,106,157,217,1,64,253,229,167,79,0,72,127,68,102,22,18, 222,116,128,225,106,128,129,225,65,0,134,135,0,96,83,44,192,144,66,71,80, 158,59,208,32,212,199,144,136,160,83,32,71,112,4,92,66,37,130,128,92,8, 16,33,150,55,192,45,23,2,250,16,201,111,104,1,46,132,0,64,196,48,64,38, 64,136,104,151,80,142,32,192,8,232,64,172,223,0,252,47,167,225,128,59,232, 234,107,51,227,177,82,145,0,0,0,0,73,69,78,68,174,66,96,130}; static size_t xml_res_size_4 = 273; static unsigned char xml_res_file_4[] = { 137,80,78,71,13,10,26,10,0,0,0,13,73,72,68,82,0,0,0,32,0,0,0,32,8,6,0,0, 0,115,122,122,244,0,0,0,216,73,68,65,84,120,94,237,151,221,13,2,33,16,132, 89,99,71,214,129,47,246,160,109,92,27,218,131,47,90,135,21,121,236,200, 147,99,114,255,192,6,147,187,77,54,199,61,144,249,50,12,16,4,128,171,89, 251,223,159,230,254,26,18,184,198,62,47,20,184,197,190,244,137,54,167,195, 119,188,155,199,217,21,207,159,67,128,255,89,2,56,84,6,64,109,7,128,149, 3,40,214,158,1,213,45,3,186,242,37,80,96,115,224,17,63,190,160,64,159,165, 207,216,199,33,7,188,179,47,63,182,11,132,212,102,37,83,25,176,132,144, 57,33,180,130,144,201,16,6,30,68,132,48,20,239,2,4,2,20,132,144,217,219, 240,77,128,82,16,178,232,28,104,219,48,102,33,178,197,167,29,32,64,30,4, 
197,83,28,200,128,160,120,26,0,51,144,10,33,89,119,65,80,2,36,64,80,220, 30,128,16,28,167,85,237,215,113,253,183,225,7,121,210,102,77,100,22,152, 248,0,0,0,0,73,69,78,68,174,66,96,130}; static size_t xml_res_size_5 = 275; static unsigned char xml_res_file_5[] = { 137,80,78,71,13,10,26,10,0,0,0,13,73,72,68,82,0,0,0,32,0,0,0,32,8,6,0,0, 0,115,122,122,244,0,0,0,218,73,68,65,84,120,94,229,215,209,9,194,48,24, 4,224,164,184,75,159,187,65,23,208,1,28,192,41,124,116,138,14,224,2,93, 160,27,248,236,44,38,57,139,79,226,97,241,250,19,126,161,7,165,143,247, 113,13,133,68,0,193,51,187,247,130,254,124,149,0,125,215,190,244,211,237, 30,21,192,116,57,126,1,116,173,2,192,7,36,134,21,105,194,186,32,112,96, 254,4,8,208,203,25,17,215,3,96,42,103,132,14,128,80,174,32,236,0,150,217, 17,12,40,128,82,110,71,240,2,74,185,29,193,11,20,40,229,118,4,47,0,42,175, 137,224,5,80,168,188,38,130,1,116,10,235,34,24,0,140,243,107,95,25,48,46, 45,112,88,80,203,177,254,7,234,135,23,240,6,160,184,47,176,113,64,70,113, 6,100,103,192,195,27,144,82,246,94,32,111,124,129,228,125,6,114,249,95, 192,48,63,39,17,48,252,2,112,191,29,55,193,57,79,87,170,117,156,211,208, 110,126,0,0,0,0,73,69,78,68,174,66,96,130}; static size_t xml_res_size_6 = 611; static unsigned char xml_res_file_6[] = { 137,80,78,71,13,10,26,10,0,0,0,13,73,72,68,82,0,0,0,32,0,0,0,32,8,6,0,0, 0,115,122,122,244,0,0,2,42,73,68,65,84,120,94,237,215,65,104,19,65,24,134, 97,87,77,226,106,21,9,4,3,145,66,37,32,120,202,73,16,4,65,40,20,149,21, 37,16,40,8,30,10,130,232,221,131,23,65,41,120,242,38,241,162,88,20,130, 136,180,162,20,60,137,226,65,136,20,4,15,82,12,164,40,197,210,61,152,16, 155,173,155,241,61,140,236,178,76,102,119,216,90,15,245,131,7,246,48,236, 255,243,195,204,206,90,66,136,109,255,50,219,177,181,27,216,9,130,25,203, 164,64,1,163,200,194,131,139,22,146,229,130,80,52,16,239,56,106,56,135, 18,162,113,49,143,6,230,160,202,28,28,245,4,4,212,57,130,105,56,208,37, 143,73,105,1,119,113,15,218,10,65,3,3,101,241,26,238,195,134,73,42,168, 
75,86,80,220,172,129,107,152,70,218,8,16,179,6,170,65,241,244,76,27,40, 227,225,230,111,67,31,4,55,96,71,70,104,109,214,4,42,152,68,16,117,113, 23,77,180,80,192,81,148,54,98,2,23,17,151,103,184,132,21,252,137,141,89, 140,167,109,96,2,186,188,192,121,68,243,19,199,210,78,96,132,135,195,32, 80,23,153,210,109,181,180,13,140,198,172,125,141,101,229,241,154,60,207, 245,19,208,231,43,84,113,54,106,27,118,99,214,150,254,246,54,108,199,172, 61,129,34,150,145,52,251,34,231,136,64,15,254,176,9,44,162,12,85,108,212, 113,22,195,114,8,7,208,199,58,124,105,0,33,181,116,219,112,30,87,48,44, 14,158,224,114,228,28,200,224,20,202,88,193,15,244,208,135,135,95,82,7, 3,93,3,15,116,13,72,85,156,196,251,208,73,120,16,29,124,193,30,184,161, 38,214,224,97,29,223,226,14,162,38,26,168,65,151,124,228,208,90,69,27,107, 114,65,22,187,208,13,77,98,21,223,147,124,13,175,195,49,188,132,100,176, 23,251,209,15,189,59,139,28,122,120,7,145,228,107,184,136,41,60,50,220, 77,57,140,160,31,122,219,14,89,99,1,75,38,23,146,199,24,195,77,131,235, 125,6,54,118,195,11,189,241,35,222,192,248,74,118,11,109,212,97,67,23,11, 25,228,96,195,131,143,89,52,204,175,100,129,25,52,113,27,167,181,19,8,198, 157,197,18,238,224,131,225,133,68,233,19,206,196,252,23,88,232,226,37,158, 226,149,249,81,44,160,207,91,233,42,74,40,34,15,23,29,124,134,113,254,255, 29,255,6,218,250,158,205,145,11,184,210,0,0,0,0,73,69,78,68,174,66,96,130}; static size_t xml_res_size_7 = 470; static unsigned char xml_res_file_7[] = { 137,80,78,71,13,10,26,10,0,0,0,13,73,72,68,82,0,0,0,32,0,0,0,32,8,6,0,0, 0,115,122,122,244,0,0,1,157,73,68,65,84,120,94,237,151,79,43,68,81,24,198, 159,209,124,20,132,34,101,33,127,154,44,70,54,174,148,134,40,202,70,81, 40,95,192,245,5,148,20,101,99,161,148,133,146,177,17,11,148,133,133,146, 132,178,99,239,3,204,156,243,190,132,58,119,188,103,220,185,231,52,205, 194,252,234,174,206,115,223,251,235,121,111,183,110,138,153,81,75,26,80, 99,210,136,16,30,222,174,2,8,81,125,194,112,172,107,77,8,216,30,190,50, 
220,9,95,214,79,238,96,121,142,16,176,66,204,56,184,126,129,43,19,125,77, 149,175,128,193,86,129,92,111,35,92,33,102,51,55,86,128,171,211,0,51,42, 21,144,73,34,70,174,199,163,1,98,51,55,78,128,108,2,12,236,95,61,195,149, 169,76,139,153,235,210,128,34,194,120,127,51,92,81,68,9,26,32,25,212,196, 216,187,120,132,43,211,3,109,32,242,106,128,49,153,105,133,43,202,247,29, 208,68,216,61,187,135,43,179,131,29,32,175,21,48,99,38,219,14,87,52,51, 136,156,26,48,82,27,71,55,112,101,121,180,219,175,129,167,183,119,44,125, 14,73,140,185,63,73,3,36,2,151,15,175,95,151,39,142,13,24,254,139,0,83, 109,5,116,125,5,68,176,177,57,151,133,43,139,59,231,136,144,7,16,148,21, 40,106,13,131,207,106,226,103,218,5,148,61,172,34,205,44,108,159,34,202, 214,252,208,223,2,42,153,128,169,40,130,210,44,36,197,89,188,64,222,182, 134,116,233,48,26,1,32,38,22,180,142,102,228,153,196,150,15,98,223,1,245, 61,44,245,91,162,160,168,52,35,207,36,246,124,32,87,32,109,133,68,81,147, 204,152,179,184,6,146,124,136,126,194,70,226,24,64,160,153,203,101,96,206, 36,50,47,255,192,234,127,199,31,218,228,30,7,140,193,91,84,0,0,0,0,73,69, 78,68,174,66,96,130}; static size_t xml_res_size_8 = 203; static unsigned char xml_res_file_8[] = { 137,80,78,71,13,10,26,10,0,0,0,13,73,72,68,82,0,0,0,16,0,0,0,16,8,6,0,0, 0,31,243,255,97,0,0,0,6,98,75,71,68,0,255,0,255,0,255,160,189,167,147,0, 0,0,9,112,72,89,115,0,0,18,116,0,0,18,116,1,222,102,31,120,0,0,0,7,116, 73,77,69,7,223,9,15,5,4,24,11,4,233,196,0,0,0,88,73,68,65,84,56,203,229, 146,49,14,192,48,8,3,173,254,255,91,188,5,130,152,248,128,59,117,201,66, 137,212,102,136,71,15,150,238,100,224,172,144,164,169,82,68,248,116,87, 103,32,51,49,220,97,170,88,26,136,8,152,25,134,251,38,230,182,131,153,185, 237,96,102,254,222,65,197,92,58,168,152,75,7,21,243,255,63,120,147,27,79, 4,165,19,44,104,199,124,0,0,0,0,73,69,78,68,174,66,96,130}; static size_t xml_res_size_9 = 577; static unsigned char xml_res_file_9[] = { 137,80,78,71,13,10,26,10,0,0,0,13,73,72,68,82,0,0,0,32,0,0,0,32,8,6,0,0, 
0,115,122,122,244,0,0,2,8,73,68,65,84,120,94,205,148,193,106,83,65,24,133, 191,255,159,105,154,88,104,193,66,17,92,84,220,182,107,193,87,104,92,244, 73,68,55,174,74,5,113,229,74,124,16,41,72,218,71,16,92,183,15,32,34,184, 113,83,33,96,123,239,28,189,224,64,17,114,231,118,66,181,223,38,4,242,157, 115,50,147,92,147,196,255,196,128,120,255,213,241,81,99,62,149,4,26,102, 25,134,59,164,4,226,26,158,25,81,105,246,245,112,111,31,104,34,48,105,240, 233,235,39,143,104,83,194,141,34,73,96,230,188,56,250,200,155,253,199,72, 195,189,224,206,193,135,79,83,96,2,252,136,192,184,85,98,125,53,240,116, 246,133,24,140,190,44,1,77,43,222,238,109,115,209,180,191,61,231,217,241, 112,239,221,244,1,93,31,48,206,3,44,9,54,86,35,15,55,199,172,184,225,214, 255,45,46,147,186,207,211,74,85,94,82,190,16,136,0,109,18,147,24,184,183, 54,38,56,69,218,4,227,24,185,104,82,247,122,45,175,235,233,250,174,178, 117,231,96,166,26,120,254,94,53,116,125,192,86,62,1,242,160,179,211,83, 100,134,153,177,8,73,152,196,206,238,46,164,58,47,247,229,1,25,226,104, 4,230,80,250,57,41,177,156,183,96,64,240,128,204,177,158,28,9,76,70,165, 183,104,128,0,8,209,193,156,34,34,83,233,105,225,9,12,12,50,42,189,194, 21,4,71,229,32,76,80,233,21,6,120,68,102,3,130,60,95,108,133,215,51,32, 70,39,81,14,114,68,165,215,63,192,61,64,69,80,246,234,7,40,95,129,99,215, 10,170,244,180,232,4,66,96,8,14,44,235,101,226,213,65,33,4,76,20,113,35, 83,229,169,239,111,136,40,18,12,42,189,226,163,184,42,40,123,203,15,136, 1,146,202,65,110,84,122,253,3,204,3,142,40,97,110,84,121,165,1,0,238,70, 5,245,30,255,128,219,63,192,141,58,42,61,183,191,6,172,4,167,138,81,164, 134,171,125,17,208,164,157,159,109,188,60,217,185,108,19,105,224,19,45, 186,115,119,109,196,250,225,9,77,26,238,117,229,93,223,57,40,31,226,58, 176,13,108,2,129,155,167,5,190,3,159,129,243,8,204,255,188,249,6,24,55, 143,128,159,192,156,219,192,47,83,42,59,92,177,195,125,179,0,0,0,0,73,69, 78,68,174,66,96,130}; static size_t xml_res_size_10 = 439; static unsigned char 
xml_res_file_10[] = { 137,80,78,71,13,10,26,10,0,0,0,13,73,72,68,82,0,0,0,32,0,0,0,32,8,6,0,0, 0,115,122,122,244,0,0,1,126,73,68,65,84,120,94,237,150,77,74,28,65,20,128, 191,87,245,202,153,65,140,11,93,8,129,65,92,206,1,220,8,57,128,237,194, 147,120,130,144,64,240,4,158,68,144,209,19,184,241,0,115,0,37,155,224,34, 100,147,4,187,235,233,3,153,90,248,179,171,238,133,243,65,195,91,52,124, 31,175,10,186,197,204,24,146,192,192,8,160,159,127,92,158,183,18,26,51, 3,171,111,20,17,212,242,252,231,183,195,99,5,38,45,161,57,61,218,167,203, 153,32,84,37,27,196,16,248,122,113,211,184,91,129,113,103,153,79,163,200, 201,252,14,141,66,173,6,3,218,206,56,107,118,113,167,187,21,16,175,218, 28,41,123,91,99,82,144,106,91,112,207,67,54,119,249,236,136,2,116,217,152, 104,100,103,125,76,12,84,165,203,184,203,157,56,10,94,5,95,166,163,167, 103,74,79,184,179,4,60,199,176,88,44,48,4,17,170,96,6,130,49,155,205,150, 78,165,192,218,90,194,36,80,19,177,204,146,18,96,56,170,9,42,7,224,1,80, 156,20,80,141,61,108,64,88,242,34,32,38,76,164,114,128,189,29,144,82,36, 83,55,32,240,90,128,225,160,170,125,5,20,103,153,65,53,145,141,170,4,41, 254,87,47,97,87,57,32,10,239,95,66,49,171,28,32,111,7,196,20,145,202,103, 16,194,59,1,32,254,194,224,191,100,31,48,96,21,176,10,8,50,128,88,74,0, 41,6,250,165,56,21,176,241,255,223,215,155,223,175,14,30,186,220,203,199, 200,229,238,252,3,38,192,6,48,5,182,1,165,31,90,224,30,184,85,224,175,15, 192,47,64,232,7,3,254,185,155,161,121,4,16,150,123,216,22,243,164,88,0, 0,0,0,73,69,78,68,174,66,96,130}; static size_t xml_res_size_11 = 494; static unsigned char xml_res_file_11[] = { 137,80,78,71,13,10,26,10,0,0,0,13,73,72,68,82,0,0,0,32,0,0,0,32,8,6,0,0, 0,115,122,122,244,0,0,1,181,73,68,65,84,120,94,237,149,189,106,21,65,20, 128,191,115,230,228,238,92,52,70,208,66,16,46,98,151,60,128,133,130,15, 224,181,16,31,196,39,240,7,196,90,193,39,17,52,218,217,217,228,1,76,173, 98,35,130,34,98,140,217,153,163,167,200,101,32,245,108,44,238,7,11,135, 
109,190,111,152,101,143,0,118,249,241,235,23,163,232,210,221,193,233,139, 128,136,96,94,119,63,63,188,117,199,128,249,136,46,159,220,190,70,169,21, 21,186,82,29,146,42,247,95,238,45,195,109,64,46,94,57,55,36,238,237,126, 194,146,208,171,193,129,177,56,207,151,87,8,39,144,13,144,168,218,26,140, 171,23,50,27,42,168,244,59,253,81,245,112,197,28,136,1,148,234,204,45,113, 233,76,38,41,93,41,149,112,133,147,192,32,170,224,230,98,248,247,44,152, 138,112,174,2,170,59,193,254,254,123,28,65,68,232,129,187,35,56,219,219, 59,43,167,209,48,207,115,92,148,158,136,87,90,140,134,28,1,34,157,3,252, 100,0,126,28,48,163,210,55,64,113,90,167,53,51,121,200,20,167,43,73,90, 63,24,128,172,174,96,136,0,132,62,120,19,32,109,128,170,16,160,137,68,127, 90,167,1,204,146,240,225,219,1,63,15,71,138,123,231,43,16,206,14,22,78, 126,29,7,12,73,217,121,250,150,63,99,197,59,7,136,8,51,83,54,103,214,126, 132,240,236,238,117,74,45,8,125,113,32,105,226,193,171,189,246,79,88,57, 159,39,221,134,225,108,2,78,111,27,242,127,108,195,245,54,92,111,195,245, 54,164,217,134,99,237,91,96,42,39,175,96,245,78,19,166,116,167,117,42,192, 70,82,166,164,117,26,224,249,240,251,187,173,71,111,110,28,149,74,245,254, 39,15,121,56,127,128,11,176,9,44,128,139,128,49,13,35,240,21,248,104,192, 65,12,192,23,64,152,6,7,126,135,155,211,230,47,168,249,205,110,240,4,114, 254,0,0,0,0,73,69,78,68,174,66,96,130}; static size_t xml_res_size_12 = 1528; static unsigned char xml_res_file_12[] = { 60,63,120,109,108,32,118,101,114,115,105,111,110,61,34,49,46,48,34,32,101, 110,99,111,100,105,110,103,61,34,85,84,70,45,56,34,63,62,10,60,114,101, 115,111,117,114,99,101,32,120,109,108,110,115,61,34,104,116,116,112,58, 47,47,119,119,119,46,119,120,119,105,100,103,101,116,115,46,111,114,103, 47,119,120,120,114,99,34,62,10,32,32,60,111,98,106,101,99,116,32,99,108, 97,115,115,61,34,119,120,66,105,116,109,97,112,34,32,110,97,109,101,61, 34,100,105,102,102,45,99,111,112,121,45,102,105,108,101,45,108,101,102, 
116,45,116,111,45,114,105,103,104,116,34,62,119,120,99,114,97,102,116,101, 114,95,112,108,117,103,105,110,95,98,105,116,109,97,112,115,46,99,112,112, 36,114,101,115,111,117,114,99,101,115,95,100,105,102,102,45,99,111,112, 121,45,102,105,108,101,45,108,101,102,116,45,116,111,45,114,105,103,104, 116,46,112,110,103,60,47,111,98,106,101,99,116,62,10,32,32,60,111,98,106, 101,99,116,32,99,108,97,115,115,61,34,119,120,66,105,116,109,97,112,34, 32,110,97,109,101,61,34,100,105,102,102,45,99,111,112,121,45,102,105,108, 101,45,114,105,103,104,116,45,116,111,45,108,101,102,116,34,62,119,120, 99,114,97,102,116,101,114,95,112,108,117,103,105,110,95,98,105,116,109, 97,112,115,46,99,112,112,36,114,101,115,111,117,114,99,101,115,95,100,105, 102,102,45,99,111,112,121,45,102,105,108,101,45,114,105,103,104,116,45, 116,111,45,108,101,102,116,46,112,110,103,60,47,111,98,106,101,99,116,62, 10,32,32,60,111,98,106,101,99,116,32,99,108,97,115,115,61,34,119,120,66, 105,116,109,97,112,34,32,110,97,109,101,61,34,100,105,102,102,45,99,111, 112,121,45,108,101,102,116,45,116,111,45,114,105,103,104,116,34,62,119, 120,99,114,97,102,116,101,114,95,112,108,117,103,105,110,95,98,105,116, 109,97,112,115,46,99,112,112,36,114,101,115,111,117,114,99,101,115,95,100, 105,102,102,45,99,111,112,121,45,108,101,102,116,45,116,111,45,114,105, 103,104,116,46,112,110,103,60,47,111,98,106,101,99,116,62,10,32,32,60,111, 98,106,101,99,116,32,99,108,97,115,115,61,34,119,120,66,105,116,109,97, 112,34,32,110,97,109,101,61,34,100,105,102,102,45,99,111,112,121,45,114, 105,103,104,116,45,116,111,45,108,101,102,116,34,62,119,120,99,114,97,102, 116,101,114,95,112,108,117,103,105,110,95,98,105,116,109,97,112,115,46, 99,112,112,36,114,101,115,111,117,114,99,101,115,95,100,105,102,102,45, 99,111,112,121,45,114,105,103,104,116,45,116,111,45,108,101,102,116,46, 112,110,103,60,47,111,98,106,101,99,116,62,10,32,32,60,111,98,106,101,99, 116,32,99,108,97,115,115,61,34,119,120,66,105,116,109,97,112,34,32,110, 
97,109,101,61,34,100,105,102,102,45,110,101,120,116,34,62,119,120,99,114, 97,102,116,101,114,95,112,108,117,103,105,110,95,98,105,116,109,97,112, 115,46,99,112,112,36,114,101,115,111,117,114,99,101,115,95,100,105,102, 102,45,110,101,120,116,46,112,110,103,60,47,111,98,106,101,99,116,62,10, 32,32,60,111,98,106,101,99,116,32,99,108,97,115,115,61,34,119,120,66,105, 116,109,97,112,34,32,110,97,109,101,61,34,100,105,102,102,45,112,114,101, 118,34,62,119,120,99,114,97,102,116,101,114,95,112,108,117,103,105,110, 95,98,105,116,109,97,112,115,46,99,112,112,36,114,101,115,111,117,114,99, 101,115,95,100,105,102,102,45,112,114,101,118,46,112,110,103,60,47,111, 98,106,101,99,116,62,10,32,32,60,111,98,106,101,99,116,32,99,108,97,115, 115,61,34,119,120,66,105,116,109,97,112,34,32,110,97,109,101,61,34,100, 105,102,102,45,114,101,102,114,101,115,104,34,62,119,120,99,114,97,102, 116,101,114,95,112,108,117,103,105,110,95,98,105,116,109,97,112,115,46, 99,112,112,36,114,101,115,111,117,114,99,101,115,95,100,105,102,102,45, 114,101,102,114,101,115,104,46,112,110,103,60,47,111,98,106,101,99,116, 62,10,32,32,60,111,98,106,101,99,116,32,99,108,97,115,115,61,34,119,120, 66,105,116,109,97,112,34,32,110,97,109,101,61,34,100,105,102,102,45,115, 97,118,101,34,62,119,120,99,114,97,102,116,101,114,95,112,108,117,103,105, 110,95,98,105,116,109,97,112,115,46,99,112,112,36,114,101,115,111,117,114, 99,101,115,95,100,105,102,102,45,115,97,118,101,46,112,110,103,60,47,111, 98,106,101,99,116,62,10,32,32,60,111,98,106,101,99,116,32,99,108,97,115, 115,61,34,119,120,66,105,116,109,97,112,34,32,110,97,109,101,61,34,114, 101,115,105,122,101,34,62,119,120,99,114,97,102,116,101,114,95,112,108, 117,103,105,110,95,98,105,116,109,97,112,115,46,99,112,112,36,46,46,95, 105,99,111,110,115,95,114,101,115,105,122,101,46,112,110,103,60,47,111, 98,106,101,99,116,62,10,32,32,60,111,98,106,101,99,116,32,99,108,97,115, 115,61,34,119,120,66,105,116,109,97,112,34,32,110,97,109,101,61,34,116, 
105,108,101,95,104,111,114,105,122,111,110,116,97,108,34,62,119,120,99, 114,97,102,116,101,114,95,112,108,117,103,105,110,95,98,105,116,109,97, 112,115,46,99,112,112,36,114,101,115,111,117,114,99,101,115,95,116,105, 108,101,95,104,111,114,105,122,111,110,116,97,108,46,112,110,103,60,47, 111,98,106,101,99,116,62,10,32,32,60,111,98,106,101,99,116,32,99,108,97, 115,115,61,34,119,120,66,105,116,109,97,112,34,32,110,97,109,101,61,34, 116,105,108,101,95,115,105,110,103,108,101,34,62,119,120,99,114,97,102, 116,101,114,95,112,108,117,103,105,110,95,98,105,116,109,97,112,115,46, 99,112,112,36,114,101,115,111,117,114,99,101,115,95,116,105,108,101,95, 115,105,110,103,108,101,46,112,110,103,60,47,111,98,106,101,99,116,62,10, 32,32,60,111,98,106,101,99,116,32,99,108,97,115,115,61,34,119,120,66,105, 116,109,97,112,34,32,110,97,109,101,61,34,116,105,108,101,95,118,101,114, 116,105,99,97,108,34,62,119,120,99,114,97,102,116,101,114,95,112,108,117, 103,105,110,95,98,105,116,109,97,112,115,46,99,112,112,36,114,101,115,111, 117,114,99,101,115,95,116,105,108,101,95,118,101,114,116,105,99,97,108, 46,112,110,103,60,47,111,98,106,101,99,116,62,10,60,47,114,101,115,111, 117,114,99,101,62,10}; void wxC9D6CInitBitmapResources() { // Check for memory FS. 
If not present, load the handler: { wxMemoryFSHandler::AddFile(wxT("XRC_resource/dummy_file"), wxT("dummy one")); wxFileSystem fsys; wxFSFile *f = fsys.OpenFile(wxT("memory:XRC_resource/dummy_file")); wxMemoryFSHandler::RemoveFile(wxT("XRC_resource/dummy_file")); if (f) delete f; else wxFileSystem::AddHandler(new wxMemoryFSHandlerBase); } XRC_ADD_FILE(wxT("XRC_resource/wxcrafter_plugin_bitmaps.cpp$resources_diff-copy-file-left-to-right.png"), xml_res_file_0, xml_res_size_0, wxT("image/png")); XRC_ADD_FILE(wxT("XRC_resource/wxcrafter_plugin_bitmaps.cpp$resources_diff-copy-file-right-to-left.png"), xml_res_file_1, xml_res_size_1, wxT("image/png")); XRC_ADD_FILE(wxT("XRC_resource/wxcrafter_plugin_bitmaps.cpp$resources_diff-copy-left-to-right.png"), xml_res_file_2, xml_res_size_2, wxT("image/png")); XRC_ADD_FILE(wxT("XRC_resource/wxcrafter_plugin_bitmaps.cpp$resources_diff-copy-right-to-left.png"), xml_res_file_3, xml_res_size_3, wxT("image/png")); XRC_ADD_FILE(wxT("XRC_resource/wxcrafter_plugin_bitmaps.cpp$resources_diff-next.png"), xml_res_file_4, xml_res_size_4, wxT("image/png")); XRC_ADD_FILE(wxT("XRC_resource/wxcrafter_plugin_bitmaps.cpp$resources_diff-prev.png"), xml_res_file_5, xml_res_size_5, wxT("image/png")); XRC_ADD_FILE(wxT("XRC_resource/wxcrafter_plugin_bitmaps.cpp$resources_diff-refresh.png"), xml_res_file_6, xml_res_size_6, wxT("image/png")); XRC_ADD_FILE(wxT("XRC_resource/wxcrafter_plugin_bitmaps.cpp$resources_diff-save.png"), xml_res_file_7, xml_res_size_7, wxT("image/png")); XRC_ADD_FILE(wxT("XRC_resource/wxcrafter_plugin_bitmaps.cpp$.._icons_resize.png"), xml_res_file_8, xml_res_size_8, wxT("image/png")); XRC_ADD_FILE(wxT("XRC_resource/wxcrafter_plugin_bitmaps.cpp$resources_tile_horizontal.png"), xml_res_file_9, xml_res_size_9, wxT("image/png")); XRC_ADD_FILE(wxT("XRC_resource/wxcrafter_plugin_bitmaps.cpp$resources_tile_single.png"), xml_res_file_10, xml_res_size_10, wxT("image/png")); 
XRC_ADD_FILE(wxT("XRC_resource/wxcrafter_plugin_bitmaps.cpp$resources_tile_vertical.png"), xml_res_file_11, xml_res_size_11, wxT("image/png")); XRC_ADD_FILE(wxT("XRC_resource/wxcrafter_plugin_bitmaps.cpp$_Users_eran_devl_codelite_Plugin_wxcrafter_plugin_bitmaps.xrc"), xml_res_file_12, xml_res_size_12, wxT("text/xml")); wxXmlResource::Get()->Load(wxT("memory:XRC_resource/wxcrafter_plugin_bitmaps.cpp$_Users_eran_devl_codelite_Plugin_wxcrafter_plugin_bitmaps.xrc")); }
gpl-2.0
Buddybenj/dolphin
Source/Core/Core/IPC_HLE/WII_IPC_HLE_Device_DI.cpp
4
6225
// Copyright 2008 Dolphin Emulator Project // Licensed under GPLv2+ // Refer to the license.txt file included. #include <cinttypes> #include <memory> #include "Common/ChunkFile.h" #include "Common/CommonTypes.h" #include "Common/Logging/LogManager.h" #include "Core/ConfigManager.h" #include "Core/CoreTiming.h" #include "Core/HW/DVDInterface.h" #include "Core/HW/Memmap.h" #include "Core/HW/SystemTimers.h" #include "Core/IPC_HLE/WII_IPC_HLE.h" #include "Core/IPC_HLE/WII_IPC_HLE_Device_DI.h" static CWII_IPC_HLE_Device_di* g_di_pointer; static int ioctl_callback; static void IOCtlCallback(u64 userdata, int cycles_late) { if (g_di_pointer != nullptr) g_di_pointer->FinishIOCtl((DVDInterface::DIInterruptType)userdata); // If g_di_pointer == nullptr, IOS was probably shut down, // so the command shouldn't be completed } CWII_IPC_HLE_Device_di::CWII_IPC_HLE_Device_di(u32 _DeviceID, const std::string& _rDeviceName) : IWII_IPC_HLE_Device(_DeviceID, _rDeviceName) { if (g_di_pointer == nullptr) ERROR_LOG(WII_IPC_DVD, "Trying to run two DI devices at once. IOCtl may not behave as expected."); g_di_pointer = this; ioctl_callback = CoreTiming::RegisterEvent("IOCtlCallbackDI", IOCtlCallback); } CWII_IPC_HLE_Device_di::~CWII_IPC_HLE_Device_di() { g_di_pointer = nullptr; } void CWII_IPC_HLE_Device_di::DoState(PointerWrap& p) { DoStateShared(p); p.Do(m_commands_to_execute); } IPCCommandResult CWII_IPC_HLE_Device_di::Open(u32 _CommandAddress, u32 _Mode) { Memory::Write_U32(GetDeviceID(), _CommandAddress + 4); m_Active = true; return IPC_DEFAULT_REPLY; } IPCCommandResult CWII_IPC_HLE_Device_di::Close(u32 _CommandAddress, bool _bForce) { if (!_bForce) Memory::Write_U32(0, _CommandAddress + 4); m_Active = false; return IPC_DEFAULT_REPLY; } IPCCommandResult CWII_IPC_HLE_Device_di::IOCtl(u32 _CommandAddress) { // DI IOCtls are handled in a special way by Dolphin // compared to other WII_IPC_HLE functions. 
// This is a wrapper around DVDInterface's ExecuteCommand, // which will execute commands more or less asynchronously. // Only one command can be executed at a time, so commands // are queued until DVDInterface is ready to handle them. bool ready_to_execute = m_commands_to_execute.empty(); m_commands_to_execute.push_back(_CommandAddress); if (ready_to_execute) StartIOCtl(_CommandAddress); // DVDInterface handles the timing, and we handle the reply, // so WII_IPC_HLE shouldn't do any of that. return IPC_NO_REPLY; } void CWII_IPC_HLE_Device_di::StartIOCtl(u32 command_address) { u32 BufferIn = Memory::Read_U32(command_address + 0x10); u32 BufferInSize = Memory::Read_U32(command_address + 0x14); u32 BufferOut = Memory::Read_U32(command_address + 0x18); u32 BufferOutSize = Memory::Read_U32(command_address + 0x1C); u32 command_0 = Memory::Read_U32(BufferIn); u32 command_1 = Memory::Read_U32(BufferIn + 4); u32 command_2 = Memory::Read_U32(BufferIn + 8); DEBUG_LOG(WII_IPC_DVD, "IOCtl Command(0x%08x) BufferIn(0x%08x, 0x%x) BufferOut(0x%08x, 0x%x)", command_0, BufferIn, BufferInSize, BufferOut, BufferOutSize); // TATSUNOKO VS CAPCOM: Gets here with BufferOut == 0!!! if (BufferOut != 0) { // Set out buffer to zeroes as a safety precaution // to avoid answering nonsense values Memory::Memset(BufferOut, 0, BufferOutSize); } // DVDInterface's ExecuteCommand handles most of the work. // The IOCtl callback is used to generate a reply afterwards. 
DVDInterface::ExecuteCommand(command_0, command_1, command_2, BufferOut, BufferOutSize, false, ioctl_callback); } void CWII_IPC_HLE_Device_di::FinishIOCtl(DVDInterface::DIInterruptType interrupt_type) { if (m_commands_to_execute.empty()) { PanicAlertT("WII_IPC_HLE_Device_DI tried to reply to non-existing command"); return; } // This command has been executed, so it's removed from the queue u32 command_address = m_commands_to_execute.front(); m_commands_to_execute.pop_front(); // The DI interrupt type is used as a return value Memory::Write_U32(interrupt_type, command_address + 4); // The original hardware overwrites the command type with the async reply type. Memory::Write_U32(IPC_REP_ASYNC, command_address); // IOS also seems to write back the command that was responded to in the FD field. Memory::Write_U32(Memory::Read_U32(command_address), command_address + 8); // Generate a reply to the IPC command WII_IPC_HLE_Interface::EnqueueReply_Immediate(command_address); // DVDInterface is now ready to execute another command, // so we start executing a command from the queue if there is one if (!m_commands_to_execute.empty()) StartIOCtl(m_commands_to_execute.front()); } IPCCommandResult CWII_IPC_HLE_Device_di::IOCtlV(u32 _CommandAddress) { SIOCtlVBuffer CommandBuffer(_CommandAddress); // Prepare the out buffer(s) with zeros as a safety precaution // to avoid returning bad values for (u32 i = 0; i < CommandBuffer.NumberPayloadBuffer; i++) { Memory::Memset(CommandBuffer.PayloadBuffer[i].m_Address, 0, CommandBuffer.PayloadBuffer[i].m_Size); } u32 ReturnValue = 0; switch (CommandBuffer.Parameter) { case DVDInterface::DVDLowOpenPartition: { _dbg_assert_msg_(WII_IPC_DVD, CommandBuffer.InBuffer[1].m_Address == 0, "DVDLowOpenPartition with ticket"); _dbg_assert_msg_(WII_IPC_DVD, CommandBuffer.InBuffer[2].m_Address == 0, "DVDLowOpenPartition with cert chain"); u64 const partition_offset = ((u64)Memory::Read_U32(CommandBuffer.InBuffer[0].m_Address + 4) << 2); 
DVDInterface::ChangePartition(partition_offset); INFO_LOG(WII_IPC_DVD, "DVDLowOpenPartition: partition_offset 0x%016" PRIx64, partition_offset); // Read TMD to the buffer u32 tmd_size; std::unique_ptr<u8[]> tmd_buf = DVDInterface::GetVolume().GetTMD(&tmd_size); Memory::CopyToEmu(CommandBuffer.PayloadBuffer[0].m_Address, tmd_buf.get(), tmd_size); WII_IPC_HLE_Interface::ES_DIVerify(tmd_buf.get(), tmd_size); ReturnValue = 1; } break; default: ERROR_LOG(WII_IPC_DVD, "IOCtlV: %i", CommandBuffer.Parameter); _dbg_assert_msg_(WII_IPC_DVD, 0, "IOCtlV: %i", CommandBuffer.Parameter); break; } Memory::Write_U32(ReturnValue, _CommandAddress + 4); return IPC_DEFAULT_REPLY; }
gpl-2.0
airtimemedia/abrt
src/plugins/abrt-action-analyze-oops.c
4
3864
/* Copyright (C) 2010 ABRT team Copyright (C) 2010 RedHat Inc This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "libabrt.h" int main(int argc, char **argv) { /* I18n */ setlocale(LC_ALL, ""); #if ENABLE_NLS bindtextdomain(PACKAGE, LOCALEDIR); textdomain(PACKAGE); #endif abrt_init(argv); const char *dump_dir_name = "."; /* Can't keep these strings/structs static: _() doesn't support that */ const char *program_usage_string = _( "& [-v] -d DIR\n" "\n" "Calculates and saves UUID and DUPHASH for oops problem directory DIR" ); enum { OPT_v = 1 << 0, OPT_d = 1 << 1, }; /* Keep enum above and order of options below in sync! */ struct options program_options[] = { OPT__VERBOSE(&g_verbose), OPT_STRING('d', NULL, &dump_dir_name, "DIR", _("Problem directory")), OPT_END() }; /*unsigned opts =*/ parse_opts(argc, argv, program_options, program_usage_string); export_abrt_envvars(0); struct dump_dir *dd = dd_opendir(dump_dir_name, /*flags:*/ 0); if (!dd) return 1; map_string_t *settings = new_map_string(); load_abrt_plugin_conf_file("oops.conf", settings); char *oops = dd_load_text(dd, FILENAME_BACKTRACE); char hash_str[SHA1_RESULT_LEN*2 + 1]; int bad = koops_hash_str(hash_str, oops); if (bad) { error_msg("Can't find a meaningful backtrace for hashing in '%s'", dump_dir_name); /* Do not drop such oopses by default. 
*/ int drop_notreportable_oopses = 0; const int res = try_get_map_string_item_as_bool(settings, "DropNotReportableOopses", &drop_notreportable_oopses); if (!res || !drop_notreportable_oopses) { /* Let users know that they can configure ABRT to drop these oopses. */ log("Preserving oops '%s' because DropNotReportableOopses is 'no'", dump_dir_name); dd_save_text(dd, FILENAME_NOT_REPORTABLE, _("The backtrace does not contain enough meaningful function frames " "to be reported. It is annoying but it does not necessary " "signalize a problem with your computer. ABRT will not allow " "you to create a report in a bug tracking system but you " "can contact kernel maintainers via e-mail.") ); /* Try to generate the hash once more with no limits. */ /* We need UUID file for the local duplicates look-up and DUPHASH */ /* file is also useful because user can force ABRT to report */ /* the oops into a bug tracking system (Bugzilla). */ bad = koops_hash_str_ext(hash_str, oops, /* use no frame count limit */-1, /* use every frame in stacktrace */0); /* If even this attempt fails, we can drop the oops without any hesitation. */ } } free(oops); if (!bad) { dd_save_text(dd, FILENAME_UUID, hash_str); dd_save_text(dd, FILENAME_DUPHASH, hash_str); } dd_close(dd); free_map_string(settings); return bad; }
gpl-2.0
CRVV/OpenWrt_BB
target/linux/ar71xx/files/drivers/mtd/tplinkpart.c
260
4486
/* * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/magic.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #define TPLINK_NUM_PARTS 5 #define TPLINK_HEADER_V1 0x01000000 #define MD5SUM_LEN 16 #define TPLINK_ART_LEN 0x10000 #define TPLINK_KERNEL_OFFS 0x20000 struct tplink_fw_header { uint32_t version; /* header version */ char vendor_name[24]; char fw_version[36]; uint32_t hw_id; /* hardware id */ uint32_t hw_rev; /* hardware revision */ uint32_t unk1; uint8_t md5sum1[MD5SUM_LEN]; uint32_t unk2; uint8_t md5sum2[MD5SUM_LEN]; uint32_t unk3; uint32_t kernel_la; /* kernel load address */ uint32_t kernel_ep; /* kernel entry point */ uint32_t fw_length; /* total length of the firmware */ uint32_t kernel_ofs; /* kernel data offset */ uint32_t kernel_len; /* kernel data length */ uint32_t rootfs_ofs; /* rootfs data offset */ uint32_t rootfs_len; /* rootfs data length */ uint32_t boot_ofs; /* bootloader data offset */ uint32_t boot_len; /* bootloader data length */ uint8_t pad[360]; } __attribute__ ((packed)); static struct tplink_fw_header * tplink_read_header(struct mtd_info *mtd, size_t offset) { struct tplink_fw_header *header; size_t header_len; size_t retlen; int ret; u32 t; header = vmalloc(sizeof(*header)); if (!header) goto err; header_len = sizeof(struct tplink_fw_header); ret = mtd_read(mtd, offset, header_len, &retlen, (unsigned char *) header); if (ret) goto err_free_header; if (retlen != header_len) goto err_free_header; /* sanity checks */ t = be32_to_cpu(header->version); if (t != TPLINK_HEADER_V1) goto err_free_header; t = be32_to_cpu(header->kernel_ofs); if (t != header_len) goto err_free_header; return header; err_free_header: 
vfree(header); err: return NULL; } static int tplink_check_rootfs_magic(struct mtd_info *mtd, size_t offset) { u32 magic; size_t retlen; int ret; ret = mtd_read(mtd, offset, sizeof(magic), &retlen, (unsigned char *) &magic); if (ret) return ret; if (retlen != sizeof(magic)) return -EIO; if (le32_to_cpu(magic) != SQUASHFS_MAGIC && magic != 0x19852003) return -EINVAL; return 0; } static int tplink_parse_partitions(struct mtd_info *master, struct mtd_partition **pparts, struct mtd_part_parser_data *data) { struct mtd_partition *parts; struct tplink_fw_header *header; int nr_parts; size_t offset; size_t art_offset; size_t rootfs_offset; size_t squashfs_offset; int ret; nr_parts = TPLINK_NUM_PARTS; parts = kzalloc(nr_parts * sizeof(struct mtd_partition), GFP_KERNEL); if (!parts) { ret = -ENOMEM; goto err; } offset = TPLINK_KERNEL_OFFS; header = tplink_read_header(master, offset); if (!header) { pr_notice("%s: no TP-Link header found\n", master->name); ret = -ENODEV; goto err_free_parts; } squashfs_offset = offset + sizeof(struct tplink_fw_header) + be32_to_cpu(header->kernel_len); ret = tplink_check_rootfs_magic(master, squashfs_offset); if (ret == 0) rootfs_offset = squashfs_offset; else rootfs_offset = offset + be32_to_cpu(header->rootfs_ofs); art_offset = master->size - TPLINK_ART_LEN; parts[0].name = "u-boot"; parts[0].offset = 0; parts[0].size = offset; parts[0].mask_flags = MTD_WRITEABLE; parts[1].name = "kernel"; parts[1].offset = offset; parts[1].size = rootfs_offset - offset; parts[2].name = "rootfs"; parts[2].offset = rootfs_offset; parts[2].size = art_offset - rootfs_offset; parts[3].name = "art"; parts[3].offset = art_offset; parts[3].size = TPLINK_ART_LEN; parts[3].mask_flags = MTD_WRITEABLE; parts[4].name = "firmware"; parts[4].offset = offset; parts[4].size = art_offset - offset; vfree(header); *pparts = parts; return nr_parts; err_free_parts: kfree(parts); err: *pparts = NULL; return ret; } static struct mtd_part_parser tplink_parser = { .owner = 
THIS_MODULE, .parse_fn = tplink_parse_partitions, .name = "tp-link", }; static int __init tplink_parser_init(void) { register_mtd_parser(&tplink_parser); return 0; } module_init(tplink_parser_init); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
gpl-2.0
mrg666/nook_kernel
arch/microblaze/kernel/intc.c
516
4491
/* * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2006 Atmark Techno, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/irq.h> #include <asm/page.h> #include <linux/io.h> #include <linux/bug.h> #include <asm/prom.h> #include <asm/irq.h> #ifdef CONFIG_SELFMOD_INTC #include <asm/selfmod.h> #define INTC_BASE BARRIER_BASE_ADDR #else static unsigned int intc_baseaddr; #define INTC_BASE intc_baseaddr #endif unsigned int nr_irq; /* No one else should require these constants, so define them locally here. */ #define ISR 0x00 /* Interrupt Status Register */ #define IPR 0x04 /* Interrupt Pending Register */ #define IER 0x08 /* Interrupt Enable Register */ #define IAR 0x0c /* Interrupt Acknowledge Register */ #define SIE 0x10 /* Set Interrupt Enable bits */ #define CIE 0x14 /* Clear Interrupt Enable bits */ #define IVR 0x18 /* Interrupt Vector Register */ #define MER 0x1c /* Master Enable Register */ #define MER_ME (1<<0) #define MER_HIE (1<<1) static void intc_enable_or_unmask(unsigned int irq) { pr_debug("enable_or_unmask: %d\n", irq); out_be32(INTC_BASE + SIE, 1 << irq); } static void intc_disable_or_mask(unsigned int irq) { pr_debug("disable: %d\n", irq); out_be32(INTC_BASE + CIE, 1 << irq); } static void intc_ack(unsigned int irq) { pr_debug("ack: %d\n", irq); out_be32(INTC_BASE + IAR, 1 << irq); } static void intc_mask_ack(unsigned int irq) { unsigned long mask = 1 << irq; pr_debug("disable_and_ack: %d\n", irq); out_be32(INTC_BASE + CIE, mask); out_be32(INTC_BASE + IAR, mask); } static void intc_end(unsigned int irq) { unsigned long mask = 1 << irq; pr_debug("end: %d\n", irq); if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { out_be32(INTC_BASE + SIE, mask); /* ack level sensitive intr */ if (irq_desc[irq].status & IRQ_LEVEL) 
out_be32(INTC_BASE + IAR, mask); } } static struct irq_chip intc_dev = { .name = "Xilinx INTC", .unmask = intc_enable_or_unmask, .mask = intc_disable_or_mask, .ack = intc_ack, .mask_ack = intc_mask_ack, .end = intc_end, }; unsigned int get_irq(struct pt_regs *regs) { int irq; /* * NOTE: This function is the one that needs to be improved in * order to handle multiple interrupt controllers. It currently * is hardcoded to check for interrupts only on the first INTC. */ irq = in_be32(INTC_BASE + IVR); pr_debug("get_irq: %d\n", irq); return irq; } void __init init_IRQ(void) { u32 i, j, intr_type; struct device_node *intc = NULL; #ifdef CONFIG_SELFMOD_INTC unsigned int intc_baseaddr = 0; static int arr_func[] = { (int)&get_irq, (int)&intc_enable_or_unmask, (int)&intc_disable_or_mask, (int)&intc_mask_ack, (int)&intc_ack, (int)&intc_end, 0 }; #endif static char *intc_list[] = { "xlnx,xps-intc-1.00.a", "xlnx,opb-intc-1.00.c", "xlnx,opb-intc-1.00.b", "xlnx,opb-intc-1.00.a", NULL }; for (j = 0; intc_list[j] != NULL; j++) { intc = of_find_compatible_node(NULL, NULL, intc_list[j]); if (intc) break; } BUG_ON(!intc); intc_baseaddr = *(int *) of_get_property(intc, "reg", NULL); intc_baseaddr = (unsigned long) ioremap(intc_baseaddr, PAGE_SIZE); nr_irq = *(int *) of_get_property(intc, "xlnx,num-intr-inputs", NULL); intr_type = *(int *) of_get_property(intc, "xlnx,kind-of-intr", NULL); if (intr_type >= (1 << (nr_irq + 1))) printk(KERN_INFO " ERROR: Mismatch in kind-of-intr param\n"); #ifdef CONFIG_SELFMOD_INTC selfmod_function((int *) arr_func, intc_baseaddr); #endif printk(KERN_INFO "%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n", intc_list[j], intc_baseaddr, nr_irq, intr_type); /* * Disable all external interrupts until they are * explicity requested. */ out_be32(intc_baseaddr + IER, 0); /* Acknowledge any pending interrupts just in case. */ out_be32(intc_baseaddr + IAR, 0xffffffff); /* Turn on the Master Enable. 
*/ out_be32(intc_baseaddr + MER, MER_HIE | MER_ME); for (i = 0; i < nr_irq; ++i) { if (intr_type & (0x00000001 << i)) { set_irq_chip_and_handler_name(i, &intc_dev, handle_edge_irq, intc_dev.name); irq_desc[i].status &= ~IRQ_LEVEL; } else { set_irq_chip_and_handler_name(i, &intc_dev, handle_level_irq, intc_dev.name); irq_desc[i].status |= IRQ_LEVEL; } } }
gpl-2.0
morfes/kernel_ideos_usb_host
virt/kvm/coalesced_mmio.c
516
3603
/* * KVM coalesced MMIO * * Copyright (c) 2008 Bull S.A.S. * * Author: Laurent Vivier <Laurent.Vivier@bull.net> * */ #include "iodev.h" #include <linux/kvm_host.h> #include <linux/kvm.h> #include "coalesced_mmio.h" static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev) { return container_of(dev, struct kvm_coalesced_mmio_dev, dev); } static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev, gpa_t addr, int len) { struct kvm_coalesced_mmio_zone *zone; struct kvm_coalesced_mmio_ring *ring; unsigned avail; int i; /* Are we able to batch it ? */ /* last is the first free entry * check if we don't meet the first used entry * there is always one unused entry in the buffer */ ring = dev->kvm->coalesced_mmio_ring; avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX; if (avail < KVM_MAX_VCPUS) { /* full */ return 0; } /* is it in a batchable area ? */ for (i = 0; i < dev->nb_zones; i++) { zone = &dev->zone[i]; /* (addr,len) is fully included in * (zone->addr, zone->size) */ if (zone->addr <= addr && addr + len <= zone->addr + zone->size) return 1; } return 0; } static int coalesced_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, const void *val) { struct kvm_coalesced_mmio_dev *dev = to_mmio(this); struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring; if (!coalesced_mmio_in_range(dev, addr, len)) return -EOPNOTSUPP; spin_lock(&dev->lock); /* copy data in first free entry of the ring */ ring->coalesced_mmio[ring->last].phys_addr = addr; ring->coalesced_mmio[ring->last].len = len; memcpy(ring->coalesced_mmio[ring->last].data, val, len); smp_wmb(); ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX; spin_unlock(&dev->lock); return 0; } static void coalesced_mmio_destructor(struct kvm_io_device *this) { struct kvm_coalesced_mmio_dev *dev = to_mmio(this); kfree(dev); } static const struct kvm_io_device_ops coalesced_mmio_ops = { .write = coalesced_mmio_write, .destructor = 
coalesced_mmio_destructor, }; int kvm_coalesced_mmio_init(struct kvm *kvm) { struct kvm_coalesced_mmio_dev *dev; int ret; dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL); if (!dev) return -ENOMEM; spin_lock_init(&dev->lock); kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops); dev->kvm = kvm; kvm->coalesced_mmio_dev = dev; ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &dev->dev); if (ret < 0) kfree(dev); return ret; } int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, struct kvm_coalesced_mmio_zone *zone) { struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev; if (dev == NULL) return -EINVAL; down_write(&kvm->slots_lock); if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) { up_write(&kvm->slots_lock); return -ENOBUFS; } dev->zone[dev->nb_zones] = *zone; dev->nb_zones++; up_write(&kvm->slots_lock); return 0; } int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, struct kvm_coalesced_mmio_zone *zone) { int i; struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev; struct kvm_coalesced_mmio_zone *z; if (dev == NULL) return -EINVAL; down_write(&kvm->slots_lock); i = dev->nb_zones; while(i) { z = &dev->zone[i - 1]; /* unregister all zones * included in (zone->addr, zone->size) */ if (zone->addr <= z->addr && z->addr + z->size <= zone->addr + zone->size) { dev->nb_zones--; *z = dev->zone[dev->nb_zones]; } i--; } up_write(&kvm->slots_lock); return 0; }
gpl-2.0
burstlam/zte0624
fs/xfs/xfs_mru_cache.c
516
18217
/* * Copyright (c) 2006-2007 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_mru_cache.h" /* * The MRU Cache data structure consists of a data store, an array of lists and * a lock to protect its internal state. At initialisation time, the client * supplies an element lifetime in milliseconds and a group count, as well as a * function pointer to call when deleting elements. A data structure for * queueing up work in the form of timed callbacks is also included. * * The group count controls how many lists are created, and thereby how finely * the elements are grouped in time. When reaping occurs, all the elements in * all the lists whose time has expired are deleted. * * To give an example of how this works in practice, consider a client that * initialises an MRU Cache with a lifetime of ten seconds and a group count of * five. Five internal lists will be created, each representing a two second * period in time. When the first element is added, time zero for the data * structure is initialised to the current time. * * All the elements added in the first two seconds are appended to the first * list. Elements added in the third second go into the second list, and so on. * If an element is accessed at any point, it is removed from its list and * inserted at the head of the current most-recently-used list. 
* * The reaper function will have nothing to do until at least twelve seconds * have elapsed since the first element was added. The reason for this is that * if it were called at t=11s, there could be elements in the first list that * have only been inactive for nine seconds, so it still does nothing. If it is * called anywhere between t=12 and t=14 seconds, it will delete all the * elements that remain in the first list. It's therefore possible for elements * to remain in the data store even after they've been inactive for up to * (t + t/g) seconds, where t is the inactive element lifetime and g is the * number of groups. * * The above example assumes that the reaper function gets called at least once * every (t/g) seconds. If it is called less frequently, unused elements will * accumulate in the reap list until the reaper function is eventually called. * The current implementation uses work queue callbacks to carefully time the * reaper function calls, so this should happen rarely, if at all. * * From a design perspective, the primary reason for the choice of a list array * representing discrete time intervals is that it's only practical to reap * expired elements in groups of some appreciable size. This automatically * introduces a granularity to element lifetimes, so there's no point storing an * individual timeout with each element that specifies a more precise reap time. * The bonus is a saving of sizeof(long) bytes of memory per element stored. * * The elements could have been stored in just one list, but an array of * counters or pointers would need to be maintained to allow them to be divided * up into discrete time groups. More critically, the process of touching or * removing an element would involve walking large portions of the entire list, * which would have a detrimental effect on performance. The additional memory * requirement for the array of list heads is minimal. 
* * When an element is touched or deleted, it needs to be removed from its * current list. Doubly linked lists are used to make the list maintenance * portion of these operations O(1). Since reaper timing can be imprecise, * inserts and lookups can occur when there are no free lists available. When * this happens, all the elements on the LRU list need to be migrated to the end * of the reap list. To keep the list maintenance portion of these operations * O(1) also, list tails need to be accessible without walking the entire list. * This is the reason why doubly linked list heads are used. */ /* * An MRU Cache is a dynamic data structure that stores its elements in a way * that allows efficient lookups, but also groups them into discrete time * intervals based on insertion time. This allows elements to be efficiently * and automatically reaped after a fixed period of inactivity. * * When a client data pointer is stored in the MRU Cache it needs to be added to * both the data store and to one of the lists. It must also be possible to * access each of these entries via the other, i.e. to: * * a) Walk a list, removing the corresponding data store entry for each item. * b) Look up a data store entry, then access its list entry directly. * * To achieve both of these goals, each entry must contain both a list entry and * a key, in addition to the user's data pointer. Note that it's not a good * idea to have the client embed one of these structures at the top of their own * data structure, because inserting the same item more than once would most * likely result in a loop in one of the lists. That's a sure-fire recipe for * an infinite loop in the code. 
*/ typedef struct xfs_mru_cache_elem { struct list_head list_node; unsigned long key; void *value; } xfs_mru_cache_elem_t; static kmem_zone_t *xfs_mru_elem_zone; static struct workqueue_struct *xfs_mru_reap_wq; /* * When inserting, destroying or reaping, it's first necessary to update the * lists relative to a particular time. In the case of destroying, that time * will be well in the future to ensure that all items are moved to the reap * list. In all other cases though, the time will be the current time. * * This function enters a loop, moving the contents of the LRU list to the reap * list again and again until either a) the lists are all empty, or b) time zero * has been advanced sufficiently to be within the immediate element lifetime. * * Case a) above is detected by counting how many groups are migrated and * stopping when they've all been moved. Case b) is detected by monitoring the * time_zero field, which is updated as each group is migrated. * * The return value is the earliest time that more migration could be needed, or * zero if there's no need to schedule more work because the lists are empty. */ STATIC unsigned long _xfs_mru_cache_migrate( xfs_mru_cache_t *mru, unsigned long now) { unsigned int grp; unsigned int migrated = 0; struct list_head *lru_list; /* Nothing to do if the data store is empty. */ if (!mru->time_zero) return 0; /* While time zero is older than the time spanned by all the lists. */ while (mru->time_zero <= now - mru->grp_count * mru->grp_time) { /* * If the LRU list isn't empty, migrate its elements to the tail * of the reap list. */ lru_list = mru->lists + mru->lru_grp; if (!list_empty(lru_list)) list_splice_init(lru_list, mru->reap_list.prev); /* * Advance the LRU group number, freeing the old LRU list to * become the new MRU list; advance time zero accordingly. 
*/ mru->lru_grp = (mru->lru_grp + 1) % mru->grp_count; mru->time_zero += mru->grp_time; /* * If reaping is so far behind that all the elements on all the * lists have been migrated to the reap list, it's now empty. */ if (++migrated == mru->grp_count) { mru->lru_grp = 0; mru->time_zero = 0; return 0; } } /* Find the first non-empty list from the LRU end. */ for (grp = 0; grp < mru->grp_count; grp++) { /* Check the grp'th list from the LRU end. */ lru_list = mru->lists + ((mru->lru_grp + grp) % mru->grp_count); if (!list_empty(lru_list)) return mru->time_zero + (mru->grp_count + grp) * mru->grp_time; } /* All the lists must be empty. */ mru->lru_grp = 0; mru->time_zero = 0; return 0; } /* * When inserting or doing a lookup, an element needs to be inserted into the * MRU list. The lists must be migrated first to ensure that they're * up-to-date, otherwise the new element could be given a shorter lifetime in * the cache than it should. */ STATIC void _xfs_mru_cache_list_insert( xfs_mru_cache_t *mru, xfs_mru_cache_elem_t *elem) { unsigned int grp = 0; unsigned long now = jiffies; /* * If the data store is empty, initialise time zero, leave grp set to * zero and start the work queue timer if necessary. Otherwise, set grp * to the number of group times that have elapsed since time zero. */ if (!_xfs_mru_cache_migrate(mru, now)) { mru->time_zero = now; if (!mru->queued) { mru->queued = 1; queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_count * mru->grp_time); } } else { grp = (now - mru->time_zero) / mru->grp_time; grp = (mru->lru_grp + grp) % mru->grp_count; } /* Insert the element at the tail of the corresponding list. */ list_add_tail(&elem->list_node, mru->lists + grp); } /* * When destroying or reaping, all the elements that were migrated to the reap * list need to be deleted. For each element this involves removing it from the * data store, removing it from the reap list, calling the client's free * function and deleting the element from the element zone. 
* * We get called holding the mru->lock, which we drop and then reacquire. * Sparse need special help with this to tell it we know what we are doing. */ STATIC void _xfs_mru_cache_clear_reap_list( xfs_mru_cache_t *mru) __releases(mru->lock) __acquires(mru->lock) { xfs_mru_cache_elem_t *elem, *next; struct list_head tmp; INIT_LIST_HEAD(&tmp); list_for_each_entry_safe(elem, next, &mru->reap_list, list_node) { /* Remove the element from the data store. */ radix_tree_delete(&mru->store, elem->key); /* * remove to temp list so it can be freed without * needing to hold the lock */ list_move(&elem->list_node, &tmp); } spin_unlock(&mru->lock); list_for_each_entry_safe(elem, next, &tmp, list_node) { /* Remove the element from the reap list. */ list_del_init(&elem->list_node); /* Call the client's free function with the key and value pointer. */ mru->free_func(elem->key, elem->value); /* Free the element structure. */ kmem_zone_free(xfs_mru_elem_zone, elem); } spin_lock(&mru->lock); } /* * We fire the reap timer every group expiry interval so * we always have a reaper ready to run. This makes shutdown * and flushing of the reaper easy to do. Hence we need to * keep when the next reap must occur so we can determine * at each interval whether there is anything we need to do. 
*/ STATIC void _xfs_mru_cache_reap( struct work_struct *work) { xfs_mru_cache_t *mru = container_of(work, xfs_mru_cache_t, work.work); unsigned long now, next; ASSERT(mru && mru->lists); if (!mru || !mru->lists) return; spin_lock(&mru->lock); next = _xfs_mru_cache_migrate(mru, jiffies); _xfs_mru_cache_clear_reap_list(mru); mru->queued = next; if ((mru->queued > 0)) { now = jiffies; if (next <= now) next = 0; else next -= now; queue_delayed_work(xfs_mru_reap_wq, &mru->work, next); } spin_unlock(&mru->lock); } int xfs_mru_cache_init(void) { xfs_mru_elem_zone = kmem_zone_init(sizeof(xfs_mru_cache_elem_t), "xfs_mru_cache_elem"); if (!xfs_mru_elem_zone) goto out; xfs_mru_reap_wq = create_singlethread_workqueue("xfs_mru_cache"); if (!xfs_mru_reap_wq) goto out_destroy_mru_elem_zone; return 0; out_destroy_mru_elem_zone: kmem_zone_destroy(xfs_mru_elem_zone); out: return -ENOMEM; } void xfs_mru_cache_uninit(void) { destroy_workqueue(xfs_mru_reap_wq); kmem_zone_destroy(xfs_mru_elem_zone); } /* * To initialise a struct xfs_mru_cache pointer, call xfs_mru_cache_create() * with the address of the pointer, a lifetime value in milliseconds, a group * count and a free function to use when deleting elements. This function * returns 0 if the initialisation was successful. */ int xfs_mru_cache_create( xfs_mru_cache_t **mrup, unsigned int lifetime_ms, unsigned int grp_count, xfs_mru_cache_free_func_t free_func) { xfs_mru_cache_t *mru = NULL; int err = 0, grp; unsigned int grp_time; if (mrup) *mrup = NULL; if (!mrup || !grp_count || !lifetime_ms || !free_func) return EINVAL; if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count)) return EINVAL; if (!(mru = kmem_zalloc(sizeof(*mru), KM_SLEEP))) return ENOMEM; /* An extra list is needed to avoid reaping up to a grp_time early. 
*/ mru->grp_count = grp_count + 1; mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP); if (!mru->lists) { err = ENOMEM; goto exit; } for (grp = 0; grp < mru->grp_count; grp++) INIT_LIST_HEAD(mru->lists + grp); /* * We use GFP_KERNEL radix tree preload and do inserts under a * spinlock so GFP_ATOMIC is appropriate for the radix tree itself. */ INIT_RADIX_TREE(&mru->store, GFP_ATOMIC); INIT_LIST_HEAD(&mru->reap_list); spin_lock_init(&mru->lock); INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap); mru->grp_time = grp_time; mru->free_func = free_func; *mrup = mru; exit: if (err && mru && mru->lists) kmem_free(mru->lists); if (err && mru) kmem_free(mru); return err; } /* * Call xfs_mru_cache_flush() to flush out all cached entries, calling their * free functions as they're deleted. When this function returns, the caller is * guaranteed that all the free functions for all the elements have finished * executing and the reaper is not running. */ void xfs_mru_cache_flush( xfs_mru_cache_t *mru) { if (!mru || !mru->lists) return; spin_lock(&mru->lock); if (mru->queued) { spin_unlock(&mru->lock); cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work); spin_lock(&mru->lock); } _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time); _xfs_mru_cache_clear_reap_list(mru); spin_unlock(&mru->lock); } void xfs_mru_cache_destroy( xfs_mru_cache_t *mru) { if (!mru || !mru->lists) return; xfs_mru_cache_flush(mru); kmem_free(mru->lists); kmem_free(mru); } /* * To insert an element, call xfs_mru_cache_insert() with the data store, the * element's key and the client data pointer. This function returns 0 on * success or ENOMEM if memory for the data element couldn't be allocated. 
*/ int xfs_mru_cache_insert( xfs_mru_cache_t *mru, unsigned long key, void *value) { xfs_mru_cache_elem_t *elem; ASSERT(mru && mru->lists); if (!mru || !mru->lists) return EINVAL; elem = kmem_zone_zalloc(xfs_mru_elem_zone, KM_SLEEP); if (!elem) return ENOMEM; if (radix_tree_preload(GFP_KERNEL)) { kmem_zone_free(xfs_mru_elem_zone, elem); return ENOMEM; } INIT_LIST_HEAD(&elem->list_node); elem->key = key; elem->value = value; spin_lock(&mru->lock); radix_tree_insert(&mru->store, key, elem); radix_tree_preload_end(); _xfs_mru_cache_list_insert(mru, elem); spin_unlock(&mru->lock); return 0; } /* * To remove an element without calling the free function, call * xfs_mru_cache_remove() with the data store and the element's key. On success * the client data pointer for the removed element is returned, otherwise this * function will return a NULL pointer. */ void * xfs_mru_cache_remove( xfs_mru_cache_t *mru, unsigned long key) { xfs_mru_cache_elem_t *elem; void *value = NULL; ASSERT(mru && mru->lists); if (!mru || !mru->lists) return NULL; spin_lock(&mru->lock); elem = radix_tree_delete(&mru->store, key); if (elem) { value = elem->value; list_del(&elem->list_node); } spin_unlock(&mru->lock); if (elem) kmem_zone_free(xfs_mru_elem_zone, elem); return value; } /* * To remove and element and call the free function, call xfs_mru_cache_delete() * with the data store and the element's key. */ void xfs_mru_cache_delete( xfs_mru_cache_t *mru, unsigned long key) { void *value = xfs_mru_cache_remove(mru, key); if (value) mru->free_func(key, value); } /* * To look up an element using its key, call xfs_mru_cache_lookup() with the * data store and the element's key. If found, the element will be moved to the * head of the MRU list to indicate that it's been touched. * * The internal data structures are protected by a spinlock that is STILL HELD * when this function returns. Call xfs_mru_cache_done() to release it. 
Note * that it is not safe to call any function that might sleep in the interim. * * The implementation could have used reference counting to avoid this * restriction, but since most clients simply want to get, set or test a member * of the returned data structure, the extra per-element memory isn't warranted. * * If the element isn't found, this function returns NULL and the spinlock is * released. xfs_mru_cache_done() should NOT be called when this occurs. * * Because sparse isn't smart enough to know about conditional lock return * status, we need to help it get it right by annotating the path that does * not release the lock. */ void * xfs_mru_cache_lookup( xfs_mru_cache_t *mru, unsigned long key) { xfs_mru_cache_elem_t *elem; ASSERT(mru && mru->lists); if (!mru || !mru->lists) return NULL; spin_lock(&mru->lock); elem = radix_tree_lookup(&mru->store, key); if (elem) { list_del(&elem->list_node); _xfs_mru_cache_list_insert(mru, elem); __release(mru_lock); /* help sparse not be stupid */ } else spin_unlock(&mru->lock); return elem ? elem->value : NULL; } /* * To release the internal data structure spinlock after having performed an * xfs_mru_cache_lookup() or an xfs_mru_cache_peek(), call xfs_mru_cache_done() * with the data store pointer. */ void xfs_mru_cache_done( xfs_mru_cache_t *mru) __releases(mru->lock) { spin_unlock(&mru->lock); }
gpl-2.0
pchri03/net-next
drivers/hid/hid-wiimote-modules.c
772
65103
/* * Device Modules for Nintendo Wii / Wii U HID Driver * Copyright (c) 2011-2013 David Herrmann <dh.herrmann@gmail.com> */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ /* * Wiimote Modules * Nintendo devices provide different peripherals and many new devices lack * initial features like the IR camera. Therefore, each peripheral device is * implemented as an independent module and we probe on each device only the * modules for the hardware that really is available. * * Module registration is sequential. Unregistration is done in reverse order. * After device detection, the needed modules are loaded. Users can trigger * re-detection which causes all modules to be unloaded and then reload the * modules for the new detected device. * * wdata->input is a shared input device. It is always initialized prior to * module registration. If at least one registered module is marked as * WIIMOD_FLAG_INPUT, then the input device will get registered after all * modules were registered. * Please note that it is unregistered _before_ the "remove" callbacks are * called. This guarantees that no input interaction is done, anymore. However, * the wiimote core keeps a reference to the input device so it is freed only * after all modules were removed. It is safe to send events to unregistered * input devices. */ #include <linux/device.h> #include <linux/hid.h> #include <linux/input.h> #include <linux/spinlock.h> #include "hid-wiimote.h" /* * Keys * The initial Wii Remote provided a bunch of buttons that are reported as * part of the core protocol. Many later devices dropped these and report * invalid data in the core button reports. Load this only on devices which * correctly send button reports. * It uses the shared input device. 
*/ static const __u16 wiimod_keys_map[] = { KEY_LEFT, /* WIIPROTO_KEY_LEFT */ KEY_RIGHT, /* WIIPROTO_KEY_RIGHT */ KEY_UP, /* WIIPROTO_KEY_UP */ KEY_DOWN, /* WIIPROTO_KEY_DOWN */ KEY_NEXT, /* WIIPROTO_KEY_PLUS */ KEY_PREVIOUS, /* WIIPROTO_KEY_MINUS */ BTN_1, /* WIIPROTO_KEY_ONE */ BTN_2, /* WIIPROTO_KEY_TWO */ BTN_A, /* WIIPROTO_KEY_A */ BTN_B, /* WIIPROTO_KEY_B */ BTN_MODE, /* WIIPROTO_KEY_HOME */ }; static void wiimod_keys_in_keys(struct wiimote_data *wdata, const __u8 *keys) { input_report_key(wdata->input, wiimod_keys_map[WIIPROTO_KEY_LEFT], !!(keys[0] & 0x01)); input_report_key(wdata->input, wiimod_keys_map[WIIPROTO_KEY_RIGHT], !!(keys[0] & 0x02)); input_report_key(wdata->input, wiimod_keys_map[WIIPROTO_KEY_DOWN], !!(keys[0] & 0x04)); input_report_key(wdata->input, wiimod_keys_map[WIIPROTO_KEY_UP], !!(keys[0] & 0x08)); input_report_key(wdata->input, wiimod_keys_map[WIIPROTO_KEY_PLUS], !!(keys[0] & 0x10)); input_report_key(wdata->input, wiimod_keys_map[WIIPROTO_KEY_TWO], !!(keys[1] & 0x01)); input_report_key(wdata->input, wiimod_keys_map[WIIPROTO_KEY_ONE], !!(keys[1] & 0x02)); input_report_key(wdata->input, wiimod_keys_map[WIIPROTO_KEY_B], !!(keys[1] & 0x04)); input_report_key(wdata->input, wiimod_keys_map[WIIPROTO_KEY_A], !!(keys[1] & 0x08)); input_report_key(wdata->input, wiimod_keys_map[WIIPROTO_KEY_MINUS], !!(keys[1] & 0x10)); input_report_key(wdata->input, wiimod_keys_map[WIIPROTO_KEY_HOME], !!(keys[1] & 0x80)); input_sync(wdata->input); } static int wiimod_keys_probe(const struct wiimod_ops *ops, struct wiimote_data *wdata) { unsigned int i; set_bit(EV_KEY, wdata->input->evbit); for (i = 0; i < WIIPROTO_KEY_COUNT; ++i) set_bit(wiimod_keys_map[i], wdata->input->keybit); return 0; } static const struct wiimod_ops wiimod_keys = { .flags = WIIMOD_FLAG_INPUT, .arg = 0, .probe = wiimod_keys_probe, .remove = NULL, .in_keys = wiimod_keys_in_keys, }; /* * Rumble * Nearly all devices provide a rumble feature. A small motor for * force-feedback effects. 
We provide an FF_RUMBLE memless ff device on the * shared input device if this module is loaded. * The rumble motor is controlled via a flag on almost every output report so * the wiimote core handles the rumble flag. But if a device doesn't provide * the rumble motor, this flag shouldn't be set. */ /* used by wiimod_rumble and wiipro_rumble */ static void wiimod_rumble_worker(struct work_struct *work) { struct wiimote_data *wdata = container_of(work, struct wiimote_data, rumble_worker); spin_lock_irq(&wdata->state.lock); wiiproto_req_rumble(wdata, wdata->state.cache_rumble); spin_unlock_irq(&wdata->state.lock); } static int wiimod_rumble_play(struct input_dev *dev, void *data, struct ff_effect *eff) { struct wiimote_data *wdata = input_get_drvdata(dev); __u8 value; /* * The wiimote supports only a single rumble motor so if any magnitude * is set to non-zero then we start the rumble motor. If both are set to * zero, we stop the rumble motor. */ if (eff->u.rumble.strong_magnitude || eff->u.rumble.weak_magnitude) value = 1; else value = 0; /* Locking state.lock here might deadlock with input_event() calls. * schedule_work acts as barrier. Merging multiple changes is fine. 
*/ wdata->state.cache_rumble = value; schedule_work(&wdata->rumble_worker); return 0; } static int wiimod_rumble_probe(const struct wiimod_ops *ops, struct wiimote_data *wdata) { INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker); set_bit(FF_RUMBLE, wdata->input->ffbit); if (input_ff_create_memless(wdata->input, NULL, wiimod_rumble_play)) return -ENOMEM; return 0; } static void wiimod_rumble_remove(const struct wiimod_ops *ops, struct wiimote_data *wdata) { unsigned long flags; cancel_work_sync(&wdata->rumble_worker); spin_lock_irqsave(&wdata->state.lock, flags); wiiproto_req_rumble(wdata, 0); spin_unlock_irqrestore(&wdata->state.lock, flags); } static const struct wiimod_ops wiimod_rumble = { .flags = WIIMOD_FLAG_INPUT, .arg = 0, .probe = wiimod_rumble_probe, .remove = wiimod_rumble_remove, }; /* * Battery * 1 byte of battery capacity information is sent along every protocol status * report. The wiimote core caches it but we try to update it on every * user-space request. * This is supported by nearly every device so it's almost always enabled. 
*/ static enum power_supply_property wiimod_battery_props[] = { POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_SCOPE, }; static int wiimod_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct wiimote_data *wdata = power_supply_get_drvdata(psy); int ret = 0, state; unsigned long flags; if (psp == POWER_SUPPLY_PROP_SCOPE) { val->intval = POWER_SUPPLY_SCOPE_DEVICE; return 0; } else if (psp != POWER_SUPPLY_PROP_CAPACITY) { return -EINVAL; } ret = wiimote_cmd_acquire(wdata); if (ret) return ret; spin_lock_irqsave(&wdata->state.lock, flags); wiimote_cmd_set(wdata, WIIPROTO_REQ_SREQ, 0); wiiproto_req_status(wdata); spin_unlock_irqrestore(&wdata->state.lock, flags); wiimote_cmd_wait(wdata); wiimote_cmd_release(wdata); spin_lock_irqsave(&wdata->state.lock, flags); state = wdata->state.cmd_battery; spin_unlock_irqrestore(&wdata->state.lock, flags); val->intval = state * 100 / 255; return ret; } static int wiimod_battery_probe(const struct wiimod_ops *ops, struct wiimote_data *wdata) { struct power_supply_config psy_cfg = { .drv_data = wdata, }; int ret; wdata->battery_desc.properties = wiimod_battery_props; wdata->battery_desc.num_properties = ARRAY_SIZE(wiimod_battery_props); wdata->battery_desc.get_property = wiimod_battery_get_property; wdata->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY; wdata->battery_desc.use_for_apm = 0; wdata->battery_desc.name = kasprintf(GFP_KERNEL, "wiimote_battery_%s", wdata->hdev->uniq); if (!wdata->battery_desc.name) return -ENOMEM; wdata->battery = power_supply_register(&wdata->hdev->dev, &wdata->battery_desc, &psy_cfg); if (IS_ERR(wdata->battery)) { hid_err(wdata->hdev, "cannot register battery device\n"); ret = PTR_ERR(wdata->battery); goto err_free; } power_supply_powers(wdata->battery, &wdata->hdev->dev); return 0; err_free: kfree(wdata->battery_desc.name); wdata->battery_desc.name = NULL; return ret; } static void wiimod_battery_remove(const struct wiimod_ops *ops, struct 
wiimote_data *wdata) { if (!wdata->battery_desc.name) return; power_supply_unregister(wdata->battery); kfree(wdata->battery_desc.name); wdata->battery_desc.name = NULL; } static const struct wiimod_ops wiimod_battery = { .flags = 0, .arg = 0, .probe = wiimod_battery_probe, .remove = wiimod_battery_remove, }; /* * LED * 0 to 4 player LEDs are supported by devices. The "arg" field of the * wiimod_ops structure specifies which LED this module controls. This allows * to register a limited number of LEDs. * State is managed by wiimote core. */ static enum led_brightness wiimod_led_get(struct led_classdev *led_dev) { struct wiimote_data *wdata; struct device *dev = led_dev->dev->parent; int i; unsigned long flags; bool value = false; wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev)); for (i = 0; i < 4; ++i) { if (wdata->leds[i] == led_dev) { spin_lock_irqsave(&wdata->state.lock, flags); value = wdata->state.flags & WIIPROTO_FLAG_LED(i + 1); spin_unlock_irqrestore(&wdata->state.lock, flags); break; } } return value ? 
LED_FULL : LED_OFF; } static void wiimod_led_set(struct led_classdev *led_dev, enum led_brightness value) { struct wiimote_data *wdata; struct device *dev = led_dev->dev->parent; int i; unsigned long flags; __u8 state, flag; wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev)); for (i = 0; i < 4; ++i) { if (wdata->leds[i] == led_dev) { flag = WIIPROTO_FLAG_LED(i + 1); spin_lock_irqsave(&wdata->state.lock, flags); state = wdata->state.flags; if (value == LED_OFF) wiiproto_req_leds(wdata, state & ~flag); else wiiproto_req_leds(wdata, state | flag); spin_unlock_irqrestore(&wdata->state.lock, flags); break; } } } static int wiimod_led_probe(const struct wiimod_ops *ops, struct wiimote_data *wdata) { struct device *dev = &wdata->hdev->dev; size_t namesz = strlen(dev_name(dev)) + 9; struct led_classdev *led; unsigned long flags; char *name; int ret; led = kzalloc(sizeof(struct led_classdev) + namesz, GFP_KERNEL); if (!led) return -ENOMEM; name = (void*)&led[1]; snprintf(name, namesz, "%s:blue:p%lu", dev_name(dev), ops->arg); led->name = name; led->brightness = 0; led->max_brightness = 1; led->brightness_get = wiimod_led_get; led->brightness_set = wiimod_led_set; wdata->leds[ops->arg] = led; ret = led_classdev_register(dev, led); if (ret) goto err_free; /* enable LED1 to stop initial LED-blinking */ if (ops->arg == 0) { spin_lock_irqsave(&wdata->state.lock, flags); wiiproto_req_leds(wdata, WIIPROTO_FLAG_LED1); spin_unlock_irqrestore(&wdata->state.lock, flags); } return 0; err_free: wdata->leds[ops->arg] = NULL; kfree(led); return ret; } static void wiimod_led_remove(const struct wiimod_ops *ops, struct wiimote_data *wdata) { if (!wdata->leds[ops->arg]) return; led_classdev_unregister(wdata->leds[ops->arg]); kfree(wdata->leds[ops->arg]); wdata->leds[ops->arg] = NULL; } static const struct wiimod_ops wiimod_leds[4] = { { .flags = 0, .arg = 0, .probe = wiimod_led_probe, .remove = wiimod_led_remove, }, { .flags = 0, .arg = 1, .probe = wiimod_led_probe, .remove = 
wiimod_led_remove, }, { .flags = 0, .arg = 2, .probe = wiimod_led_probe, .remove = wiimod_led_remove, }, { .flags = 0, .arg = 3, .probe = wiimod_led_probe, .remove = wiimod_led_remove, }, }; /* * Accelerometer * 3 axis accelerometer data is part of nearly all DRMs. If not supported by a * device, it's mostly cleared to 0. This module parses this data and provides * it via a separate input device. */ static void wiimod_accel_in_accel(struct wiimote_data *wdata, const __u8 *accel) { __u16 x, y, z; if (!(wdata->state.flags & WIIPROTO_FLAG_ACCEL)) return; /* * payload is: BB BB XX YY ZZ * Accelerometer data is encoded into 3 10bit values. XX, YY and ZZ * contain the upper 8 bits of each value. The lower 2 bits are * contained in the buttons data BB BB. * Bits 6 and 7 of the first buttons byte BB is the lower 2 bits of the * X accel value. Bit 5 of the second buttons byte is the 2nd bit of Y * accel value and bit 6 is the second bit of the Z value. * The first bit of Y and Z values is not available and always set to 0. * 0x200 is returned on no movement. 
*/ x = accel[2] << 2; y = accel[3] << 2; z = accel[4] << 2; x |= (accel[0] >> 5) & 0x3; y |= (accel[1] >> 4) & 0x2; z |= (accel[1] >> 5) & 0x2; input_report_abs(wdata->accel, ABS_RX, x - 0x200); input_report_abs(wdata->accel, ABS_RY, y - 0x200); input_report_abs(wdata->accel, ABS_RZ, z - 0x200); input_sync(wdata->accel); } static int wiimod_accel_open(struct input_dev *dev) { struct wiimote_data *wdata = input_get_drvdata(dev); unsigned long flags; spin_lock_irqsave(&wdata->state.lock, flags); wiiproto_req_accel(wdata, true); spin_unlock_irqrestore(&wdata->state.lock, flags); return 0; } static void wiimod_accel_close(struct input_dev *dev) { struct wiimote_data *wdata = input_get_drvdata(dev); unsigned long flags; spin_lock_irqsave(&wdata->state.lock, flags); wiiproto_req_accel(wdata, false); spin_unlock_irqrestore(&wdata->state.lock, flags); } static int wiimod_accel_probe(const struct wiimod_ops *ops, struct wiimote_data *wdata) { int ret; wdata->accel = input_allocate_device(); if (!wdata->accel) return -ENOMEM; input_set_drvdata(wdata->accel, wdata); wdata->accel->open = wiimod_accel_open; wdata->accel->close = wiimod_accel_close; wdata->accel->dev.parent = &wdata->hdev->dev; wdata->accel->id.bustype = wdata->hdev->bus; wdata->accel->id.vendor = wdata->hdev->vendor; wdata->accel->id.product = wdata->hdev->product; wdata->accel->id.version = wdata->hdev->version; wdata->accel->name = WIIMOTE_NAME " Accelerometer"; set_bit(EV_ABS, wdata->accel->evbit); set_bit(ABS_RX, wdata->accel->absbit); set_bit(ABS_RY, wdata->accel->absbit); set_bit(ABS_RZ, wdata->accel->absbit); input_set_abs_params(wdata->accel, ABS_RX, -500, 500, 2, 4); input_set_abs_params(wdata->accel, ABS_RY, -500, 500, 2, 4); input_set_abs_params(wdata->accel, ABS_RZ, -500, 500, 2, 4); ret = input_register_device(wdata->accel); if (ret) { hid_err(wdata->hdev, "cannot register input device\n"); goto err_free; } return 0; err_free: input_free_device(wdata->accel); wdata->accel = NULL; return ret; } 
/* Unregister the accelerometer input device (no-op if never probed). */
static void wiimod_accel_remove(const struct wiimod_ops *ops,
				struct wiimote_data *wdata)
{
	if (!wdata->accel)
		return;

	input_unregister_device(wdata->accel);
	wdata->accel = NULL;
}

static const struct wiimod_ops wiimod_accel = {
	.flags = 0,
	.arg = 0,
	.probe = wiimod_accel_probe,
	.remove = wiimod_accel_remove,
	.in_accel = wiimod_accel_in_accel,
};

/*
 * IR Cam
 * Up to 4 IR sources can be tracked by a normal Wii Remote. The IR cam needs
 * to be initialized with a fairly complex procedure and consumes a lot of
 * power. Therefore, as long as no application uses the IR input device, it is
 * kept offline.
 * Nearly no other device than the normal Wii Remotes supports the IR cam so
 * you can disable this module for these devices.
 */

/* Decode one 3-byte IR source report (source index @id) onto ABS_HATn?. */
static void wiimod_ir_in_ir(struct wiimote_data *wdata, const __u8 *ir,
			    bool packed, unsigned int id)
{
	__u16 x, y;
	__u8 xid, yid;
	bool sync = false;

	if (!(wdata->state.flags & WIIPROTO_FLAGS_IR))
		return;

	switch (id) {
	case 0:
		xid = ABS_HAT0X;
		yid = ABS_HAT0Y;
		break;
	case 1:
		xid = ABS_HAT1X;
		yid = ABS_HAT1Y;
		break;
	case 2:
		xid = ABS_HAT2X;
		yid = ABS_HAT2Y;
		break;
	case 3:
		xid = ABS_HAT3X;
		yid = ABS_HAT3Y;
		/* last source of a frame; flush events only once per frame */
		sync = true;
		break;
	default:
		return;
	}

	/*
	 * Basic IR data is encoded into 3 bytes. The first two bytes are the
	 * lower 8 bit of the X/Y data, the 3rd byte contains the upper 2 bits
	 * of both.
	 * If data is packed, then the 3rd byte is put first and slightly
	 * reordered. This allows to interleave packed and non-packed data to
	 * have two IR sets in 5 bytes instead of 6.
	 * The resulting 10bit X/Y values are passed to the ABS_HAT? input dev.
	 */
	if (packed) {
		x = ir[1] | ((ir[0] & 0x03) << 8);
		y = ir[2] | ((ir[0] & 0x0c) << 6);
	} else {
		x = ir[0] | ((ir[2] & 0x30) << 4);
		y = ir[1] | ((ir[2] & 0xc0) << 2);
	}

	input_report_abs(wdata->ir, xid, x);
	input_report_abs(wdata->ir, yid, y);

	if (sync)
		input_sync(wdata->ir);
}

/*
 * Switch the IR cam into @mode (one of the WIIPROTO_FLAG_IR_* flags, or 0 to
 * turn it off). Runs the multi-step init sequence documented on WiiBrew:
 * pixel-clock enable, IR logic enable, cam enable, two sensitivity blocks,
 * data format, then start streaming. Returns 0 or a negative errno.
 */
static int wiimod_ir_change(struct wiimote_data *wdata, __u16 mode)
{
	int ret;
	unsigned long flags;
	__u8 format = 0;
	static const __u8 data_enable[] = { 0x01 };
	static const __u8 data_sens1[] = { 0x02, 0x00, 0x00, 0x71, 0x01,
						0x00, 0xaa, 0x00, 0x64 };
	static const __u8 data_sens2[] = { 0x63, 0x03 };
	static const __u8 data_fin[] = { 0x08 };

	spin_lock_irqsave(&wdata->state.lock, flags);

	/* already in the requested mode? nothing to do */
	if (mode == (wdata->state.flags & WIIPROTO_FLAGS_IR)) {
		spin_unlock_irqrestore(&wdata->state.lock, flags);
		return 0;
	}

	if (mode == 0) {
		/* power the cam down and drop back to a plain DRM */
		wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
		wiiproto_req_ir1(wdata, 0);
		wiiproto_req_ir2(wdata, 0);
		wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
		spin_unlock_irqrestore(&wdata->state.lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&wdata->state.lock, flags);

	ret = wiimote_cmd_acquire(wdata);
	if (ret)
		return ret;

	/* send PIXEL CLOCK ENABLE cmd first */
	spin_lock_irqsave(&wdata->state.lock, flags);
	wiimote_cmd_set(wdata, WIIPROTO_REQ_IR1, 0);
	wiiproto_req_ir1(wdata, 0x06);
	spin_unlock_irqrestore(&wdata->state.lock, flags);

	ret = wiimote_cmd_wait(wdata);
	if (ret)
		goto unlock;
	if (wdata->state.cmd_err) {
		ret = -EIO;
		goto unlock;
	}

	/* enable IR LOGIC */
	spin_lock_irqsave(&wdata->state.lock, flags);
	wiimote_cmd_set(wdata, WIIPROTO_REQ_IR2, 0);
	wiiproto_req_ir2(wdata, 0x06);
	spin_unlock_irqrestore(&wdata->state.lock, flags);

	ret = wiimote_cmd_wait(wdata);
	if (ret)
		goto unlock;
	if (wdata->state.cmd_err) {
		ret = -EIO;
		goto unlock;
	}

	/* enable IR cam but do not make it send data, yet */
	ret = wiimote_cmd_write(wdata, 0xb00030, data_enable,
						sizeof(data_enable));
	if (ret)
		goto unlock;

	/* write first sensitivity block */
	ret = wiimote_cmd_write(wdata, 0xb00000, data_sens1,
						sizeof(data_sens1));
	if (ret)
		goto unlock;

	/* write second sensitivity block */
	ret = wiimote_cmd_write(wdata, 0xb0001a, data_sens2,
						sizeof(data_sens2));
	if (ret)
		goto unlock;

	/* put IR cam into desired state */
	switch (mode) {
	case WIIPROTO_FLAG_IR_FULL:
		format = 5;
		break;
	case WIIPROTO_FLAG_IR_EXT:
		format = 3;
		break;
	case WIIPROTO_FLAG_IR_BASIC:
		format = 1;
		break;
	}
	ret = wiimote_cmd_write(wdata, 0xb00033, &format, sizeof(format));
	if (ret)
		goto unlock;

	/* make IR cam send data */
	ret = wiimote_cmd_write(wdata, 0xb00030, data_fin, sizeof(data_fin));
	if (ret)
		goto unlock;

	/* request new DRM mode compatible to IR mode */
	spin_lock_irqsave(&wdata->state.lock, flags);
	wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
	wdata->state.flags |= mode & WIIPROTO_FLAGS_IR;
	wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
	spin_unlock_irqrestore(&wdata->state.lock, flags);

unlock:
	wiimote_cmd_release(wdata);
	return ret;
}

/* input-core open callback: bring the IR cam up in basic mode */
static int wiimod_ir_open(struct input_dev *dev)
{
	struct wiimote_data *wdata = input_get_drvdata(dev);

	return wiimod_ir_change(wdata, WIIPROTO_FLAG_IR_BASIC);
}

/* input-core close callback: power the IR cam down again */
static void wiimod_ir_close(struct input_dev *dev)
{
	struct wiimote_data *wdata = input_get_drvdata(dev);

	wiimod_ir_change(wdata, 0);
}

/*
 * Allocate and register the IR input device. The cam itself stays offline
 * until wiimod_ir_open() runs. Returns 0 or a negative errno.
 */
static int wiimod_ir_probe(const struct wiimod_ops *ops,
			   struct wiimote_data *wdata)
{
	int ret;

	wdata->ir = input_allocate_device();
	if (!wdata->ir)
		return -ENOMEM;

	input_set_drvdata(wdata->ir, wdata);
	wdata->ir->open = wiimod_ir_open;
	wdata->ir->close = wiimod_ir_close;
	wdata->ir->dev.parent = &wdata->hdev->dev;
	wdata->ir->id.bustype = wdata->hdev->bus;
	wdata->ir->id.vendor = wdata->hdev->vendor;
	wdata->ir->id.product = wdata->hdev->product;
	wdata->ir->id.version = wdata->hdev->version;
	wdata->ir->name = WIIMOTE_NAME " IR";

	set_bit(EV_ABS, wdata->ir->evbit);
	set_bit(ABS_HAT0X, wdata->ir->absbit);
	set_bit(ABS_HAT0Y, wdata->ir->absbit);
	set_bit(ABS_HAT1X, wdata->ir->absbit);
	set_bit(ABS_HAT1Y, wdata->ir->absbit);
	set_bit(ABS_HAT2X, wdata->ir->absbit);
	set_bit(ABS_HAT2Y, wdata->ir->absbit);
	set_bit(ABS_HAT3X, wdata->ir->absbit);
	set_bit(ABS_HAT3Y, wdata->ir->absbit);
	/* cam resolution: 1024x768 */
	input_set_abs_params(wdata->ir, ABS_HAT0X, 0, 1023, 2, 4);
	input_set_abs_params(wdata->ir, ABS_HAT0Y, 0, 767, 2, 4);
	input_set_abs_params(wdata->ir, ABS_HAT1X, 0, 1023, 2, 4);
	input_set_abs_params(wdata->ir, ABS_HAT1Y, 0, 767, 2, 4);
	input_set_abs_params(wdata->ir, ABS_HAT2X, 0, 1023, 2, 4);
	input_set_abs_params(wdata->ir, ABS_HAT2Y, 0, 767, 2, 4);
	input_set_abs_params(wdata->ir, ABS_HAT3X, 0, 1023, 2, 4);
	input_set_abs_params(wdata->ir, ABS_HAT3Y, 0, 767, 2, 4);

	ret = input_register_device(wdata->ir);
	if (ret) {
		hid_err(wdata->hdev, "cannot register input device\n");
		goto err_free;
	}

	return 0;

err_free:
	input_free_device(wdata->ir);
	wdata->ir = NULL;
	return ret;
}

/* Unregister the IR input device (no-op if never probed). */
static void wiimod_ir_remove(const struct wiimod_ops *ops,
			     struct wiimote_data *wdata)
{
	if (!wdata->ir)
		return;

	input_unregister_device(wdata->ir);
	wdata->ir = NULL;
}

static const struct wiimod_ops wiimod_ir = {
	.flags = 0,
	.arg = 0,
	.probe = wiimod_ir_probe,
	.remove = wiimod_ir_remove,
	.in_ir = wiimod_ir_in_ir,
};

/*
 * Nunchuk Extension
 * The Nintendo Wii Nunchuk was the first official extension published by
 * Nintendo. It provides two additional keys and a separate accelerometer. It
 * can be hotplugged to standard Wii Remotes.
 */

enum wiimod_nunchuk_keys {
	WIIMOD_NUNCHUK_KEY_C,
	WIIMOD_NUNCHUK_KEY_Z,
	WIIMOD_NUNCHUK_KEY_NUM,
};

static const __u16 wiimod_nunchuk_map[] = {
	BTN_C,		/* WIIMOD_NUNCHUK_KEY_C */
	BTN_Z,		/* WIIMOD_NUNCHUK_KEY_Z */
};

/* Decode one 6-byte Nunchuk extension report onto the extension input dev. */
static void wiimod_nunchuk_in_ext(struct wiimote_data *wdata, const __u8 *ext)
{
	__s16 x, y, z, bx, by;

	/*   Byte |   8    7 |  6    5 |  4    3 |  2 |  1  |
	 *   -----+----------+---------+---------+----+-----+
	 *    1   |              Button X <7:0>             |
	 *    2   |              Button Y <7:0>             |
	 *   -----+----------+---------+---------+----+-----+
	 *    3   |               Speed X <9:2>             |
	 *    4   |               Speed Y <9:2>             |
	 *    5   |               Speed Z <9:2>             |
	 *   -----+----------+---------+---------+----+-----+
	 *    6   | Z <1:0>  | Y <1:0> | X <1:0> | BC | BZ  |
	 *   -----+----------+---------+---------+----+-----+
	 * Button X/Y is the analog stick. Speed X, Y and Z are the
	 * accelerometer data in the same format as the wiimote's accelerometer.
	 * The 6th byte contains the LSBs of the accelerometer data.
	 * BC and BZ are the C and Z buttons: 0 means pressed
	 *
	 * If reported interleaved with motionp, then the layout changes. The
	 * 5th and 6th byte changes to:
	 *   -----+-----------------------------------+-----+
	 *    5   |            Speed Z <9:3>          | EXT |
	 *   -----+--------+-----+-----+----+----+----+-----+
	 *    6   |Z <2:1> |Y <1>|X <1>| BC | BZ | 0  | 0   |
	 *   -----+--------+-----+-----+----+----+----+-----+
	 * All three accelerometer values lose their LSB. The other data is
	 * still available but slightly moved.
	 *
	 * Center data for button values is 128. Center value for accelerometer
	 * values is 512 / 0x200
	 */
	bx = ext[0];
	by = ext[1];
	bx -= 128;
	by -= 128;

	x = ext[2] << 2;
	y = ext[3] << 2;
	z = ext[4] << 2;

	if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) {
		/* interleaved layout: accel LSBs are dropped, bits shifted */
		x |= (ext[5] >> 3) & 0x02;
		y |= (ext[5] >> 4) & 0x02;
		z &= ~0x4;
		z |= (ext[5] >> 5) & 0x06;
	} else {
		x |= (ext[5] >> 2) & 0x03;
		y |= (ext[5] >> 4) & 0x03;
		z |= (ext[5] >> 6) & 0x03;
	}

	x -= 0x200;
	y -= 0x200;
	z -= 0x200;

	input_report_abs(wdata->extension.input, ABS_HAT0X, bx);
	input_report_abs(wdata->extension.input, ABS_HAT0Y, by);
	input_report_abs(wdata->extension.input, ABS_RX, x);
	input_report_abs(wdata->extension.input, ABS_RY, y);
	input_report_abs(wdata->extension.input, ABS_RZ, z);

	/* buttons are low-active; bit positions differ in interleaved mode */
	if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) {
		input_report_key(wdata->extension.input,
			wiimod_nunchuk_map[WIIMOD_NUNCHUK_KEY_Z],
			!(ext[5] & 0x04));
		input_report_key(wdata->extension.input,
			wiimod_nunchuk_map[WIIMOD_NUNCHUK_KEY_C],
			!(ext[5] & 0x08));
	} else {
		input_report_key(wdata->extension.input,
			wiimod_nunchuk_map[WIIMOD_NUNCHUK_KEY_Z],
			!(ext[5] & 0x01));
		input_report_key(wdata->extension.input,
			wiimod_nunchuk_map[WIIMOD_NUNCHUK_KEY_C],
			!(ext[5] & 0x02));
	}

	input_sync(wdata->extension.input);
}

/* input-core open callback: mark extension used so a suitable DRM is chosen */
static int wiimod_nunchuk_open(struct input_dev *dev)
{
	struct wiimote_data *wdata = input_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&wdata->state.lock, flags);
	wdata->state.flags |= WIIPROTO_FLAG_EXT_USED;
	wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
	spin_unlock_irqrestore(&wdata->state.lock, flags);

	return 0;
}

/* input-core close callback: extension no longer used, re-pick DRM */
static void wiimod_nunchuk_close(struct input_dev *dev)
{
	struct wiimote_data *wdata = input_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&wdata->state.lock, flags);
	wdata->state.flags &= ~WIIPROTO_FLAG_EXT_USED;
	wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
	spin_unlock_irqrestore(&wdata->state.lock, flags);
}

/*
 * Allocate and register the Nunchuk extension input device (stick on
 * ABS_HAT0*, accel on ABS_R*, C/Z keys). Returns 0 or a negative errno.
 */
static int wiimod_nunchuk_probe(const struct wiimod_ops *ops,
				struct wiimote_data *wdata)
{
	int ret, i;

	wdata->extension.input = input_allocate_device();
	if (!wdata->extension.input)
		return -ENOMEM;

	input_set_drvdata(wdata->extension.input, wdata);
	wdata->extension.input->open = wiimod_nunchuk_open;
	wdata->extension.input->close = wiimod_nunchuk_close;
	wdata->extension.input->dev.parent = &wdata->hdev->dev;
	wdata->extension.input->id.bustype = wdata->hdev->bus;
	wdata->extension.input->id.vendor = wdata->hdev->vendor;
	wdata->extension.input->id.product = wdata->hdev->product;
	wdata->extension.input->id.version = wdata->hdev->version;
	wdata->extension.input->name = WIIMOTE_NAME " Nunchuk";

	set_bit(EV_KEY, wdata->extension.input->evbit);
	for (i = 0; i < WIIMOD_NUNCHUK_KEY_NUM; ++i)
		set_bit(wiimod_nunchuk_map[i],
			wdata->extension.input->keybit);

	set_bit(EV_ABS, wdata->extension.input->evbit);
	set_bit(ABS_HAT0X, wdata->extension.input->absbit);
	set_bit(ABS_HAT0Y, wdata->extension.input->absbit);
	input_set_abs_params(wdata->extension.input,
			     ABS_HAT0X, -120, 120, 2, 4);
	input_set_abs_params(wdata->extension.input,
			     ABS_HAT0Y, -120, 120, 2, 4);
	set_bit(ABS_RX, wdata->extension.input->absbit);
	set_bit(ABS_RY, wdata->extension.input->absbit);
	set_bit(ABS_RZ, wdata->extension.input->absbit);
	input_set_abs_params(wdata->extension.input,
			     ABS_RX, -500, 500, 2, 4);
	input_set_abs_params(wdata->extension.input,
			     ABS_RY, -500, 500, 2, 4);
	input_set_abs_params(wdata->extension.input,
			     ABS_RZ, -500, 500, 2, 4);

	ret = input_register_device(wdata->extension.input);
	if (ret)
		goto err_free;

	return 0;

err_free:
	input_free_device(wdata->extension.input);
	wdata->extension.input = NULL;
	return ret;
}

/* Unregister the Nunchuk extension input device (no-op if never probed). */
static void wiimod_nunchuk_remove(const struct wiimod_ops *ops,
				  struct wiimote_data *wdata)
{
	if (!wdata->extension.input)
		return;

	input_unregister_device(wdata->extension.input);
	wdata->extension.input = NULL;
}

static const struct wiimod_ops wiimod_nunchuk = {
	.flags = 0,
	.arg = 0,
	.probe = wiimod_nunchuk_probe,
	.remove = wiimod_nunchuk_remove,
	.in_ext = wiimod_nunchuk_in_ext,
};

/*
 * Classic Controller
 * Another official
 * extension from Nintendo. It provides a classic
 * gamecube-like controller that can be hotplugged on the Wii Remote.
 * It has several hardware buttons and switches that are all reported via
 * a normal extension device.
 */

enum wiimod_classic_keys {
	WIIMOD_CLASSIC_KEY_A,
	WIIMOD_CLASSIC_KEY_B,
	WIIMOD_CLASSIC_KEY_X,
	WIIMOD_CLASSIC_KEY_Y,
	WIIMOD_CLASSIC_KEY_ZL,
	WIIMOD_CLASSIC_KEY_ZR,
	WIIMOD_CLASSIC_KEY_PLUS,
	WIIMOD_CLASSIC_KEY_MINUS,
	WIIMOD_CLASSIC_KEY_HOME,
	WIIMOD_CLASSIC_KEY_LEFT,
	WIIMOD_CLASSIC_KEY_RIGHT,
	WIIMOD_CLASSIC_KEY_UP,
	WIIMOD_CLASSIC_KEY_DOWN,
	WIIMOD_CLASSIC_KEY_LT,
	WIIMOD_CLASSIC_KEY_RT,
	WIIMOD_CLASSIC_KEY_NUM,
};

static const __u16 wiimod_classic_map[] = {
	BTN_A,		/* WIIMOD_CLASSIC_KEY_A */
	BTN_B,		/* WIIMOD_CLASSIC_KEY_B */
	BTN_X,		/* WIIMOD_CLASSIC_KEY_X */
	BTN_Y,		/* WIIMOD_CLASSIC_KEY_Y */
	BTN_TL2,	/* WIIMOD_CLASSIC_KEY_ZL */
	BTN_TR2,	/* WIIMOD_CLASSIC_KEY_ZR */
	KEY_NEXT,	/* WIIMOD_CLASSIC_KEY_PLUS */
	KEY_PREVIOUS,	/* WIIMOD_CLASSIC_KEY_MINUS */
	BTN_MODE,	/* WIIMOD_CLASSIC_KEY_HOME */
	KEY_LEFT,	/* WIIMOD_CLASSIC_KEY_LEFT */
	KEY_RIGHT,	/* WIIMOD_CLASSIC_KEY_RIGHT */
	KEY_UP,		/* WIIMOD_CLASSIC_KEY_UP */
	KEY_DOWN,	/* WIIMOD_CLASSIC_KEY_DOWN */
	BTN_TL,		/* WIIMOD_CLASSIC_KEY_LT */
	BTN_TR,		/* WIIMOD_CLASSIC_KEY_RT */
};

/* Decode one 6-byte Classic Controller report onto the extension input dev. */
static void wiimod_classic_in_ext(struct wiimote_data *wdata, const __u8 *ext)
{
	__s8 rx, ry, lx, ly, lt, rt;

	/*   Byte |  8  |  7  |  6  |  5  |  4  |  3  |  2  |  1  |
	 *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
	 *    1   | RX <5:4>  |              LX <5:0>             |
	 *    2   | RX <3:2>  |              LY <5:0>             |
	 *   -----+-----+-----+-----+-----------------------------+
	 *    3   |RX<1>| LT <5:4>  |         RY <5:1>            |
	 *   -----+-----+-----------+-----------------------------+
	 *    4   |     LT <3:1>    |         RT <5:1>            |
	 *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
	 *    5   | BDR | BDD | BLT | B-  | BH  | B+  | BRT |  1  |
	 *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
	 *    6   | BZL | BB  | BY  | BA  | BX  | BZR | BDL | BDU |
	 *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
	 * All buttons are 0 if pressed
	 * RX and RY are right analog stick
	 * LX and LY are left analog stick
	 * LT is left trigger, RT is right trigger
	 * BLT is 0 if left trigger is fully pressed
	 * BRT is 0 if right trigger is fully pressed
	 * BDR, BDD, BDL, BDU form the D-Pad with right, down, left, up buttons
	 * BZL is left Z button and BZR is right Z button
	 * B-, BH, B+ are +, HOME and - buttons
	 * BB, BY, BA, BX are the B, Y, A, X buttons
	 * LSB of RX, RY, LT, and RT are not transmitted and always 0.
	 *
	 * With motionp enabled it changes slightly to this:
	 *   Byte |  8  |  7  |  6  |  5  |  4  |  3  |  2  |  1  |
	 *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
	 *    1   | RX <5:4>  |          LX <5:1>           | BDU |
	 *    2   | RX <3:2>  |          LY <5:1>           | BDL |
	 *   -----+-----+-----+-----+-----------------------+-----+
	 *    3   |RX<1>| LT <5:4>  |         RY <5:1>            |
	 *   -----+-----+-----------+-----------------------------+
	 *    4   |     LT <3:1>    |         RT <5:1>            |
	 *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
	 *    5   | BDR | BDD | BLT | B-  | BH  | B+  | BRT | EXT |
	 *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
	 *    6   | BZL | BB  | BY  | BA  | BX  | BZR |  0  |  0  |
	 *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
	 * Only the LSBs of LX and LY are lost. BDU and BDL are moved, the rest
	 * is the same as before.
	 */
	if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) {
		/* interleaved: LX/LY LSBs carry BDU/BDL, mask them off */
		lx = ext[0] & 0x3e;
		ly = ext[1] & 0x3e;
	} else {
		lx = ext[0] & 0x3f;
		ly = ext[1] & 0x3f;
	}

	/* reassemble the scattered RX/RY/LT/RT bit fields */
	rx = (ext[0] >> 3) & 0x18;
	rx |= (ext[1] >> 5) & 0x06;
	rx |= (ext[2] >> 7) & 0x01;
	ry = ext[2] & 0x1f;

	rt = ext[3] & 0x1f;
	lt = (ext[2] >> 2) & 0x18;
	lt |= (ext[3] >> 5) & 0x07;

	rx <<= 1;
	ry <<= 1;
	rt <<= 1;
	lt <<= 1;

	input_report_abs(wdata->extension.input, ABS_HAT1X, lx - 0x20);
	input_report_abs(wdata->extension.input, ABS_HAT1Y, ly - 0x20);
	input_report_abs(wdata->extension.input, ABS_HAT2X, rx - 0x20);
	input_report_abs(wdata->extension.input, ABS_HAT2Y, ry - 0x20);
	input_report_abs(wdata->extension.input, ABS_HAT3X, rt);
	input_report_abs(wdata->extension.input, ABS_HAT3Y, lt);

	input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_RIGHT],
			 !(ext[4] & 0x80));
	input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_DOWN],
			 !(ext[4] & 0x40));
	input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_LT],
			 !(ext[4] & 0x20));
	input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_MINUS],
			 !(ext[4] & 0x10));
	input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_HOME],
			 !(ext[4] & 0x08));
	input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_PLUS],
			 !(ext[4] & 0x04));
	input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_RT],
			 !(ext[4] & 0x02));
	input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_ZL],
			 !(ext[5] & 0x80));
	input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_B],
			 !(ext[5] & 0x40));
	input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_Y],
			 !(ext[5] & 0x20));
	input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_A],
			 !(ext[5] & 0x10));
	input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_X],
			 !(ext[5] & 0x08));
	input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_ZR],
			 !(ext[5] & 0x04));

	/* BDL/BDU move out of byte 6 when motionp is interleaved */
	if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) {
		input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_LEFT],
			 !(ext[1] & 0x01));
		input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_UP],
			 !(ext[0] & 0x01));
	} else {
		input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_LEFT],
			 !(ext[5] & 0x02));
		input_report_key(wdata->extension.input,
			 wiimod_classic_map[WIIMOD_CLASSIC_KEY_UP],
			 !(ext[5] & 0x01));
	}

	input_sync(wdata->extension.input);
}

/* input-core open callback: mark extension used so a suitable DRM is chosen */
static int wiimod_classic_open(struct input_dev *dev)
{
	struct wiimote_data *wdata = input_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&wdata->state.lock, flags);
	wdata->state.flags |= WIIPROTO_FLAG_EXT_USED;
	wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
	spin_unlock_irqrestore(&wdata->state.lock, flags);

	return 0;
}

/* input-core close callback: extension no longer used, re-pick DRM */
static void wiimod_classic_close(struct input_dev *dev)
{
	struct wiimote_data *wdata = input_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&wdata->state.lock, flags);
	wdata->state.flags &= ~WIIPROTO_FLAG_EXT_USED;
	wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
	spin_unlock_irqrestore(&wdata->state.lock, flags);
}

/*
 * Allocate and register the Classic Controller extension input device
 * (sticks/triggers on ABS_HAT1*..ABS_HAT3*, buttons from wiimod_classic_map).
 * Returns 0 or a negative errno.
 */
static int wiimod_classic_probe(const struct wiimod_ops *ops,
				struct wiimote_data *wdata)
{
	int ret, i;

	wdata->extension.input = input_allocate_device();
	if (!wdata->extension.input)
		return -ENOMEM;

	input_set_drvdata(wdata->extension.input, wdata);
	wdata->extension.input->open = wiimod_classic_open;
	wdata->extension.input->close = wiimod_classic_close;
	wdata->extension.input->dev.parent = &wdata->hdev->dev;
	wdata->extension.input->id.bustype = wdata->hdev->bus;
	wdata->extension.input->id.vendor = wdata->hdev->vendor;
	wdata->extension.input->id.product = wdata->hdev->product;
	wdata->extension.input->id.version = wdata->hdev->version;
	wdata->extension.input->name = WIIMOTE_NAME " Classic Controller";

	set_bit(EV_KEY, wdata->extension.input->evbit);
	for (i = 0; i < WIIMOD_CLASSIC_KEY_NUM; ++i)
		set_bit(wiimod_classic_map[i],
			wdata->extension.input->keybit);

	set_bit(EV_ABS, wdata->extension.input->evbit);
	set_bit(ABS_HAT1X, wdata->extension.input->absbit);
	set_bit(ABS_HAT1Y, wdata->extension.input->absbit);
	set_bit(ABS_HAT2X, wdata->extension.input->absbit);
	set_bit(ABS_HAT2Y, wdata->extension.input->absbit);
	set_bit(ABS_HAT3X, wdata->extension.input->absbit);
	set_bit(ABS_HAT3Y, wdata->extension.input->absbit);
	input_set_abs_params(wdata->extension.input,
			     ABS_HAT1X, -30, 30, 1, 1);
	input_set_abs_params(wdata->extension.input,
			     ABS_HAT1Y, -30, 30, 1, 1);
	input_set_abs_params(wdata->extension.input,
			     ABS_HAT2X, -30, 30, 1, 1);
	input_set_abs_params(wdata->extension.input,
			     ABS_HAT2Y, -30, 30, 1, 1);
	input_set_abs_params(wdata->extension.input,
			     ABS_HAT3X, -30, 30, 1, 1);
	input_set_abs_params(wdata->extension.input,
			     ABS_HAT3Y, -30, 30, 1, 1);

	ret = input_register_device(wdata->extension.input);
	if (ret)
		goto err_free;

	return 0;

err_free:
	input_free_device(wdata->extension.input);
	wdata->extension.input = NULL;
	return ret;
}

/* Unregister the Classic Controller input device (no-op if never probed). */
static void wiimod_classic_remove(const struct wiimod_ops *ops,
				  struct wiimote_data *wdata)
{
	if (!wdata->extension.input)
		return;

	input_unregister_device(wdata->extension.input);
	wdata->extension.input = NULL;
}

static const struct wiimod_ops wiimod_classic = {
	.flags = 0,
	.arg = 0,
	.probe = wiimod_classic_probe,
	.remove = wiimod_classic_remove,
	.in_ext = wiimod_classic_in_ext,
};

/*
 * Balance Board Extension
 * The Nintendo Wii Balance Board provides four hardware weight sensor plus a
 * single push button. No other peripherals are available. However, the
 * balance-board data is sent via a standard Wii Remote extension. All other
 * data for non-present hardware is zeroed out.
 * Some 3rd party devices react allergic if we try to access normal Wii Remote
 * hardware, so this extension module should be the only module that is loaded
 * on balance boards.
 * The balance board needs 8 bytes extension data instead of basic 6 bytes so
 * it needs the WIIMOD_FLAG_EXT8 flag.
 */

/* Report the single Balance Board push button (active-high in keys[1]). */
static void wiimod_bboard_in_keys(struct wiimote_data *wdata, const __u8 *keys)
{
	input_report_key(wdata->extension.input, BTN_A,
			 !!(keys[1] & 0x08));
	input_sync(wdata->extension.input);
}

/*
 * Decode one 8-byte Balance Board report: four 16bit raw sensor values,
 * linearized via the per-sensor 3-point calibration stored in state.
 */
static void wiimod_bboard_in_ext(struct wiimote_data *wdata,
				 const __u8 *ext)
{
	__s32 val[4], tmp, div;
	unsigned int i;
	struct wiimote_state *s = &wdata->state;

	/*
	 * Balance board data layout:
	 *
	 *   Byte |  8  7  6  5  4  3  2  1  |
	 *   -----+--------------------------+
	 *    1   |    Top Right <15:8>      |
	 *    2   |    Top Right  <7:0>      |
	 *   -----+--------------------------+
	 *    3   | Bottom Right <15:8>      |
	 *    4   | Bottom Right  <7:0>      |
	 *   -----+--------------------------+
	 *    5   |     Top Left <15:8>      |
	 *    6   |     Top Left  <7:0>      |
	 *   -----+--------------------------+
	 *    7   |  Bottom Left <15:8>      |
	 *    8   |  Bottom Left  <7:0>      |
	 *   -----+--------------------------+
	 *
	 * These values represent the weight-measurements of the Wii-balance
	 * board with 16bit precision.
	 *
	 * The balance-board is never reported interleaved with motionp.
	 */
	val[0] = ext[0];
	val[0] <<= 8;
	val[0] |= ext[1];

	val[1] = ext[2];
	val[1] <<= 8;
	val[1] |= ext[3];

	val[2] = ext[4];
	val[2] <<= 8;
	val[2] |= ext[5];

	val[3] = ext[6];
	val[3] <<= 8;
	val[3] |= ext[7];

	/* apply calibration data */
	for (i = 0; i < 4; i++) {
		if (val[i] <= s->calib_bboard[i][0]) {
			tmp = 0;
		} else if (val[i] < s->calib_bboard[i][1]) {
			/* interpolate in the 0kg..17kg segment */
			tmp = val[i] - s->calib_bboard[i][0];
			tmp *= 1700;
			div = s->calib_bboard[i][1] - s->calib_bboard[i][0];
			tmp /= div ? div : 1;
		} else {
			/* interpolate in the 17kg..34kg segment */
			tmp = val[i] - s->calib_bboard[i][1];
			tmp *= 1700;
			div = s->calib_bboard[i][2] - s->calib_bboard[i][1];
			tmp /= div ? div : 1;
			tmp += 1700;
		}
		val[i] = tmp;
	}

	input_report_abs(wdata->extension.input, ABS_HAT0X, val[0]);
	input_report_abs(wdata->extension.input, ABS_HAT0Y, val[1]);
	input_report_abs(wdata->extension.input, ABS_HAT1X, val[2]);
	input_report_abs(wdata->extension.input, ABS_HAT1Y, val[3]);
	input_sync(wdata->extension.input);
}

/* input-core open callback: mark extension used so a suitable DRM is chosen */
static int wiimod_bboard_open(struct input_dev *dev)
{
	struct wiimote_data *wdata = input_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&wdata->state.lock, flags);
	wdata->state.flags |= WIIPROTO_FLAG_EXT_USED;
	wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
	spin_unlock_irqrestore(&wdata->state.lock, flags);

	return 0;
}

/* input-core close callback: extension no longer used, re-pick DRM */
static void wiimod_bboard_close(struct input_dev *dev)
{
	struct wiimote_data *wdata = input_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&wdata->state.lock, flags);
	wdata->state.flags &= ~WIIPROTO_FLAG_EXT_USED;
	wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
	spin_unlock_irqrestore(&wdata->state.lock, flags);
}

/*
 * sysfs 'bboard_calib' show: re-read the 24-byte calibration blob from the
 * board's EEPROM (0xa40024, two 12-byte reads), refresh the cached values
 * and print them as twelve colon-separated 16bit hex words.
 */
static ssize_t wiimod_bboard_calib_show(struct device *dev,
					struct device_attribute *attr,
					char *out)
{
	struct wiimote_data *wdata = dev_to_wii(dev);
	int i, j, ret;
	__u16 val;
	__u8 buf[24], offs;

	ret = wiimote_cmd_acquire(wdata);
	if (ret)
		return ret;

	ret = wiimote_cmd_read(wdata, 0xa40024, buf, 12);
	if (ret != 12) {
		wiimote_cmd_release(wdata);
		return ret < 0 ? ret : -EIO;
	}
	ret = wiimote_cmd_read(wdata, 0xa40024 + 12, buf + 12, 12);
	if (ret != 12) {
		wiimote_cmd_release(wdata);
		return ret < 0 ? ret : -EIO;
	}

	wiimote_cmd_release(wdata);

	spin_lock_irq(&wdata->state.lock);
	offs = 0;
	for (i = 0; i < 3; ++i) {
		for (j = 0; j < 4; ++j) {
			wdata->state.calib_bboard[j][i] = buf[offs];
			wdata->state.calib_bboard[j][i] <<= 8;
			wdata->state.calib_bboard[j][i] |= buf[offs + 1];
			offs += 2;
		}
	}
	spin_unlock_irq(&wdata->state.lock);

	ret = 0;
	for (i = 0; i < 3; ++i) {
		for (j = 0; j < 4; ++j) {
			val = wdata->state.calib_bboard[j][i];
			if (i == 2 && j == 3)
				ret += sprintf(&out[ret], "%04x\n", val);
			else
				ret += sprintf(&out[ret], "%04x:", val);
		}
	}

	return ret;
}

static DEVICE_ATTR(bboard_calib, S_IRUGO, wiimod_bboard_calib_show, NULL);

/*
 * Read the initial calibration from EEPROM, then allocate and register the
 * Balance Board input device plus its 'bboard_calib' sysfs attribute.
 * Returns 0 or a negative errno; on failure nothing stays registered.
 */
static int wiimod_bboard_probe(const struct wiimod_ops *ops,
			       struct wiimote_data *wdata)
{
	int ret, i, j;
	__u8 buf[24], offs;

	wiimote_cmd_acquire_noint(wdata);

	ret = wiimote_cmd_read(wdata, 0xa40024, buf, 12);
	if (ret != 12) {
		wiimote_cmd_release(wdata);
		return ret < 0 ? ret : -EIO;
	}
	ret = wiimote_cmd_read(wdata, 0xa40024 + 12, buf + 12, 12);
	if (ret != 12) {
		wiimote_cmd_release(wdata);
		return ret < 0 ? ret : -EIO;
	}

	wiimote_cmd_release(wdata);

	offs = 0;
	for (i = 0; i < 3; ++i) {
		for (j = 0; j < 4; ++j) {
			wdata->state.calib_bboard[j][i] = buf[offs];
			wdata->state.calib_bboard[j][i] <<= 8;
			wdata->state.calib_bboard[j][i] |= buf[offs + 1];
			offs += 2;
		}
	}

	wdata->extension.input = input_allocate_device();
	if (!wdata->extension.input)
		return -ENOMEM;

	ret = device_create_file(&wdata->hdev->dev,
				 &dev_attr_bboard_calib);
	if (ret) {
		hid_err(wdata->hdev, "cannot create sysfs attribute\n");
		goto err_free;
	}

	input_set_drvdata(wdata->extension.input, wdata);
	wdata->extension.input->open = wiimod_bboard_open;
	wdata->extension.input->close = wiimod_bboard_close;
	wdata->extension.input->dev.parent = &wdata->hdev->dev;
	wdata->extension.input->id.bustype = wdata->hdev->bus;
	wdata->extension.input->id.vendor = wdata->hdev->vendor;
	wdata->extension.input->id.product = wdata->hdev->product;
	wdata->extension.input->id.version = wdata->hdev->version;
	wdata->extension.input->name = WIIMOTE_NAME " Balance Board";

	set_bit(EV_KEY, wdata->extension.input->evbit);
	set_bit(BTN_A, wdata->extension.input->keybit);

	set_bit(EV_ABS, wdata->extension.input->evbit);
	set_bit(ABS_HAT0X, wdata->extension.input->absbit);
	set_bit(ABS_HAT0Y, wdata->extension.input->absbit);
	set_bit(ABS_HAT1X, wdata->extension.input->absbit);
	set_bit(ABS_HAT1Y, wdata->extension.input->absbit);
	input_set_abs_params(wdata->extension.input,
			     ABS_HAT0X, 0, 65535, 2, 4);
	input_set_abs_params(wdata->extension.input,
			     ABS_HAT0Y, 0, 65535, 2, 4);
	input_set_abs_params(wdata->extension.input,
			     ABS_HAT1X, 0, 65535, 2, 4);
	input_set_abs_params(wdata->extension.input,
			     ABS_HAT1Y, 0, 65535, 2, 4);

	ret = input_register_device(wdata->extension.input);
	if (ret)
		goto err_file;

	return 0;

err_file:
	device_remove_file(&wdata->hdev->dev,
			   &dev_attr_bboard_calib);
err_free:
	input_free_device(wdata->extension.input);
	wdata->extension.input = NULL;
	return ret;
}

/* Unregister the Balance Board input device and its sysfs attribute. */
static void wiimod_bboard_remove(const struct wiimod_ops *ops,
				 struct wiimote_data *wdata)
{
	if (!wdata->extension.input)
		return;

	input_unregister_device(wdata->extension.input);
	wdata->extension.input = NULL;
	device_remove_file(&wdata->hdev->dev,
			   &dev_attr_bboard_calib);
}

static const struct wiimod_ops wiimod_bboard = {
	.flags = WIIMOD_FLAG_EXT8,
	.arg = 0,
	.probe = wiimod_bboard_probe,
	.remove = wiimod_bboard_remove,
	.in_keys = wiimod_bboard_in_keys,
	.in_ext = wiimod_bboard_in_ext,
};

/*
 * Pro Controller
 * Released with the Wii U was the Nintendo Wii U Pro Controller. It does not
 * work together with the classic Wii, but only with the new Wii U. However, it
 * uses the same protocol and provides a builtin "classic controller pro"
 * extension, few standard buttons, a rumble motor, 4 LEDs and a battery.
 * We provide all these via a standard extension device as the device doesn't
 * feature an extension port.
 */

enum wiimod_pro_keys {
	WIIMOD_PRO_KEY_A,
	WIIMOD_PRO_KEY_B,
	WIIMOD_PRO_KEY_X,
	WIIMOD_PRO_KEY_Y,
	WIIMOD_PRO_KEY_PLUS,
	WIIMOD_PRO_KEY_MINUS,
	WIIMOD_PRO_KEY_HOME,
	WIIMOD_PRO_KEY_LEFT,
	WIIMOD_PRO_KEY_RIGHT,
	WIIMOD_PRO_KEY_UP,
	WIIMOD_PRO_KEY_DOWN,
	WIIMOD_PRO_KEY_TL,
	WIIMOD_PRO_KEY_TR,
	WIIMOD_PRO_KEY_ZL,
	WIIMOD_PRO_KEY_ZR,
	WIIMOD_PRO_KEY_THUMBL,
	WIIMOD_PRO_KEY_THUMBR,
	WIIMOD_PRO_KEY_NUM,
};

static const __u16 wiimod_pro_map[] = {
	BTN_EAST,	/* WIIMOD_PRO_KEY_A */
	BTN_SOUTH,	/* WIIMOD_PRO_KEY_B */
	BTN_NORTH,	/* WIIMOD_PRO_KEY_X */
	BTN_WEST,	/* WIIMOD_PRO_KEY_Y */
	BTN_START,	/* WIIMOD_PRO_KEY_PLUS */
	BTN_SELECT,	/* WIIMOD_PRO_KEY_MINUS */
	BTN_MODE,	/* WIIMOD_PRO_KEY_HOME */
	BTN_DPAD_LEFT,	/* WIIMOD_PRO_KEY_LEFT */
	BTN_DPAD_RIGHT,	/* WIIMOD_PRO_KEY_RIGHT */
	BTN_DPAD_UP,	/* WIIMOD_PRO_KEY_UP */
	BTN_DPAD_DOWN,	/* WIIMOD_PRO_KEY_DOWN */
	BTN_TL,		/* WIIMOD_PRO_KEY_TL */
	BTN_TR,		/* WIIMOD_PRO_KEY_TR */
	BTN_TL2,	/* WIIMOD_PRO_KEY_ZL */
	BTN_TR2,	/* WIIMOD_PRO_KEY_ZR */
	BTN_THUMBL,	/* WIIMOD_PRO_KEY_THUMBL */
	BTN_THUMBR,	/* WIIMOD_PRO_KEY_THUMBR */
};

/* Decode one 11-byte Pro Controller report onto the extension input dev. */
static void wiimod_pro_in_ext(struct wiimote_data *wdata, const __u8 *ext)
{
	__s16 rx, ry, lx, ly;

	/*   Byte |  8  |  7  |  6  |
	 *         5  |  4  |  3  |  2  |  1  |
	 *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
	 *    1   |                   LX <7:0>                    |
	 *   -----+-----------------------+-----------------------+
	 *    2   |  0     0     0     0  |       LX <11:8>       |
	 *   -----+-----------------------+-----------------------+
	 *    3   |                   RX <7:0>                    |
	 *   -----+-----------------------+-----------------------+
	 *    4   |  0     0     0     0  |       RX <11:8>       |
	 *   -----+-----------------------+-----------------------+
	 *    5   |                   LY <7:0>                    |
	 *   -----+-----------------------+-----------------------+
	 *    6   |  0     0     0     0  |       LY <11:8>       |
	 *   -----+-----------------------+-----------------------+
	 *    7   |                   RY <7:0>                    |
	 *   -----+-----------------------+-----------------------+
	 *    8   |  0     0     0     0  |       RY <11:8>       |
	 *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
	 *    9   | BDR | BDD | BLT | B-  | BH  | B+  | BRT |  1  |
	 *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
	 *   10   | BZL | BB  | BY  | BA  | BX  | BZR | BDL | BDU |
	 *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
	 *   11   |  1  |     BATTERY     | USB |CHARG|LTHUM|RTHUM|
	 *   -----+-----+-----------------+-----------+-----+-----+
	 * All buttons are low-active (0 if pressed)
	 * RX and RY are right analog stick
	 * LX and LY are left analog stick
	 * BLT is left trigger, BRT is right trigger.
	 * BDR, BDD, BDL, BDU form the D-Pad with right, down, left, up buttons
	 * BZL is left Z button and BZR is right Z button
	 * B-, BH, B+ are +, HOME and - buttons
	 * BB, BY, BA, BX are A, B, X, Y buttons
	 *
	 * Bits marked as 0/1 are unknown and never changed during tests.
	 *
	 * Not entirely verified:
	 *   CHARG: 1 if uncharging, 0 if charging
	 *   USB: 1 if not connected, 0 if connected
	 *   BATTERY: battery capacity from 000 (empty) to 100 (full)
	 */
	lx = (ext[0] & 0xff) | ((ext[1] & 0x0f) << 8);
	rx = (ext[2] & 0xff) | ((ext[3] & 0x0f) << 8);
	ly = (ext[4] & 0xff) | ((ext[5] & 0x0f) << 8);
	ry = (ext[6] & 0xff) | ((ext[7] & 0x0f) << 8);

	/* zero-point offsets (Y axes are inverted) */
	lx -= 0x800;
	ly = 0x800 - ly;
	rx -= 0x800;
	ry = 0x800 - ry;

	/* Trivial automatic calibration. We don't know any calibration data
	 * in the EEPROM so we must use the first report to calibrate the
	 * null-position of the analog sticks. Users can retrigger calibration
	 * via sysfs, or set it explicitly. If data is off more than abs(500),
	 * we skip calibration as the sticks are likely to be moved already. */
	if (!(wdata->state.flags & WIIPROTO_FLAG_PRO_CALIB_DONE)) {
		wdata->state.flags |= WIIPROTO_FLAG_PRO_CALIB_DONE;
		if (abs(lx) < 500)
			wdata->state.calib_pro_sticks[0] = -lx;
		if (abs(ly) < 500)
			wdata->state.calib_pro_sticks[1] = -ly;
		if (abs(rx) < 500)
			wdata->state.calib_pro_sticks[2] = -rx;
		if (abs(ry) < 500)
			wdata->state.calib_pro_sticks[3] = -ry;
	}

	/* apply calibration data */
	lx += wdata->state.calib_pro_sticks[0];
	ly += wdata->state.calib_pro_sticks[1];
	rx += wdata->state.calib_pro_sticks[2];
	ry += wdata->state.calib_pro_sticks[3];

	input_report_abs(wdata->extension.input, ABS_X, lx);
	input_report_abs(wdata->extension.input, ABS_Y, ly);
	input_report_abs(wdata->extension.input, ABS_RX, rx);
	input_report_abs(wdata->extension.input, ABS_RY, ry);

	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_RIGHT],
			 !(ext[8] & 0x80));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_DOWN],
			 !(ext[8] & 0x40));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_TL],
			 !(ext[8] & 0x20));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_MINUS],
			 !(ext[8] & 0x10));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_HOME],
			 !(ext[8] & 0x08));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_PLUS],
			 !(ext[8] & 0x04));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_TR],
			 !(ext[8] & 0x02));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_ZL],
			 !(ext[9] & 0x80));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_B],
			 !(ext[9] & 0x40));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_Y],
			 !(ext[9] & 0x20));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_A],
			 !(ext[9] & 0x10));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_X],
			 !(ext[9] & 0x08));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_ZR],
			 !(ext[9] & 0x04));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_LEFT],
			 !(ext[9] & 0x02));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_UP],
			 !(ext[9] & 0x01));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_THUMBL],
			 !(ext[10] & 0x02));
	input_report_key(wdata->extension.input,
			 wiimod_pro_map[WIIMOD_PRO_KEY_THUMBR],
			 !(ext[10] & 0x01));

	input_sync(wdata->extension.input);
}

/* input-core open callback: mark extension used so a suitable DRM is chosen */
static int wiimod_pro_open(struct input_dev *dev)
{
	struct wiimote_data *wdata = input_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&wdata->state.lock, flags);
	wdata->state.flags |= WIIPROTO_FLAG_EXT_USED;
	wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
	spin_unlock_irqrestore(&wdata->state.lock, flags);

	return 0;
}

/* input-core close callback: extension no longer used, re-pick DRM */
static void wiimod_pro_close(struct input_dev *dev)
{
	struct wiimote_data *wdata = input_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&wdata->state.lock, flags);
	wdata->state.flags &= ~WIIPROTO_FLAG_EXT_USED;
	wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
	spin_unlock_irqrestore(&wdata->state.lock, flags);
}

/*
 * force-feedback play callback: cache the on/off rumble state and defer the
 * actual request to the rumble worker (see deadlock note below).
 */
static int wiimod_pro_play(struct input_dev *dev, void *data,
			   struct ff_effect *eff)
{
	struct wiimote_data *wdata = input_get_drvdata(dev);
	__u8 value;

	/*
	 * The wiimote supports only a single rumble motor so if any magnitude
	 * is set to non-zero then we start the rumble motor. If both are set to
	 * zero, we stop the rumble motor.
	 */
	if (eff->u.rumble.strong_magnitude || eff->u.rumble.weak_magnitude)
		value = 1;
	else
		value = 0;

	/* Locking state.lock here might deadlock with input_event() calls.
	 * schedule_work acts as barrier. Merging multiple changes is fine.
	 */
	wdata->state.cache_rumble = value;
	schedule_work(&wdata->rumble_worker);

	return 0;
}

/* sysfs 'pro_calib' show: print the four stick calibration offsets */
static ssize_t wiimod_pro_calib_show(struct device *dev,
				     struct device_attribute *attr,
				     char *out)
{
	struct wiimote_data *wdata = dev_to_wii(dev);
	int r;

	r = 0;
	r += sprintf(&out[r], "%+06hd:", wdata->state.calib_pro_sticks[0]);
	r += sprintf(&out[r], "%+06hd ", wdata->state.calib_pro_sticks[1]);
	r += sprintf(&out[r], "%+06hd:", wdata->state.calib_pro_sticks[2]);
	r += sprintf(&out[r], "%+06hd\n", wdata->state.calib_pro_sticks[3]);

	return r;
}

/*
 * sysfs 'pro_calib' store: "scan" retriggers automatic calibration on the
 * next report; "X:Y X:Y" sets the four offsets explicitly.
 */
static ssize_t wiimod_pro_calib_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct wiimote_data *wdata = dev_to_wii(dev);
	int r;
	s16 x1, y1, x2, y2;

	if (!strncmp(buf, "scan\n", 5)) {
		spin_lock_irq(&wdata->state.lock);
		wdata->state.flags &= ~WIIPROTO_FLAG_PRO_CALIB_DONE;
		spin_unlock_irq(&wdata->state.lock);
	} else {
		r = sscanf(buf, "%hd:%hd %hd:%hd", &x1, &y1, &x2, &y2);
		if (r != 4)
			return -EINVAL;

		spin_lock_irq(&wdata->state.lock);
		wdata->state.flags |= WIIPROTO_FLAG_PRO_CALIB_DONE;
		spin_unlock_irq(&wdata->state.lock);

		wdata->state.calib_pro_sticks[0] = x1;
		wdata->state.calib_pro_sticks[1] = y1;
		wdata->state.calib_pro_sticks[2] = x2;
		wdata->state.calib_pro_sticks[3] = y2;
	}

	return strnlen(buf, PAGE_SIZE);
}

static DEVICE_ATTR(pro_calib, S_IRUGO|S_IWUSR|S_IWGRP,
		   wiimod_pro_calib_show, wiimod_pro_calib_store);

/*
 * Allocate and register the Pro Controller input device, including rumble
 * force-feedback and stick calibration state. (continues past this view)
 */
static int wiimod_pro_probe(const struct wiimod_ops *ops,
			    struct wiimote_data *wdata)
{
	int ret, i;
	unsigned long flags;

	INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker);
	wdata->state.calib_pro_sticks[0] = 0;
	wdata->state.calib_pro_sticks[1] = 0;
	wdata->state.calib_pro_sticks[2] = 0;
	wdata->state.calib_pro_sticks[3] = 0;

	spin_lock_irqsave(&wdata->state.lock, flags);
	wdata->state.flags &= ~WIIPROTO_FLAG_PRO_CALIB_DONE;
	spin_unlock_irqrestore(&wdata->state.lock, flags);

	wdata->extension.input = input_allocate_device();
	if (!wdata->extension.input)
		return -ENOMEM;

	set_bit(FF_RUMBLE,
wdata->extension.input->ffbit); input_set_drvdata(wdata->extension.input, wdata); if (input_ff_create_memless(wdata->extension.input, NULL, wiimod_pro_play)) { ret = -ENOMEM; goto err_free; } ret = device_create_file(&wdata->hdev->dev, &dev_attr_pro_calib); if (ret) { hid_err(wdata->hdev, "cannot create sysfs attribute\n"); goto err_free; } wdata->extension.input->open = wiimod_pro_open; wdata->extension.input->close = wiimod_pro_close; wdata->extension.input->dev.parent = &wdata->hdev->dev; wdata->extension.input->id.bustype = wdata->hdev->bus; wdata->extension.input->id.vendor = wdata->hdev->vendor; wdata->extension.input->id.product = wdata->hdev->product; wdata->extension.input->id.version = wdata->hdev->version; wdata->extension.input->name = WIIMOTE_NAME " Pro Controller"; set_bit(EV_KEY, wdata->extension.input->evbit); for (i = 0; i < WIIMOD_PRO_KEY_NUM; ++i) set_bit(wiimod_pro_map[i], wdata->extension.input->keybit); set_bit(EV_ABS, wdata->extension.input->evbit); set_bit(ABS_X, wdata->extension.input->absbit); set_bit(ABS_Y, wdata->extension.input->absbit); set_bit(ABS_RX, wdata->extension.input->absbit); set_bit(ABS_RY, wdata->extension.input->absbit); input_set_abs_params(wdata->extension.input, ABS_X, -0x400, 0x400, 4, 100); input_set_abs_params(wdata->extension.input, ABS_Y, -0x400, 0x400, 4, 100); input_set_abs_params(wdata->extension.input, ABS_RX, -0x400, 0x400, 4, 100); input_set_abs_params(wdata->extension.input, ABS_RY, -0x400, 0x400, 4, 100); ret = input_register_device(wdata->extension.input); if (ret) goto err_file; return 0; err_file: device_remove_file(&wdata->hdev->dev, &dev_attr_pro_calib); err_free: input_free_device(wdata->extension.input); wdata->extension.input = NULL; return ret; } static void wiimod_pro_remove(const struct wiimod_ops *ops, struct wiimote_data *wdata) { unsigned long flags; if (!wdata->extension.input) return; input_unregister_device(wdata->extension.input); wdata->extension.input = NULL; 
cancel_work_sync(&wdata->rumble_worker); device_remove_file(&wdata->hdev->dev, &dev_attr_pro_calib); spin_lock_irqsave(&wdata->state.lock, flags); wiiproto_req_rumble(wdata, 0); spin_unlock_irqrestore(&wdata->state.lock, flags); } static const struct wiimod_ops wiimod_pro = { .flags = WIIMOD_FLAG_EXT16, .arg = 0, .probe = wiimod_pro_probe, .remove = wiimod_pro_remove, .in_ext = wiimod_pro_in_ext, }; /* * Builtin Motion Plus * This module simply sets the WIIPROTO_FLAG_BUILTIN_MP protocol flag which * disables polling for Motion-Plus. This should be set only for devices which * don't allow MP hotplugging. */ static int wiimod_builtin_mp_probe(const struct wiimod_ops *ops, struct wiimote_data *wdata) { unsigned long flags; spin_lock_irqsave(&wdata->state.lock, flags); wdata->state.flags |= WIIPROTO_FLAG_BUILTIN_MP; spin_unlock_irqrestore(&wdata->state.lock, flags); return 0; } static void wiimod_builtin_mp_remove(const struct wiimod_ops *ops, struct wiimote_data *wdata) { unsigned long flags; spin_lock_irqsave(&wdata->state.lock, flags); wdata->state.flags |= WIIPROTO_FLAG_BUILTIN_MP; spin_unlock_irqrestore(&wdata->state.lock, flags); } static const struct wiimod_ops wiimod_builtin_mp = { .flags = 0, .arg = 0, .probe = wiimod_builtin_mp_probe, .remove = wiimod_builtin_mp_remove, }; /* * No Motion Plus * This module simply sets the WIIPROTO_FLAG_NO_MP protocol flag which * disables motion-plus. This is needed for devices that advertise this but we * don't know how to use it (or whether it is actually present). 
*/ static int wiimod_no_mp_probe(const struct wiimod_ops *ops, struct wiimote_data *wdata) { unsigned long flags; spin_lock_irqsave(&wdata->state.lock, flags); wdata->state.flags |= WIIPROTO_FLAG_NO_MP; spin_unlock_irqrestore(&wdata->state.lock, flags); return 0; } static void wiimod_no_mp_remove(const struct wiimod_ops *ops, struct wiimote_data *wdata) { unsigned long flags; spin_lock_irqsave(&wdata->state.lock, flags); wdata->state.flags |= WIIPROTO_FLAG_NO_MP; spin_unlock_irqrestore(&wdata->state.lock, flags); } static const struct wiimod_ops wiimod_no_mp = { .flags = 0, .arg = 0, .probe = wiimod_no_mp_probe, .remove = wiimod_no_mp_remove, }; /* * Motion Plus * The Motion Plus extension provides rotation sensors (gyro) as a small * extension device for Wii Remotes. Many devices have them built-in so * you cannot see them from the outside. * Motion Plus extensions are special because they are on a separate extension * port and allow other extensions to be used simultaneously. This is all * handled by the Wiimote Core so we don't have to deal with it. */ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext) { __s32 x, y, z; /* | 8 7 6 5 4 3 | 2 | 1 | * -----+------------------------------+-----+-----+ * 1 | Yaw Speed <7:0> | * 2 | Roll Speed <7:0> | * 3 | Pitch Speed <7:0> | * -----+------------------------------+-----+-----+ * 4 | Yaw Speed <13:8> | Yaw |Pitch| * -----+------------------------------+-----+-----+ * 5 | Roll Speed <13:8> |Roll | Ext | * -----+------------------------------+-----+-----+ * 6 | Pitch Speed <13:8> | 1 | 0 | * -----+------------------------------+-----+-----+ * The single bits Yaw, Roll, Pitch in the lower right corner specify * whether the wiimote is rotating fast (0) or slow (1). Speed for slow * roation is 440 deg/s and for fast rotation 2000 deg/s. To get a * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast * and 9 for slow. * If the wiimote is not rotating the sensor reports 2^13 = 8192. 
* Ext specifies whether an extension is connected to the motionp. * which is parsed by wiimote-core. */ x = ext[0]; y = ext[1]; z = ext[2]; x |= (((__u16)ext[3]) << 6) & 0xff00; y |= (((__u16)ext[4]) << 6) & 0xff00; z |= (((__u16)ext[5]) << 6) & 0xff00; x -= 8192; y -= 8192; z -= 8192; if (!(ext[3] & 0x02)) x *= 18; else x *= 9; if (!(ext[4] & 0x02)) y *= 18; else y *= 9; if (!(ext[3] & 0x01)) z *= 18; else z *= 9; input_report_abs(wdata->mp, ABS_RX, x); input_report_abs(wdata->mp, ABS_RY, y); input_report_abs(wdata->mp, ABS_RZ, z); input_sync(wdata->mp); } static int wiimod_mp_open(struct input_dev *dev) { struct wiimote_data *wdata = input_get_drvdata(dev); unsigned long flags; spin_lock_irqsave(&wdata->state.lock, flags); wdata->state.flags |= WIIPROTO_FLAG_MP_USED; wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL); __wiimote_schedule(wdata); spin_unlock_irqrestore(&wdata->state.lock, flags); return 0; } static void wiimod_mp_close(struct input_dev *dev) { struct wiimote_data *wdata = input_get_drvdata(dev); unsigned long flags; spin_lock_irqsave(&wdata->state.lock, flags); wdata->state.flags &= ~WIIPROTO_FLAG_MP_USED; wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL); __wiimote_schedule(wdata); spin_unlock_irqrestore(&wdata->state.lock, flags); } static int wiimod_mp_probe(const struct wiimod_ops *ops, struct wiimote_data *wdata) { int ret; wdata->mp = input_allocate_device(); if (!wdata->mp) return -ENOMEM; input_set_drvdata(wdata->mp, wdata); wdata->mp->open = wiimod_mp_open; wdata->mp->close = wiimod_mp_close; wdata->mp->dev.parent = &wdata->hdev->dev; wdata->mp->id.bustype = wdata->hdev->bus; wdata->mp->id.vendor = wdata->hdev->vendor; wdata->mp->id.product = wdata->hdev->product; wdata->mp->id.version = wdata->hdev->version; wdata->mp->name = WIIMOTE_NAME " Motion Plus"; set_bit(EV_ABS, wdata->mp->evbit); set_bit(ABS_RX, wdata->mp->absbit); set_bit(ABS_RY, wdata->mp->absbit); set_bit(ABS_RZ, wdata->mp->absbit); input_set_abs_params(wdata->mp, ABS_RX, -16000, 16000, 4, 
8); input_set_abs_params(wdata->mp, ABS_RY, -16000, 16000, 4, 8); input_set_abs_params(wdata->mp, ABS_RZ, -16000, 16000, 4, 8); ret = input_register_device(wdata->mp); if (ret) goto err_free; return 0; err_free: input_free_device(wdata->mp); wdata->mp = NULL; return ret; } static void wiimod_mp_remove(const struct wiimod_ops *ops, struct wiimote_data *wdata) { if (!wdata->mp) return; input_unregister_device(wdata->mp); wdata->mp = NULL; } const struct wiimod_ops wiimod_mp = { .flags = 0, .arg = 0, .probe = wiimod_mp_probe, .remove = wiimod_mp_remove, .in_mp = wiimod_mp_in_mp, }; /* module table */ static const struct wiimod_ops wiimod_dummy; const struct wiimod_ops *wiimod_table[WIIMOD_NUM] = { [WIIMOD_KEYS] = &wiimod_keys, [WIIMOD_RUMBLE] = &wiimod_rumble, [WIIMOD_BATTERY] = &wiimod_battery, [WIIMOD_LED1] = &wiimod_leds[0], [WIIMOD_LED2] = &wiimod_leds[1], [WIIMOD_LED3] = &wiimod_leds[2], [WIIMOD_LED4] = &wiimod_leds[3], [WIIMOD_ACCEL] = &wiimod_accel, [WIIMOD_IR] = &wiimod_ir, [WIIMOD_BUILTIN_MP] = &wiimod_builtin_mp, [WIIMOD_NO_MP] = &wiimod_no_mp, }; const struct wiimod_ops *wiimod_ext_table[WIIMOTE_EXT_NUM] = { [WIIMOTE_EXT_NONE] = &wiimod_dummy, [WIIMOTE_EXT_UNKNOWN] = &wiimod_dummy, [WIIMOTE_EXT_NUNCHUK] = &wiimod_nunchuk, [WIIMOTE_EXT_CLASSIC_CONTROLLER] = &wiimod_classic, [WIIMOTE_EXT_BALANCE_BOARD] = &wiimod_bboard, [WIIMOTE_EXT_PRO_CONTROLLER] = &wiimod_pro, };
gpl-2.0
yi9/linux
drivers/edac/i7core_edac.c
772
64604
/* Intel i7 core/Nehalem Memory Controller kernel module * * This driver supports the memory controllers found on the Intel * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx, * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield * and Westmere-EP. * * This file may be distributed under the terms of the * GNU General Public License version 2 only. * * Copyright (c) 2009-2010 by: * Mauro Carvalho Chehab * * Red Hat Inc. http://www.redhat.com * * Forked and adapted from the i5400_edac driver * * Based on the following public Intel datasheets: * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor * Datasheet, Volume 2: * http://download.intel.com/design/processor/datashts/320835.pdf * Intel Xeon Processor 5500 Series Datasheet Volume 2 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf * also available at: * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/dmi.h> #include <linux/edac.h> #include <linux/mmzone.h> #include <linux/smp.h> #include <asm/mce.h> #include <asm/processor.h> #include <asm/div64.h> #include "edac_core.h" /* Static vars */ static LIST_HEAD(i7core_edac_list); static DEFINE_MUTEX(i7core_edac_lock); static int probed; static int use_pci_fixup; module_param(use_pci_fixup, int, 0444); MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices"); /* * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core * registers start at bus 255, and are not reported by BIOS. * We currently find devices with only 2 sockets. In order to support more QPI * Quick Path Interconnect, just increment this number. 
*/ #define MAX_SOCKET_BUSES 2 /* * Alter this version for the module when modifications are made */ #define I7CORE_REVISION " Ver: 1.0.0" #define EDAC_MOD_STR "i7core_edac" /* * Debug macros */ #define i7core_printk(level, fmt, arg...) \ edac_printk(level, "i7core", fmt, ##arg) #define i7core_mc_printk(mci, level, fmt, arg...) \ edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg) /* * i7core Memory Controller Registers */ /* OFFSETS for Device 0 Function 0 */ #define MC_CFG_CONTROL 0x90 #define MC_CFG_UNLOCK 0x02 #define MC_CFG_LOCK 0x00 /* OFFSETS for Device 3 Function 0 */ #define MC_CONTROL 0x48 #define MC_STATUS 0x4c #define MC_MAX_DOD 0x64 /* * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet: * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf */ #define MC_TEST_ERR_RCV1 0x60 #define DIMM2_COR_ERR(r) ((r) & 0x7fff) #define MC_TEST_ERR_RCV0 0x64 #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff) #define DIMM0_COR_ERR(r) ((r) & 0x7fff) /* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */ #define MC_SSRCONTROL 0x48 #define SSR_MODE_DISABLE 0x00 #define SSR_MODE_ENABLE 0x01 #define SSR_MODE_MASK 0x03 #define MC_SCRUB_CONTROL 0x4c #define STARTSCRUB (1 << 24) #define SCRUBINTERVAL_MASK 0xffffff #define MC_COR_ECC_CNT_0 0x80 #define MC_COR_ECC_CNT_1 0x84 #define MC_COR_ECC_CNT_2 0x88 #define MC_COR_ECC_CNT_3 0x8c #define MC_COR_ECC_CNT_4 0x90 #define MC_COR_ECC_CNT_5 0x94 #define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff) #define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff) /* OFFSETS for Devices 4,5 and 6 Function 0 */ #define MC_CHANNEL_DIMM_INIT_PARAMS 0x58 #define THREE_DIMMS_PRESENT (1 << 24) #define SINGLE_QUAD_RANK_PRESENT (1 << 23) #define QUAD_RANK_PRESENT (1 << 22) #define REGISTERED_DIMM (1 << 15) #define MC_CHANNEL_MAPPER 0x60 #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1) #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1) #define MC_CHANNEL_RANK_PRESENT 0x7c #define 
RANK_PRESENT_MASK 0xffff #define MC_CHANNEL_ADDR_MATCH 0xf0 #define MC_CHANNEL_ERROR_MASK 0xf8 #define MC_CHANNEL_ERROR_INJECT 0xfc #define INJECT_ADDR_PARITY 0x10 #define INJECT_ECC 0x08 #define MASK_CACHELINE 0x06 #define MASK_FULL_CACHELINE 0x06 #define MASK_MSB32_CACHELINE 0x04 #define MASK_LSB32_CACHELINE 0x02 #define NO_MASK_CACHELINE 0x00 #define REPEAT_EN 0x01 /* OFFSETS for Devices 4,5 and 6 Function 1 */ #define MC_DOD_CH_DIMM0 0x48 #define MC_DOD_CH_DIMM1 0x4c #define MC_DOD_CH_DIMM2 0x50 #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10)) #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10) #define DIMM_PRESENT_MASK (1 << 9) #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9) #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7)) #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7) #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5)) #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5) #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2)) #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2) #define MC_DOD_NUMCOL_MASK 3 #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK) #define MC_RANK_PRESENT 0x7c #define MC_SAG_CH_0 0x80 #define MC_SAG_CH_1 0x84 #define MC_SAG_CH_2 0x88 #define MC_SAG_CH_3 0x8c #define MC_SAG_CH_4 0x90 #define MC_SAG_CH_5 0x94 #define MC_SAG_CH_6 0x98 #define MC_SAG_CH_7 0x9c #define MC_RIR_LIMIT_CH_0 0x40 #define MC_RIR_LIMIT_CH_1 0x44 #define MC_RIR_LIMIT_CH_2 0x48 #define MC_RIR_LIMIT_CH_3 0x4C #define MC_RIR_LIMIT_CH_4 0x50 #define MC_RIR_LIMIT_CH_5 0x54 #define MC_RIR_LIMIT_CH_6 0x58 #define MC_RIR_LIMIT_CH_7 0x5C #define MC_RIR_LIMIT_MASK ((1 << 10) - 1) #define MC_RIR_WAY_CH 0x80 #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7) #define MC_RIR_WAY_RANK_MASK 0x7 /* * i7core structs */ #define NUM_CHANS 3 #define MAX_DIMMS 3 /* Max DIMMS per channel */ #define MAX_MCR_FUNC 4 #define MAX_CHAN_FUNC 3 struct i7core_info { u32 mc_control; u32 mc_status; u32 max_dod; u32 ch_map; }; struct 
i7core_inject { int enable; u32 section; u32 type; u32 eccmask; /* Error address mask */ int channel, dimm, rank, bank, page, col; }; struct i7core_channel { bool is_3dimms_present; bool is_single_4rank; bool has_4rank; u32 dimms; }; struct pci_id_descr { int dev; int func; int dev_id; int optional; }; struct pci_id_table { const struct pci_id_descr *descr; int n_devs; }; struct i7core_dev { struct list_head list; u8 socket; struct pci_dev **pdev; int n_devs; struct mem_ctl_info *mci; }; struct i7core_pvt { struct device *addrmatch_dev, *chancounts_dev; struct pci_dev *pci_noncore; struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1]; struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1]; struct i7core_dev *i7core_dev; struct i7core_info info; struct i7core_inject inject; struct i7core_channel channel[NUM_CHANS]; int ce_count_available; /* ECC corrected errors counts per udimm */ unsigned long udimm_ce_count[MAX_DIMMS]; int udimm_last_ce_count[MAX_DIMMS]; /* ECC corrected errors counts per rdimm */ unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS]; int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS]; bool is_registered, enable_scrub; /* Fifo double buffers */ struct mce mce_entry[MCE_LOG_LEN]; struct mce mce_outentry[MCE_LOG_LEN]; /* Fifo in/out counters */ unsigned mce_in, mce_out; /* Count indicator to show errors not got */ unsigned mce_overrun; /* DCLK Frequency used for computing scrub rate */ int dclk_freq; /* Struct to control EDAC polling */ struct edac_pci_ctl_info *i7core_pci; }; #define PCI_DESCR(device, function, device_id) \ .dev = (device), \ .func = (function), \ .dev_id = (device_id) static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = { /* Memory controller */ { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) }, { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) }, /* Exists only for RDIMM */ { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 }, { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) }, /* Channel 0 */ { PCI_DESCR(4, 0, 
PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) }, { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) }, { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) }, { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) }, /* Channel 1 */ { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) }, { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) }, { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) }, { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) }, /* Channel 2 */ { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) }, { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) }, { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) }, { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) }, /* Generic Non-core registers */ /* * This is the PCI device on i7core and on Xeon 35xx (8086:2c41) * On Xeon 55xx, however, it has a different id (8086:2c40). So, * the probing code needs to test for the other address in case of * failure of this one */ { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) }, }; static const struct pci_id_descr pci_dev_descr_lynnfield[] = { { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) }, { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) }, { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) }, { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) }, { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) }, { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) }, { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) }, { PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) }, { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) }, { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) }, { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) }, /* * This is the PCI device has an alternate address on some * processors like Core i7 860 */ { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) }, }; static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = { /* Memory 
controller */ { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) }, { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) }, /* Exists only for RDIMM */ { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 }, { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) }, /* Channel 0 */ { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) }, { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) }, { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) }, { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) }, /* Channel 1 */ { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) }, { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) }, { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) }, { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) }, /* Channel 2 */ { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) }, { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) }, { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) }, { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) }, /* Generic Non-core registers */ { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) }, }; #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) } static const struct pci_id_table pci_dev_table[] = { PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem), PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield), PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere), {0,} /* 0 terminated list. */ }; /* * pci_device_id table for which devices we are looking for */ static const struct pci_device_id i7core_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)}, {0,} /* 0 terminated list. 
*/ }; /**************************************************************************** Ancillary status routines ****************************************************************************/ /* MC_CONTROL bits */ #define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch))) #define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1)) /* MC_STATUS bits */ #define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4)) #define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch)) /* MC_MAX_DOD read functions */ static inline int numdimms(u32 dimms) { return (dimms & 0x3) + 1; } static inline int numrank(u32 rank) { static const int ranks[] = { 1, 2, 4, -EINVAL }; return ranks[rank & 0x3]; } static inline int numbank(u32 bank) { static const int banks[] = { 4, 8, 16, -EINVAL }; return banks[bank & 0x3]; } static inline int numrow(u32 row) { static const int rows[] = { 1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, -EINVAL, -EINVAL, -EINVAL, }; return rows[row & 0x7]; } static inline int numcol(u32 col) { static const int cols[] = { 1 << 10, 1 << 11, 1 << 12, -EINVAL, }; return cols[col & 0x3]; } static struct i7core_dev *get_i7core_dev(u8 socket) { struct i7core_dev *i7core_dev; list_for_each_entry(i7core_dev, &i7core_edac_list, list) { if (i7core_dev->socket == socket) return i7core_dev; } return NULL; } static struct i7core_dev *alloc_i7core_dev(u8 socket, const struct pci_id_table *table) { struct i7core_dev *i7core_dev; i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL); if (!i7core_dev) return NULL; i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs, GFP_KERNEL); if (!i7core_dev->pdev) { kfree(i7core_dev); return NULL; } i7core_dev->socket = socket; i7core_dev->n_devs = table->n_devs; list_add_tail(&i7core_dev->list, &i7core_edac_list); return i7core_dev; } static void free_i7core_dev(struct i7core_dev *i7core_dev) { list_del(&i7core_dev->list); kfree(i7core_dev->pdev); kfree(i7core_dev); } 
/**************************************************************************** Memory check routines ****************************************************************************/ static int get_dimm_config(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; struct pci_dev *pdev; int i, j; enum edac_type mode; enum mem_type mtype; struct dimm_info *dimm; /* Get data from the MC register, function 0 */ pdev = pvt->pci_mcr[0]; if (!pdev) return -ENODEV; /* Device 3 function 0 reads */ pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control); pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status); pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod); pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map); edac_dbg(0, "QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n", pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map); if (ECC_ENABLED(pvt)) { edac_dbg(0, "ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 
8 : 4); if (ECCx8(pvt)) mode = EDAC_S8ECD8ED; else mode = EDAC_S4ECD4ED; } else { edac_dbg(0, "ECC disabled\n"); mode = EDAC_NONE; } /* FIXME: need to handle the error codes */ edac_dbg(0, "DOD Max limits: DIMMS: %d, %d-ranked, %d-banked x%x x 0x%x\n", numdimms(pvt->info.max_dod), numrank(pvt->info.max_dod >> 2), numbank(pvt->info.max_dod >> 4), numrow(pvt->info.max_dod >> 6), numcol(pvt->info.max_dod >> 9)); for (i = 0; i < NUM_CHANS; i++) { u32 data, dimm_dod[3], value[8]; if (!pvt->pci_ch[i][0]) continue; if (!CH_ACTIVE(pvt, i)) { edac_dbg(0, "Channel %i is not active\n", i); continue; } if (CH_DISABLED(pvt, i)) { edac_dbg(0, "Channel %i is disabled\n", i); continue; } /* Devices 4-6 function 0 */ pci_read_config_dword(pvt->pci_ch[i][0], MC_CHANNEL_DIMM_INIT_PARAMS, &data); if (data & THREE_DIMMS_PRESENT) pvt->channel[i].is_3dimms_present = true; if (data & SINGLE_QUAD_RANK_PRESENT) pvt->channel[i].is_single_4rank = true; if (data & QUAD_RANK_PRESENT) pvt->channel[i].has_4rank = true; if (data & REGISTERED_DIMM) mtype = MEM_RDDR3; else mtype = MEM_DDR3; /* Devices 4-6 function 1 */ pci_read_config_dword(pvt->pci_ch[i][1], MC_DOD_CH_DIMM0, &dimm_dod[0]); pci_read_config_dword(pvt->pci_ch[i][1], MC_DOD_CH_DIMM1, &dimm_dod[1]); pci_read_config_dword(pvt->pci_ch[i][1], MC_DOD_CH_DIMM2, &dimm_dod[2]); edac_dbg(0, "Ch%d phy rd%d, wr%d (0x%08x): %s%s%s%cDIMMs\n", i, RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i), data, pvt->channel[i].is_3dimms_present ? "3DIMMS " : "", pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "", pvt->channel[i].has_4rank ? "HAS_4R " : "", (data & REGISTERED_DIMM) ? 
'R' : 'U'); for (j = 0; j < 3; j++) { u32 banks, ranks, rows, cols; u32 size, npages; if (!DIMM_PRESENT(dimm_dod[j])) continue; dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0); banks = numbank(MC_DOD_NUMBANK(dimm_dod[j])); ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j])); rows = numrow(MC_DOD_NUMROW(dimm_dod[j])); cols = numcol(MC_DOD_NUMCOL(dimm_dod[j])); /* DDR3 has 8 I/O banks */ size = (rows * cols * banks * ranks) >> (20 - 3); edac_dbg(0, "\tdimm %d %d Mb offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n", j, size, RANKOFFSET(dimm_dod[j]), banks, ranks, rows, cols); npages = MiB_TO_PAGES(size); dimm->nr_pages = npages; switch (banks) { case 4: dimm->dtype = DEV_X4; break; case 8: dimm->dtype = DEV_X8; break; case 16: dimm->dtype = DEV_X16; break; default: dimm->dtype = DEV_UNKNOWN; } snprintf(dimm->label, sizeof(dimm->label), "CPU#%uChannel#%u_DIMM#%u", pvt->i7core_dev->socket, i, j); dimm->grain = 8; dimm->edac_mode = mode; dimm->mtype = mtype; } pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]); pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]); pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]); pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]); pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]); pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]); pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]); pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]); edac_dbg(1, "\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i); for (j = 0; j < 8; j++) edac_dbg(1, "\t\t%#x\t%#x\t%#x\n", (value[j] >> 27) & 0x1, (value[j] >> 24) & 0x7, (value[j] & ((1 << 24) - 1))); } return 0; } /**************************************************************************** Error insertion routines ****************************************************************************/ #define to_mci(k) container_of(k, struct mem_ctl_info, dev) /* The i7core has independent error injection features per channel. 
However, to have a simpler code, we don't allow enabling error injection on more than one channel. Also, since a change at an inject parameter will be applied only at enable, we're disabling error injection on all write calls to the sysfs nodes that controls the error code injection. */ static int disable_inject(const struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; pvt->inject.enable = 0; if (!pvt->pci_ch[pvt->inject.channel][0]) return -ENODEV; pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ERROR_INJECT, 0); return 0; } /* * i7core inject inject.section * * accept and store error injection inject.section value * bit 0 - refers to the lower 32-byte half cacheline * bit 1 - refers to the upper 32-byte half cacheline */ static ssize_t i7core_inject_section_store(struct device *dev, struct device_attribute *mattr, const char *data, size_t count) { struct mem_ctl_info *mci = to_mci(dev); struct i7core_pvt *pvt = mci->pvt_info; unsigned long value; int rc; if (pvt->inject.enable) disable_inject(mci); rc = kstrtoul(data, 10, &value); if ((rc < 0) || (value > 3)) return -EIO; pvt->inject.section = (u32) value; return count; } static ssize_t i7core_inject_section_show(struct device *dev, struct device_attribute *mattr, char *data) { struct mem_ctl_info *mci = to_mci(dev); struct i7core_pvt *pvt = mci->pvt_info; return sprintf(data, "0x%08x\n", pvt->inject.section); } /* * i7core inject.type * * accept and store error injection inject.section value * bit 0 - repeat enable - Enable error repetition * bit 1 - inject ECC error * bit 2 - inject parity error */ static ssize_t i7core_inject_type_store(struct device *dev, struct device_attribute *mattr, const char *data, size_t count) { struct mem_ctl_info *mci = to_mci(dev); struct i7core_pvt *pvt = mci->pvt_info; unsigned long value; int rc; if (pvt->inject.enable) disable_inject(mci); rc = kstrtoul(data, 10, &value); if ((rc < 0) || (value > 7)) return -EIO; pvt->inject.type = (u32) 
value;
	return count;
}

/* Show the currently stored injection type bits. */
static ssize_t i7core_inject_type_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;

	return sprintf(data, "0x%08x\n", pvt->inject.type);
}

/*
 * i7core_inject_inject.eccmask_store
 *
 * The type of error (UE/CE) will depend on the inject.eccmask value:
 *   Any bits set to a 1 will flip the corresponding ECC bit
 *   Correctable errors can be injected by flipping 1 bit or the bits within
 *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
 *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
 *   uncorrectable error to be injected.
 */
static ssize_t i7core_inject_eccmask_store(struct device *dev,
					   struct device_attribute *mattr,
					   const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	/* Any 32-bit flip mask is valid, so only the parse result is checked. */
	rc = kstrtoul(data, 10, &value);
	if (rc < 0)
		return -EIO;

	pvt->inject.eccmask = (u32) value;
	return count;
}

/* Show the currently stored ECC flip mask. */
static ssize_t i7core_inject_eccmask_show(struct device *dev,
					  struct device_attribute *mattr,
					  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;

	return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
}

/*
 * i7core_addrmatch
 *
 * The type of error (UE/CE) will depend on the inject.eccmask value:
 *   Any bits set to a 1 will flip the corresponding ECC bit
 *   Correctable errors can be injected by flipping 1 bit or the bits within
 *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
 *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
 *   uncorrectable error to be injected.
 */

/*
 * DECLARE_ADDR_MATCH - generate the store/show pair for one address-match
 * field (channel/dimm/rank/bank/page/col) of the injection criteria.
 *
 * Writing "any" stores -1, meaning "don't match on this field"; otherwise
 * the value must parse as a decimal number below @limit.
 * NOTE(review): @value is declared long but passed to kstrtoul(), which
 * takes an unsigned long * - confirm against the kernel's kstrtox API.
 */
#define DECLARE_ADDR_MATCH(param, limit)			\
static ssize_t i7core_inject_store_##param(			\
	struct device *dev,					\
	struct device_attribute *mattr,				\
	const char *data, size_t count)				\
{								\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
	struct i7core_pvt *pvt;					\
	long value;						\
	int rc;							\
								\
	edac_dbg(1, "\n");					\
	pvt = mci->pvt_info;					\
								\
	if (pvt->inject.enable)					\
		disable_inject(mci);				\
								\
	if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
		value = -1;					\
	else {							\
		rc = kstrtoul(data, 10, &value);		\
		if ((rc < 0) || (value >= limit))		\
			return -EIO;				\
	}							\
								\
	pvt->inject.param = value;				\
								\
	return count;						\
}								\
								\
static ssize_t i7core_inject_show_##param(			\
	struct device *dev,					\
	struct device_attribute *mattr,				\
	char *data)						\
{								\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
	struct i7core_pvt *pvt;					\
								\
	pvt = mci->pvt_info;					\
	edac_dbg(1, "pvt=%p\n", pvt);				\
	if (pvt->inject.param < 0)				\
		return sprintf(data, "any\n");			\
	else							\
		return sprintf(data, "%d\n", pvt->inject.param);\
}

/* Wire each store/show pair to a rw sysfs attribute of the same name. */
#define ATTR_ADDR_MATCH(param)				\
	static DEVICE_ATTR(param, S_IRUGO | S_IWUSR,	\
		    i7core_inject_show_##param,		\
		    i7core_inject_store_##param)

DECLARE_ADDR_MATCH(channel, 3);
DECLARE_ADDR_MATCH(dimm, 3);
DECLARE_ADDR_MATCH(rank, 4);
DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);

ATTR_ADDR_MATCH(channel);
ATTR_ADDR_MATCH(dimm);
ATTR_ADDR_MATCH(rank);
ATTR_ADDR_MATCH(bank);
ATTR_ADDR_MATCH(page);
ATTR_ADDR_MATCH(col);

/*
 * write_and_test - write a config dword and read it back until it sticks
 *
 * Retries up to 10 times with a 100 ms delay between attempts before
 * giving up with -EINVAL.
 */
static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
{
	u32 read;
	int count;

	edac_dbg(0, "setting pci %02x:%02x.%x reg=%02x value=%08x\n",
		 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		 where, val);

	for (count = 0; count < 10; count++) {
		if (count)
			msleep(100);
		pci_write_config_dword(dev, where, val);
		pci_read_config_dword(dev, where, &read);

		if (read == val)
			return 0;
	}

	i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
		"write=%08x. Read=%08x\n",
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		where, val, read);

	return -EINVAL;
}

/*
 * This routine prepares the Memory Controller for error injection.
 * The error will be injected when some process tries to write to the
 * memory that matches the given criteria.
 * The criteria can be set in terms of a mask where dimm, rank, bank, page
 * and col can be specified.
 * A -1 value for any of the mask items will make the MCU to ignore
 * that matching criteria for error injection.
 *
 * It should be noticed that the error will only happen after a write operation
 * on a memory that matches the condition. if REPEAT_EN is not enabled at
 * inject mask, then it will produce just one error. Otherwise, it will repeat
 * until the injectmask would be cleaned.
 *
 * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
 *    is reliable enough to check if the MC is using the
 *    three channels. However, this is not clear at the datasheet.
 */
static ssize_t i7core_inject_enable_store(struct device *dev,
					  struct device_attribute *mattr,
					  const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;
	u64 mask = 0;
	int rc;
	long enable;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	/* NOTE(review): @enable is long but kstrtoul() takes unsigned long * */
	rc = kstrtoul(data, 10, &enable);
	if ((rc < 0))
		return 0;

	if (enable) {
		pvt->inject.enable = 1;
	} else {
		disable_inject(mci);
		return count;
	}

	/* Sets pvt->inject.dimm mask */
	if (pvt->inject.dimm < 0)
		mask |= 1LL << 41;
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.dimm & 0x3LL) << 35;
		else
			mask |= (pvt->inject.dimm & 0x1LL) << 36;
	}

	/* Sets pvt->inject.rank mask */
	if (pvt->inject.rank < 0)
		mask |= 1LL << 40;
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.rank & 0x1LL) << 34;
		else
			mask |= (pvt->inject.rank & 0x3LL) << 34;
	}

	/* Sets pvt->inject.bank mask */
	if (pvt->inject.bank < 0)
		mask |= 1LL << 39;
	else
		/*
		 * NOTE(review): 0x15 looks like a typo for 0x1f (bank limit
		 * is 32) - kept as-is since this matches the shipped driver.
		 */
		mask |= (pvt->inject.bank & 0x15LL) << 30;
	/* Sets pvt->inject.page mask */
	if (pvt->inject.page < 0)
		mask |= 1LL << 38;
	else
		mask |= (pvt->inject.page & 0xffff) << 14;

	/* Sets pvt->inject.column mask */
	if (pvt->inject.col < 0)
		mask |= 1LL << 37;
	else
		mask |= (pvt->inject.col & 0x3fff);

	/*
	 * bit    0: REPEAT_EN
	 * bits 1-2: MASK_HALF_CACHELINE
	 * bit    3: INJECT_ECC
	 * bit    4: INJECT_ADDR_PARITY
	 */
	injectmask = (pvt->inject.type & 1) |
		     (pvt->inject.section & 0x3) << 1 |
		     (pvt->inject.type & 0x6) << (3 - 1);

	/* Unlock writes to registers - this register is write only */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 0x2);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH, mask);
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, injectmask);

	/*
	 * This is something undocumented, based on my tests
	 * Without writing 8 to this register, errors aren't injected. Not sure
	 * why.
	 */
	pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, 8);

	edac_dbg(0, "Error inject addr match 0x%016llx, ecc 0x%08x, inject 0x%08x\n",
		 mask, pvt->inject.eccmask, injectmask);

	return count;
}

/* Read back the channel's injection register to report the enable state. */
static ssize_t i7core_inject_enable_show(struct device *dev,
					 struct device_attribute *mattr,
					 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, &injectmask);

	edac_dbg(0, "Inject error read: 0x%018x\n", injectmask);

	if (injectmask & 0x0c)
		pvt->inject.enable = 1;

	return sprintf(data, "%d\n", pvt->inject.enable);
}

/*
 * DECLARE_COUNTER - generate a read-only sysfs show routine for one
 * per-UDIMM corrected-error counter.  Reports "data unavailable" until a
 * count was latched, or always in registered-DIMM mode.
 */
#define DECLARE_COUNTER(param)				\
static ssize_t i7core_show_counter_##param(		\
	struct device *dev,				\
	struct device_attribute *mattr,			\
	char *data)					\
{							\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);\
	struct i7core_pvt *pvt = mci->pvt_info;		\
							\
	edac_dbg(1, "\n");				\
	if (!pvt->ce_count_available || (pvt->is_registered)) \
		return sprintf(data, "data unavailable\n");   \
	return sprintf(data, "%lu\n",			\
			pvt->udimm_ce_count[param]);	\
}

#define ATTR_COUNTER(param)				\
	static DEVICE_ATTR(udimm##param, S_IRUGO | S_IWUSR,	\
		    i7core_show_counter_##param,	\
		    NULL)

DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);

ATTR_COUNTER(0);
ATTR_COUNTER(1);
ATTR_COUNTER(2);

/*
 * inject_addrmatch device sysfs struct
 */

static struct attribute *i7core_addrmatch_attrs[] = {
	&dev_attr_channel.attr,
	&dev_attr_dimm.attr,
	&dev_attr_rank.attr,
	&dev_attr_bank.attr,
	&dev_attr_page.attr,
	&dev_attr_col.attr,
	NULL
};

static struct attribute_group addrmatch_grp = {
	.attrs	= i7core_addrmatch_attrs,
};

static const struct attribute_group *addrmatch_groups[] = {
	&addrmatch_grp,
	NULL
};

/* Free the dynamically allocated inject_addrmatch device. */
static void addrmatch_release(struct device *device)
{
	edac_dbg(1, "Releasing device %s\n", dev_name(device));
	kfree(device);
}

static struct device_type addrmatch_type = {
	.groups		= addrmatch_groups,
	.release	=
addrmatch_release, }; /* * all_channel_counts sysfs struct */ static struct attribute *i7core_udimm_counters_attrs[] = { &dev_attr_udimm0.attr, &dev_attr_udimm1.attr, &dev_attr_udimm2.attr, NULL }; static struct attribute_group all_channel_counts_grp = { .attrs = i7core_udimm_counters_attrs, }; static const struct attribute_group *all_channel_counts_groups[] = { &all_channel_counts_grp, NULL }; static void all_channel_counts_release(struct device *device) { edac_dbg(1, "Releasing device %s\n", dev_name(device)); kfree(device); } static struct device_type all_channel_counts_type = { .groups = all_channel_counts_groups, .release = all_channel_counts_release, }; /* * inject sysfs attributes */ static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR, i7core_inject_section_show, i7core_inject_section_store); static DEVICE_ATTR(inject_type, S_IRUGO | S_IWUSR, i7core_inject_type_show, i7core_inject_type_store); static DEVICE_ATTR(inject_eccmask, S_IRUGO | S_IWUSR, i7core_inject_eccmask_show, i7core_inject_eccmask_store); static DEVICE_ATTR(inject_enable, S_IRUGO | S_IWUSR, i7core_inject_enable_show, i7core_inject_enable_store); static struct attribute *i7core_dev_attrs[] = { &dev_attr_inject_section.attr, &dev_attr_inject_type.attr, &dev_attr_inject_eccmask.attr, &dev_attr_inject_enable.attr, NULL }; ATTRIBUTE_GROUPS(i7core_dev); static int i7core_create_sysfs_devices(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; int rc; pvt->addrmatch_dev = kzalloc(sizeof(*pvt->addrmatch_dev), GFP_KERNEL); if (!pvt->addrmatch_dev) return -ENOMEM; pvt->addrmatch_dev->type = &addrmatch_type; pvt->addrmatch_dev->bus = mci->dev.bus; device_initialize(pvt->addrmatch_dev); pvt->addrmatch_dev->parent = &mci->dev; dev_set_name(pvt->addrmatch_dev, "inject_addrmatch"); dev_set_drvdata(pvt->addrmatch_dev, mci); edac_dbg(1, "creating %s\n", dev_name(pvt->addrmatch_dev)); rc = device_add(pvt->addrmatch_dev); if (rc < 0) return rc; if (!pvt->is_registered) { pvt->chancounts_dev = 
kzalloc(sizeof(*pvt->chancounts_dev), GFP_KERNEL); if (!pvt->chancounts_dev) { put_device(pvt->addrmatch_dev); device_del(pvt->addrmatch_dev); return -ENOMEM; } pvt->chancounts_dev->type = &all_channel_counts_type; pvt->chancounts_dev->bus = mci->dev.bus; device_initialize(pvt->chancounts_dev); pvt->chancounts_dev->parent = &mci->dev; dev_set_name(pvt->chancounts_dev, "all_channel_counts"); dev_set_drvdata(pvt->chancounts_dev, mci); edac_dbg(1, "creating %s\n", dev_name(pvt->chancounts_dev)); rc = device_add(pvt->chancounts_dev); if (rc < 0) return rc; } return 0; } static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; edac_dbg(1, "\n"); if (!pvt->is_registered) { put_device(pvt->chancounts_dev); device_del(pvt->chancounts_dev); } put_device(pvt->addrmatch_dev); device_del(pvt->addrmatch_dev); } /**************************************************************************** Device initialization routines: put/get, init/exit ****************************************************************************/ /* * i7core_put_all_devices 'put' all the devices that we have * reserved via 'get' */ static void i7core_put_devices(struct i7core_dev *i7core_dev) { int i; edac_dbg(0, "\n"); for (i = 0; i < i7core_dev->n_devs; i++) { struct pci_dev *pdev = i7core_dev->pdev[i]; if (!pdev) continue; edac_dbg(0, "Removing dev %02x:%02x.%d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); pci_dev_put(pdev); } } static void i7core_put_all_devices(void) { struct i7core_dev *i7core_dev, *tmp; list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) { i7core_put_devices(i7core_dev); free_i7core_dev(i7core_dev); } } static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table) { struct pci_dev *pdev = NULL; int i; /* * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses * aren't announced by acpi. 
	 * So, we need to use a legacy scan probing
	 * to detect them
	 */
	while (table && table->descr) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      table->descr[0].dev_id, NULL);
		if (unlikely(!pdev)) {
			for (i = 0; i < MAX_SOCKET_BUSES; i++)
				pcibios_scan_specific_bus(255-i);
		}
		pci_dev_put(pdev);
		table++;
	}
}

/* Return the highest PCI bus number currently known to the PCI core. */
static unsigned i7core_pci_lastbus(void)
{
	int last_bus = 0, bus;
	struct pci_bus *b = NULL;

	while ((b = pci_find_next_bus(b)) != NULL) {
		bus = b->number;
		edac_dbg(0, "Found bus %d\n", bus);
		if (bus > last_bus)
			last_bus = bus;
	}

	edac_dbg(0, "Last bus %d\n", last_bus);

	return last_bus;
}

/*
 * i7core_get_all_devices	Find and perform 'get' operation on the MCH's
 *				device/functions we want to reference for this driver
 *
 *			Need to 'get' device 16 func 1 and func 2
 */
static int i7core_get_onedevice(struct pci_dev **prev,
				const struct pci_id_table *table,
				const unsigned devno,
				const unsigned last_bus)
{
	struct i7core_dev *i7core_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];

	struct pci_dev *pdev = NULL;
	u8 bus = 0;
	u8 socket = 0;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	/*
	 * On Xeon 55xx, the Intel QuickPath Arch Generic Non-core regs
	 * is at addr 8086:2c40, instead of 8086:2c41. So, we need
	 * to probe for the alternate address in case of failure
	 */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) {
		pci_dev_get(*prev);	/* pci_get_device will put it */
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT,
				      *prev);
	}

	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE &&
	    !pdev) {
		pci_dev_get(*prev);	/* pci_get_device will put it */
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
				      *prev);
	}

	if (!pdev) {
		if (*prev) {
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		if (devno == 0)
			return -ENODEV;

		i7core_printk(KERN_INFO,
			"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
			dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		/* End of list, leave */
		return -ENODEV;
	}
	bus = pdev->bus->number;

	/* The socket index is derived from the bus number. */
	socket = last_bus - bus;

	i7core_dev = get_i7core_dev(socket);
	if (!i7core_dev) {
		i7core_dev = alloc_i7core_dev(socket, table);
		if (!i7core_dev) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
	}

	if (i7core_dev->pdev[devno]) {
		i7core_printk(KERN_ERR,
			"Duplicated device for "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	i7core_dev->pdev[devno] = pdev;

	/* Sanity check */
	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
			PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		i7core_printk(KERN_ERR,
			"Device PCI ID %04x:%04x "
			"has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			bus, dev_descr->dev, dev_descr->func);
		return -ENODEV;
	}

	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		i7core_printk(KERN_ERR,
			"Couldn't enable "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	edac_dbg(0, "Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
		 socket, bus, dev_descr->dev,
		 dev_descr->func,
		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	/*
	 * As stated on drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not %NULL. So, as we need
	 * to get all devices up to null, we need to do a get for the device
	 */
	pci_dev_get(pdev);

	*prev = pdev;

	return 0;
}

/* Walk every ID table and 'get' each expected PCI function per socket. */
static int i7core_get_all_devices(void)
{
	int i, rc, last_bus;
	struct pci_dev *pdev = NULL;
	const struct pci_id_table *table = pci_dev_table;

	last_bus = i7core_pci_lastbus();

	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			pdev = NULL;
			do {
				rc = i7core_get_onedevice(&pdev, table, i,
							  last_bus);
				if (rc < 0) {
					if (i == 0) {
						i = table->n_devs;
						break;
					}
					i7core_put_all_devices();
					return -ENODEV;
				}
			} while (pdev);
		}
		table++;
	}

	return 0;
}

/*
 * mci_bind_devs - cache per-function pci_dev pointers inside the mci
 * private data (slot 3 = MCR funcs, slots 4+ = channel funcs, 0.0 = the
 * non-core device used for family detection).
 */
static int mci_bind_devs(struct mem_ctl_info *mci,
			 struct i7core_dev *i7core_dev)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int i, func, slot;
	char *family;

	pvt->is_registered = false;
	pvt->enable_scrub = false;
	for (i = 0; i < i7core_dev->n_devs; i++) {
		pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;

		func = PCI_FUNC(pdev->devfn);
		slot = PCI_SLOT(pdev->devfn);
		if (slot == 3) {
			if (unlikely(func > MAX_MCR_FUNC))
				goto error;
			pvt->pci_mcr[func] = pdev;
		} else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
			if (unlikely(func > MAX_CHAN_FUNC))
				goto error;
			pvt->pci_ch[slot - 4][func] = pdev;
		} else if (!slot && !func) {
			pvt->pci_noncore = pdev;

			/* Detect the processor family */
			switch (pdev->device) {
			case PCI_DEVICE_ID_INTEL_I7_NONCORE:
				family = "Xeon 35xx/ i7core";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT:
				family = "i7-800/i5-700";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE:
				family = "Xeon 34xx";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT:
				family = "Xeon 55xx";
				pvt->enable_scrub = true;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2:
				family =
					"Xeon 56xx / i7-900";
				pvt->enable_scrub = true;
				break;
			default:
				family = "unknown";
				pvt->enable_scrub = false;
			}
			edac_dbg(0, "Detected a processor type %s\n", family);
		} else
			goto error;

		edac_dbg(0, "Associated fn %d.%d, dev = %p, socket %d\n",
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			 pdev, i7core_dev->socket);

		/* Dev 3 func 2 only exists when registered DIMMs are used */
		if (PCI_SLOT(pdev->devfn) == 3 &&
			PCI_FUNC(pdev->devfn) == 2)
			pvt->is_registered = true;
	}

	return 0;

error:
	i7core_printk(KERN_ERR, "Device %d, function %d "
		      "is out of the expected range\n",
		      slot, func);
	return -EINVAL;
}

/****************************************************************************
			Error check routines
 ****************************************************************************/

/*
 * i7core_rdimm_update_ce_count - fold a fresh set of per-DIMM corrected
 * error counter readings for one channel into the running totals.
 *
 * The hardware counters are 15-bit, so a negative delta means the counter
 * wrapped and 0x7fff is added back.  Each nonzero delta is reported to the
 * EDAC core as corrected errors.
 */
static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
					 const int chan,
					 const int new0, const int new1,
					 const int new2)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int add0 = 0, add1 = 0, add2 = 0;
	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */
		add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
		add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
		add0 = new0 - pvt->rdimm_last_ce_count[chan][0];

		if (add2 < 0)
			add2 += 0x7fff;
		pvt->rdimm_ce_count[chan][2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->rdimm_ce_count[chan][1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->rdimm_ce_count[chan][0] += add0;
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->rdimm_last_ce_count[chan][2] = new2;
	pvt->rdimm_last_ce_count[chan][1] = new1;
	pvt->rdimm_last_ce_count[chan][0] = new0;

	/*updated the edac core */
	if (add0 != 0)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add0,
				     0, 0, 0,
				     chan, 0, -1, "error", "");
	if (add1 != 0)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add1,
				     0, 0, 0,
				     chan, 1, -1, "error", "");
	if (add2 != 0)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add2,
				     0, 0, 0,
				     chan, 2, -1, "error", "");
}

/*
 * i7core_rdimm_check_mc_ecc_err - read the six MC_COR_ECC_CNT registers
 * (dev 3 func 2) and update the per-channel corrected error counts.
 */
static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv[3][2];
	int i, new0, new1, new2;

	/*Read DEV 3: FUN 2:  MC_COR_ECC_CNT regs directly*/
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
								&rcv[0][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
								&rcv[0][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
								&rcv[1][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
								&rcv[1][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
								&rcv[2][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
								&rcv[2][1]);
	for (i = 0 ; i < 3; i++) {
		edac_dbg(3, "MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
			 (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
		/*if the channel has 3 dimms*/
		if (pvt->channel[i].dimms > 2) {
			new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
			new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
		} else {
			new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
				DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
				DIMM_BOT_COR_ERR(rcv[i][1]);
			new2 = 0;
		}

		i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
	}
}

/* This function is based on the device 3 function 4 registers as described on:
 * Intel Xeon Processor 5500 Series Datasheet Volume 2
 *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
 * also available at:
 *	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */
static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv1, rcv0;
	int new0, new1, new2;

	if (!pvt->pci_mcr[4]) {
		edac_dbg(0, "MCR registers not found\n");
		return;
	}

	/* Corrected test errors */
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);

	/* Store the new values */
	new2 = DIMM2_COR_ERR(rcv1);
	new1 = DIMM1_COR_ERR(rcv0);
	new0 = DIMM0_COR_ERR(rcv0);

	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters; 15-bit counters may wrap (+0x7fff) */
		int add0, add1, add2;

		add2 = new2 - pvt->udimm_last_ce_count[2];
		add1 = new1 - pvt->udimm_last_ce_count[1];
		add0 = new0 - pvt->udimm_last_ce_count[0];

		if (add2 < 0)
			add2 += 0x7fff;
		pvt->udimm_ce_count[2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->udimm_ce_count[1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->udimm_ce_count[0] += add0;

		if (add0 | add1 | add2)
			i7core_printk(KERN_ERR, "New Corrected error(s): "
				      "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
				      add0, add1, add2);
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->udimm_last_ce_count[2] = new2;
	pvt->udimm_last_ce_count[1] = new1;
	pvt->udimm_last_ce_count[0] = new0;
}

/*
 * According with tables E-11 and E-12 of chapter E.3.3 of Intel 64 and IA-32
 * Architectures Software Developer’s Manual Volume 3B.
 * Nehalem are defined as family 0x06, model 0x1a
 *
 * The MCA registers used here are the following ones:
 *     struct mce field	MCA Register
 *     m->status	MSR_IA32_MC8_STATUS
 *     m->addr		MSR_IA32_MC8_ADDR
 *     m->misc		MSR_IA32_MC8_MISC
 * In the case of Nehalem, the error information is masked at .status and .misc
 * fields
 */
static void i7core_mce_output_error(struct mem_ctl_info *mci,
				    const struct mce *m)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	char *optype, *err;
	enum hw_event_mc_err_type tp_event;
	unsigned long error = m->status & 0x1ff0000l;
	bool uncorrected_error = m->mcgstatus & 1ll << 61;
	bool ripv = m->mcgstatus & 1;
	u32 optypenum = (m->status >> 4) & 0x07;
	u32 core_err_cnt = (m->status >> 38) & 0x7fff;
	u32 dimm = (m->misc >> 16) & 0x3;
	u32 channel = (m->misc >> 18) & 0x3;
	u32 syndrome = m->misc >> 32;
	u32 errnum = find_first_bit(&error, 32);

	if (uncorrected_error) {
		/* With RIPV set, execution can continue - treat as fatal */
		if (ripv)
			tp_event = HW_EVENT_ERR_FATAL;
		else
			tp_event = HW_EVENT_ERR_UNCORRECTED;
	} else {
		tp_event = HW_EVENT_ERR_CORRECTED;
	}

	switch (optypenum) {
	case 0:
		optype = "generic undef request";
		break;
	case 1:
		optype = "read error";
		break;
	case 2:
		optype = "write error";
		break;
	case 3:
		optype = "addr/cmd error";
		break;
	case 4:
		optype = "scrubbing error";
		break;
	default:
		optype = "reserved";
		break;
	}

	switch (errnum) {
	case 16:
		err = "read ECC error";
		break;
	case 17:
		err = "RAS ECC error";
		break;
	case 18:
		err = "write parity error";
		break;
	case 19:
		err = "redundacy loss";
		break;
	case 20:
		err = "reserved";
		break;
	case 21:
		err = "memory range error";
		break;
	case 22:
		err = "RTID out of range";
		break;
	case 23:
		err = "address parity error";
		break;
	case 24:
		err = "byte enable parity error";
		break;
	default:
		err = "unknown";
	}

	/*
	 * Call the helper to output message
	 * FIXME: what to do if core_err_cnt > 1? Currently, it generates
	 *	  only one event
	 */
	if (uncorrected_error || !pvt->is_registered)
		edac_mc_handle_error(tp_event, mci, core_err_cnt,
				     m->addr >> PAGE_SHIFT,
				     m->addr & ~PAGE_MASK,
				     syndrome,
				     channel, dimm, -1,
				     err, optype);
}

/*
 *	i7core_check_error	Retrieve and process errors reported by the
 *				hardware. Called by the Core module.
 */
static void i7core_check_error(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int i;
	unsigned count = 0;
	struct mce *m;

	/*
	 * MCE first step: Copy all mce errors into a temporary buffer
	 * We use a double buffering here, to reduce the risk of
	 * losing an error.
	 */
	smp_rmb();
	count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
		% MCE_LOG_LEN;
	if (!count)
		goto check_ce_error;

	m = pvt->mce_outentry;
	/* The ring buffer may wrap; copy the tail piece first */
	if (pvt->mce_in + count > MCE_LOG_LEN) {
		unsigned l = MCE_LOG_LEN - pvt->mce_in;

		memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
		smp_wmb();
		pvt->mce_in = 0;
		count -= l;
		m += l;
	}
	memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
	smp_wmb();
	pvt->mce_in += count;

	smp_rmb();
	if (pvt->mce_overrun) {
		i7core_printk(KERN_ERR, "Lost %d memory errors\n",
			      pvt->mce_overrun);
		smp_wmb();
		pvt->mce_overrun = 0;
	}

	/*
	 * MCE second step: parse errors and display
	 */
	for (i = 0; i < count; i++)
		i7core_mce_output_error(mci, &pvt->mce_outentry[i]);

	/*
	 * Now, let's increment CE error counts
	 */
check_ce_error:
	if (!pvt->is_registered)
		i7core_udimm_check_mc_ecc_err(mci);
	else
		i7core_rdimm_check_mc_ecc_err(mci);
}

/*
 * i7core_mce_check_error	Replicates mcelog routine to get errors
 *				This routine simply queues mcelog errors, and
 *				return. The error itself should be handled later
 *				by i7core_check_error.
 * WARNING: As this routine should be called at NMI time, extra care should
 * be taken to avoid deadlocks, and to be as fast as possible.
 */
static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
				  void *data)
{
	struct mce *mce = (struct mce *)data;
	struct i7core_dev *i7_dev;
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;

	i7_dev = get_i7core_dev(mce->socketid);
	if (!i7_dev)
		return NOTIFY_BAD;

	mci = i7_dev->mci;
	pvt = mci->pvt_info;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller
	 */
	if (((mce->status & 0xffff) >> 7) != 1)
		return NOTIFY_DONE;

	/* Bank 8 registers are the only ones that we know how to handle */
	if (mce->bank != 8)
		return NOTIFY_DONE;

	smp_rmb();
	/* Ring buffer full: count the loss and bail out */
	if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
		smp_wmb();
		pvt->mce_overrun++;
		return NOTIFY_DONE;
	}

	/* Copy memory error at the ringbuffer */
	memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
	smp_wmb();
	pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;

	/* Handle fatal errors immediately */
	if (mce->mcgstatus & 1)
		i7core_check_error(mci);

	/* Advise mcelog that the errors were handled */
	return NOTIFY_STOP;
}

static struct notifier_block i7_mce_dec = {
	.notifier_call	= i7core_mce_check_error,
};

/* SMBIOS type-17 (Memory Device) record layout, used by decode_dclk(). */
struct memdev_dmi_entry {
	u8 type;
	u8 length;
	u16 handle;
	u16 phys_mem_array_handle;
	u16 mem_err_info_handle;
	u16 total_width;
	u16 data_width;
	u16 size;
	u8 form;
	u8 device_set;
	u8 device_locator;
	u8 bank_locator;
	u8 memory_type;
	u16 type_detail;
	u16 speed;
	u8 manufacturer;
	u8 serial_number;
	u8 asset_tag;
	u8 part_number;
	u8 attributes;
	u32 extended_size;
	u16 conf_mem_clk_speed;
} __attribute__((__packed__));

/*
 * Decode the DRAM Clock Frequency, be paranoid, make sure that all
 * memory devices show the same speed, and if they don't then consider
 * all speeds to be invalid.
 */
static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
{
	int *dclk_freq = _dclk_freq;
	u16 dmi_mem_clk_speed;

	/* -1 means a previous record already invalidated the speed */
	if (*dclk_freq == -1)
		return;

	if (dh->type == DMI_ENTRY_MEM_DEVICE) {
		struct memdev_dmi_entry *memdev_dmi_entry =
			(struct memdev_dmi_entry *)dh;
		unsigned long conf_mem_clk_speed_offset =
			(unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
			(unsigned long)&memdev_dmi_entry->type;
		unsigned long speed_offset =
			(unsigned long)&memdev_dmi_entry->speed -
			(unsigned long)&memdev_dmi_entry->type;

		/* Check that a DIMM is present */
		if (memdev_dmi_entry->size == 0)
			return;

		/*
		 * Pick the configured speed if it's available, otherwise
		 * pick the DIMM speed, or we don't have a speed.
		 */
		if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
			dmi_mem_clk_speed =
				memdev_dmi_entry->conf_mem_clk_speed;
		} else if (memdev_dmi_entry->length > speed_offset) {
			dmi_mem_clk_speed = memdev_dmi_entry->speed;
		} else {
			*dclk_freq = -1;
			return;
		}

		if (*dclk_freq == 0) {
			/* First pass, speed was 0 */
			if (dmi_mem_clk_speed > 0) {
				/* Set speed if a valid speed is read */
				*dclk_freq = dmi_mem_clk_speed;
			} else {
				/* Otherwise we don't have a valid speed */
				*dclk_freq = -1;
			}
		} else if (*dclk_freq > 0 &&
			   *dclk_freq != dmi_mem_clk_speed) {
			/*
			 * If we have a speed, check that all DIMMS are the same
			 * speed, otherwise set the speed as invalid.
			 */
			*dclk_freq = -1;
		}
	}
}

/*
 * The default DCLK frequency is used as a fallback if we
 * fail to find anything reliable in the DMI. The value
 * is taken straight from the datasheet.
 */
#define DEFAULT_DCLK_FREQ 800

/* Walk the DMI tables for the DRAM clock; fall back to the default. */
static int get_dclk_freq(void)
{
	int dclk_freq = 0;

	dmi_walk(decode_dclk, (void *)&dclk_freq);

	if (dclk_freq < 1)
		return DEFAULT_DCLK_FREQ;

	return dclk_freq;
}

/*
 * set_sdram_scrub_rate		This routine sets byte/sec bandwidth scrub rate
 *				to hardware according to SCRUBINTERVAL formula
 *				found in datasheet.
 */
static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u32 dw_scrub;
	u32 dw_ssr;

	/* Get data from the MC register, function 2 */
	pdev = pvt->pci_mcr[2];
	if (!pdev)
		return -ENODEV;

	pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);

	if (new_bw == 0) {
		/* Prepare to disable petrol scrub */
		dw_scrub &= ~STARTSCRUB;
		/* Stop the patrol scrub engine */
		write_and_test(pdev, MC_SCRUB_CONTROL,
			       dw_scrub & ~SCRUBINTERVAL_MASK);

		/* Get current status of scrub rate and set bit to disable */
		pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
		dw_ssr &= ~SSR_MODE_MASK;
		dw_ssr |= SSR_MODE_DISABLE;
	} else {
		const int cache_line_size = 64;
		const u32 freq_dclk_mhz = pvt->dclk_freq;
		unsigned long long scrub_interval;
		/*
		 * Translate the desired scrub rate to a register value and
		 * program the corresponding register value.
		 */
		scrub_interval = (unsigned long long)freq_dclk_mhz *
			cache_line_size * 1000000;
		do_div(scrub_interval, new_bw);

		if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
			return -EINVAL;

		dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;

		/* Start the patrol scrub engine */
		pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
				       STARTSCRUB | dw_scrub);

		/* Get current status of scrub rate and set bit to enable */
		pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
		dw_ssr &= ~SSR_MODE_MASK;
		dw_ssr |= SSR_MODE_ENABLE;
	}
	/* Disable or enable scrubbing */
	pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);

	return new_bw;
}

/*
 * get_sdram_scrub_rate		This routine convert current scrub rate value
 *				into byte/sec bandwidth according to
 *				SCRUBINTERVAL formula found in datasheet.
 */
static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	const u32 cache_line_size = 64;
	const u32 freq_dclk_mhz = pvt->dclk_freq;
	unsigned long long scrub_rate;
	u32 scrubval;

	/* Get data from the MC register, function 2 */
	pdev = pvt->pci_mcr[2];
	if (!pdev)
		return -ENODEV;

	/* Get current scrub control data */
	pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);

	/* Mask highest 8-bits to 0 */
	scrubval &= SCRUBINTERVAL_MASK;
	if (!scrubval)
		return 0;

	/* Calculate scrub rate value into byte/sec bandwidth */
	scrub_rate = (unsigned long long)freq_dclk_mhz *
		1000000 * cache_line_size;
	do_div(scrub_rate, scrubval);
	return (int)scrub_rate;
}

/* Unlock MC config writes and hook the scrub-rate ops into the mci. */
static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 pci_lock;

	/* Unlock writes to pci registers */
	pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
	pci_lock &= ~0x3;
	pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
			       pci_lock | MC_CFG_UNLOCK);

	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
}

/* Re-lock MC config writes on teardown. */
static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 pci_lock;

	/* Lock writes to pci registers */
	pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
	pci_lock &= ~0x3;
	pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
			       pci_lock | MC_CFG_LOCK);
}

/* Allocate the generic EDAC PCI control; failure is only a warning. */
static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
{
	pvt->i7core_pci = edac_pci_create_generic_ctl(
						&pvt->i7core_dev->pdev[0]->dev,
						EDAC_MOD_STR);
	if (unlikely(!pvt->i7core_pci))
		i7core_printk(KERN_WARNING,
			      "Unable to setup PCI error report via EDAC\n");
}

static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
{
	if (likely(pvt->i7core_pci))
		edac_pci_release_generic_ctl(pvt->i7core_pci);
	else
		i7core_printk(KERN_ERR,
			      "Couldn't find mem_ctl_info for socket %d\n",
			      pvt->i7core_dev->socket);
	pvt->i7core_pci = NULL;
}
/*
 * i7core_unregister_mci - undo i7core_register_mci() for one socket.
 * @i7core_dev:	per-socket device aggregate
 *
 * Teardown is the reverse of registration order: disable scrubbing,
 * release the EDAC PCI control, remove sysfs nodes, detach from the EDAC
 * core, then free the mci (and the kasprintf()'d ctl_name).  Safe to call
 * when no mci was ever attached (logs and returns).
 */
static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
{
	struct mem_ctl_info *mci = i7core_dev->mci;
	struct i7core_pvt *pvt;

	if (unlikely(!mci || !mci->pvt_info)) {
		edac_dbg(0, "MC: dev = %p\n", &i7core_dev->pdev[0]->dev);

		i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	pvt = mci->pvt_info;

	edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);

	/* Disable scrubrate setting */
	if (pvt->enable_scrub)
		disable_sdram_scrub_setting(mci);

	/* Disable EDAC polling */
	i7core_pci_ctl_release(pvt);

	/* Remove MC sysfs nodes */
	i7core_delete_sysfs_devices(mci);
	edac_mc_del_mc(mci->pdev);

	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	kfree(mci->ctl_name);	/* allocated by kasprintf() at registration */
	edac_mc_free(mci);
	i7core_dev->mci = NULL;
}

/*
 * i7core_register_mci - allocate and register one mci with the EDAC core.
 * @i7core_dev:	per-socket device aggregate discovered earlier
 *
 * Builds a two-layer (channel x slot) mci, binds the socket's PCI devices
 * to it, reads the DIMM configuration, optionally enables scrub-rate
 * control, adds the mci to the EDAC core and creates the driver's extra
 * sysfs nodes.  Ordering matters: edac_mc_add_mc_with_groups() must come
 * after the mci fields are filled in, and the sysfs nodes after add.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or a negative
 * error code; on failure nothing remains registered.
 */
static int i7core_register_mci(struct i7core_dev *i7core_dev)
{
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;
	int rc;
	struct edac_mc_layer layers[2];

	/* allocate a new MC control structure */

	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = NUM_CHANS;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = MAX_DIMMS;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(i7core_dev->socket, ARRAY_SIZE(layers), layers,
			    sizeof(*pvt));
	if (unlikely(!mci))
		return -ENOMEM;

	edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	/* Associates i7core_dev and mci for future usage */
	pvt->i7core_dev = i7core_dev;
	i7core_dev->mci = mci;

	/*
	 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
	 * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
	 * memory channels
	 */
	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7core_edac.c";
	mci->mod_ver = I7CORE_REVISION;
	/* ctl_name is heap-allocated; freed in i7core_unregister_mci()/fail0 */
	mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d", i7core_dev->socket);
	mci->dev_name = pci_name(i7core_dev->pdev[0]);
	mci->ctl_page_to_phys = NULL;

	/* Store pci devices at mci for faster access */
	rc = mci_bind_devs(mci, i7core_dev);
	if (unlikely(rc < 0))
		goto fail0;


	/* Get dimm basic config */
	get_dimm_config(mci);
	/* record ptr to the generic device */
	mci->pdev = &i7core_dev->pdev[0]->dev;
	/* Set the function pointer to an actual operation function */
	mci->edac_check = i7core_check_error;

	/* Enable scrubrate setting */
	if (pvt->enable_scrub)
		enable_sdram_scrub_setting(mci);

	/* add this new MC control structure to EDAC's list of MCs */
	if (unlikely(edac_mc_add_mc_with_groups(mci, i7core_dev_groups))) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
		 */

		rc = -EINVAL;
		goto fail0;
	}
	if (i7core_create_sysfs_devices(mci)) {
		edac_dbg(0, "MC: failed to create sysfs nodes\n");
		/* mci was already added above, so it must be removed here */
		edac_mc_del_mc(mci->pdev);
		rc = -EINVAL;
		goto fail0;
	}

	/* Default error mask is any memory */
	pvt->inject.channel = 0;
	pvt->inject.dimm = -1;
	pvt->inject.rank = -1;
	pvt->inject.bank = -1;
	pvt->inject.page = -1;
	pvt->inject.col = -1;

	/* allocating generic PCI control info */
	i7core_pci_ctl_create(pvt);

	/* DCLK for scrub rate setting */
	pvt->dclk_freq = get_dclk_freq();

	return 0;

fail0:
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	i7core_dev->mci = NULL;
	return rc;
}

/*
 * i7core_probe	Probe for ONE instance of device to see if it is
 *		present.
* return:
 *	0 for FOUND a device
 *	< 0 for error code
 */
static int i7core_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc, count = 0;
	struct i7core_dev *i7core_dev;

	/* get the pci devices we want to reserve for our use */
	mutex_lock(&i7core_edac_lock);

	/*
	 * All memory controllers are allocated at the first pass.
	 * A second probe call (another matching PCI id) has nothing to do.
	 */
	if (unlikely(probed >= 1)) {
		mutex_unlock(&i7core_edac_lock);
		return -ENODEV;
	}
	probed++;

	rc = i7core_get_all_devices();
	if (unlikely(rc < 0))
		goto fail0;

	/* Register one mci per discovered socket, counting as we go */
	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		count++;
		rc = i7core_register_mci(i7core_dev);
		if (unlikely(rc < 0))
			goto fail1;
	}

	/*
	 * Nehalem-EX uses a different memory controller. However, as the
	 * memory controller is not visible on some Nehalem/Nehalem-EP, we
	 * need to indirectly probe via a X58 PCI device. The same devices
	 * are found on (some) Nehalem-EX. So, on those machines, the
	 * probe routine needs to return -ENODEV, as the actual Memory
	 * Controller registers won't be detected.
	 */
	if (!count) {
		rc = -ENODEV;
		goto fail1;
	}

	i7core_printk(KERN_INFO,
		      "Driver loaded, %d memory controller(s) found.\n",
		      count);

	mutex_unlock(&i7core_edac_lock);
	return 0;

fail1:
	/* Unwind any sockets registered before the failure */
	list_for_each_entry(i7core_dev, &i7core_edac_list, list)
		i7core_unregister_mci(i7core_dev);

	i7core_put_all_devices();
fail0:
	mutex_unlock(&i7core_edac_lock);
	return rc;
}

/*
 * i7core_remove	destructor for one instance of device
 *
 */
static void i7core_remove(struct pci_dev *pdev)
{
	struct i7core_dev *i7core_dev;

	edac_dbg(0, "\n");

	/*
	 * we have a trouble here: pdev value for removal will be wrong, since
	 * it will point to the X58 register used to detect that the machine
	 * is a Nehalem or upper design. However, due to the way several PCI
	 * devices are grouped together to provide MC functionality, we need
	 * to use a different method for releasing the devices
	 */

	mutex_lock(&i7core_edac_lock);

	/* Nothing to do if probe never completed (or remove already ran) */
	if (unlikely(!probed)) {
		mutex_unlock(&i7core_edac_lock);
		return;
	}

	list_for_each_entry(i7core_dev, &i7core_edac_list, list)
		i7core_unregister_mci(i7core_dev);

	/* Release PCI resources */
	i7core_put_all_devices();

	probed--;

	mutex_unlock(&i7core_edac_lock);
}

MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);

/*
 * i7core_driver	pci_driver structure for this module
 *
 */
static struct pci_driver i7core_driver = {
	.name     = "i7core_edac",
	.probe    = i7core_probe,
	.remove   = i7core_remove,
	.id_table = i7core_pci_tbl,
};

/*
 * i7core_init		Module entry function
 *			Try to initialize this module for its devices
 */
static int __init i7core_init(void)
{
	int pci_rc;

	edac_dbg(2, "\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	if (use_pci_fixup)
		i7core_xeon_pci_fixup(pci_dev_table);

	pci_rc = pci_register_driver(&i7core_driver);

	/* Only hook into the MCE decode chain once the driver registered */
	if (pci_rc >= 0) {
		mce_register_decode_chain(&i7_mce_dec);
		return 0;
	}

	i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
		      pci_rc);

	return pci_rc;
}

/*
 * i7core_exit()	Module exit function
 *			Unregister the driver
 */
static void __exit i7core_exit(void)
{
	edac_dbg(2, "\n");
	pci_unregister_driver(&i7core_driver);
	mce_unregister_decode_chain(&i7_mce_dec);
}

module_init(i7core_init);
module_exit(i7core_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
		   I7CORE_REVISION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0